diff --git a/CHANGELOG.rst b/CHANGELOG.rst index edcde01a..09741e70 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,8 +11,8 @@ First official release of TorchPhysics on PyPI. Version 1.0.1 ============= - Updated documentation and error messages - - Simplyfied creation/definition of DeepONets - - Add more evalution types for the DeepONet + - Simplified creation/definition of DeepONets + - Add more evaluation types for the DeepONet Version 1.0.2 ============= diff --git a/README.rst b/README.rst index 10b62d2e..9503867b 100644 --- a/README.rst +++ b/README.rst @@ -80,7 +80,7 @@ to have a look at the following sections: Installation ============ -TorchPhysics reqiueres the follwing dependencies to be installed: +TorchPhysics requires the following dependencies to be installed: - Python >= 3.8 - PyTorch_ >= 2.0.0 @@ -122,7 +122,7 @@ at the Robert Bosch GmbH, for support and supervision while creating this librar Contribute ========== -If you are missing a feature or detect a bug or unexpected behaviour while using this library, feel free to open +If you are missing a feature or detect a bug or unexpected behavior while using this library, feel free to open an issue or a pull request in GitHub_ or contact the authors. Since we developed the code as a student project during a seminar, we cannot guarantee every feature to work properly. However, we are happy about all contributions since we aim to develop a reliable code basis and extend the library to include other approaches. diff --git a/docs/examples.rst b/docs/examples.rst index dc9989ac..6236e3ac 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -20,7 +20,7 @@ One of the simplest applications is the forward solution of a Poisson equation: u &= \sin(\frac{\pi}{2} x_1)\cos(2\pi x_2), \text{ on } \partial \Omega \end{align} -This problem is part of the tutorial and is therefore explained with alot of details. +This problem is part of the tutorial and is therefore explained with a lot of details. 
The corresponding implementation can be found here_. .. _here : tutorial/solve_pde.html diff --git a/docs/tutorial/differentialoperators.rst b/docs/tutorial/differentialoperators.rst index 1f4516d9..1883c2a8 100644 --- a/docs/tutorial/differentialoperators.rst +++ b/docs/tutorial/differentialoperators.rst @@ -51,7 +51,7 @@ that one has to only evaluate the function once and then can create arbitrary de .. code-block:: python - # Therefore comput now the outputs: + # Therefore compute now the outputs: out = f(x, t) Let us compute the gradient and laplacian: diff --git a/docs/tutorial/model_creation.rst b/docs/tutorial/model_creation.rst index 6abcda34..349110b8 100644 --- a/docs/tutorial/model_creation.rst +++ b/docs/tutorial/model_creation.rst @@ -20,7 +20,7 @@ Parameters that should be learned in a inverse problem can also be easily define .. code-block:: python D_space = tp.spaces.R1('D') # parameters need there own space of corresponding dimension - D = tp.models.Parameter(init=0.0, space=D) # here you have to pass a fitting inital guess + D = tp.models.Parameter(init=0.0, space=D) # here you have to pass a fitting initial guess That are all the basics to the creation of the networks and parameters, they could now be used in a condition to then start the training. diff --git a/docs/tutorial/plotting.rst b/docs/tutorial/plotting.rst index ab5aacc7..7a0ccff2 100644 --- a/docs/tutorial/plotting.rst +++ b/docs/tutorial/plotting.rst @@ -30,14 +30,14 @@ plot_function .. code-block:: python def plot_fn(u): - return u # remeber u is the model outputs not the model itself + return u # remember u is the model outputs not the model itself - If we want to plot the laplcian w.r.t. :math:`x`, or any other kind of derivative: + If we want to plot the Laplacian w.r.t. :math:`x`, or any other kind of derivative: .. 
code-block:: python def plot_fn(u, x): - return tp.utils.laplcian(u, x) + return tp.utils.laplacian(u, x) Or if we know the exact solution and want the error, we can compute this in there: diff --git a/docs/tutorial/sampler_tutorial.rst b/docs/tutorial/sampler_tutorial.rst index 1a5aab11..f8760897 100644 --- a/docs/tutorial/sampler_tutorial.rst +++ b/docs/tutorial/sampler_tutorial.rst @@ -6,7 +6,7 @@ in these domains. This task is handled by the **PointSampler**-class. Different are implemented, for example: - ``RandomUniformSampler``: samples uniform randomly distributed points, in the given domain. - To assure a efficent sampling, every domain implements are method to create random points. + To assure an efficient sampling, every domain implements a method to create random points. - ``GridSampler``: samples a uniform point grid, in the given domain. Just like the ``RandomUniformSampler`` loosely coupled with the domains, for efficiency. - ``GaussianSampler``: creates points with a normal/Gaussian distribution. diff --git a/docs/tutorial/solve_pde.rst b/docs/tutorial/solve_pde.rst index f0cdb937..2a78e024 100644 --- a/docs/tutorial/solve_pde.rst +++ b/docs/tutorial/solve_pde.rst @@ -85,10 +85,10 @@ equation itself and the boundary condition. Here, we start with the boundary con bound_values = torch.sin(np.pi/2*x[:, :1]) * torch.cos(2*np.pi*x[:, 1:]) return u - bound_values - # the point sampler, for the trainig points: + # the point sampler, for the training points: # here we use grid points any other sampler could also be used bound_sampler = tp.samplers.GridSampler(square.boundary, n_points=5000) - bound_sampler = bound_sampler.make_static() # grid always the same, therfore static for one single computation + bound_sampler = bound_sampler.make_static() # grid always the same, therefore static for one single computation Once all this is defined, we have to combine the residual and sampler in a ``condition``. 
These condition handle internally the training process. @@ -118,7 +118,7 @@ They can be found under the ``utils`` section. def pde_residual(u, x): return tp.utils.laplacian(u, x) + 4.25*np.pi**2*u - # the point sampler, for the trainig points: + # the point sampler, for the training points: pde_sampler = tp.samplers.GridSampler(square, n_points=15000) # again point grid pde_sampler = pde_sampler.make_static() # wrap everything together in the condition diff --git a/docs/tutorial/solver_info.rst b/docs/tutorial/solver_info.rst index 41907b28..7957d3da 100644 --- a/docs/tutorial/solver_info.rst +++ b/docs/tutorial/solver_info.rst @@ -10,7 +10,7 @@ There, the behavior of the TorchPhysics ``Solver`` is explained and shown. Here we rather want to mention some more details for the trainings process. In general, most of the capabilities of Pytorch Lightning can also be used inside -TorchPhysics. All possiblities can be checked in the `Lightning documentation`_. +TorchPhysics. All possibilities can be checked in the `Lightning documentation`_. .. _`beginning example`: solve_pde.html .. _`Lightning documentation`: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html diff --git a/docs/tutorial/tutorial_domain_basics.rst b/docs/tutorial/tutorial_domain_basics.rst index 9705d022..9e1c33e4 100644 --- a/docs/tutorial/tutorial_domain_basics.rst +++ b/docs/tutorial/tutorial_domain_basics.rst @@ -87,8 +87,8 @@ but for complex domains, the creation of training points will possibly become co cut_domain = R - C The boundary can be again called with ``.boundary``. Since the operation can create -complex domains the voluem can not always be computed. If a exact value is needed, -one has to set it over the ``set_volume`` methode. +complex domains the volume can not always be computed. If an exact value is needed, +one has to set it over the ``set_volume`` method. 
Again we can have a look at the corresponding geometries: diff --git a/docs/tutorial/tutorial_spaces_and_points.rst b/docs/tutorial/tutorial_spaces_and_points.rst index 599abe95..597155f7 100644 --- a/docs/tutorial/tutorial_spaces_and_points.rst +++ b/docs/tutorial/tutorial_spaces_and_points.rst @@ -2,12 +2,12 @@ Spaces and points in TorchPhysics ================================= In this tutorial, we will cover the starting point for every PDE setting: The involved -spaces which define the names and dimensionlities of all variables. +spaces which define the names and dimensionalities of all variables. Spaces ------ The class **Space** itself is quite lazy and basically consists of a counter collecting -dimensionlities of space variables. It's purpose is to define variable names that can later +dimensionalities of space variables. It's purpose is to define variable names that can later be used, e.g. in user-defined functions. They therefore appear in several parts of TorchPhysics, for example in the definition of domains or models. @@ -55,7 +55,7 @@ Points The ``Points`` object is another central part of TorchPhysics. It consists of a PyTorch-tensor collecting a set of points in a ``Space``. It is generated e.g. by the samplers during training and handed to and from all models as in- and output. However, for standard use-cases, ``Points`` -mostly stay behind the scenes, so if you don't need custom behaviour when using TorchPhysics, feel +mostly stay behind the scenes, so if you don't need custom behavior when using TorchPhysics, feel free to skip this part of the tutorial for now. ``Points`` store data in a tensor with 2-axis, the first corresponding the batch-dimension in a batch @@ -78,9 +78,9 @@ All ``Points`` have a space and therefore also a dimensionality and a variable s >>> points.dim 3 We can access the contents of a ``Points`` object in a single tensor or with the corresponding coordinate -dict using ``.as_tensor`` or ``.coordinates`` attribues. 
``Points`` also support most torch functions that +dict using ``.as_tensor`` or ``.coordinates`` attributes. ``Points`` also support most torch functions that work on tensors and support slicing via keys along the ordered variable axis, regarding the last key in slicing -(similar to NumPy or PyTorch-behaviour): +(similar to NumPy or PyTorch-behavior): .. code-block:: python diff --git a/docs/tutorial/tutorial_start.rst b/docs/tutorial/tutorial_start.rst index 05f8f53c..b5c0762e 100644 --- a/docs/tutorial/tutorial_start.rst +++ b/docs/tutorial/tutorial_start.rst @@ -43,7 +43,7 @@ Conditions Solver Handles the training of the defined model, by applying the previously created conditions. - The usage of the solver is shown the beginnig example for `solving a simple PDE`_. More details + The usage of the solver is shown the beginning example for `solving a simple PDE`_. More details of the trainings process are mentioned here_. .. _here: solver_info.html diff --git a/examples/pinn/Young-Laplace-equation-varying-angle.ipynb b/examples/pinn/Young-Laplace-equation-varying-angle.ipynb new file mode 100644 index 00000000..9ae9f3a0 --- /dev/null +++ b/examples/pinn/Young-Laplace-equation-varying-angle.ipynb @@ -0,0 +1,474 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "1ef6d147-2dd4-4547-9fb6-79b3758d7350", + "metadata": {}, + "outputs": [], + "source": [ + "import torchphysics as tp\n", + "import numpy as np\n", + "import torch\n", + "from matplotlib import pyplot as plt\n", + "import pytorch_lightning as pl" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "af0c47bc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Bond number: 0.0340011875\n" + ] + } + ], + "source": [ + "# geometry information\n", + "d = 1 # diameter in mm\n", + "R = d/2/1e3 # radius in m\n", + "h_l = 0.005 # water height in m\n", + "h_c = 0.01 # capillary height\n", + "norm = R # characteristic 
capillary radius for normalization\n", + "\n", + "# surface tension coefficient\n", + "sigma = 0.072\n", + "\n", + "# densities (liquid/gas)\n", + "rho_l = 998.2\n", + "rho_g = 1.225\n", + "\n", + "# contact angle\n", + "theta_value = 80.0\n", + "theta = theta_value * np.pi/180\n", + "\n", + "# gravitational acceleration\n", + "g = 9.81\n", + "\n", + "# Bond number\n", + "Bo = rho_l*g*R**2/sigma\n", + "print(f'Bond number: {Bo}')\n", + "\n", + "# number of sample points\n", + "N_points_PDE = 10000\n", + "N_points_BC = 5000" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "6af0dba0-d481-4566-a8b7-244098eee713", + "metadata": {}, + "outputs": [], + "source": [ + "# Input and output spaces\n", + "X = tp.spaces.R2(variable_name='x')\n", + "Z = tp.spaces.R1('z')\n", + "LAMB = tp.spaces.R1('lamb')\n", + "Theta = tp.spaces.R1(\"theta\")\n", + "\n", + "Omega = tp.domains.Circle(space=X, center=[0, 0], radius=1)\n", + "interval_theta = tp.domains.Interval(Theta, 10 * np.pi/180.0, 170*np.pi/180.0)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "586dc307", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3.141592653589793\n", + "31.415926535897935\n" + ] + } + ], + "source": [ + "# calculate normalized cross-sectional area and initial volume\n", + "A = np.pi*R**2/norm**2\n", + "V0 = A*h_l/norm\n", + "\n", + "dA = A/N_points_PDE\n", + "dV = V0/N_points_PDE\n", + "print(A)\n", + "print(V0)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d020f7f4-c286-466f-928d-1f80ee64c53f", + "metadata": {}, + "outputs": [], + "source": [ + "sampler_pde_condition = tp.samplers.RandomUniformSampler(Omega*interval_theta, n_points=N_points_PDE)\n", + "domain_boundary_condition = Omega.boundary\n", + "sampler_boundary_condition = tp.samplers.RandomUniformSampler(domain_boundary_condition*interval_theta, \n", + " n_points=N_points_BC)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 7, + "id": "c29f3f92-d613-470f-ab74-9369e071ea04", + "metadata": {}, + "outputs": [], + "source": [ + "def length_of_grad(grad_z):\n", + " return torch.sqrt(1 + grad_z[:, :1]**2 + grad_z[:, 1:]**2)\n", + "\n", + "def residual_pde_condition(z, x, lamb):\n", + " grad_z = tp.utils.grad(z, x)\n", + " len_grad = length_of_grad(grad_z)\n", + " return tp.utils.div(grad_z/len_grad, x) - Bo*z - lamb\n", + "\n", + "# calculate normal vectors prior to optimization to decrease computational effort\n", + "#normal_vectors = Omega.boundary.normal(sampler_boundary_condition.sample_points()).to('cuda')\n", + "\n", + "def residual_boundary_condition(z, x, theta):\n", + " grad_z = tp.utils.grad(z, x)\n", + " len_grad = length_of_grad(grad_z)\n", + " normal_vectors = Omega.boundary.normal(x)\n", + " normal_grad = torch.sum(normal_vectors*grad_z, dim=1, keepdim=True)\n", + " return normal_grad/len_grad - torch.cos(theta)\n", + "\n", + "def residual_volume_condition(z):\n", + " return torch.sum(z*dA, dim=0, keepdim=True) - V0" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fa15606a-a2c7-40bf-9e41-920c8f6a1bc9", + "metadata": {}, + "outputs": [], + "source": [ + "fcn_layer = tp.models.FCN(input_space=X*Theta, output_space=Z, hidden = (50, 50, 50, 50, 50, 50))\n", + "\n", + "model = tp.models.Sequential(fcn_layer)\n", + "\n", + "model_lamb = tp.models.FCN(input_space=Theta, output_space=LAMB, hidden = (20, 20, 20, 20))\n", + "\n", + "combi_model = tp.models.Parallel(model, model_lamb)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "008c09a7-81f8-41b5-8c10-3892812740ad", + "metadata": {}, + "outputs": [], + "source": [ + "pde_condition = tp.conditions.PINNCondition(module = combi_model, \n", + " sampler = sampler_pde_condition,\n", + " residual_fn = residual_pde_condition,\n", + " )\n", + "\n", + "boundary_condition = tp.conditions.PINNCondition(module = model, \n", + " sampler = sampler_boundary_condition,\n", + " residual_fn 
= residual_boundary_condition)\n", + "\n", + "volume_condition = tp.conditions.PINNCondition(module = model, \n", + " sampler = sampler_pde_condition,\n", + " residual_fn = residual_volume_condition, \n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "53cf64ac", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "IPU available: False, using: 0 IPUs\n", + "HPU available: False, using: 0 HPUs\n", + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [4]\n", + "\n", + " | Name | Type | Params\n", + "------------------------------------------------\n", + "0 | train_conditions | ModuleList | 14.3 K\n", + "1 | val_conditions | ModuleList | 0 \n", + "------------------------------------------------\n", + "14.3 K Trainable params\n", + "0 Non-trainable params\n", + "14.3 K Total params\n", + "0.057 Total estimated model params size (MB)\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "20fe544955104fca8b3803dd032b2ab6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Sanity Checking: | | 0/? 
[00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "vmin = 9.4\n", + "vmax = 10.7\n", + "theta = 150.0\n", + "\n", + "plot_sampler = tp.samplers.PlotSampler(plot_domain=Omega, n_points=3169, \n", + " data_for_other_variables={\"theta\": theta*np.pi/180.0})\n", + "sample_points = plot_sampler.sample_points()\n", + "output = model.forward(sample_points)\n", + "\n", + "x_vis = sample_points.as_tensor[:,0].detach().numpy()\n", + "y_vis = sample_points.as_tensor[:,1].detach().numpy()\n", + "z_vis = output.as_tensor.detach().numpy().flatten()\n", + "\n", + "elevation = (max(z_vis)-min(z_vis))\n", + "\n", + "print(\"Elevation:\", elevation)\n", + "\n", + "from mpl_toolkits.mplot3d import axes3d\n", + "plt.rcParams['font.size'] = 32\n", + "plt.rcParams['legend.fontsize'] = 24\n", + "plt.rcParams['axes.titlesize'] = 'medium'\n", + "\n", + "fig = plt.figure()\n", + "helper_color = plt.scatter([0, 0], [0, 0], c=[vmin, vmax],)\n", + "plt.tricontourf(x_vis, y_vis, z_vis, vmin=vmin, vmax=vmax)\n", + "plt.colorbar(helper_color)\n", + "plt.savefig(f\"pinns_capillary_theta_{theta}_deg.png\", dpi=300)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "myenv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.23" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/tutorial/Tutorial_PINNs_Parameter_Dependency.ipynb b/examples/tutorial/Tutorial_PINNs_Parameter_Dependency.ipynb index e28d3013..0d10869a 100644 --- a/examples/tutorial/Tutorial_PINNs_Parameter_Dependency.ipynb +++ b/examples/tutorial/Tutorial_PINNs_Parameter_Dependency.ipynb @@ -26,7 +26,7 @@ "$$\n", "\\begin{cases}\n", "\\frac{\\partial}{\\partial t} u(x,t) &= \\color{red}{a} \\Delta_x u(x,t) &&\\text{ 
on } \\Omega\\times I, \\\\\n", - "u(x, t) &= u_0 &&\\text{ on } \\Omega\\times \\{0\\},\\\\\n", + "u(x, 0) &= u_0 &&\\text{ on } \\Omega\\times \\{0\\},\\\\\n", "u(x,t) &= h(t, \\color{red}{p}) &&\\text{ at } \\partial\\Omega_{heater}\\times I, \\\\\n", "\\nabla_x u(x, t) \\cdot \\overset{\\rightarrow}{n}(x) &= 0 &&\\text{ at } (\\partial \\Omega \\setminus \\partial\\Omega_{heater}) \\times I.\n", "\\end{cases}\n", @@ -36,10 +36,21 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "d6b5fdd2-67c1-4f7e-a185-9d515fb9f3f8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAh8AAAGdCAYAAACyzRGfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/xnp5ZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAymklEQVR4nO3dfXSU9Z3//9eEJBNCMhOSkISQBLlRQDAoSGL0aKkgN1qFGi0Szqq7rru60VNx9yzL+bbrut092LqntttV1u9W0f5KgFIFha7y40ZCa7kJCQioREEKCbnjxmRCQmaSmev7R5IpkSRkcjPX3Dwf58w5zsxnLt6XnxmuF9f1ns9YDMMwBAAA4CcRZhcAAADCC+EDAAD4FeEDAAD4FeEDAAD4FeEDAAD4FeEDAAD4FeEDAAD4FeEDAAD4VaTZBXyTx+NRVVWV4uPjZbFYzC4HAAD0gWEYamxsVHp6uiIiej+3EXDho6qqSpmZmWaXAQAA+qGiokIZGRm9jgm48BEfHy+pvXibzWZyNQAAoC8cDocyMzO9x/HeBFz46LzUYrPZCB8AAASZvrRM0HAKAAD8ivABAAD8ivABAAD8ivABAAD8ivABAAD8ivABAAD8ivABAAD8ivABAAD8ivABAAD8akDh46WXXpLFYtFzzz3nfaylpUWFhYVKSkpSXFyc8vPzVVtbO9A6AQBAiOh3+CgpKdHrr7+u7OzsLo8vX75cW7Zs0caNG1VcXKyqqio9+OCDAy4UAACEhn6Fj0uXLmnZsmX6n//5H40cOdL7eENDg9544w399Kc/1d13362ZM2dqzZo1+uMf/6h9+/YNWtEAACB49euH5QoLC3Xfffdp7ty5+rd/+zfv46WlpWptbdXcuXO9j02ePFlZWVnau3evbrvttqu25XQ65XQ6vfcdDkd/SgIGxOMx9NYf/6SKr5vNLgUAhlxynFWF355o2p/vc/hYv369ysrKVFJSctVzNTU1io6OVkJCQpfHU1NTVVNT0+32Vq1apRdffNHXMoBB9U5Zpf5162dmlwEAfjF+1IjgCR8VFRX6/ve/r+3btysmJmZQCli5cqWef/55732Hw6HMzMxB2TbQF5ecbfrJtnJJ0sJpaRo/aoTJFQHA0BoZG23qn+9T+CgtLVVdXZ1mzJjhfcztdmvPnj36r//6L23btk0ul0v19fVdzn7U1tYqLS2t221arVZZrdb+VQ8Mgtc+OqFzjU6NTYrVzx65WdbIYWaXBAAhzafwMWfOHB09erTLY3/5l3+pyZMna8WKFcrMzFRUVJR27typ/Px8SVJ5ebnOnDmjvLy8wasaGCQVF5v1yz+ckiT9n3unEDwAwA98Ch/x
8fGaNm1al8dGjBihpKQk7+NPPPGEnn/+eSUmJspms+nZZ59VXl5et82mgNn+756v5Grz6I6JSbrnxlSzywGAsNCvb7v05pVXXlFERITy8/PldDo1f/58vfbaa4P9xwAD1uxq0+ZDZyVJT31rgiwWi8kVAUB4sBiGYZhdxJUcDofsdrsaGhpks9nMLgchbEPJGa1456jGJsXqo7+frYgIwgcA9Jcvx29+2wVhq2j/GUnS0pwsggcA+BHhA2Hp2NkGfVLZoKhhFj00M8PscgAgrBA+EJaKDrSf9Zg/NU3JcXzVGwD8ifCBsHPJ2ab3OhpNC3KzTK4GAMIP4QNh5/3DVWpyuTU+eYTyxieZXQ4AhB3CB8JO0YHTktobTfl6LQD4H+EDYeVIZb2OnXUoeliE8mk0BQBTED4QVjq/XrvwpjQljjD3h5UAIFwRPhA2Glta9f4nVZKkghwaTQHALIQPhI3Nh6vU7HJrYkqccsYlml0OAIQtwgfCgmEYWruPRlMACASED4SFQxX1Ol7TqOjICOXPGGN2OQAQ1ggfCAudjabfuWm0EmJpNAUAMxE+EPIaLrdq65GORlNWNAUA0xE+EPI2lVWqpdWjSanxmjl2pNnlAEDYI3wgpBmG4f0RuYJcGk0BIBAQPhDSSk9/rS9qLykmKkKLb6HRFAACAeEDIa2z0fT+7HTZh0eZXA0AQCJ8IITVN7u09Wi1JBpNASCQED4Qst4pOytXm0dTRtt0c2aC2eUAADoQPhCSDMNQ0f72FU1pNAWAwEL4QEg6cOqiTp5rUmz0MC2+Od3scgAAVyB8ICR1fr32genpio+h0RQAAgnhAyHnYpNLHxytkUSjKQAEIsIHQs47pZVyuT2aNsam7IwEs8sBAHwD4QMhpcuKpjljTa4GANAdwgdCyt6TF3TqfJNGRA/TAzSaAkBAInwgpKztOOux6JYxirNGmlwNAKA7hA+EjPOXnPr/P+1oNM2h0RQAAhXhAyFj48FKtboNTc9M0LQxdrPLAQD0gPCBkODxGFrXccllGWc9ACCgET4QEj4+eV5nLjYr3hqp70wfbXY5AIBeED4QEor2t5/1+O6MMYqNptEUAAIZ4QNBr66xRds/q5XEiqYAEAwIHwh6Gw9Wqs1jaEZWgian2cwuBwBwDYQPBLUrG00LclnRFACCAeEDQW3Pl+dU+fVl2WIi9Z1sGk0BIBgQPhDUOhtNH5yRoZioYSZXAwDoC8IHglato0U7j9dJkpbRaAoAQYPwgaC1oaRCbo+hWdeN1PWp8WaXAwDoI8IHgpLbY2i9t9GUsx4AEEwIHwhKxV/UqaqhRQmxUVo4jUZTAAgmhA8EpbX72s965NNoCgBBh/CBoFNVf1kflbc3mi7lR+QAIOgQPhB01pdUyGNIt41P1MSUOLPLAQD4iPCBoNLm9mhDCSuaAkAwI3wgqOw6Xqdah1OJI6I1f2qq2eUAAPqB8IGgUtTx9dqHZ2bIGkmjKQAEI8IHgkbFxWYVf3FOEo2mABDMCB8IGhtKKmQY0h0Tk3Rd8gizywEA9BPhA0Gh1e3RhoMVkqSCHBpNASCYET4QFHZ+XqtzjU4lx0XrnhtpNAWAYEb4QFBYu7+j0fTWTEVH8rYFgGDG3+IIeGcuNOv3X56XJC2dRaMpAAQ7wgcC3rqORcXuvD5ZWUmxJlcDABgowgcCmqvNo40djabLcjnrAQChgPCBgLb9s1qdv+TSqHir5kyh0RQAQgHhAwGt6MBpSdKSWzMVNYy3KwCEAv42R8A6db5JH5+4IItFeiQn0+xyAACDxKfwsXr1amVnZ8tms8lmsykvL08ffPCB9/nZs2fLYrF0uT311FODXjTCw7qO33GZfcMoZYyk0RQAQkWkL4MzMjL00ksv6frrr5dhGHr77be1aNEiHTp0SFOnTpUkPfnkk/rXf/1X72tiYzlowHfONrd+W1opSSrIZUVTAAglPoWP+++/v8v9f//3f9fq1au1b98+b/iIjY1V
Wlra4FWIsPThsRpdbHIpzRajb08aZXY5AIBB1O+eD7fbrfXr16upqUl5eXnex9euXavk5GRNmzZNK1euVHNzc6/bcTqdcjgcXW5AUceKpktmZSqSRlMACCk+nfmQpKNHjyovL08tLS2Ki4vTpk2bdOONN0qSCgoKNHbsWKWnp+vIkSNasWKFysvL9e677/a4vVWrVunFF1/s/x4g5Jyou6T9py4qgkZTAAhJFsMwDF9e4HK5dObMGTU0NOi3v/2tfvnLX6q4uNgbQK60a9cuzZkzRydOnNCECRO63Z7T6ZTT6fTedzgcyszMVENDg2w2m4+7g1Dwo62f6Y0/nNLcKSn65WOzzC4HANAHDodDdru9T8dvn898REdHa+LEiZKkmTNnqqSkRD//+c/1+uuvXzU2NzdXknoNH1arVVar1dcyEKJaWt16p6yz0ZQVTQEgFA34YrrH4+ly5uJKhw8fliSNHj16oH8MwsQHx6pV39yqMQnD9a0bUswuBwAwBHw687Fy5UotXLhQWVlZamxsVFFRkXbv3q1t27bp5MmTKioq0r333qukpCQdOXJEy5cv11133aXs7Oyhqh8h5spG02ERFpOrAQAMBZ/CR11dnR599FFVV1fLbrcrOztb27Zt0z333KOKigrt2LFDP/vZz9TU1KTMzEzl5+frBz/4wVDVjhDzRW2jSv70tYZFWLRkFo2mABCqfAofb7zxRo/PZWZmqri4eMAFIXx1nvWYMzlFqbYYk6sBAAwVFlBAQGhpdetdGk0BICwQPhAQth6plqOlTRkjh+uu61nRFABCGeEDAaFo/2lJ0tKcLEXQaAoAIY3wAdN9Xu1Q2Zl6RUZY9PCtGWaXAwAYYoQPmK6z0XTe1FSlxNNoCgChjvABUzW72rT50FlJUkHOWJOrAQD4A+EDptrySZUanW0amxSr2yckmV0OAMAPCB8wVeclFxpNASB8ED5gmmNnG/RJZYOihln00EwaTQEgXBA+YJqiA+1nPeZPTVNyHL9sDADhgvABU1xytum9zkZTVjQFgLBC+IAp3j9cpSaXW+OTRyhvPI2mABBOCB8wRdGBP69oarHQaAoA4YTwAb87UlmvY2cdih4WoXwaTQEg7BA+4HedX69deFOaEkdEm1wNAMDfCB/wq8aWVr3/SZUkqSCHRlMACEeED/jV5sNVana5NTElTjnjEs0uBwBgAsIH/MYwjC4rmtJoCgDhifABvzlcUa/Pqx2yRkYof8YYs8sBAJiE8AG/Wdtx1uO+7NFKiKXRFADCFeEDftFwuVVbj7Q3mi5jRVMACGuED/jFprJKtbR6NCk1XjOyRppdDgDARIQPDDnDMLw/IleQS6MpAIQ7wgeGXOnpr/VF7SXFREVo8S00mgJAuCN8YMh1fr32/ux02YdHmVwNAMBshA8Mqfpml7YerZbUfskFAADCB4bUO2Vn5WrzaMpom27OTDC7HABAACB8YMi0r2h6WhKNpgCAPyN8YMgcOHVRJ881KTZ6mBbfnG52OQCAAEH4wJDp/HrtA9PTFR9DoykAoB3hA0PiYpNLHxytkUSjKQCgK8IHhsQ7pZVyuT2aNsam7IwEs8sBAAQQwgcGnWEYWte5omnOWJOrAQAEGsIHBt3ery7oq/NNirNG6gEaTQEA30D4wKDrXNF00c3pirNGmlwNACDQED4wqM5fcmrbpzSaAgB6RvjAoNp4sFKtbkPTMxM0Nd1udjkAgABE+MCg8Xj+3Gi6LIezHgCA7hE+MGg+PnleZy42K94aqe9MH212OQCAAEX4wKDpbDT97owxio2m0RQA0D3CBwZFXWOLtn9WK4lGUwBA7wgfGBQbD1aqzWNoRlaCJqfZzC4HABDACB8YsCsbTQtyWdEUANA7wgcGbM+X51T59WXZYiL1nWwaTQEAvSN8YMA6G00fnJGhmKhhJlcDAAh0hA8MSK2jRTuP10mSltFoCgDoA8IHBmRDSYXcHkOzrhup61PjzS4HABAECB/o
N7fH0HpvoylnPQAAfUP4QL8Vf1GnqoYWJcRGaeE0Gk0BAH1D+EC/dTaaPkSjKQDAB4QP9EtV/WXt6mg0XcolFwCADwgf6Jf1JRXyGNJt4xM1YVSc2eUAAIII4QM+a3N7tKGEFU0BAP1D+IDPdh2vU63DqcQR0Zo/NdXscgAAQYbwAZ8VdXy99uGZGbJG0mgKAPAN4QM+qbjYrOIvzkmSlubQaAoA8B3hAz7ZUFIhw5DumJik65JHmF0OACAIET7QZ61ujzYcrJAkFeTQaAoA6B/CB/ps5+e1OtfoVHJctO65kUZTAED/+BQ+Vq9erezsbNlsNtlsNuXl5emDDz7wPt/S0qLCwkIlJSUpLi5O+fn5qq2tHfSiYY61HSuaPnxrpqIjya0AgP7x6QiSkZGhl156SaWlpTp48KDuvvtuLVq0SJ9++qkkafny5dqyZYs2btyo4uJiVVVV6cEHHxySwuFfZy406/dfnpckLZ1FoykAoP8shmEYA9lAYmKiXn75ZT300EMaNWqUioqK9NBDD0mSjh8/rilTpmjv3r267bbb+rQ9h8Mhu92uhoYG2Wy2gZSGQfTjD49r9e6TuvP6ZP1/T+SaXQ4AIMD4cvzu97lzt9ut9evXq6mpSXl5eSotLVVra6vmzp3rHTN58mRlZWVp7969PW7H6XTK4XB0uSGwuNo82tjRaLqM33EBAAyQz+Hj6NGjiouLk9Vq1VNPPaVNmzbpxhtvVE1NjaKjo5WQkNBlfGpqqmpqanrc3qpVq2S32723zMxMn3cCQ2v7Z7U6f8mllHir5kyh0RQAMDA+h49Jkybp8OHD2r9/v55++mk99thj+uyzz/pdwMqVK9XQ0OC9VVRU9HtbGBpFB05LkpbMylTUMBpNAQADE+nrC6KjozVx4kRJ0syZM1VSUqKf//znWrJkiVwul+rr67uc/aitrVVaWlqP27NarbJarb5XDr84db5JH5+4IIulPXwAADBQA/5nrMfjkdPp1MyZMxUVFaWdO3d6nysvL9eZM2eUl5c30D8GJlnX8Tsus28YpYyRsSZXAwAIBT6d+Vi5cqUWLlyorKwsNTY2qqioSLt379a2bdtkt9v1xBNP6Pnnn1diYqJsNpueffZZ5eXl9fmbLggszja3fltaKUkqyGVFUwDA4PApfNTV1enRRx9VdXW17Ha7srOztW3bNt1zzz2SpFdeeUURERHKz8+X0+nU/Pnz9dprrw1J4Rh6Hx6r0cUml9JsMfr2pFFmlwMACBEDXudjsLHOR+BY8vpe7T91Ud+fc72W33OD2eUAAAKYX9b5QGg7UXdJ+09dVIRFeiSHRlMAwOAhfKBbnY2md09O0Wj7cJOrAQCEEsIHrtLS6tY7ZZ2NpqxoCgAYXIQPXOWDY9Wqb27VmITh+tYNKWaXAwAIMYQPXKVof/sllyWzMjUswmJyNQCAUEP4QBdf1Daq5E9fa1iEhRVNAQBDgvCBLjrPesyZnKJUW4zJ1QAAQhHhA14trW69S6MpAGCIET7gtfVItRwtbcoYOVx3Xc+KpgCAoUH4gFfR/tOSpKU5WYqg0RQAMEQIH5AkHa9xqOxMvSIjLHr41gyzywEAhDDCByT9udF03tRUpcTTaAoAGDqED6jZ1aZNZWclSQU5Y02uBgAQ6ggf0JZPqtTobNPYpFjdPiHJ7HIAACGO8AHvJRcaTQEA/kD4CHPHzjbok8oGRQ2z6KGZNJoCAIYe4SPMFR1oP+sxf2qakuOsJlcDAAgHhI8wdsnZpvcOdTSasqIpAMBPCB9h7P3DVWpyuTU+eYTyxtNoCgDwD8JHGCs68OcVTS0WGk0BAP5B+AhTRyrrdeysQ9HDIpRPoykAwI8IH2Gq8+u1C29KU+KIaJOrAQCEE8JHGGpsadX7n1RJkgpyaDQFAPgX4SMMbT5cpWaXWxNT4pQzLtHscgAAYYbwEWYMw/Becimg0RQAYALCR5g5XFGvz6sdskZGKH8GjaYAAP8jfISZzrMe
92WPlj02yuRqAADhiPARRhout2rLkfZG02WsaAoAMAnhI4xsKqtUS6tHk1LjNSNrpNnlAADCFOEjTBiG4f0RuYJcGk0BAOYhfISJ0tNf64vaS4qJitDiW8aYXQ4AIIwRPsJEZ6Pp/dnpsg+n0RQAYB7CRxiob3Zp69FqSe2XXAAAMBPhIwy8U3ZWrjaPpoy26ebMBLPLAQCEOcJHiGtf0fS0JBpNAQCBgfAR4g6cuqiT55oUGz1Mi29ON7scAAAIH6Gu8+u1D0xPV3wMjaYAAPMRPkLYxSaXPjhaI0laljvW5GoAAGhH+Ahh75RWyuX26KYxdt2UYTe7HAAAJBE+QpZhGFp3xYqmAAAECsJHiNr71QV9db5JcdZIPTCdRlMAQOAgfISozhVNF92crhHWSJOrAQDgzwgfIej8Jae2fdreaMolFwBAoCF8hKCNByvV6jY0PTNBU9NpNAUABBbCR4jxeP7caLosh7MeAIDAQ/gIMR+fPK8zF5sVb43Ud6aPNrscAACuQvgIMZ2Npt+dMUax0TSaAgACD+EjhNQ1tmj7Z7WSaDQFAAQuwkcI2XiwUm0eQzOyEjQ5zWZ2OQAAdIvwESKubDQt4HdcAAABjPARIvZ8eU6VX1+WLSZS38mm0RQAELgIHyGis9H0wRkZiokaZnI1AAD0jPARAmodLdp5vE6StIxGUwBAgCN8hIANJRVyewzlXJeo61PjzS4HAIBeET6CnNtjaL230ZSzHgCAwEf4CHLFX9SpqqFFI2OjtGBamtnlAABwTYSPINfZaJpPoykAIEgQPoJYVf1l7epoNF3KJRcAQJAgfASxDSUV8hjSbeMTNWFUnNnlAADQJz6Fj1WrVmnWrFmKj49XSkqKFi9erPLy8i5jZs+eLYvF0uX21FNPDWrRkNrcHq0vYUVTAEDw8Sl8FBcXq7CwUPv27dP27dvV2tqqefPmqampqcu4J598UtXV1d7bT37yk0EtGtKu43WqdTiVOCJa86emml0OAAB95tNvrn/44Ydd7r/11ltKSUlRaWmp7rrrLu/jsbGxSkvjmxdDqajj67UPz8yQNZJGUwBA8BhQz0dDQ4MkKTExscvja9euVXJysqZNm6aVK1equbm5x204nU45HI4uN/Su4mKzir84J0lamkOjKQAguPh05uNKHo9Hzz33nO644w5NmzbN+3hBQYHGjh2r9PR0HTlyRCtWrFB5ebnefffdbrezatUqvfjii/0tIyxtKKmQYUh3TEzSdckjzC4HAACfWAzDMPrzwqeffloffPCB/vCHPygjI6PHcbt27dKcOXN04sQJTZgw4arnnU6nnE6n977D4VBmZqYaGhpks9n6U1pIa3V7dPtLu3Su0alXC2boPn7BFgAQABwOh+x2e5+O3/068/HMM89o69at2rNnT6/BQ5Jyc3MlqcfwYbVaZbVa+1NGWNr5ea3ONTqVHBete26k0RQAEHx8Ch+GYejZZ5/Vpk2btHv3bo0bN+6arzl8+LAkafRo/oU+GNZ2rGj68K2Zio5kmRYAQPDxKXwUFhaqqKhI7733nuLj41VTUyNJstvtGj58uE6ePKmioiLde++9SkpK0pEjR7R8+XLdddddys7OHpIdCCdnLjTr91+el8UiLZ1FoykAIDj5FD5Wr14tqX0hsSutWbNGjz/+uKKjo7Vjxw797Gc/U1NTkzIzM5Wfn68f/OAHg1ZwOFvXsajYndePUlZSrMnVAADQPz5fdulNZmamiouLB1QQuudq82jjwQpJUgFfrwUABDGaBoLE9s9qdf6SSynxVs2ZkmJ2OQAA9BvhI0gUHTgtSVoyK1NRw5g2AEDw4igWBP50vkkfn7ggi6U9fAAAEMwIH0FgXcfvuMy+YZQyRtJoCgAIboSPAOdsc2tjaaUkqSB3rMnVAAAwcISPAPfhsRpdbHIpzRajb08aZXY5AAAMGOEjwBV1rGi6ZFamImk0BQCEAI5mAexE3SXtP3VRERbpkRwaTQEA
oYHwEcA6G03vnpyi0fbhJlcDAMDgIHwEqJZWt94p62w0ZUVTAEDoIHwEqA+OVau+uVVjEobrWzewoikAIHQQPgLUlY2mwyIsJlcDAMDgIXwEoC9qG1Xyp681LMLCiqYAgJBD+AhAnWc95k5JUaotxuRqAAAYXISPANPS6ta7ZaxoCgAIXYSPALP1SLUcLW3KTByuOycmm10OAACDjvARYIr2n5YkPTIrSxE0mgIAQhDhI4Acr3Go7Ey9IiMsevjWDLPLAQBgSBA+Akhno+m8qalKiafRFAAQmggfAaLZ1aZNZWclSQU5NJoCAEIX4SNAbPmkSo3ONo1NitXtE5LMLgcAgCFD+AgQnZdclubQaAoACG2EjwBw7GyDPqlsUNQwix6aSaMpACC0ET4CQNGB9rMe86emKTnOanI1AAAMLcKHyS452/TeoY5G09wsk6sBAGDoET5M9v7hKjW53BqfPEJ542k0BQCEPsKHyYoOtK9oujQnSxYLjaYAgNBH+DDRkcp6HTvrUHRkhPJpNAUAhAnCh4k6v15777Q0JY6INrkaAAD8g/BhksaWVr3/SZUkqSCXFU0BAOGD8GGSzYer1Oxya2JKnGZdN9LscgAA8BvChwkMw/Becimg0RQAEGYIHyY4XFGvz6sdskZGKH8GjaYAgPBC+DBB51mP+7JHyx4bZXI1AAD4F+HDzxout2rLkfZG02WsaAoACEOEDz/bfOisWlo9mpQarxlZNJoCAMIP4cOPDMPQ2v3tK5oW5NJoCgAIT4QPPyo9/bW+qL2kmKgILb5ljNnlAABgCsKHH3U2mt6fnS77cBpNAQDhifDhJ/XNLm09Wi2p/ZILAADhivDhJ++UnZWrzaMpo226OTPB7HIAADAN4cMP2lc0pdEUAACJ8OEXB05d1MlzTYqNHqbFN6ebXQ4AAKYifPhB0YH2RtNFN6crPoZGUwBAeCN8DLGLTS59cLRGklSQM9bkagAAMB/hY4i9U1opl9ujm8bYdVOG3exyAAAwHeFjCBmGoXUdl1z4ei0AAO0IH0No71cX9NX5JsVZI/XAdBpNAQCQCB9DqnNF00U3p2uENdLkagAACAyEjyFy/pJT2z7taDTlkgsAAF6EjyHy29JKtboNTc9M0NR0Gk0BAOhE+BgCHs+fG02X5XDWAwCAKxE+hsDHJ8/r9IVmxVsj9Z3po80uBwCAgEL4GAKdjabfnTFGsdE0mgIAcCXCxyCra2zR9s9qJdFoCgBAdwgfg2zjwUq1eQzNyErQ5DSb2eUAABBwCB+DqEujaS6/4wIAQHcIH4Noz5fnVPn1ZdmHR+m+bBpNAQDoDuFjEHU2mj44Y4xiooaZXA0AAIHJp/CxatUqzZo1S/Hx8UpJSdHixYtVXl7eZUxLS4sKCwuVlJSkuLg45efnq7a2dlCLDkS1jhbtPF4nSVpGoykAAD3yKXwUFxersLBQ+/bt0/bt29Xa2qp58+apqanJO2b58uXasmWLNm7cqOLiYlVVVenBBx8c9MIDzYaSCrk9hnKuS9TElHizywEAIGBZDMMw+vvic+fOKSUlRcXFxbrrrrvU0NCgUaNGqaioSA899JAk6fjx45oyZYr27t2r22677ZrbdDgcstvtamhokM0WHN8WcXsM3fnjXapqaNHPltysxbeMMbskAAD8ypfj94B6PhoaGiRJiYmJkqTS0lK1trZq7ty53jGTJ09WVlaW9u7d2+02nE6nHA5Hl1uwKf6iTlUNLRoZG6UF09LMLgcAgIDW7/Dh8Xj03HPP6Y477tC0adMkSTU1NYqOjlZCQkKXsampqaqpqel2O6tWrZLdbvfeMjMz+1uSaTobTfNnZNBoCgDANfQ7fBQWFurYsWNav379gApYuXKlGhoavLeKiooBbc/fquova1dHo+lSGk0BALimfv3wyDPPPKOtW7dqz549ysjI8D6elpYml8ul+vr6Lmc/amtrlZbW/eUIq9Uqq9XanzICwoaSCnkM6bbxiZow
Ks7scgAACHg+nfkwDEPPPPOMNm3apF27dmncuHFdnp85c6aioqK0c+dO72Pl5eU6c+aM8vLyBqfiANLm9mhDSfuZmgJWNAUAoE98OvNRWFiooqIivffee4qPj/f2cdjtdg0fPlx2u11PPPGEnn/+eSUmJspms+nZZ59VXl5en77pEmw+Kj+nGkeLEkdEa/7UVLPLAQAgKPgUPlavXi1Jmj17dpfH16xZo8cff1yS9MorrygiIkL5+flyOp2aP3++XnvttUEpNtCs3X9akvTwzAxZI2k0BQCgLwa0zsdQCJZ1PiouNuuulz+SYUi7/2G2rkseYXZJAACYxm/rfISzDSUVMgzpjolJBA8AAHxA+OiHVrdHGw62N5ouo9EUAACfED76YefntTrX6FRynFX33EijKQAAviB89MPajhVNv3drhqKG8b8QAABfcOT00ZkLzfr9l+dlsUhLc1jRFAAAXxE+fLSupP2sx53Xj1JmYqzJ1QAAEHwIHz5wtXm0saPRtICzHgAA9AvhwwfbP6vV+UsupcRbNWdKitnlAAAQlAgfPig60L6i6ZJZmTSaAgDQTxxB++hP55v08YkLsljawwcAAOgfwkcfrTvQ3mg6+4ZRyhhJoykAAP1F+OgDZ5tbG0srJUkFrGgKAMCAED76YNuntbrY5FKaLUbfnjTK7HIAAAhqhI8+WLvvz42mkTSaAgAwIBxJr+FE3SXtP3VRERbpkRwaTQEAGCjCxzV0NprePTlFo+3DTa4GAIDgR/joRUurW++UtTeaLqPRFACAQUH46MUHx6pV39yqMQnDddcNNJoCADAYCB+9KNrffsnlkVmZGhZhMbkaAABCA+GjB1/UNqrkT19rWIRF32NFUwAABg3howedZz3mTklRqi3G5GoAAAgdhI9utLS69W4ZK5oCADAUCB/d2HqkWo6WNmUmDtedE5PNLgcAgJBC+OhG0f72FU0fmZWlCBpNAQAYVISPbzhe41DZmXpFRlj08K0ZZpcDAEDIIXx8Q2ej6bypqUqJp9EUAIDBRvi4QrOrTZvKzkqSCnJoNAUAYCgQPq6w9ZNqNTrbNDYpVrdPSDK7HAAAQhLh4wprO35EbmkOjaYAAAwVwkeHY2cb9ElFvaKGWfTQTBpNAQAYKoSPDkUdZz3mT01TcpzV5GoAAAhdhA9Jl5xteu9Qe6PpMlY0BQBgSBE+JL1/uEpNLrfGjxqh28Ynml0OAAAhjfAhqehA+4qmBTlZslhoNAUAYCiFffg4UlmvY2cdio6MUP4MGk0BABhqYR8+Olc0vXdamkaOiDa5GgAAQl9Yh4/Glla9/0mVJKmARlMAAPwirMPH5sNVana5NTElTrOuG2l2OQAAhIWwDR+GYXgvudBoCgCA/4Rt+DhcUa/Pqx2y0mgKAIBfhW346DzrcV/2aNljo0yuBgCA8BGW4aPhcqu2HGlvNF2Wm2VyNQAAhJewDB+bD51VS6tHk1LjNSOLRlMAAPwp7MJHl0bTXBpNAQDwt7ALH2VnvlZ5baNioiK0+JYxZpcDAEDYCbvwsXZf+1mP+7PTZR9OoykAAP4WVuGjvtmlrUerJUnLbmNFUwAAzBBW4eOdsrNytXl042ibpmfYzS4HAICwFDbho73R9LQkGk0BADBT2ISPA6cu6uS5JsVGD9Oim9PNLgcAgLAVaXYB/jIpLV4/uG+KLrvcio+h0RQAALOETfhIiI3WX9853uwyAAAIe2Fz2QUAAAQGwgcAAPArwgcAAPArwgcAAPArwgcAAPArwgcAAPArwgcAAPArn8PHnj17dP/99ys9PV0Wi0WbN2/u8vzjjz8ui8XS5bZgwYLBqhcAAAQ5n8NHU1OTpk+frldffbXHMQsWLFB1dbX3tm7dugEVCQAAQofPK5wuXLhQCxcu7HWM1WpVWlpav4sCAACha0h6Pnbv3q2UlBRNmjRJTz/9tC5cuNDjWKfTKYfD0eUGAABC16CHjwULFuhXv/qVdu7cqR//+Mcq
Li7WwoUL5Xa7ux2/atUq2e127y0zM3OwSwIAAAHEYhiG0e8XWyzatGmTFi9e3OOYr776ShMmTNCOHTs0Z86cq553Op1yOp3e+w6HQ5mZmWpoaJDNZutvaQAAwI8cDofsdnufjt9D/qu248ePV3Jysk6cONFt+LBarbJard77nVmIyy8AAASPzuN2X85pDHn4qKys1IULFzR69Og+jW9sbJQkLr8AABCEGhsbZbfbex3jc/i4dOmSTpw44b1/6tQpHT58WImJiUpMTNSLL76o/Px8paWl6eTJk/rHf/xHTZw4UfPnz+/T9tPT01VRUaH4+HhZLBZfy+tV5yWdioqKkLykE+r7J4X+PrJ/wS/U95H9C35DtY+GYaixsVHp6enXHOtz+Dh48KC+/e1ve+8///zzkqTHHntMq1ev1pEjR/T222+rvr5e6enpmjdvnn70ox91ubTSm4iICGVkZPhalk9sNlvIvqmk0N8/KfT3kf0LfqG+j+xf8BuKfbzWGY9OPoeP2bNn93o9Z9u2bb5uEgAAhBF+2wUAAPhVWIUPq9WqF154oc+XgIJNqO+fFPr7yP4Fv1DfR/Yv+AXCPg5onQ8AAABfhdWZDwAAYD7CBwAA8CvCBwAA8CvCBwAA8KuQCx+vvvqqrrvuOsXExCg3N1cHDhzodfzGjRs1efJkxcTE6KabbtL//u//+qlS36xatUqzZs1SfHy8UlJStHjxYpWXl/f6mrfeeksWi6XLLSYmxk8V++5f/uVfrqp38uTJvb4mWOZPkq677rqr9s9isaiwsLDb8cEwf3v27NH999+v9PR0WSwWbd68ucvzhmHon//5nzV69GgNHz5cc+fO1ZdffnnN7fr6OR4qve1fa2urVqxYoZtuukkjRoxQenq6Hn30UVVVVfW6zf68z4fKtebv8ccfv6rWBQsWXHO7gTJ/0rX3sbvPpMVi0csvv9zjNgNlDvtyXGhpaVFhYaGSkpIUFxen/Px81dbW9rrd/n5ufRFS4WPDhg16/vnn9cILL6isrEzTp0/X/PnzVVdX1+34P/7xj1q6dKmeeOIJHTp0SIsXL9bixYt17NgxP1d+bcXFxSosLNS+ffu0fft2tba2at68eWpqaur1dTabTdXV1d7b6dOn/VRx/0ydOrVLvX/4wx96HBtM8ydJJSUlXfZt+/btkqSHH364x9cE+vw1NTVp+vTpevXVV7t9/ic/+Yn+8z//U//93/+t/fv3a8SIEZo/f75aWlp63Kavn+Oh1Nv+NTc3q6ysTD/84Q9VVlamd999V+Xl5XrggQeuuV1f3udD6VrzJ0kLFizoUuu6det63WYgzZ907X28ct+qq6v15ptvymKxKD8/v9ftBsIc9uW4sHz5cm3ZskUbN25UcXGxqqqq9OCDD/a63f58bn1mhJCcnByjsLDQe9/tdhvp6enGqlWruh3/ve99z7jvvvu6PJabm2v87d/+7ZDWORjq6uoMSUZxcXGPY9asWWPY7Xb/FTVAL7zwgjF9+vQ+jw/m+TMMw/j+979vTJgwwfB4PN0+H2zzJ8nYtGmT977H4zHS0tKMl19+2ftYfX29YbVajXXr1vW4HV8/x/7yzf3rzoEDBwxJxunTp3sc4+v73F+627/HHnvMWLRokU/bCdT5M4y+zeGiRYuMu+++u9cxgTqH3zwu1NfXG1FRUcbGjRu9Yz7//HNDkrF3795ut9Hfz62vQubMh8vlUmlpqebOnet9LCIiQnPnztXevXu7fc3evXu7jJek+fPn9zg+kDQ0NEiSEhMTex136dIljR07VpmZmVq0aJE+/fRTf5TXb19++aXS09M1fvx4LVu2TGfOnOlxbDDPn8vl0q9//Wv91V/9Va8/oBhs83elU6dOqaampssc2e125ebm9jhH/fkcB5KGhgZZLBYlJCT0Os6X97nZdu/erZSUFE2aNElPP/20Lly40OPYYJ+/2tpa/e53v9MTTzxxzbGBOIffPC6UlpaqtbW1y3xM
njxZWVlZPc5Hfz63/REy4eP8+fNyu91KTU3t8nhqaqpqamq6fU1NTY1P4wOFx+PRc889pzvuuEPTpk3rcdykSZP05ptv6r333tOvf/1reTwe3X777aqsrPRjtX2Xm5urt956Sx9++KFWr16tU6dO6c4771RjY2O344N1/iRp8+bNqq+v1+OPP97jmGCbv2/qnAdf5qg/n+NA0dLSohUrVmjp0qW9/liXr+9zMy1YsEC/+tWvtHPnTv34xz9WcXGxFi5cKLfb3e34YJ4/SXr77bcVHx9/zcsSgTiH3R0XampqFB0dfVUYvtZxsXNMX1/THz7/sBzMV1hYqGPHjl3zGmNeXp7y8vK892+//XZNmTJFr7/+un70ox8NdZk+W7hwofe/s7OzlZubq7Fjx+o3v/lNn/4lEkzeeOMNLVy4sNefng62+Qtnra2t+t73vifDMLR69epexwbT+/yRRx7x/vdNN92k7OxsTZgwQbt379acOXNMrGxovPnmm1q2bNk1G7sDcQ77elwIFCFz5iM5OVnDhg27qou3trZWaWlp3b4mLS3Np/GB4JlnntHWrVv10UcfKSMjw6fXRkVF6ZZbbtGJEyeGqLrBlZCQoBtuuKHHeoNx/iTp9OnT2rFjh/76r//ap9cF2/x1zoMvc9Sfz7HZOoPH6dOntX37dp9/ovxa7/NAMn78eCUnJ/dYazDOX6ff//73Ki8v9/lzKZk/hz0dF9LS0uRyuVRfX99l/LWOi51j+vqa/giZ8BEdHa2ZM2dq586d3sc8Ho927tzZ5V+PV8rLy+syXpK2b9/e43gzGYahZ555Rps2bdKuXbs0btw4n7fhdrt19OhRjR49eggqHHyXLl3SyZMne6w3mObvSmvWrFFKSoruu+8+n14XbPM3btw4paWldZkjh8Oh/fv39zhH/fkcm6kzeHz55ZfasWOHkpKSfN7Gtd7ngaSyslIXLlzosdZgm78rvfHGG5o5c6amT5/u82vNmsNrHRdmzpypqKioLvNRXl6uM2fO9Dgf/fnc9rf4kLF+/XrDarUab731lvHZZ58Zf/M3f2MkJCQYNTU1hmEYxl/8xV8Y//RP/+Qd//HHHxuRkZHGf/zHfxiff/658cILLxhRUVHG0aNHzdqFHj399NOG3W43du/ebVRXV3tvzc3N3jHf3L8XX3zR2LZtm3Hy5EmjtLTUeOSRR4yYmBjj008/NWMXrunv//7vjd27dxunTp0yPv74Y2Pu3LlGcnKyUVdXZxhGcM9fJ7fbbWRlZRkrVqy46rlgnL/Gxkbj0KFDxqFDhwxJxk9/+lPj0KFD3m97vPTSS0ZCQoLx3nvvGUeOHDEWLVpkjBs3zrh8+bJ3G3fffbfxi1/8wnv/Wp/jQNk/l8tlPPDAA0ZGRoZx+PDhLp9Lp9PZ4/5d630eKPvX2Nho/MM//IOxd+9e49SpU8aOHTuMGTNmGNdff73R0tLS4/4F0vwZxrXfo4ZhGA0NDUZsbKyxevXqbrcRqHPYl+PCU089ZWRlZRm7du0yDh48aOTl5Rl5eXldtjNp0iTj3Xff9d7vy+d2oEIqfBiGYfziF78wsrKyjOjoaCMnJ8fYt2+f97lvfetbxmOPPdZl/G9+8xvjhhtuMKKjo42pU6cav/vd7/xccd9I6va2Zs0a75hv7t9zzz3n/X+Rmppq3HvvvUZZWZn/i++jJUuWGKNHjzaio6ONMWPGGEuWLDFOnDjhfT6Y56/Ttm3bDElGeXn5Vc8F4/x99NFH3b4vO/fD4/EYP/zhD43U1FTDarUac+bMuWrfx44da7zwwgtdHuvtc+xPve3fqVOnevxcfvTRR95tfHP/rvU+96fe9q+5udmYN2+eMWrUKCMqKsoYO3as8eSTT14VIgJ5/gzj2u9RwzCM119/3Rg+fLhRX1/f7TYCdQ77cly4fPmy8Xd/93fGyJEjjdjYWOO73/2uUV1dfdV2rnxNXz63A2Xp+IMBAAD8ImR6PgAAQHAgfAAA
AL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL8ifAAAAL/6f0KiMXnG1t1eAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "u_0 = 16 # initial temperature\n", "u_heater_max = 40 # maximal temperature of the heater\n", @@ -87,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "6af0dba0-d481-4566-a8b7-244098eee713", "metadata": {}, "outputs": [], @@ -113,7 +124,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "de756b96-3c0d-42d7-a74d-db387f448426", "metadata": {}, "outputs": [], @@ -138,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "id": "d428cf7f-89ee-4f3f-a1bf-822b82550a7e", "metadata": {}, "outputs": [], @@ -158,16 +169,16 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "id": "e780f5fa-5ebf-4731-8568-77116ea039f6", "metadata": {}, "outputs": [], "source": [ "domain_initial_condition = Omega * I.boundary_left * A * P\n", - "sampler_initial_condition = tp.samplers.RandomUniformSampler(domain_initial_condition, 500)\n", + "sampler_initial_condition = tp.samplers.RandomUniformSampler(domain_initial_condition, 5000)\n", "\n", "domain_boundary_condition = Omega.boundary * I * A * P\n", - "sampler_boundary_condition = tp.samplers.RandomUniformSampler(domain_boundary_condition, 500)" + "sampler_boundary_condition = tp.samplers.RandomUniformSampler(domain_boundary_condition, 5000)" ] }, { @@ -182,7 +193,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "id": "c29f3f92-d613-470f-ab74-9369e071ea04", "metadata": {}, "outputs": [], @@ -202,7 +213,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "id": "65954de9-4c80-4d2a-be6e-0cd16ab82596", "metadata": {}, "outputs": [], @@ -222,7 +233,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "c97e8bfe-1580-4bb8-bb1b-d4c874ef6244", "metadata": {}, "outputs": [], @@ -242,7 +253,7 @@ }, { "cell_type": "code", - 
"execution_count": null, + "execution_count": 10, "id": "17d5e293-57bd-4739-9518-a014f6df2b79", "metadata": {}, "outputs": [], @@ -264,7 +275,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "4864c6ed-6f2b-4f80-bd6f-cd8ff3d8a809", "metadata": {}, "outputs": [], @@ -292,7 +303,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "bdef3d80-90e6-47aa-95ce-6d735fd03f36", "metadata": {}, "outputs": [], @@ -316,7 +327,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "id": "008c09a7-81f8-41b5-8c10-3892812740ad", "metadata": {}, "outputs": [], @@ -346,20 +357,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "bb76e892-bf53-4a01-adc5-74dddb770525", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "GPU available: True\n" + ] + } + ], "source": [ "import pytorch_lightning as pl\n", "import os\n", - "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" if torch.cuda.is_available() else \"0\"\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\" if torch.cuda.is_available() else \"0\"\n", "print (\"GPU available: \" + str(torch.cuda.is_available()))" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "60fb3653-7b2c-40cf-a19c-e82bc43ef0d2", "metadata": {}, "outputs": [], @@ -369,29 +388,89 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "857c00e3-07c8-45c5-bc14-cc4397b2d1d9", "metadata": {}, "outputs": [], "source": [ - "optim = tp.OptimizerSetting(optimizer_class=torch.optim.Adam, lr=0.025)\n", + "optim = tp.OptimizerSetting(optimizer_class=torch.optim.Adam, lr=1e-3)\n", "\n", "solver = tp.solver.Solver(train_conditions=training_conditions, optimizer_setting=optim)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "id": "818dd812-62c5-4bac-b8bf-c0d2da14a53c", "metadata": {}, 
- "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GPU available: True (cuda), used: True\n", + "TPU available: False, using: 0 TPU cores\n", + "HPU available: False, using: 0 HPUs\n", + "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [6]\n", + "\n", + " | Name | Type | Params | Mode \n", + "--------------------------------------------------------\n", + "0 | train_conditions | ModuleList | 11.9 K | train\n", + "1 | val_conditions | ModuleList | 0 | train\n", + "--------------------------------------------------------\n", + "11.9 K Trainable params\n", + "0 Non-trainable params\n", + "11.9 K Total params\n", + "0.048 Total estimated model params size (MB)\n", + "20 Modules in train mode\n", + "0 Modules in eval mode\n", + "/home/saktas/anaconda3/envs/deepsem/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:424: The 'train_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=254` in the `DataLoader` to improve performance.\n", + "/home/saktas/anaconda3/envs/deepsem/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:424: The 'val_dataloader' does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` to `num_workers=254` in the `DataLoader` to improve performance.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "14fd660dd2b447a9841d8200cad8c1d7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Training: | | 0/? 
[00:00" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "fig, animation = tp.utils.animate(model, lambda u : u, plot_sampler, ani_type='contour_surface', ani_speed=1)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "0a74056f-222e-4335-84b8-37ff8626af43", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "MovieWriter ffmpeg unavailable; using Pillow instead.\n" + ] + } + ], "source": [ "animation.save(f'animation_tut_2_a{a}_p{p}.gif')" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b5dc968f", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "deepsem", "language": "python", "name": "python3" }, @@ -483,7 +651,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.18" + "version": "3.8.20" } }, "nbformat": 4, diff --git a/src/torchphysics/models/FNO.py b/src/torchphysics/models/FNO.py index 53b0682e..ab274f29 100644 --- a/src/torchphysics/models/FNO.py +++ b/src/torchphysics/models/FNO.py @@ -123,7 +123,7 @@ class FNO(Model): of a spectral convolution with learnable kernels. See [1] for an overview of the model. Linear transformations and skip connections can be enabled in each layer as well. - hidden_channles : int + hidden_channels : int The number of hidden channels. fourier_modes : int or list, tuple The number of Fourier modes that will be used for the spectral convolution @@ -133,7 +133,7 @@ class FNO(Model): integers, such that in each layer a different amount of modes is used. In case of a N-dimensional space domain a list (or tuple) of N numbers must be passed in (Setting the modes for each direction), or again - a list of list containig each N numbers to vary the modes per layer. + a list of list containing each N numbers to vary the modes per layer. 
activations : torch.nn or list, tuple The activation function after each Fourier layer. Default is torch.nn.Tanh() @@ -155,7 +155,7 @@ class FNO(Model): Default is a linear mapping. positional_embedding : torchphysics.models.PositionalEmbedding or bool An additional embedding layer, which adds positional information to the input. - Default is True, adding an embedding given the shape of fourie modes. + Default is True, adding an embedding given the shape of fourier modes. xavier_gains : int or list, tuple For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. @@ -198,9 +198,14 @@ def __init__(self, input_space, output_space, fourier_layers : int, if isinstance(fourier_modes, int): fourier_modes = fourier_layers * [fourier_modes] - elif isinstance(fourier_modes, (list, tuple)): - if len(fourier_modes) < fourier_layers: - fourier_modes = fourier_layers * [fourier_modes] + elif all(isinstance(x, int) for x in fourier_modes): + fourier_modes = [list(fourier_modes) for _ in range(fourier_layers)] + elif all(isinstance(x, (list, tuple)) for x in fourier_modes): + if len(fourier_modes) != fourier_layers: + raise ValueError( + "Number of layer mode specifications must match fourier_layers" + ) + fourier_modes = [list(m) for m in fourier_modes] else: raise ValueError(f"Invalid input for fourier modes") @@ -224,7 +229,7 @@ def __init__(self, input_space, output_space, fourier_layers : int, else: if positional_embedding is not None: print("Note: Positional embedding is used, make sure that the network for " \ - f"lifiting (channel up sampling) expects inputs of size {in_channels+positional_embedding.dim}.") + f"lifting (channel up sampling) expects inputs of size {in_channels+positional_embedding.dim}.") self.channel_up_sampling = channel_up_sample_network # combine embedding with up_sampling layer: diff --git a/src/torchphysics/models/PCANN.py b/src/torchphysics/models/PCANN.py index d1f28bbf..fbd6f6ba 100644 --- 
a/src/torchphysics/models/PCANN.py +++ b/src/torchphysics/models/PCANN.py @@ -35,7 +35,7 @@ class PCANN(Model): Notes ----- - The default implemwentation flatten the input and output data along all + The default implementation flatten the input and output data along all dimensions except the first one. .. [1] Kaushik Bhattacharya et al., "Model Reduction And Neural Networks For @@ -150,7 +150,7 @@ class PCANN_FC(PCANN): have been applied. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. E.g hidden = (10, 5) -> 2 layers, with 10 and 5 neurons. @@ -158,7 +158,7 @@ class PCANN_FC(PCANN): The activation functions of this network. If a single function is passed as an input, will use this function for each layer. If a list is used, will use the i-th entry for i-th layer. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. @@ -203,7 +203,7 @@ def from_fn_set(cls, data. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. E.g. hidden = (10, 5) -> 2 layers, with 10 and 5 neurons. @@ -211,7 +211,7 @@ def from_fn_set(cls, The activation functions of this network. If a single function is passed as an input, will use this function for each layer. If a list is used, will use the i-th entry for i-th layer. - Deafult is nn.Tanh(). + Default is nn.Tanh(). 
xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. diff --git a/src/torchphysics/models/__init__.py b/src/torchphysics/models/__init__.py index b3f95b6a..4a43b55e 100644 --- a/src/torchphysics/models/__init__.py +++ b/src/torchphysics/models/__init__.py @@ -13,7 +13,7 @@ from .parameter import Parameter from .model import (Model, NormalizationLayer, AdaptiveWeightLayer, Sequential, Parallel, HardConstraint) -from .fcn import FCN, Harmonic_FCN, Polynomial_FCN +from .fcn import FCN, Harmonic_FCN, Polynomial_FCN, Operator_FCN from .deepritz import DeepRitzNet from .qres import QRES from .activation_fn import AdaptiveActivationFunction, ReLUn, Sinus diff --git a/src/torchphysics/models/activation_fn.py b/src/torchphysics/models/activation_fn.py index 44489797..d0d51cc9 100644 --- a/src/torchphysics/models/activation_fn.py +++ b/src/torchphysics/models/activation_fn.py @@ -12,8 +12,8 @@ class AdaptiveActivationFunction(nn.Module): ---------- activation_fn : torch.nn.module The underlying function that should be used for the activation. - inital_a : float, optional - The inital value for the adaptive parameter a. Changes the 'slop' + initial_a : float, optional + The initial value for the adaptive parameter a. Changes the 'slop' of the underlying function. 
Default is 1.0 scaling : float, optional An additional scaling factor, such that the 'a' only has to learn only @@ -26,10 +26,10 @@ class AdaptiveActivationFunction(nn.Module): physics-informed neural networks", 2020 """ - def __init__(self, activation_fn, inital_a=1.0, scaling=1.0): + def __init__(self, activation_fn, initial_a=1.0, scaling=1.0): super().__init__() self.activation_fn = activation_fn - self.a = nn.Parameter(torch.tensor(inital_a)) + self.a = nn.Parameter(torch.tensor(initial_a)) self.scaling = scaling def forward(self, x): @@ -62,7 +62,7 @@ class ReLUn(nn.Module): Parameters ---------- n : float - The power to which the inputs should be rasied before appplying the + The power to which the inputs should be raised before applying the rectified linear unit function. """ diff --git a/src/torchphysics/models/deeponet/branchnets.py b/src/torchphysics/models/deeponet/branchnets.py index 2d66bab8..9eec540d 100644 --- a/src/torchphysics/models/deeponet/branchnets.py +++ b/src/torchphysics/models/deeponet/branchnets.py @@ -90,7 +90,7 @@ def fix_input(self, function, device="cpu"): ---------- function : callable, torchphysics.domains.FunctionSet, torch.Tensor, torchphysics.spaces.Points - The function(s) for which the network should be evaluaded. + The function(s) for which the network should be evaluated. device : str, optional The device where the data lays. Default is 'cpu'. @@ -147,12 +147,12 @@ class FCBranchNet(BranchNet): times the function space dimension. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. activations : torch.nn or list, optional The activation functions of this network. - Deafult is nn.Tanh(). + Default is nn.Tanh(). 
xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. Default is 5/3. @@ -217,12 +217,12 @@ class ConvBranchNet(BranchNet): hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. activations : torch.nn or list, optional The activation functions of this network. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. Default is 5/3. diff --git a/src/torchphysics/models/deeponet/deeponet.py b/src/torchphysics/models/deeponet/deeponet.py index 9f32b254..f85d6515 100644 --- a/src/torchphysics/models/deeponet/deeponet.py +++ b/src/torchphysics/models/deeponet/deeponet.py @@ -77,7 +77,7 @@ def forward(self, trunk_inputs=None, branch_inputs=None, device="cpu"): The inputs for the trunk net. If no input is passed in, the default values from the trunk net are used. branch_inputs : callable, torchphysics.domains.FunctionSet, optional - The function(s) for which the branch should be evaluaded. If no + The function(s) for which the branch should be evaluated. If no input is given, the branch net has to be fixed before hand! device : str, optional The device where the data lays. Default is 'cpu'. @@ -121,7 +121,7 @@ def fix_branch_input(self, function, device="cpu"): Parameters ---------- function : callable, torchphysics.domains.FunctionSet - The function(s) for which the branch should be evaluaded. + The function(s) for which the branch should be evaluated. device : str, optional The device where the data lays. Default is 'cpu'. 
""" diff --git a/src/torchphysics/models/deeponet/trunknets.py b/src/torchphysics/models/deeponet/trunknets.py index 0b5a2e65..ceb2eb73 100644 --- a/src/torchphysics/models/deeponet/trunknets.py +++ b/src/torchphysics/models/deeponet/trunknets.py @@ -17,7 +17,7 @@ class TrunkNet(Model): The default input for the trunk net if no other values are specified. This default is used when the DeepONet only should be evaluated for new branch inputs, but the trunk input stays fixed. Shape should be of the form - (N_batch, ..., dimension of input space), here ... can be abritrary many dimensions. + (N_batch, ..., dimension of input space), here ... can be arbitrary many dimensions. If trunk_input_copied=True, N_batch needs to be equal to 1. trunk_input_copied : bool, optional If every sample function of the branch input gets evaluated at the same trunk input, @@ -94,13 +94,20 @@ class FCTrunkNet(TrunkNet): input_space : Space The space of the points the can be put into this model. hidden : list or tuple + default_trunk_input : tp.spaces.Points, torch.tensor + The default input for the trunk net if no other values are specified. + This default is used when the DeepONet only should be evaluated for new + branch inputs, but the trunk input stays fixed. Shape should be of the form + (N_batch, ..., dimension of input space), here ... can be arbitrary many dimensions. + If trunk_input_copied=True, N_batch needs to be equal to 1. + trunk_input_copied : bool, optional The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. activations : torch.nn or list, optional The activation functions of this network. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. 
Default is 5/3. diff --git a/src/torchphysics/models/fcn.py b/src/torchphysics/models/fcn.py index 6267ce3b..f5f23a76 100644 --- a/src/torchphysics/models/fcn.py +++ b/src/torchphysics/models/fcn.py @@ -40,7 +40,7 @@ class FCN(Model): The space of the points returned by this model. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. E.g hidden = (10, 5) -> 2 layers, with 10 and 5 neurons. @@ -48,7 +48,7 @@ class FCN(Model): The activation functions of this network. If a single function is passed as an input, will use this function for each layer. If a list is used, will use the i-th entry for i-th layer. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. @@ -104,22 +104,22 @@ class Harmonic_FCN(Model): The space of the points returned by this model. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. E.g hidden = (10, 5) -> 2 layers, with 10 and 5 neurons. max_frequenz : int - The highest frequenz that should be used in the input computation. - Equal to :math:`n` in the above describtion. + The highest frequency that should be used in the input computation. + Equal to :math:`n` in the above description. min_frequenz : int - The smallest frequenz that should be used. Usefull, if it is expected, that - only higher frequenzies appear in the solution. + The smallest frequency that should be used. 
Useful, if it is expected, that + only higher frequencies appear in the solution. Default is 0. activations : torch.nn or list, optional The activation functions of this network. If a single function is passed as an input, will use this function for each layer. If a list is used, will use the i-th entry for i-th layer. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. @@ -221,3 +221,45 @@ def _build_one_layer(self, input_dim, output_dim, polynomial_degree, xavier_gain in_layer = torch.empty((input_dim, output_dim, polynomial_degree)) self.layers.append(nn.Parameter(in_layer)) torch.nn.init.xavier_normal_(self.layers[-1], gain=xavier_gain) + + +class Operator_FCN(Model): + """A simple fully connected neural network used for operator learning. + + Parameters + ---------- + """ + + def __init__( + self, + input_fn_space, + output_fn_space, + in_discretization, + out_discretization, + hidden=(20, 20, 20), + activations=nn.Tanh(), + xavier_gains=5/3, + activation_fn_output=None, + ): + super().__init__(input_fn_space.output_space, output_fn_space.output_space) + + layers = _construct_FC_layers( + hidden=hidden, + input_dim=in_discretization*input_fn_space.output_space.dim, + output_dim=out_discretization*output_fn_space.output_space.dim, + activations=activations, + xavier_gains=xavier_gains, + ) + + self.out_dis = out_discretization + + if not activation_fn_output is None: + layers.append(activation_fn_output) + + self.sequential = nn.Sequential(*layers) + + def forward(self, points): + points = self._fix_points_order(points).as_tensor + model_out = self.sequential(points.flatten(start_dim=-2)) + return Points(torch.unflatten(model_out, dim=-1, sizes=(self.out_dis, self.output_space.dim)), + self.output_space) \ No newline at end of file diff --git a/src/torchphysics/models/model.py b/src/torchphysics/models/model.py 
index f55b6945..598b96a2 100644 --- a/src/torchphysics/models/model.py +++ b/src/torchphysics/models/model.py @@ -130,7 +130,7 @@ class Sequential(Model): *models : The models that should be evaluated sequentially. The evaluation happens in the order that the models are passed in. - To work correcty the output of the i-th model has to fit the input + To work correctly the output of the i-th model has to fit the input of the i+1-th model. """ diff --git a/src/torchphysics/models/parameter.py b/src/torchphysics/models/parameter.py index 5d385889..f70cf3ec 100644 --- a/src/torchphysics/models/parameter.py +++ b/src/torchphysics/models/parameter.py @@ -9,7 +9,7 @@ class Parameter(Points): Parameters ---------- init : number, list, array or tensor - The inital guess for the parameter. + The initial guess for the parameter. space : torchphysics.problem.spaces.Space The Space to which this parameter belongs. Essentially defines the shape of the parameter, e.g for a single number use R1. @@ -20,7 +20,7 @@ class Parameter(Points): condition. If many different parameters are used they have to be connected over .join(), see the Points-Class for the exact usage. - If the domains itself should depend on some parameters or the solution sholud be + If the domains itself should depend on some parameters or the solution should be learned for different parameter values, this class should NOT be used. These parameters are mostly meant for inverse problems. Instead, the parameters have to be defined with their own domain and samplers. diff --git a/src/torchphysics/models/qres.py b/src/torchphysics/models/qres.py index 27b22b0d..9b5dfdfc 100644 --- a/src/torchphysics/models/qres.py +++ b/src/torchphysics/models/qres.py @@ -7,7 +7,7 @@ class Quadratic(nn.Module): """Implements a quadratic layer of the form: W_1*x (*) W_2*x + W_1*x + b. - Here (*) means the hadamard product of two vectors (elementwise multiplication). 
+ Here (*) means the hadamard product of two vectors (element-wise multiplication). W_1, W_2 are weight matrices and b is a bias vector. Parameters @@ -55,7 +55,7 @@ class QRES(Model): """Implements the quadratic residual networks from [#]_. Instead of a linear layer, a quadratic layer W_1*x (*) W_2*x + W_1*x + b will be used. Here (*) means the hadamard product of two vectors - (elementwise multiplication). + (element-wise multiplication). Parameters ---------- @@ -65,7 +65,7 @@ class QRES(Model): The space of the points returned by this model. hidden : list or tuple The number and size of the hidden layers of the neural network. - The lenght of the list/tuple will be equal to the number + The length of the list/tuple will be equal to the number of hidden layers, while the i-th entry will determine the number of neurons of each layer. E.g hidden = (10, 5) -> 2 layers, with 10 and 5 neurons. @@ -73,7 +73,7 @@ class QRES(Model): The activation functions of this network. If a single function is passed as an input, will use this function for each layer. If a list is used, will use the i-th entry for i-th layer. - Deafult is nn.Tanh(). + Default is nn.Tanh(). xavier_gains : float or list, optional For the weight initialization a Xavier/Glorot algorithm will be used. The gain can be specified over this value. 
diff --git a/src/torchphysics/problem/conditions/condition.py b/src/torchphysics/problem/conditions/condition.py index c284baab..b181671b 100644 --- a/src/torchphysics/problem/conditions/condition.py +++ b/src/torchphysics/problem/conditions/condition.py @@ -123,6 +123,7 @@ def __init__( use_full_dataset=False, name="datacondition", constrain_fn=None, + root=1.0, weight=1.0, epsilon=1e-8, ): @@ -134,6 +135,7 @@ def __init__( self.use_full_dataset = use_full_dataset self.constrain_fn = constrain_fn self.epsilon = epsilon + self.root = root if self.constrain_fn: self.constrain_fn = UserFunction(self.constrain_fn) @@ -152,14 +154,14 @@ def _compute_dist(self, batch, device): y_norm = torch.max(torch.abs(y.as_tensor), dim=list(range(1, len(model_out.shape)))) + self.epsilon out = out_norm / y_norm else: - out_norm = torch.norm(model_out - y.as_tensor, p=self.norm, dim=list(range(1, len(model_out.shape)))) - y_norm = torch.norm(y.as_tensor, p=self.norm, dim=list(range(1, len(model_out.shape)))) + self.epsilon + out_norm = torch.sum(torch.abs(model_out - y.as_tensor)**self.norm, dim=list(range(1, len(model_out.shape)))) + y_norm = torch.sum(torch.abs(y.as_tensor)**self.norm, dim=list(range(1, len(model_out.shape)))) + self.epsilon out = out_norm / y_norm else: if self.norm == "inf": out = torch.abs(model_out - y.as_tensor) else: - out = torch.norm(model_out - y.as_tensor, p=self.norm, dim=list(range(1, len(model_out.shape)))) + out = torch.sum(torch.abs(model_out - y.as_tensor)**self.norm, dim=list(range(1, len(model_out.shape)))) return out def forward(self, device="cpu", iteration=None): @@ -170,7 +172,10 @@ def forward(self, device="cpu", iteration=None): if self.norm == "inf": loss = torch.maximum(loss, torch.max(a)) else: - loss = loss + torch.mean(a) / len(self.dataloader) + a_mean = torch.mean(a) + if self.root != 1.0: + a_mean = a_mean**(1/self.root) + loss = loss + a_mean / len(self.dataloader) else: try: batch = next(self.iterator) @@ -182,6 +187,8 @@ def 
forward(self, device="cpu", iteration=None): loss = torch.max(a) else: loss = torch.mean(a) + if self.root != 1.0: + loss = loss**(1.0/self.root) return loss diff --git a/src/torchphysics/problem/conditions/operator_condition.py b/src/torchphysics/problem/conditions/operator_condition.py index 23dd3581..16a4891f 100644 --- a/src/torchphysics/problem/conditions/operator_condition.py +++ b/src/torchphysics/problem/conditions/operator_condition.py @@ -24,7 +24,7 @@ class OperatorCondition(Condition): Whether to compute the relative error (i.e. error / target) or absolute error. Default is True, hence, the relative error is used. error_fn : callable, optional - the function used to compute the final loss. E.g., the squarred error or + the function used to compute the final loss. E.g., the squared error or any other norm. reduce_fn : callable, optional Function that will be applied to reduce the loss to a scalar. Defaults to diff --git a/src/torchphysics/problem/domains/domain.py b/src/torchphysics/problem/domains/domain.py index ae89fba1..795b96e3 100644 --- a/src/torchphysics/problem/domains/domain.py +++ b/src/torchphysics/problem/domains/domain.py @@ -37,7 +37,7 @@ def set_necessary_variables(self, *domain_params): def transform_to_user_functions(self, *domain_params): """Transforms all parameters that define a given domain to - a UserFunction. This enables that the domain can dependt on other variables. + a UserFunction. This enables that the domain can dependent on other variables. Parameters ---------- @@ -76,7 +76,7 @@ def set_volume(self, volume): ----- For all basic domains the volume (and surface) are implemented. But if the given domain has a complex shape or is - dependent on other variables, the volume can only be approixmated. + dependent on other variables, the volume can only be approximated. Therefore one can set here a exact expression for the volume, if known. 
""" self._user_volume = DomainUserFunction(volume) @@ -91,7 +91,7 @@ def volume(self, params=Points.empty(), device="cpu"): Parameters ---------- params : torchphysics.problem.Points, optional - Additional paramters that are needed to evaluate the domain. + Additional parameters that are needed to evaluate the domain. Returns ------- @@ -211,7 +211,7 @@ def bounding_box(self, params=Points.empty(), device="cpu"): @abc.abstractmethod def sample_grid(self, n=None, d=None, params=Points.empty(), device="cpu"): - """Creates an equdistant grid in the domain. + """Creates an equidistant grid in the domain. Parameters ---------- @@ -221,7 +221,7 @@ def sample_grid(self, n=None, d=None, params=Points.empty(), device="cpu"): The density of points that should be created, if n is not defined. params : torchphysics.problem.Points, optional - Additional paramters that are maybe needed to evaluate the domain. + Additional parameters that are maybe needed to evaluate the domain. device : str The device on which the points should be created. Default is 'cpu'. @@ -247,7 +247,7 @@ def sample_random_uniform( The density of points that should be created, if n is not defined. params : torchphysics.problem.Points, optional - Additional paramters that are maybe needed to evaluate the domain. + Additional parameters that are maybe needed to evaluate the domain. device : str The device on which the points should be created. Default is 'cpu'. 
diff --git a/src/torchphysics/problem/domains/domain0D/point.py b/src/torchphysics/problem/domains/domain0D/point.py index 85d1a67c..6db9c59f 100644 --- a/src/torchphysics/problem/domains/domain0D/point.py +++ b/src/torchphysics/problem/domains/domain0D/point.py @@ -61,7 +61,7 @@ def _bounds_for_higher_dimensions(self, device="cpu"): bounds = [] for i in range(self.space.dim): p = self.point.fun[i] - # substract/add a value to get a real bounding box, + # subtract/add a value to get a real bounding box, # important if we later use these values to normalize the input bounds.append(p - self.bounding_box_tol) bounds.append(p + self.bounding_box_tol) diff --git a/src/torchphysics/problem/domains/domain1D/interval.py b/src/torchphysics/problem/domains/domain1D/interval.py index 2afe0fac..a7b3ae20 100644 --- a/src/torchphysics/problem/domains/domain1D/interval.py +++ b/src/torchphysics/problem/domains/domain1D/interval.py @@ -80,14 +80,14 @@ def boundary(self): @property def boundary_left(self): - """Returns only the left boundary value, useful for the definintion - of inital conditions. + """Returns only the left boundary value, useful for the definition + of initial conditions. """ return IntervalSingleBoundaryPoint(self, side=self.lower_bound) @property def boundary_right(self): - """Returns only the left boundary value, useful for the definintion + """Returns only the left boundary value, useful for the definition of end conditions. """ return IntervalSingleBoundaryPoint(self, side=self.upper_bound, normal_vec=1) diff --git a/src/torchphysics/problem/domains/domain2D/shapely_polygon.py b/src/torchphysics/problem/domains/domain2D/shapely_polygon.py index 19ffab0c..afe3ccae 100644 --- a/src/torchphysics/problem/domains/domain2D/shapely_polygon.py +++ b/src/torchphysics/problem/domains/domain2D/shapely_polygon.py @@ -15,7 +15,7 @@ class ShapelyPolygon(Domain): space : Space The space in which this object lays. 
vertices : list of lists, optional - The corners/vertices of the polygon. Can be eihter in clockwise or counter- + The corners/vertices of the polygon. Can be either in clockwise or counter- clockwise order. shapely_polygon : shapely.geometry.Polygon, optional Instead of defining the corner points, it is also possible to give a already @@ -90,7 +90,7 @@ def _sample_in_triangulation(self, t, n, device): if n > 0: new_points = self._random_points_in_triangle(n, corners, device) # when the polygon has holes or is non convex, it can happen - # that the triangle is not completly in the polygon + # that the triangle is not completely in the polygon if not t.within(self.polygon): inside = self._contains(new_points) index = torch.where(inside)[0] @@ -247,7 +247,7 @@ def _distribute_line_to_boundary( side_length = torch.linalg.norm(corners[1] - corners[0]) while index < len(line_points): if line_points[index] <= current_length + side_length: - point = self._translate_point_to_bondary( + point = self._translate_point_to_boundary( index, line_points, corners, @@ -267,7 +267,7 @@ def _distribute_line_to_boundary( ) return points, index, current_length - def _translate_point_to_bondary( + def _translate_point_to_boundary( self, index, line_points, corners, current_length, corner_index, side_length ): coord = line_points[index] - current_length diff --git a/src/torchphysics/problem/domains/domain2D/triangle.py b/src/torchphysics/problem/domains/domain2D/triangle.py index 0db2b97d..b531bfde 100644 --- a/src/torchphysics/problem/domains/domain2D/triangle.py +++ b/src/torchphysics/problem/domains/domain2D/triangle.py @@ -140,7 +140,7 @@ def sample_grid(self, n=None, d=None, params=Points.empty(), device="cpu"): return Points(points, self.space) def _compute_barycentric_grid(self, n, dir_1, dir_2, device): - # since we have a triangle, we rmove later the points with bary.- + # since we have a triangle, we remove later the points with bary.- # coordinates bigger one. 
Therefore double the number of points: scaled_n = 2 * n side_length_1 = torch.linalg.norm(dir_1, dim=1) diff --git a/src/torchphysics/problem/domains/domainoperations/intersection.py b/src/torchphysics/problem/domains/domainoperations/intersection.py index 65eafb41..24558d65 100644 --- a/src/torchphysics/problem/domains/domainoperations/intersection.py +++ b/src/torchphysics/problem/domains/domainoperations/intersection.py @@ -128,7 +128,7 @@ def _contains(self, points, params=Points.empty()): def _get_volume(self, params=Points.empty(), device="cpu"): warnings.warn( """Exact volume of this intersection-boundary is not known, - will use the estimate: volume = boundary_a + bounadry_b. + will use the estimate: volume = boundary_a + boundary_b. If you need the exact volume for sampling, use domain.set_volume()""" ) diff --git a/src/torchphysics/problem/domains/domainoperations/product.py b/src/torchphysics/problem/domains/domainoperations/product.py index 0b294021..494bb2cd 100644 --- a/src/torchphysics/problem/domains/domainoperations/product.py +++ b/src/torchphysics/problem/domains/domainoperations/product.py @@ -31,7 +31,7 @@ def __init__(self, domain_a, domain_b): if not self.domain_a.space.keys().isdisjoint(self.domain_b.space): warnings.warn( """Warning: The space of a ProductDomain will be the product - of its factor domains spaces. This may lead to unexpected behaviour.""" + of its factor domains spaces. This may lead to unexpected behavior.""" ) # check dependencies, so that at most domain_a needs variables of domain_b self._check_variable_dependencies() @@ -100,7 +100,7 @@ def _create_point_data(self, space, data): @property def boundary(self): # Domain object of the boundary - # TODO: implement a seperate class for this for normals etc. + # TODO: implement a separate class for this for normals etc. 
boundary_1 = ProductDomain(self.domain_a.boundary, self.domain_b) boundary_2 = ProductDomain(self.domain_a, self.domain_b.boundary) return UnionDomain(boundary_1, boundary_2) @@ -116,7 +116,7 @@ def set_bounding_box(self, bounds): Parameters ---------- bounds : list - The bounding box of the domain. Whereby the lenght of the list + The bounding box of the domain. Whereby the length of the list has to be two times the domain dimension. And the bounds need to be in the following order: [min_axis_1, max_axis_1, min_axis_2, max_axis_2, ...] """ @@ -124,7 +124,7 @@ def set_bounding_box(self, bounds): self.bounds = bounds def bounding_box(self, params=Points.empty(), device="cpu"): - if self.bounds: + if self.bounds is not None: return self.bounds elif self._is_constant or self.domain_b.space in params.space: # if the domain is constant or additional data for domain a is given @@ -134,11 +134,11 @@ def bounding_box(self, params=Points.empty(), device="cpu"): bounds_a = torch.cat((bounds_a, bounds_b)) else: # we have to sample some points in b, and approx the bounds. warnings.warn( - f"""The bounding box of the ProductDomain dependens of the - values of domain_b. Therefor will sample - {N_APPROX_VOLUME} in domain_b, to compute a - approixmation. If the bounds a known exactly, set - them with .set_bounds().""" + f"""The bounding box of the ProductDomain depends of the + values of domain_b. Therefore will sample + {N_APPROX_VOLUME} in domain_b, to compute an + approximation. If the bounds are known exactly, set + them with .set_bounding_box().""" ) bounds_b = self.domain_b.bounding_box(params, device=device) b_points = self.domain_b.sample_random_uniform( @@ -209,7 +209,7 @@ def avg_volume(local_params): def sample_grid(self, n=None, d=None, params=Points.empty(), device="cpu"): raise NotImplementedError( - """Grid sampling on a product domain is not implmented. Use a product sampler + """Grid sampling on a product domain is not implemented. 
Use a product sampler instead.""" ) diff --git a/src/torchphysics/problem/domains/domainoperations/union.py b/src/torchphysics/problem/domains/domainoperations/union.py index f5c0c448..89750811 100644 --- a/src/torchphysics/problem/domains/domainoperations/union.py +++ b/src/torchphysics/problem/domains/domainoperations/union.py @@ -66,7 +66,7 @@ def sample_random_uniform( ): if n: return self._sample_random_with_n(n, params, device) - # esle d not None + # else d not None return self._sample_random_with_d(d, params, device) def _sample_random_with_n(self, n, params=Points.empty(), device="cpu"): @@ -195,9 +195,9 @@ def _get_volume(self, params=Points.empty(), device="cpu"): if not self.domain.disjoint: warnings.warn( """Exact volume of this domain is not known, will use the - estimate: volume = domain_a.volume + domain_b.volume. - If you need the exact volume for sampling, - use domain.set_volume()""" + estimate: volume = domain_a.volume + domain_b.volume. + If you need the exact volume for sampling, + use domain.set_volume()""" ) volume_a = self.domain.domain_a.boundary.volume(params, device=device) volume_b = self.domain.domain_b.boundary.volume(params, device=device) diff --git a/src/torchphysics/problem/domains/functionsets/data_functionset.py b/src/torchphysics/problem/domains/functionsets/data_functionset.py index 0b99bdc0..48b16856 100644 --- a/src/torchphysics/problem/domains/functionsets/data_functionset.py +++ b/src/torchphysics/problem/domains/functionsets/data_functionset.py @@ -6,7 +6,7 @@ class DataFunctionSet(DiscreteFunctionSet): """FunctionSet that is created from a given data set. - This function set is always a discret set, since the data can not + This function set is always a discrete set, since the data can not be evaluated at arbitrary points. 
Parameters diff --git a/src/torchphysics/problem/domains/functionsets/functionset.py b/src/torchphysics/problem/domains/functionsets/functionset.py index e622f25b..3fbfd061 100644 --- a/src/torchphysics/problem/domains/functionsets/functionset.py +++ b/src/torchphysics/problem/domains/functionsets/functionset.py @@ -5,7 +5,7 @@ integer_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int, torch.int64, torch.long] class FunctionSet(): - """ A function set describes a specfic type of functions that can be used + """ A function set describes a specific type of functions that can be used for creating data for training different operator approaches. Parameters @@ -172,19 +172,19 @@ def __add__(self, other): return FunctionSetAdd(self.function_space, [self, other]) def __sub__(self, other): - """ Performs the "pointwise" substraction of two function sets. + """ Performs the "pointwise" subtraction of two function sets. Parameters ---------- - The other function set that should be substracted from this one. + The other function set that should be subtracted from this one. Returns ------- - tp.domains.FunctionSetSubstract + tp.domains.FunctionSetSubtract The function sets that computes the difference of the inputs. """ - from .functionset_operations import FunctionSetSubstract - return FunctionSetSubstract(self.function_space, [self, other]) + from .functionset_operations import FunctionSetSubtract + return FunctionSetSubtract(self.function_space, [self, other]) def _transform_locations(self, locations): # TODO: Improve this for general location shapes @@ -232,10 +232,10 @@ def compute_pca(self, Parameters ---------- components : int - The number of components that should be keeped in the PCA. + The number of components that should used in the PCA. normalize_data : bool, optional If the data of the function set should be normalized before the - PCA is computed (recommented). Default is true. + PCA is computed (recommended). Default is true. 
Note, the normalization is only applied during this method and not saved afterwards, therefore the underlying data in this function set is **not** modified! @@ -254,7 +254,7 @@ def compute_pca(self, @property def principal_components(self): """ Returns the principal components of this function set. - It is requiered to first call 'compute_pca' to compute them and set + It is required to first call 'compute_pca' to compute them and set a number n of the used components. Returns @@ -379,6 +379,6 @@ def __mul__(self, other): return FunctionSetProduct(self.function_space*other.function_space, [self, other]) else: Warning(f"""DiscretizedFunctionSet is multiplied with a continuous FunctionSet. - The continuous FunctionSet will be discrtized to create the product.""") + The continuous FunctionSet will be discretized to create the product.""") other_discrete = other.discretize(self.locations) return FunctionSetProduct(self.function_space*other.function_space, [self, other_discrete]) diff --git a/src/torchphysics/problem/domains/functionsets/functionset_operations.py b/src/torchphysics/problem/domains/functionsets/functionset_operations.py index 7a3db77e..c2e0ba5c 100644 --- a/src/torchphysics/problem/domains/functionsets/functionset_operations.py +++ b/src/torchphysics/problem/domains/functionsets/functionset_operations.py @@ -249,9 +249,9 @@ def __add__(self, other): self.function_sets + [other]) -class FunctionSetSubstract(FunctionSetArithmetics): +class FunctionSetSubtract(FunctionSetArithmetics): """ - A class handling the pointwise substraction of two sets. + A class handling the pointwise subtraction of two sets. 
""" def arithmetic_function(self, data): # data will be always only two different function sets @@ -259,7 +259,7 @@ def arithmetic_function(self, data): return Points(output, self.function_space.output_space) def discretize(self, locations): - return FunctionSetSubstract(self.function_space, + return FunctionSetSubtract(self.function_space, [f.discretize(locations) for f in self.function_sets]) diff --git a/src/torchphysics/problem/domains/functionsets/grf_functionset.py b/src/torchphysics/problem/domains/functionsets/grf_functionset.py index ec8d5663..63512b7e 100644 --- a/src/torchphysics/problem/domains/functionsets/grf_functionset.py +++ b/src/torchphysics/problem/domains/functionsets/grf_functionset.py @@ -11,8 +11,8 @@ class GRFFunctionSet(DiscreteFunctionSet): Parameters ---------- resolution : int, tuple, list - The resolution of the gausian random field. For higher dimensional - fields a tuple or list must be passed in setting the resoultion + The resolution of the Gaussian random field. For higher dimensional + fields a tuple or list must be passed in setting the resolution for each dimension. Each resolution needs to be even. auto_cov_fn : callable, optional The function describing the correlation between the points in the diff --git a/src/torchphysics/problem/domains/functionsets/harmonic_functionset.py b/src/torchphysics/problem/domains/functionsets/harmonic_functionset.py index 96900a67..7aa92026 100644 --- a/src/torchphysics/problem/domains/functionsets/harmonic_functionset.py +++ b/src/torchphysics/problem/domains/functionsets/harmonic_functionset.py @@ -75,7 +75,7 @@ class HarmonicFunctionSet2D(HarmonicFunctionSet1D): The number of functions in the set. This sets how many a_i, b_i are created at once. period : list or tuple - The length of the underyling domain in each space direction. + The length of the underlying domain in each space direction. max_frequence : list or tuple The maximum frequence of the functions in each space direction. 
random_sample_fn : callable, optional diff --git a/src/torchphysics/problem/samplers/data_samplers.py b/src/torchphysics/problem/samplers/data_samplers.py index 0ad18fb5..594b66b8 100644 --- a/src/torchphysics/problem/samplers/data_samplers.py +++ b/src/torchphysics/problem/samplers/data_samplers.py @@ -17,33 +17,60 @@ class DataSampler(PointSampler): The data points that this data sampler should pass to a condition. Either already a torchphysics.spaces.points object or in form of dictionary like: {'x': tensor_for_x, 't': tensor_for_t, .....}. - For the dicitionary all tensor need to have the same batch dimension. + For the dictionary all tensor need to have the same batch dimension. + n_points : int, optional + The number of points that should be sampled from the data. + If n_points < len(points) we do batch sampling, where the first + **sample_points** call will return the first n_points, the second call + the next n_points, and so on. At the end the we wrap around and + starts at the beginning of the points again. If n_points is not a + perfect divisor of the data set length, the last batch of points + will be of shorter length """ - def __init__(self, points): + def __init__(self, points, n_points=-1): if isinstance(points, Points): self.points = points elif isinstance(points, dict): self.points = Points.from_coordinates(points) else: raise TypeError("points should be one of Points or dict.") - n = len(self.points.as_tensor) - super().__init__(n_points=n) + + self.total_len = len(self.points.as_tensor) + if n_points < 1: + n_points = self.total_len + elif n_points > self.total_len: + print(f"""Sampling number was set to {n_points} while only {self.total_len} data points are available. 
+ Will sample {self.total_len} points whenever called!""") + n_points = self.total_len + super().__init__(n_points=n_points) + self.current_idx = 0 def __len__(self): - return self.points._t.shape[-2] + return self.n_points def sample_points(self, params=Points.empty(), device="cpu"): self.points = self.points.to(device) - # If sampler not coupled to other samplers or parameters - # we can return: + # Take a batch of points + return_points = self.points[ + self.current_idx * self.n_points : min(self.total_len, (self.current_idx+1) * self.n_points) + ] + if (self.current_idx+1) * self.n_points >= self.total_len: + self.current_idx = 0 + else: + self.current_idx += 1 + + # If sampler not coupled to other samplers or parameters, we can return: if params.isempty: - return self.points + return return_points + repeated_params = self._repeat_params(params, len(self)) + repeated_points = return_points.repeat(len(params)) + return repeated_points.join(repeated_params) # Maybe given data has more dimensions than batch and space # (For example evaluation on quadrature points) - # TODO: Make more general. What happends when parameters have higher dimension? + # TODO: Make more general. What happens when parameters have higher dimension? # What when multiple dimension in both that do not fit? 
# start_time = time.time() # if len(self.points.as_tensor.shape) > 2: @@ -56,7 +83,7 @@ def sample_points(self, params=Points.empty(), device="cpu"): # repeated_params = Points(repeated_tensor, params.space) # print("Dimension thing took", time.time() - start_time) - # # else we have to repeat data (meshgrid of both) and join the tensors together: + # # else we have to repeat data (mesh grid of both) and join the tensors together: # start_time = time.time() # repeated_params = self._repeat_params(repeated_params, len(self)) # print("Repeating params took", time.time() - start_time) diff --git a/src/torchphysics/problem/samplers/function_sampler.py b/src/torchphysics/problem/samplers/function_sampler.py index 8c1117e1..2f72fd02 100644 --- a/src/torchphysics/problem/samplers/function_sampler.py +++ b/src/torchphysics/problem/samplers/function_sampler.py @@ -13,7 +13,7 @@ class FunctionSampler: The number of functions that should be sampled when calling sample_functions. function_set : tp.domains.FunctionSet The function set from which functions should be sampled. Note that the size of the - functions set needs to be larger or eqaul to n_functions. + functions set needs to be larger or equal to n_functions. function_creation_interval : int, optional functions set needs to be larger or equal to n_functions. 
function_creation_interval : int @@ -76,21 +76,21 @@ class FunctionSamplerOrdered(FunctionSampler): def __init__(self, n_functions, function_set : FunctionSet, function_creation_interval : int = 0): super().__init__(n_functions, function_set, function_creation_interval) self.current_indices = torch.arange(self.n_functions, dtype=torch.int64) - self.new_indieces = torch.zeros_like(self.current_indices, dtype=torch.int64) + self.new_indices = torch.zeros_like(self.current_indices, dtype=torch.int64) def sample_functions(self, device="cpu"): self._check_recreate_functions(device=device) - self.current_indices = self.new_indieces.clone() + self.current_indices = self.new_indices.clone() current_out = self.function_set.get_function(self.current_indices) - self.new_indieces = (self.current_indices + self.n_functions) % self.function_set.function_set_size + self.new_indices = (self.current_indices + self.n_functions) % self.function_set.function_set_size return current_out class FunctionSamplerCoupled(FunctionSampler): """ A sampler that is coupled to another sampler, such that the same indices of functions are sampled from both samplers. - Can be usefull is two different data function sets are used where the data of + Can be useful is two different data function sets are used where the data of both sets is coupled and should therefore be samples accordingly. """ def __init__(self, function_set : FunctionSet, coupled_sampler : FunctionSampler): diff --git a/src/torchphysics/problem/samplers/grid_samplers.py b/src/torchphysics/problem/samplers/grid_samplers.py index b7e472db..43e4ec53 100644 --- a/src/torchphysics/problem/samplers/grid_samplers.py +++ b/src/torchphysics/problem/samplers/grid_samplers.py @@ -20,7 +20,7 @@ class GridSampler(PointSampler): n_points : int, optional The number of points that should be sampled. density : float, optional - The desiered density of the created points. + The desired density of the created points. 
filter_fn : callable, optional A function that restricts the possible positions of sample points. A point that is allowed should return True, therefore a point that should be @@ -106,7 +106,7 @@ def _append_random_points(self, new_points, current_params, device): class ExponentialIntervalSampler(PointSampler): - """Will sample non equdistant grid points in the given interval. + """Will sample non equidistant grid points in the given interval. This works only on intervals! Parameters @@ -116,7 +116,7 @@ class ExponentialIntervalSampler(PointSampler): n_points : int The number of points that should be sampled. exponent : Number - Determines how non equdistant the points are and at which corner they + Determines how non equidistant the points are and at which corner they are accumulated. They are computed with a grid in [0, 1] and then transformed with the exponent and later scaled/translated: exponent < 1: More points at the upper bound. diff --git a/src/torchphysics/problem/samplers/plot_samplers.py b/src/torchphysics/problem/samplers/plot_samplers.py index 9618ed59..a2e83cc6 100644 --- a/src/torchphysics/problem/samplers/plot_samplers.py +++ b/src/torchphysics/problem/samplers/plot_samplers.py @@ -23,7 +23,7 @@ class PlotSampler(PointSampler): n_points : int, optional The number of points that should be used for the plot. density : float, optional - The desiered density of the created points. + The desired density of the created points. device : str or torch device, optional The device of the model/function. data_for_other_variables : dict or torchphysics.spaces.Points, optional @@ -156,7 +156,7 @@ class AnimationSampler(PlotSampler): n_points : int, optional The number of points that should be used for the plot domain. density : float, optional - The desiered density of the created points, in the plot domain. + The desired density of the created points, in the plot domain. device : str or torch device, optional The device of the model/function. 
data_for_other_variables : dict, optional @@ -185,7 +185,7 @@ def __init__( self._check_correct_types(animation_domain) self.frame_number = frame_number self.animation_domain = animation_domain(**data_for_other_variables) - self.animatoin_sampler = self._construct_sampler_for_Interval( + self.animation_sampler = self._construct_sampler_for_Interval( self.animation_domain, n=frame_number ) @@ -207,13 +207,13 @@ def plot_domain_constant(self): @property def animation_key(self): - """Retunrs the name of the animation variable""" + """Returns the name of the animation variable""" ani_key = list(self.animation_domain.space.keys())[0] return ani_key def sample_animation_points(self): """Samples points out of the animation domain, e.g. time interval.""" - ani_points = self.animatoin_sampler.sample_points() + ani_points = self.animation_sampler.sample_points() num_of_points = len(ani_points) self.frame_number = num_of_points self._set_device_and_grad_true(ani_points) diff --git a/src/torchphysics/problem/samplers/random_samplers.py b/src/torchphysics/problem/samplers/random_samplers.py index ef63f7e7..193f9805 100644 --- a/src/torchphysics/problem/samplers/random_samplers.py +++ b/src/torchphysics/problem/samplers/random_samplers.py @@ -19,7 +19,7 @@ class RandomUniformSampler(PointSampler): n_points : int, optional The number of points that should be sampled. density : float, optional - The desiered density of the created points. + The desired density of the created points. filter : callable, optional A function that restricts the possible positions of sample points. A point that is allowed should return True, therefore a point that should be @@ -257,7 +257,7 @@ class AdaptiveThresholdRejectionSampler(AdaptiveSampler): The number of points that should be sampled. density : float, optional The desired initial (and average) density of the created points, actual - density will change loccally during iterations. + density will change locally during iterations. 
filter : callable, optional A function that restricts the possible positions of sample points. A point that is allowed should return True, therefore a point that should be @@ -307,7 +307,7 @@ class AdaptiveRandomRejectionSampler(AdaptiveSampler): The number of points that should be sampled. density : float, optional The desired initial (and average) density of the created points, actual - density will change loccally during iterations. + density will change locally during iterations. filter : callable, optional A function that restricts the possible positions of sample points. A point that is allowed should return True, therefore a point that should be diff --git a/src/torchphysics/problem/samplers/sampler_base.py b/src/torchphysics/problem/samplers/sampler_base.py index 0d50ecb2..8bf2d165 100644 --- a/src/torchphysics/problem/samplers/sampler_base.py +++ b/src/torchphysics/problem/samplers/sampler_base.py @@ -66,7 +66,7 @@ def set_length(self, length): def __iter__(self): """Creates a iterator of this Pointsampler, with *next* the ``sample_points`` - methode can be called. + method can be called. """ return self @@ -80,8 +80,8 @@ def __len__(self): Note ---- This can be only called if the number of points is set with ``n_points``. - Elsewise the the number can only be known after the first call to - ``sample_points`` methode or may even change after each call. + Else the number can only be known after the first call to + ``sample_points`` method or may even change after each call. If you know the number of points yourself, you can set this with ``.set_length``. """ @@ -101,7 +101,7 @@ def make_static(self, resample_interval=math.inf): points the first time .sample_points() is called. Afterwards the points are saved and will always be returned if .sample_points() is called again. 
Useful if the same points should be used while training/validation - or if it is not practicall to create new points in each iteration + or if it is not practical to create new points in each iteration (e.g. grid points). Parameters @@ -115,7 +115,7 @@ def make_static(self, resample_interval=math.inf): @property def is_static(self): - """Checks if the Sampler is a ``StaticSampler``, e.g. retuns always the + """Checks if the Sampler is a ``StaticSampler``, e.g. returns always the same points. """ return isinstance(self, StaticSampler) @@ -182,7 +182,7 @@ def append(self, other): return AppendSampler(self, other) def _sample_params_independent(self, sample_function, params, device): - """If the domain is independent of the used params it is more efficent + """If the domain is independent of the used params it is more efficient to sample points once and then copy them accordingly. """ points = sample_function(n=self.n_points, d=self.density, device=device) @@ -196,7 +196,7 @@ def _sample_params_independent(self, sample_function, params, device): def _sample_params_dependent(self, sample_function, params, device): """If the domain is dependent on some params, we can't always sample points for all params at once. Therefore we need a loop to iterate over the params. - This happens for example with denstiy sampling or grid sampling. + This happens for example with density sampling or grid sampling. """ num_of_params = max(1, len(params)) sample_points = None @@ -250,7 +250,7 @@ def _cut_tensor_to_length_n(self, points): class ProductSampler(PointSampler): """A sampler that constructs the product of two samplers. - Will create a meshgrid (Cartesian product) of the data points of both samplers. + Will create a mesh grid (Cartesian product) of the data points of both samplers. 
Parameters ---------- @@ -304,7 +304,7 @@ def sample_points(self, params=Points.empty(), device="cpu"): class AppendSampler(PointSampler): """A sampler that appends the output of two samplers behind each other. - Essentially calling torch.coloumn_stack for the data points. + Essentially calling torch.column_stack for the data points. Parameters ---------- @@ -399,7 +399,7 @@ class AdaptiveSampler(PointSampler): """ def sample_points(self, unreduced_loss, params=Points.empty(), device="cpu"): - """Extends the sample methode of the parent class. Also requieres the + """Extends the sample method of the parent class. Also requires the unreduced loss of the previous iteration to create the new points. Parameters diff --git a/src/torchphysics/solver.py b/src/torchphysics/solver.py index 2fdd6798..3a4fa6ca 100644 --- a/src/torchphysics/solver.py +++ b/src/torchphysics/solver.py @@ -40,7 +40,7 @@ class Solver(pl.LightningModule): losses will be computed and minimized. val_conditions : tuple or list Conditions to be tracked during the validation part of the training, can - be used e.g. to track errors comparede to measured data. + be used e.g. to track errors compared to measured data. optimizer_setting : OptimizerSetting A OptimizerSetting object that contains all necessary parameters for optimizing, see :class:`OptimizerSetting`. 
@@ -104,7 +104,7 @@ def training_step(self, batch, batch_idx): self.log(f"train/{condition.name}", cond_loss) loss = loss + condition.weight * cond_loss - self.log('train/loss', loss, prog_bar=True) + self.log('train/loss', loss.item(), prog_bar=True) self.n_training_step += 1 return loss diff --git a/src/torchphysics/utils/callbacks.py b/src/torchphysics/utils/callbacks.py index 42626f09..30b05996 100644 --- a/src/torchphysics/utils/callbacks.py +++ b/src/torchphysics/utils/callbacks.py @@ -84,8 +84,7 @@ class PlotterCallback(Callback): Parameters ---------- plot_function : callable - A function that specfices the part of the model that should be plotted. - A function that specfices the part of the model that should be plotted. + A function that specifies the part of the model that should be plotted. point_sampler : torchphysics.samplers.PlotSampler A sampler that creates the points that should be used for the plot. log_interval : str, optional @@ -98,7 +97,7 @@ class PlotterCallback(Callback): Specifies how the output should be plotted. If no input is given, the method will try to use a fitting way, to show the data. See also plot-functions. kwargs: - Additional arguments to specify different parameters/behaviour of + Additional arguments to specify different parameters/behavior of the plot. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html for possible arguments of each underlying object. """ diff --git a/src/torchphysics/utils/data/dataloader.py b/src/torchphysics/utils/data/dataloader.py index 6c765b63..b691fa58 100644 --- a/src/torchphysics/utils/data/dataloader.py +++ b/src/torchphysics/utils/data/dataloader.py @@ -19,7 +19,7 @@ class PointsDataset(torch.utils.data.Dataset): shuffle : bool Whether to shuffle the order of the data points at initialization. drop_last : bool - Whether to drop the last (and non-batch-size-) minibatch. + Whether to drop the last (and non-batch-size-) mini-batch. 
""" def __init__(self, data_points, batch_size, shuffle=False, drop_last=False): @@ -62,7 +62,7 @@ def __getitem__(self, idx): class PointsDataLoader(torch.utils.data.DataLoader): """ - A DataLoader that can be used in a condition to load minibatches of paired data + A DataLoader that can be used in a condition to load mini-batches of paired data points as the input and output of a model. Parameters @@ -80,7 +80,7 @@ class PointsDataLoader(torch.utils.data.DataLoader): pin_memory : bool Whether to use pinned memory during data loading, see also: the PyTorch documentation drop_last : bool - Whether to drop the last (and non-batch-size-) minibatch. + Whether to drop the last (and non-batch-size-) mini-batch. """ def __init__( diff --git a/src/torchphysics/utils/data/deeponet_dataloader.py b/src/torchphysics/utils/data/deeponet_dataloader.py index d839c463..50a7262e 100644 --- a/src/torchphysics/utils/data/deeponet_dataloader.py +++ b/src/torchphysics/utils/data/deeponet_dataloader.py @@ -6,7 +6,7 @@ class DeepONetDataLoader(torch.utils.data.DataLoader): """ - A DataLoader that can be used in a condition to load minibatches of paired data + A DataLoader that can be used in a condition to load mini-batches of paired data points as the input and output of a DeepONet-model. Parameters @@ -19,7 +19,7 @@ class DeepONetDataLoader(torch.utils.data.DataLoader): the shape would be: [20, 100, 2] trunk_data : torch.tensor A tensor containing the input data for the trunk network. There are two different - possibilites for the shape of this data: + possibilities for the shape of this data: 1) Every branch input function uses the same trunk values, then we can pass in the shape: [number_of_trunk_points, input_dim_of_trunk_net] This can speed up the trainings process. @@ -207,7 +207,7 @@ def __getitem__(self, idx): idx : int The index of the desired point. 
""" - # frist slice in branch dimension (dim 0): + # first slice in branch dimension (dim 0): branch_idx = int(idx / self.branch_batch_len) a = (branch_idx * self.branch_batch_size) % len(self.branch_data_points) b = ((branch_idx + 1) * self.branch_batch_size) % len(self.branch_data_points) @@ -298,7 +298,7 @@ def __init__( def __len__(self): """Returns the number of points of this dataset.""" # the least common multiple of both possible length will lead to the correct distribution - # of data points and hopefully managable effort + # of data points and hopefully manageable effort return int( np.lcm( int( diff --git a/src/torchphysics/utils/differentialoperators/differenceoperators.py b/src/torchphysics/utils/differentialoperators/differenceoperators.py index 5e4e1269..5a5f6d68 100644 --- a/src/torchphysics/utils/differentialoperators/differenceoperators.py +++ b/src/torchphysics/utils/differentialoperators/differenceoperators.py @@ -16,7 +16,7 @@ def discrete_grad_on_grid(model_out, grid_size): Notes ----- - This methode assumes that the input function which the gradient should be + This method assumes that the input function which the gradient should be computed of is defined on a regular equidistant grid. 
The shape of function is assumed to be of the form (batch_size, N_1, N_2, ..., N_d, dim), @@ -36,10 +36,10 @@ def discrete_grad_on_grid(model_out, grid_size): read_from_slice_one_sided = [slice(None)] * len(model_out.shape) for i in range(number_of_dims): - # Update the last dimension to wrtie the correct gradient components + # Update the last dimension to write the correct gradient components write_to_slice[-1] = slice(i * model_out.shape[-1], (i + 1) * model_out.shape[-1]) - # pick correct dimenison for the current direction + # pick correct dimension for the current direction write_to_slice[i+1] = slice(1, -1) read_from_slice_left[i+1] = slice(2, None) read_from_slice_right[i+1] = slice(0, -2) @@ -89,7 +89,7 @@ def discrete_laplacian_on_grid(model_out, grid_size): Notes ----- - This methode assumes the same properties as `discrete_grad_on_grid`. + This method assumes the same properties as `discrete_grad_on_grid`. """ number_of_dims = len(model_out.shape) - 2 laplace = torch.zeros(*model_out.shape, device=model_out.device) @@ -101,7 +101,7 @@ def discrete_laplacian_on_grid(model_out, grid_size): read_from_slice_one_sided = [slice(None)] * len(model_out.shape) for i in range(number_of_dims): - # pick correct dimenison for the current direction + # pick correct dimension for the current direction write_to_slice[i+1] = slice(1, -1) read_from_slice_center[i+1] = slice(1, -1) read_from_slice_left[i+1] = slice(2, None) diff --git a/src/torchphysics/utils/differentialoperators/differentialoperators.py b/src/torchphysics/utils/differentialoperators/differentialoperators.py index ba2ba4a1..36284a54 100644 --- a/src/torchphysics/utils/differentialoperators/differentialoperators.py +++ b/src/torchphysics/utils/differentialoperators/differentialoperators.py @@ -1,6 +1,6 @@ -"""File contains differentialoperators +"""File contains differential operators -NOTE: We aim to make the computation of differential operaotrs more efficient +NOTE: We aim to make the computation of 
differential operators more efficient by building an intelligent framework that is able to keep already computed derivatives and therefore make the computations more efficient. """ @@ -109,7 +109,7 @@ def grad(model_out, *derivative_variable): def normal_derivative(model_out, normals, *derivative_variable): - """Computes the normal derivativ of a network with respect to the given variable + """Computes the normal derivative of a network with respect to the given variable and normal vectors. Parameters @@ -136,7 +136,7 @@ def normal_derivative(model_out, normals, *derivative_variable): def div(model_out, *derivative_variable): """Computes the divergence of a network with respect to the given variable. - Only for vector valued inputs, for matices use the function matrix_div. + Only for vector valued inputs, for matrices use the function matrix_div. Parameters ---------- model_out : torch.tensor @@ -168,7 +168,7 @@ def div(model_out, *derivative_variable): """ def div(model_out, *derivative_variable): '''Computes the divergence of a network with respect to the given variable. - Only for vector valued inputs, for matices use the function matrix_div. + Only for vector valued inputs, for matrices use the function matrix_div. Parameters ---------- @@ -265,7 +265,7 @@ def rot(model_out, *derivative_variable): Parameters ---------- model_out : torch.tensor - The output tensor of shape (b, 3) in which respect the roation should be + The output tensor of shape (b, 3) in which respect the rotation should be computed. derivative_variable : torch.tensor The input tensor of shape (b, 3) in which respect the rotation should be @@ -368,7 +368,7 @@ def matrix_div(model_out, *derivative_variable): Parameters ---------- model_out : torch.tensor - The (batch) of matirces that should be differentiated. + The (batch) of matrices that should be differentiated. derivative_variable : torch.tensor The spatial variable in which respect should be differentiated. 
@@ -376,7 +376,7 @@ def matrix_div(model_out, *derivative_variable): ---------- torch.tensor A Tensor of vectors of the form (batch, dim), containing the - divegrence of the input. + divergence of the input. """ div_out = torch.zeros((len(model_out), model_out.shape[1]), device=model_out.device) for i in range(model_out.shape[1]): diff --git a/src/torchphysics/utils/evaluation.py b/src/torchphysics/utils/evaluation.py index c6fd4256..3e158972 100644 --- a/src/torchphysics/utils/evaluation.py +++ b/src/torchphysics/utils/evaluation.py @@ -1,4 +1,4 @@ -"""File contains different helper functions to get specific informations about +"""File contains different helper functions to get specific information about the computed solution. """ @@ -23,7 +23,7 @@ def compute_min_and_max( A sampler that creates the points where the model should be evaluated. evaluation_fn : callable A user-defined function that uses the neural network and creates the - desiered output quantity. + desired output quantity. device : str or torch device The device of the model. track_gradients : bool diff --git a/src/torchphysics/utils/plotting/animation.py b/src/torchphysics/utils/plotting/animation.py index b31ecd27..31d9519f 100644 --- a/src/torchphysics/utils/plotting/animation.py +++ b/src/torchphysics/utils/plotting/animation.py @@ -29,7 +29,7 @@ def animate( model : torchphysics.models.Model The Model/neural network that should be used in the plot. ani_function : Callable - A function that specfices the part of the model that should be animated. + A function that specifies the part of the model that should be animated. Of the same form as the plot function. point_sampler : torchphysics.samplers.AnimationSampler A Sampler that creates the points that should be used for the @@ -37,12 +37,12 @@ def animate( angle : list, optional The view angle for 3D plots. Standard angle is [30, 30] ani_type : str, optional - Specifies how the output sholud be animated. 
If no input is given, the method + Specifies how the output should be animated. If no input is given, the method will try to use a fitting way, to show the data. Implemented types are: - 'line' for line animations, with a 1D-domain and output - 'surface_2D' for surface animation, with a 2D-domain - 'quiver_2D' for quiver/vector field animation, with a 2D-domain - - 'contour_surface' for contour/colormaps, with a 2D-domain + - 'contour_surface' for contour/color-maps, with a 2D-domain Returns ------- @@ -53,7 +53,7 @@ def animate( Notes ----- - This methode only creates a simple animation and is for complex + This method only creates a simple animation and is for complex domains not really optimized. Should only be used to get a rough understanding of the trained neural network. """ @@ -167,7 +167,7 @@ def _animation_for_two_outputs(domain_dim): def animation_line( outputs, ani_sampler, animation_points, domain_points, angle, ani_speed ): - """Handels 1D animations, inputs are the same as animation().""" + """Handles 1D animations, inputs are the same as animation().""" output_max, output_min, domain_bounds, _, domain_name = _compute_animation_params( outputs, ani_sampler, animation_points ) @@ -216,7 +216,7 @@ def animate(frame_number, outputs, line): def animation_surface2D( outputs, ani_sampler, animation_points, domain_points, angle, ani_speed ): - """Handels 2D animations, inputs are the same as animation().""" + """Handles 2D animations, inputs are the same as animation().""" output_max, output_min, domain_bounds, ani_key, domain_name = ( _compute_animation_params(outputs, ani_sampler, animation_points) ) diff --git a/src/torchphysics/utils/plotting/plot_functions.py b/src/torchphysics/utils/plotting/plot_functions.py index e1b16cd8..a6221282 100644 --- a/src/torchphysics/utils/plotting/plot_functions.py +++ b/src/torchphysics/utils/plotting/plot_functions.py @@ -19,7 +19,7 @@ class Plotter: Parameters ---------- plot_function : callable - A function that 
specfices the part of the model that should be plotted. + A function that specifies the part of the model that should be plotted. Can be of the same form as the condition-functions. E.g. if the solution name is 'u' we can use @@ -35,7 +35,7 @@ class Plotter: A Sampler that creates the points that should be used for the plot. angle : list, optional - The view angle for surface plots. Standart angle is [30, 30] + The view angle for surface plots. Standard angle is [30, 30] log_interval : int Plots will be saved every log_interval steps if the plotter is used in training of a model. @@ -46,9 +46,9 @@ class Plotter: - 'surface_2D' for surface plots, with a 2D-domain - 'curve' for a curve in 3D, with a 1D-domain, - 'quiver_2D' for quiver/vector field plots, with a 2D-domain - - 'contour_surface' for contour/colormaps, with a 2D-domain + - 'contour_surface' for contour/color-maps, with a 2D-domain kwargs: - Additional arguments to specify different parameters/behaviour of + Additional arguments to specify different parameters/behavior of the plot. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html for possible arguments of each underlying object. """ @@ -108,7 +108,7 @@ def plot( model : torchphysics.models.Model The Model/neural network that should be used in the plot. plot_function : callable - A function that specfices the part of the model that should be plotted. + A function that specifies the part of the model that should be plotted. Of the same form as the condition-functions. E.g. if the solution name is 'u', we can use @@ -126,15 +126,15 @@ def plot( angle : list, optional The view angle for 3D plots. Standard angle is [30, 30] plot_type : str, optional - Specifies how the output sholud be plotted. If no input is given the method + Specifies how the output should be plotted. If no input is given the method will try to use a fitting way to show the data. 
Implemented types are: - 'line' for plots in 1D - 'surface_2D' for surface plots, with a 2D-domain - 'curve' for a curve in 3D, with a 1D-domain, - 'quiver_2D' for quiver/vector-field plots, with a 2D-domain - - 'contour_surface' for contour/colormaps, with a 2D-domain + - 'contour_surface' for contour/color-maps, with a 2D-domain kwargs: - Additional arguments to specify different parameters/behaviour of + Additional arguments to specify different parameters/behavior of the plot. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html for possible arguments of each underlying object. @@ -147,11 +147,11 @@ def plot( Notes ----- What this function does is: - creating points with sampler -> evaluate model -> evalute plot function + creating points with sampler -> evaluate model -> evaluate plot function -> create the plot with matplotlib.pyplot. The function is only meant to give a fast overview over the trained neural network. - In general the methode is not optimized for complex domains. + In general the method is not optimized for complex domains. """ if not isinstance(plot_function, UserFunction): plot_function = UserFunction(fun=plot_function) @@ -260,7 +260,7 @@ def _plot_for_two_outputs(domain_dim): def surface2D(output, domain_points, point_sampler, angle, **kwargs): - """Handels surface plots w.r.t. a two dimensional variable.""" + """Handles surface plots w.r.t. a two dimensional variable.""" # For complex domains it is best to triangulate them for the plotting triangulation = _triangulation_of_domain(point_sampler.domain, domain_points) fig, ax = _create_figure_and_axis(angle) @@ -276,7 +276,7 @@ def surface2D(output, domain_points, point_sampler, angle, **kwargs): def line_plot(output, domain_points, point_sampler, angle, **kwargs): - """Handels line plots w.r.t. a one dimensional variable.""" + """Handles line plots w.r.t. 
a one dimensional variable.""" fig = plt.figure() ax = fig.add_subplot() ax.grid() diff --git a/src/torchphysics/utils/user_fun.py b/src/torchphysics/utils/user_fun.py index 59ef4d8e..fffe0995 100644 --- a/src/torchphysics/utils/user_fun.py +++ b/src/torchphysics/utils/user_fun.py @@ -1,5 +1,5 @@ """Contains a class which extracts the needed arguments of an arbitrary -methode/function and wraps them for future usage. E.g correctly choosing +method/function and wraps them for future usage. E.g. correctly choosing the needed arguments and passing them on to the original function. """ @@ -78,7 +78,7 @@ def _set_input_args_for_function(self): # self.defaults.update(f_kwonlydefaults) def __call__(self, args={}, vectorize=False): - """To evalute the function. Will automatically extract the needed arguments + """To evaluate the function. Will automatically extract the needed arguments from the input data and will set the possible default values. Parameters @@ -113,7 +113,7 @@ def __call__(self, args={}, vectorize=False): def evaluate_function(self, **inp): """Evaluates the original input function. Should not be used directly, - rather use the call-methode. + rather use the call-method. """ if callable(self.fun): return self.fun(**inp) @@ -160,7 +160,7 @@ def partially_evaluate(self, **args): Returns ------- Out : value or UserFunction - If the input arguments are enough to evalate the whole function, the + If the input arguments are enough to evaluate the whole function, the corresponding output is returned. If some needed arguments are missing, a copy of this UserFunction will be returned. Whereby the values of **args will be added to the @@ -269,7 +269,7 @@ class DomainUserFunction(UserFunction): """ def __call__(self, args={}, device="cpu"): - """To evalute the function. Will automatically extract the needed arguments + """To evaluate the function. Will automatically extract the needed arguments from the input data and will set the possible default values. 
Parameters @@ -277,7 +277,7 @@ def __call__(self, args={}, device="cpu"): args : dict or torchphysics.Points The input data, where the function should be evaluated. device : str, optional - The device on which the output of th efunction values should lay. + The device on which the output of the function values should lie. Default is 'cpu'. Returns @@ -301,12 +301,12 @@ def __call__(self, args={}, device="cpu"): def evaluate_function(self, device="cpu", **inp): """Evaluates the original input function. Should not be used directly, - rather use the call-methode. + rather use the call-method. Parameters ---------- device : str, optional - The device on which the output of th efunction values should lay. + The device on which the output of the function values should lie. Default is 'cpu'. inp The input values. diff --git a/tests/test_conditions.py b/tests/test_conditions.py index b46f8608..f6184ad8 100644 --- a/tests/test_conditions.py +++ b/tests/test_conditions.py @@ -109,7 +109,7 @@ def test_datacondition_forward_2(): Points(torch.tensor([[0.0], [2.0]]), R1('u'))), batch_size=1) cond = DataCondition(module=module, dataloader=loader, - norm=2, relative=False, use_full_dataset=True) + norm=2, relative=False, use_full_dataset=True, root=2.0) out = cond() assert out == 1.0 diff --git a/tests/tests_functionsets/test_FunctionSetTransforms.py b/tests/tests_functionsets/test_FunctionSetTransforms.py index 89ec97ca..e137fc82 100644 --- a/tests/tests_functionsets/test_FunctionSetTransforms.py +++ b/tests/tests_functionsets/test_FunctionSetTransforms.py @@ -3,7 +3,7 @@ from torchphysics.problem.domains.functionsets import FunctionSet from torchphysics.problem.domains.functionsets.functionset_operations import ( - FunctionSetAdd, FunctionSetSubstract, FunctionSetTransform + FunctionSetAdd, FunctionSetSubtract, FunctionSetTransform ) from torchphysics.problem.spaces import R1, R2, FunctionSpace, Points from torchphysics.problem.domains.functionsets import CustomFunctionSet @@ 
-50,7 +50,7 @@ def test_create_fn_set_substract(): fn_set = FunctionSet(space, 100) fn_set2 = FunctionSet(space, 100) fn_set -= fn_set2 - assert isinstance(fn_set, FunctionSetSubstract) + assert isinstance(fn_set, FunctionSetSubtract) assert fn_set.function_space == space assert fn_set.function_set_size == 100 @@ -67,7 +67,7 @@ def test_create_fn_set_sum_and_substract(): assert fn_set4.function_set_size == 100 fn_set4 = fn_set3 - fn_set - assert isinstance(fn_set4, FunctionSetSubstract) + assert isinstance(fn_set4, FunctionSetSubtract) assert fn_set4.function_space == space assert fn_set4.function_set_size == 100 diff --git a/tests/tests_models/test_activation_fn.py b/tests/tests_models/test_activation_fn.py index 1e14ff2c..8ab03fd8 100644 --- a/tests/tests_models/test_activation_fn.py +++ b/tests/tests_models/test_activation_fn.py @@ -13,7 +13,7 @@ def test_create_adaptive_with_tanh(): def test_create_adaptive_with_ReLu(): - adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), inital_a=5.0, scaling=10.0) + adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), initial_a=5.0, scaling=10.0) assert isinstance(adap_fn.activation_fn, torch.nn.ReLU) assert adap_fn.a == 5.0 assert adap_fn.a.requires_grad @@ -22,7 +22,7 @@ def test_create_adaptive_with_ReLu(): def test_forward_of_adaptive_activation(): input_x = torch.tensor([[1.0], [2.0], [-5.0]]) - adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), inital_a=5.0, scaling=10.0) + adap_fn = AdaptiveActivationFunction(torch.nn.ReLU(), initial_a=5.0, scaling=10.0) output_x = adap_fn(input_x) assert len(output_x) == 3 assert output_x[0] == 50.0 diff --git a/tests/tests_sampler/test_function_sampler.py b/tests/tests_sampler/test_function_sampler.py index ebe5f131..313232cb 100644 --- a/tests/tests_sampler/test_function_sampler.py +++ b/tests/tests_sampler/test_function_sampler.py @@ -71,8 +71,8 @@ def test_ordered_function_sampler_sample(): fn_set = make_default_fn_set() fn_sampler = FunctionSamplerOrdered(20, fn_set, 100) 
fns = fn_sampler.sample_functions() - assert torch.all(fn_sampler.new_indieces >= 20) - assert torch.all(fn_sampler.new_indieces < 40) + assert torch.all(fn_sampler.new_indices >= 20) + assert torch.all(fn_sampler.new_indices < 40) assert callable(fns) @@ -81,8 +81,8 @@ def test_ordered_function_sampler_sample_two_times(): fn_sampler = FunctionSamplerOrdered(20, fn_set, 100) fns = fn_sampler.sample_functions() fns = fn_sampler.sample_functions() - assert torch.all(fn_sampler.new_indieces >= 40) - assert torch.all(fn_sampler.new_indieces < 60) + assert torch.all(fn_sampler.new_indices >= 40) + assert torch.all(fn_sampler.new_indices < 60) assert callable(fns) @@ -91,8 +91,8 @@ def test_ordered_function_sampler_sample_multiple_times(): fn_sampler = FunctionSamplerOrdered(20, fn_set, 100) for _ in range(5): _ = fn_sampler.sample_functions() - assert torch.all(fn_sampler.new_indieces >= 0) - assert torch.all(fn_sampler.new_indieces < 20) + assert torch.all(fn_sampler.new_indices >= 0) + assert torch.all(fn_sampler.new_indices < 20) def test_create_coupled_function_sampler():