diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..240b71bf --- /dev/null +++ b/Dockerfile @@ -0,0 +1,38 @@ +FROM python + +MAINTAINER Giuseppe Romano + +RUN apt-get update + +RUN apt-get install -y build-essential libopenmpi-dev libgmsh-dev + +ADD dist app + +WORKDIR app + +RUN pip install --no-cache openbte-2.70.tar.gz + +RUN useradd -ms /bin/bash openbte + +ENV OMPI_MCA_btl_vader_single_copy_mechanism none + +ENV OMPI_MCA_btl_base_warn_component_unused 0 + +ENV MPLCONFIGDIR /tmp/matplotlib + +RUN mkdir /workspace + +USER openbte + +WORKDIR /home/openbte + +LABEL org.opencontainers.image.source https://github.com/romanodev/openbte + +EXPOSE 8050 + + + + + + + diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100755 index a2284183..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,3 +0,0 @@ -include LICENSE -include README.rst -include extern/gmsh diff --git a/apt.txt b/apt.txt deleted file mode 100755 index ff7487fe..00000000 --- a/apt.txt +++ /dev/null @@ -1,3 +0,0 @@ -libopenmpi-dev -openmpi-bin -gmsh diff --git a/docs/source/Geometry.rst b/docs/source/Geometry.rst deleted file mode 100644 index 27c7cba1..00000000 --- a/docs/source/Geometry.rst +++ /dev/null @@ -1,144 +0,0 @@ -Geometry -=================================== - -A geometry object can be created with the ``Geometry`` class. - -.. code:: python - - Geometry(model=('lattice')|'custom'|'bulk') - -The default model is ``lattice``. All the models are based on a unit cell of size ``lx`` and ``ly`` (in nm). Optionally, a thickness ``lz`` can be added. Note that ``3D`` simulations are experimental and not currently supported. Periodic boundary conditions are applied throughout the unit cell, unless the ``Periodic`` option is defined. An applied difference of temperature :math:`\Delta T =` 1 K is applied along :math:`x`. The ``bulk`` is simply a square repeated along :math:`x` and :math:`y`. 
In-plane thermal conductivity of a thin film can be created with - -Bulk -------------------------------------- - -The ``bulk`` is simply a square repeated along :math:`x` and :math:`y` - -.. code:: python - - Geometry(model='bulk',lx=100,ly=100, stpe=5) - -Diffuse scattering boundary conditions are applied to the top and bottom surfaces. - -Film -------------------------------------- - -Within OpenBTE, a thin film is just a ``bulk`` model with diffuse boundary conditions at the top and bottom surfaces - -.. code:: python - - Geometry(model='bulk',lx=100,ly=100, stpe=5,Periodic=[True,False,True]) - - -Porous ----------------------------------------- - -Porous materials can be defined with the ``lattice`` model. Options include a `porosity` (i.e. the volume fraction), a `base` - i.e. the position of the pores in the unit-cell and a `shape`. Below is a complete list of options. Additionally, the mesh step size (``step``) must be include. For example: - -.. code:: python - - Geometry(model='lattice',lx = 10,ly = 10, step = 0.5, base = [[0.2,0],[-0.2,0]],porosity=0.1,shape='circle') - - -Additional info/tips: - - - Commonly, a shape may cross one or even two boundaries of the unit-cell. The user needs `not` to include in the base the image pore, since OpenBTE will automatically added it. - - - The base has the range :math:`x\in [-0.5,0.5]` and :math:`y\in [-0.5,0.5]` - - - When two pores overlap, including the cases where the overlap is due to periodicity, a bigger pore - the union of the two overlapping pores, is created. - - - The area of the pores is defined by the chosen porosity, the shape and number of pores in the base. For example, when the number of pores in the base doubles - with all other parameters being equal, the area of each pore halves. - - - The ``step`` keyword defined the characteristic size of the mesh. In 2D domains, the number of elements will be roughly :math:`lx*ly/\mathrm{step}^2`. Typical calculations have 400-10 K elements. 
- - - Diffuse scattering boundary conditions are applied along the walls of the pores. - - - A shape can be either a predefined one - including ``square``, ``triangle`` and ``circle`` - or user defined, as outlined in the next section. - -Pores with different size/shapes -########################################## - -To have pores with different sizes, you can use the option ``area_ratio``, which takes a list of ``relative`` areas for each pores. Note that the total area will still be set by the porosity. Example: - -.. code:: python - - Geometry(model='lattice',lx = 10,ly = 10, step = 0.5, base = [[0.2,0],[-0.2,0]],porosity=0.1,shape='circle',area_ratio=[1,2]) - -In this case the second pore would be twice as large as the first one. Optionally, you can also define a vector of shapes, e.g. ``shape=['circle','square']``. An example is reported in Example 1. - - -Custom shapes -########################################## - -Custom shapes (which should not be confused with custom geometry mode, defined below) can be created with ``shape=custom``. The user-defined structure is identified with ``shape_function`` and its options, ``shape_options``. See Example 2 for further clarifications. - -Additional info/tips: - - - The shape coordinates are normalized to :math:`(-0.5,0.5)` both in :math:`x` and :math:`y` coordinates. - - Multiple shape functions can also be declared in lists. - - The shape function must at least take the option ``area`` in input, which is internally calculated, so that the nominal porosity is respected. Note that ``area`` is normalized to the unit square. The workflow is this: 1) decide the porosity of your material 2) based on the option ``area_ratio``, assign a porosity to each pore. If ``area_ratio`` is not assigned, then the porosity of each pore is the porosity of the material. 3) Build your structure using custom options. - - The values for ``shape_options`` can also be a ``list`` with the same size as the number of pores with custom shapes. 
In this case, these values are passed separately to the pores. - -For an example, see Example 2. - - -.. code:: python - - from openbte import Geometry - import numpy as np - - def shape(options): - area = options['area'] - T = options['T'] - f = np.sqrt(2) - - poly_clip = [] - a = area/T/2 - - poly_clip.append([0,0]) - poly_clip.append([a/f,a/f]) - poly_clip.append([a/f-T*f,a/f]) - poly_clip.append([-T*f,0]) - poly_clip.append([a/f-T*f,-a/f]) - poly_clip.append([a/f,-a/f]) - - return poly_clip - - geo = Geometry(porosity=0.05,lx=100,ly=100,step=5,shape='custom',base=[[0,0]],lz=0,save=False,shape_function=shape,shape_options={'T':0.05}) - - - -Custom ------------------------------------------------------ - -With the custom model, the structured is defined a series of polygons defining the regions of the material to be carved out. Below is an example - -.. code:: python - - from openbte import Geometry - - k = 0.1 - h = 0.1 - d = 0.07 - poly1 = [[-k/2,0],[-k/2,-h],[k/2,0]] - poly2 = [[-0.6,0],[-0.6,-0.8],[0.6,-0.8],[0.6,-0],[k/2+d,0],[-k/2-d,-k-2*d],[-k/2-d,0]] - - Geometry(model='custom',lx=100,ly=100,step=5,polygons = [poly1,poly2]) - -.. image:: carved.png - :width: 500 px - -Note that the coordinates are within the :math:`(-0.5,0.5)` range. - - -Additional info/tips: - - - If you want to work with unnormalized coordinate use ``relative=False``. - - - Pores that cross the boundaries are repeated. You can turn off this behaviour by using ``repeat=False``. - - - - - diff --git a/docs/source/Material.rst b/docs/source/Material.rst deleted file mode 100644 index a862a2ca..00000000 --- a/docs/source/Material.rst +++ /dev/null @@ -1,210 +0,0 @@ -Material -=================================== - -OpenBTE features several material models. A material model takes bulk data in input. Bulk-related files can either be generated by the users or retrieved by the database. For the latter case, you can use the following command - -.. 
code-block:: python - - Material(source='database',filename='Si',temperature=300,model='rta2DSym') - -In this case the file ``rta.npz`` will be copy into your current directory and the material model ``rta2DSym`` will be created and saved in file ``material.npz``. - - -Fourier model ------------------------------------ - -OpenBTE features a 3D solver of diffusive heat conduction solved on unstructured grids. This model is used as a first-guess to OpenBTE and can be used as a stand-alone model. To create the relative material model, simply use - -.. code-block:: python - - Material(model='fourier',kappa=130) - -where ``kappa`` is the bulk thermal conductivity. - -It is also possible to define an anisotropic thermal conductivity - -.. code-block:: python - - Material(model='fourier',kappa_xx=100,kappa_yy=150) - -Gray model approximation ------------------------------------ - -The simplest BTE model assumes gray medium, i.e. phonons have the same MFP. To create ``material.npz`` the mean-free-path (in m) and the bulk thermal conductivity must be specified. Here is an example: - -.. code-block:: python - - Material(model='gray',mfp=1e-8,kappa=130) - -Note that the gray model assumes 2D material, i.e. no integration over the azimuthal angle is performed. - - -Mean-free-path approximation ------------------------------------ - -This method estimates kappa given only the cumulative thermal conductivity. It assumes isotropic distribution, therefore use it cautiously. The BTE to solve is - -* ``model='mfp3D'``: three-dimensional domain - -* ``model='mfp2DSym'``: three-dimensional domain with infinite thickness - -* ``model='mfp2D'``: two-dimensional domain - - -The material file can be created by running - -.. code-block:: python - - Material(model=<('mfp3D'),'mfp2DSym','mfp2D'>) - -provided the file ``mfp.npz`` is in your current directory. This file must contain the following information - -.. 
table:: - :widths: auto - :align: center - - +--------------------------+-------------+--------------------------------------------------------------------------+-------------------------------------------+ - | **Item** | **Shape** | **Symbol [Units]** | **Name** | - +--------------------------+-------------+--------------------------------------------------------------------------+-------------------------------------------+ - | ``mfp`` | N | :math:`\Lambda` [:math:`m`] | Mean Free Path | - +--------------------------+-------------+--------------------------------------------------------------------------+-------------------------------------------+ - | ``Kacc`` | N | :math:`\alpha` [:math:`\mathrm{W}\mathrm{m}^{-1}\textrm{K}^{-1}`] | Cumulative thermal conductivity | - +--------------------------+-------------+--------------------------------------------------------------------------+-------------------------------------------+ - -Once you have your dictionary with proper information, you can simplt save the ``mfp.npz`` file by typing - -.. code-block:: python - - from openbte.utils import * - - save_data('mfp',{'mfp':mfp,'Kacc':Kacc}) - - - -Relaxation time approximation ------------------------------------ - -RTA models implement the anisotropic-mean-free-path-BTE (aMFP-BTE_) flavor. - - -Creating ``rta.npz`` -############################################### - -The first step for solving the RTA-BTE is to create the file ``rta.npz``. This file has ``npz`` format and must have the following items: - -.. 
table:: - :widths: auto - :align: center - - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - | **Item** | **Shape** | **Symbol [Units]** | **Name** | - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - | ``tau`` | N | :math:`\tau` [:math:`s`] | Scattering time | - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - | ``C`` | N | :math:`C` [:math:`\mathrm{W}\mathrm{s}\textrm{K}^{-1}\textrm{m}^{-3}`] | Specific Heat capacity | - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - | ``v`` | N x 3 | :math:`\mathbf{v}` [:math:`\mathrm{m}\textrm{s}^{-1}`] | Group velocity | - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - | ``kappa`` | 3 x 3 | :math:`\kappa` [:math:`\mathrm{W}\textrm{K}^{-1}\textrm{m}^{-1}`] | Thermal conductivity | - +----------------+-------------+--------------------------------------------------------------------------+--------------------------+ - - -Each item must be a ``numpy`` array with the prescribed ``shape``. The thermal conductivity tensor is given by :math:`\kappa^{\alpha\beta} = \sum_{\mu} C_\mu v_\mu^{\alpha} v_\mu^{\beta} \tau_\mu`. - -With ``rta.npz`` in your current directory, ``material.npz`` can be generated simply with - -.. 
code-block:: python - - Material(model=<('rta3D'),'rta2DSym','rta2D'>) - - -The RTA-BTE has three material models: - -* ``model='rta3D'``: three-dimensional domain - -* ``model='rta2DSym'``: three-dimensional domain with infinite thickness - -* ``model='rta2D'``: two-dimensional domain - - -Interface with AlmaBTE -############################################### - -AlmaBTE_ is a popular package that compute the thermal conductivity of bulk materials, thin films and superlattices. OpenBTE is interfaced with AlmaBTE for RTA calculations via the script ``almabte2openbte.py``. - -Assuming you have ``AlmaBTE`` in your current ``PATH``, this an example for ``Si``. - -- Download Silicon force constants from AlmaBTE's database_. - - .. code-block:: bash - - wget https://almabte.bitbucket.io/database/Si.tar.xz - tar -xf Si.tar.xz && rm -rf Si.tar.xz - -- Compute bulk scattering time with AlmaBTE. - - .. code-block:: bash - - echo " - - - " > inputfile.xml - - VCAbuilder inputfile.xml - phononinfo Si/Si_8_8_8.h5 300.0 - -- A file named ``Si_8_8_8_300K.phononinfo`` is in your current directory. Note that you can specify the temperature. Here we choose 300 K. `The file ``rta.npz`` can then be created with - - .. code-block:: bash - - AlmaBTE2OpenBTE Si_8_8_8_300K.phononinfo - -- Using OpenBTE command line interface, the ``material`` may be created with - - .. code-block:: bash - - OpenBTE $'Material:\n model: rta2DSym' - -Interface with Phono3Py -############################################### - -Phono3py_ calculates the bulk thermal conductivity using both the RTA and full scattering operator. Currently, only the former is supported. Once Phono3py is solved, the ``rta.npz`` is created by - - -.. code-block:: bash - - phono3pytoOpenBTE unitcell_name nx ny nz - -where ``unitcell_name`` is the file of your unit cell and ``nx ny nz`` is the reciprical space discretization. - -Here is an example assuming you have a working installation of Phono3py: - -.. 
code-block:: bash - - git clone https://github.com/phonopy/phono3py.git - - cd phono3py/examples/Si-PBEsol - - phono3py --dim="2 2 2" --sym-fc -c POSCAR-unitcell - - phono3py --dim="2 2 2" --pa="0 1/2 1/2 1/2 0 1/2 1/2 1/2 0" -c POSCAR-unitcell --mesh="8 8 8" --fc3 --fc2 --ts=100 - - Phono3py2OpenBTE POSCAR-unitcell 8 8 8 - -Note that ``rta.npz`` is also created in the case you want to use a RTA model. - - - -.. _Deepdish: https://deepdish.readthedocs.io/ -.. _Phono3py: https://phonopy.github.io/phono3py/ -.. _AlmaBTE: https://almabte.bitbucket.io/ -.. _database: https://almabte.bitbucket.io/database/ -.. _aMFP-BTE: https://arxiv.org/abs/2105.08181 -.. _Deepdish: https://deepdish.readthedocs.io/ -.. _`Wu et al.`: https://www.sciencedirect.com/science/article/pii/S0009261416310193?via%3Dihub -.. _Phono3py: https://phonopy.github.io/phono3py/ - - - - - diff --git a/docs/source/_static/configurations.png b/docs/source/_static/configurations.png new file mode 100644 index 00000000..b5ce8f49 Binary files /dev/null and b/docs/source/_static/configurations.png differ diff --git a/docs/source/_static/disk.png b/docs/source/_static/disk.png new file mode 100644 index 00000000..d03195cf Binary files /dev/null and b/docs/source/_static/disk.png differ diff --git a/docs/source/_static/geometry_models.png b/docs/source/_static/geometry_models.png new file mode 100644 index 00000000..3b0e790e Binary files /dev/null and b/docs/source/_static/geometry_models.png differ diff --git a/docs/source/_static/plotly_4.html b/docs/source/_static/plotly_4.html new file mode 100644 index 00000000..6c3876dc --- /dev/null +++ b/docs/source/_static/plotly_4.html @@ -0,0 +1,71 @@ + + + +
+
+ + \ No newline at end of file diff --git a/docs/source/database.rst b/docs/source/database.rst index 9c811016..ad9c4671 100644 --- a/docs/source/database.rst +++ b/docs/source/database.rst @@ -11,6 +11,6 @@ Here we provide an example to get you started +----------------+-------------------+-----------------------------+--------------------+ | **Name** | **Temperature** | **Root Model** | **Provenance** | +----------------+-------------------+-----------------------------+--------------------+ - | ``Si`` | 300 | ``rta`` | AlmaBTE (24x24x24) | + | ``Si`` | 300 | ``rta`` | AlmaBTE (24x24x24)| +----------------+-------------------+-----------------------------+--------------------+ diff --git a/docs/source/examples.rst b/docs/source/examples.rst index cf19e03c..f12e1acc 100644 --- a/docs/source/examples.rst +++ b/docs/source/examples.rst @@ -9,7 +9,7 @@ Interactive examples can be run in Google Colab -1) Different shapes and non-uniform areas +Different shapes and non-uniform areas ######################################### @@ -35,7 +35,7 @@ Interactive examples can be run in Google Colab -2) Custom shapes +Custom shapes ######################################### @@ -94,7 +94,7 @@ Interactive examples can be run in Google Colab -3) Simple gray model +Gray model ######################################### Sometimes you might just want to do some quick calculations using the gray model. In this case no file is needed to create the `Material` object; only the bulk MFP and bulk thermal conductivity need to be specified. 
@@ -104,9 +104,9 @@ Sometimes you might just want to do some quick calculations using the gray model from openbte import Material,Geometry,Solver,Plot - Material(model='gray2D',mfp=1e-7,kappa=130) #mfp in nm + Material(model='gray2D',mfp=1e-7,kappa=130) #mfp in m - Geometry(model='lattice',lx = 10,ly = 10, lz=0,step = 0.5, base = [[0,0]],porosity=0.2,shape='square',delete_gmsh_files=True,direction='x') + Geometry(model='lattice',lx = 10,ly = 10, lz=0,step = 0.5, base = [[0,0]],porosity=0.2,shape='square',direction='x') Solver(multiscale=False,max_bte_iter=30) @@ -117,8 +117,27 @@ Sometimes you might just want to do some quick calculations using the gray model +Disk +######################################### + +Heat source at the center of a disk + +.. code-block:: python + + from openbte import Geometry, Solver, Material, Plot + Material(source='database',model='rta2DSym',filename='rta_Si_300') + Geometry(model='disk',Rh=1,R=10,step=1,heat_source=0.5) + + Solver(verbose=True,max_bte_iter=30) + + Plot(model='maps',write_html=True) + +.. raw:: html + + + diff --git a/docs/source/features.rst b/docs/source/features.rst deleted file mode 100644 index ce8085e5..00000000 --- a/docs/source/features.rst +++ /dev/null @@ -1,6 +0,0 @@ -Features -=================================== - -.. image:: https://docs.google.com/drawings/d/e/2PACX-1vRqrihU3IHGVNRaNN7sc2r5CMphXVz6iT8jesHsX0blyj7GPh5KyiUiOFw8WMH9bHHNZYMzBTIgLPNo/pub?w=1230&h=504 - :alt: my-picture1 - diff --git a/docs/source/geometry.rst b/docs/source/geometry.rst new file mode 100644 index 00000000..eafd4f75 --- /dev/null +++ b/docs/source/geometry.rst @@ -0,0 +1,177 @@ +Geometry +=================================== + +``OpenBTE`` reads generic 2D ``.geo`` files from gmsh_. The supported boundary conditions are diffusive adiabatic, thermostatting and periodic. Non-equilibrium phonon populations can either be induced by an external temperature gradient or heat source. 
In most cases, it is possible to create the ``geo`` file just by providing high-level information about the simulation domain and boundary conditions. The module ``Geometry`` fulfills this task. There are currently three geometry models, as outlined below. + + +.. table:: + :widths: auto + :align: center + + +----------------------+---------------------------------------------+-----------------------+ + | **Option** | **Note** | **Default** | + +----------------------+---------------------------------------------+-----------------------+ + | ``step`` | mesh size [nm] | required | + +----------------------+---------------------------------------------+-----------------------+ + | ``delete_gmsh_files``| whether to delete ``gmsh`` files | ``True`` | + +----------------------+---------------------------------------------+-----------------------+ + + + + +Lattice +---------------------------------------- + +Porous materials can be defined with ``model='lattice'``. Options include `porosity` (i.e. the volume fraction), `base` - i.e. the position of the pores in the unit-cell and `shape`. Below is a complete list of options: + +.. 
table:: + :widths: auto + :align: center + + +-------------------+---------------------------------------------+--------------------------+ + | **Option** | **Note** | **Default** | + +-------------------+---------------------------------------------+--------------------------+ + | ``lx`` | length along x [nm] | required | + +-------------------+---------------------------------------------+--------------------------+ + | ``porosity`` | volume fraction | required | + +-------------------+---------------------------------------------+--------------------------+ + | ``shape`` | ``circle``, ``square`` or ``triangle`` | ``circle`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``ly`` | length along y [nm] | ``lx`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``repeate`` | Add periodic pores | ``True`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``base`` | local coordinate of the pores | ``[[0,0]]`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``shape_function``| function for a custom shape | ``None`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``boundary`` | boundary conditions | ``[Periodic,Periodic]`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``relative`` | relative coordinate for shape definition | ``True`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``area_ratio`` | ratio of the areas of the pores | ``None`` | + +-------------------+---------------------------------------------+--------------------------+ + | ``shape_options`` | options for each custom shape | ``None`` | 
+-------------------+---------------------------------------------+--------------------------+ + + +.. code:: python + + Geometry(model='lattice',lx = 10,ly = 10, step = 0.5, base = [[0.2,0],[-0.2,0]],porosity=0.1,shape='circle') + +Pores with different size/shapes +########################################## + +To have pores with different sizes, you can use the option ``area_ratio``, which takes a list of relative areas for each pores. Note that the total area will still be set by the porosity. Example: + +.. code:: python + + Geometry(model='lattice',lx = 10,ly = 10, step = 0.5, base = [[0.2,0],[-0.2,0]],porosity=0.1,shape='circle',area_ratio=[1,2]) + +In this case the second pore would be twice as large as the first one. Optionally, you can also define a vector of shapes, e.g. ``shape=['circle','square']``. + +.. image:: _static/geometry_models.png + :alt: Geometry models + +In the figure above a case of custom shape (first three panels) and custom model (last panel) are shown. + +Custom shapes +########################################## + +Custom shapes (which should not be confused with custom geometry model) can be created with ``shape=custom``. The user-defined structure is identified with ``shape_function`` and its options, ``shape_options``. The shape coordinates are normalized to :math:`(-0.5,0.5)` both in :math:`x` and :math:`y` coordinates. Multiple shape functions can also be declared in lists. The shape function must at least take the option ``area`` in input, which is internally calculated, so that the nominal porosity is respected. Note that ``area`` is normalized to the unit square. The values for ``shape_options`` can also be a ``list`` with the same size as the number of pores with custom shape. In this case, these values are passed separately to the pores. Here is an example of a custom shape model: + +.. 
code:: python + + from openbte import Geometry + import numpy as np + + def shape(options): + area = options['area'] + T = options['T'] + f = np.sqrt(2) + + poly_clip = [] + a = area/T/2 + + poly_clip.append([0,0]) + poly_clip.append([a/f,a/f]) + poly_clip.append([a/f-T*f,a/f]) + poly_clip.append([-T*f,0]) + poly_clip.append([a/f-T*f,-a/f]) + poly_clip.append([a/f,-a/f]) + + return poly_clip + + geo = Geometry(porosity=0.05,lx=100,ly=100,step=5,shape='custom',base=[[0,0]],lz=0,save=False,shape_function=shape,shape_options={'T':0.05}) + + + +Custom +################################## + +With ``model=custom`` it is possible to create a geometry by directly defining a series of polygons + +.. code:: python + + from openbte import Geometry + + k = 0.1 + h = 0.1 + d = 0.07 + poly1 = [[-k/2,0],[-k/2,-h],[k/2,0]] + poly2 = [[-0.6,0],[-0.6,-0.8],[0.6,-0.8],[0.6,-0],[k/2+d,0],[-k/2-d,-k-2*d],[-k/2-d,0]] + + Geometry(model='custom',lx=100,ly=100,step=5,polygons = [poly1,poly2]) + +The resulting shape is illustrated in the last panel of the figure above. Similarly to the ``lattice`` model, shapes that cross the boundaries are repeated periodically. This feature can be turned off with ``repeat=False``. Lastly, working with unnormalized coordinates can be enabled with ``relative=False``. + + + +Boundary conditions +################################## + +An external temperature gradient can be specified with ``direction='x'``. In this case, the component ``xx`` of the effective thermal conductivity tensor is evaluated. Boundary conditions can be applied using the keyword ``boundary = [boundary_x,boundary_y]``, where each element of the list can be ``Periodic``, ``Diffuse`` or ``Isothermal``. Note that along the applied perturbation, the only suitable keywords are ``Periodic`` and ``Isothermal``, while for the other directions one must use either ``Diffuse`` or ``Periodic``. 
In the case of ``Isothermal`` boundaries, the hot and cold contacts are thermalized to the deviational temperatures -0.5 K and 0.5 K, respectively. Lastly, the internal boundaries are always diffuse. + + +.. image:: _static/configurations.png + :alt: configurations + + +Heat source +################################## + +In addition to applying a difference of temperature, it is possible to add a heat source/sink. To do so, we simply use the model ``lattice`` and assign to some pores a heat source we want. Furthermore, it is possible to switch off the applied temperature gradient with ``apply_gradient=False``. For example: + + +.. code:: python + + G = 1e3 + Geometry(model='lattice',lx = 10,ly = 10, step = 0.5, base = [[0.2,0],[0,0],[-0.2,0]],porosity=0.1,shape='circle',apply_gradient=False,heat_source=[G,None,-G]) + +Note that ``G`` here is in ``K/s``. It assumes that your mode-resolved heat generation :math:`W_\mu` has the form :math:`W_\mu = C_\mu G`, where :math:`C_\mu` is the mode-resolved specific heat capacity. In the example above, we have a heat source, a heat sink and one pore. + + + +Disk +-------------------------------- + +With ``model=disk`` it is possible to have a disk as a simulation domain. This scenario is particularly useful when studying heat transport in the presence of heat sources. Besides the common flags, two additional parameters need to be specified, ``R`` and ``Rh``, i.e. the radius of the disk and the heated region, respectively. Lastly, the intensity of the heat source is specified by ``heat_source``. The boundary of the disk is thermalized to the deviational temperature of :math:`\Delta T = 0 K`. Here is an example + +.. code:: python + + from openbte import Geometry + + Geometry(model='disk',R1=1,R2=3,R3=10,step=0.5,delete_gmsh_files=False,heat_source=0.5) + +Here is an illustration for the disk simulation domain + +.. image:: _static/disk.png + :width: 200 + :align: center + :alt: disk + + +.. 
_gmsh: https://gmsh.info/ + + + diff --git a/docs/source/index.rst b/docs/source/index.rst index 984eefd4..3817a042 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,53 +1,39 @@ OpenBTE documentation =================================== - -OpenBTE is an open-source solver for the first-principles Boltzmann transport equation, with a focus on phonon transport. Here is an example to calculate the effective thermal conductivity of nanoporous silicon: - - -.. raw:: html - - - - -.. code-block:: python +.. toctree:: + :maxdepth: 1 + :caption: Install - from openbte import Geometry, Solver, Material, Plot + install - #Create Material - Material(source='database',filename='Si',temperature=300,model='rta2DSym') - #Create Geometry - Geometry(model='lattice',lx=100,ly=100,step=5,porosity=0.1,base=[[-0.1,-0.1],[0.1,0.1]]) +.. toctree:: + :maxdepth: 2 + :caption: Geometry - #Run the BTE - Solver(verbose=False) + geometry - #Plot Maps - Plot(model='maps',repeat=[3,3,1]); +.. toctree:: + :maxdepth: 2 + :caption: Material -.. raw:: html + material - .. toctree:: :maxdepth: 2 - :caption: Getting Started + :caption: Solver - features - install - run + solver .. toctree:: :maxdepth: 2 - :caption: Modules + :caption: Plot - Geometry - Material - Solver - Plot + plot .. toctree:: @@ -62,13 +48,6 @@ OpenBTE is an open-source solver for the first-principles Boltzmann transport eq examples -.. toctree:: - :maxdepth: 2 - :caption: Scripts - - scripts - - .. toctree:: :maxdepth: 2 diff --git a/docs/source/install.rst b/docs/source/install.rst index 14e21b94..66d2b461 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -1,92 +1,45 @@ -Install -=================================== - - -PYPI -#################################### - - .. code-block:: bash - - apt-get update - apt-get install build-essential libopenmpi-dev libgmsh-dev - pip install --upgrade --no-cache openbte - - DOCKER -#################################### - - Install Docker_. 
- - The image is installed with - - .. code-block:: bash - - docker pull romanodev/openbte:latest - - You can run OpenBTE sharing your current directory - - .. code-block:: bash - - docker run --shm-size=10g -v `pwd`:`pwd` -w `pwd` --user "$(id -u):$(id -g)" --net host romanodev/openbte mpirun -np 4 python input.py - - where we assumed you have 4 virtual CPUs. Note that in this case, your script ``input.py`` must be in your current directory. Also, for intensive calculations, you might want to increase the size of the used shared memory (here is ``10g``). Keep in mind that the above command will allow docker to write in your current directory. - - If you frequently use Docker, you may want to add your ``user`` to the Docker group. +######################################## - .. code-block:: bash - - sudo service docker start - sudo usermod -a -G docker $USER - sudo newgroup docker + .. code-block:: bash - Execute the following command to make it sure you are added to the Docker group - - .. code-block:: bash - - docker info + docker pull ghcr.io/romanodev/openbte:latest -WINDOWS -#################################### + Once you pull the image, you can run the code sharing your current directory - Use Docker after Installing MSMPI_. + .. code-block:: bash + docker run --shm-size=10g -v `pwd`:`pwd` -w `pwd` --user $(id -u):$(id -g) --rm --net host ghcr.io/romanodev/openbte:latest -np 4 python input.py -MacOS -#################################### - + where we assumed you have 4 CPUs. Note that in this case, your script ``input.py`` must be in your current directory. Also, for intensive calculations, you might want to increase the size of the available shared memory (here is ``10g``). Keep in mind that the above command will write in your current directory. - Install Anaconda_. - Install mpi4py with +PYPI +####################################### - .. 
code-block:: bash - - conda install -c conda-forge mpi4py +The latest release can be downloaded from the PyPi repository - Install gmsh_. - - Install XCode with .. code-block:: bash - xcode-select --install - - Install OpenBTE with - - .. code-block:: bash + apt-get update + apt-get install build-essential libopenmpi-dev libgmsh-dev + pip install --upgrade --no-cache openbte - pip install --upgrade --no-cache openbte +Note that the ``mpi`` and ``gmsh`` are needed. +Development version +####################################### +To access the latest (and often untested) features, you can install the code directly from ``github`` - Note the you need XSelect installed. +.. code-block:: bash + apt-get update + apt-get install build-essential libopenmpi-dev libgmsh-dev + pip install --no-cache --upgrade git+https://github.com/romanodev/OpenBTE.git -.. _link: https://colab.research.google.com/drive/1eAfX3PgyO7TyGWPee8HRx5ZbQ7tZfLDr?usp=sharing .. _Docker: https://docs.docker.com/engine/install/ubuntu/ -.. _Anaconda: https://docs.anaconda.com/anaconda/install/ -.. _MSMPI: https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi -.. _gmsh: https://gmsh.info/ diff --git a/docs/source/material.rst b/docs/source/material.rst new file mode 100644 index 00000000..9342bba2 --- /dev/null +++ b/docs/source/material.rst @@ -0,0 +1,149 @@ +Material +=================================== + +A material model takes bulk data as input and create the file ``material.npz``. There are several material models, as expanded upon below. + +Gray model approximation +----------------------------------- + +The simplest model assumes single-MFP BTE. To create ``material.npz`` the mean-free-path (in m) and the bulk thermal conductivity must be specified. Here is an example: + +.. code-block:: python + + Material(model='gray',mfp=1e-8,kappa=130) + +Note that the gray model assumes a two-dimensional material. 
+ +Relaxation time approximation +----------------------------------- + +The RTA model implements the anisotropic-mean-free-path-BTE (aMFP-BTE_). It needs a file called ``rta.npz`` in your current directory, with the following properties + +.. table:: + :widths: auto + :align: center + + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | **Item** | **Shape** | **Symbol [Units]** | **Name** | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | ``scattering_time`` | N | :math:`\tau` [:math:`s`] | Scattering time | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | ``heat_capacity`` | N | :math:`C` [:math:`\mathrm{W}\mathrm{s}\textrm{K}^{-1}\textrm{m}^{-3}`] | Specific heat capacity | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | ``group_velocity`` | N x 3 | :math:`\mathbf{v}` [:math:`\mathrm{m}\textrm{s}^{-1}`] | Group velocity | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | ``frequency`` | N | :math:`f` [:math:`Hz`] | Frequency | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + | ``kappa`` | 3 x 3 | :math:`\kappa` [:math:`\mathrm{W}\textrm{K}^{-1}\textrm{m}^{-1}`] | Thermal conductivity | + +------------------------+-------------+--------------------------------------------------------------------------+--------------------------+ + + +Each item must be a ``numpy`` array with the prescribed shape. 
The thermal conductivity tensor is given by :math:`\kappa^{\alpha\beta} = \sum_{\mu} C_\mu v_\mu^{\alpha} v_\mu^{\beta} \tau_\mu`. To save a dictionary in the ``npz`` format, we recommend to use following script + +.. code-block:: python + + from openbte.utils import * + + data = {'tau':tau,'C':C,'v':c,'kappa':kappa} + + save(data,'rta') + +With ``rta.npz`` in your current directory, ``material.npz`` can be generated with + +.. code-block:: python + + Material(model='rta2DSym',**options) + +.. table:: + :widths: auto + :align: center + + +------------------------+-------------------------+-------------------+ + | **Item** | **Note** | **Default** | + +------------------------+-------------------------+-------------------+ + | ``n_mfp`` | number of mfps | 50 | + +------------------------+-------------------------+-------------------+ + | ``n_phi`` | number of polar angles | 48 | + +------------------------+-------------------------+-------------------+ + + +Interface with AlmaBTE +############################################### + +AlmaBTE_ is a popular package that compute the thermal conductivity of bulk materials, thin films and superlattices. OpenBTE is interfaced with AlmaBTE for RTA calculations via the script ``almabte2openbte.py``. + +Assuming you have ``AlmaBTE`` in your current ``PATH``, this an example for ``Si``. + +- Download Silicon force constants from AlmaBTE's database_. + + .. code-block:: bash + + wget https://almabte.bitbucket.io/database/Si.tar.xz + tar -xf Si.tar.xz && rm -rf Si.tar.xz + +- Compute bulk scattering time with AlmaBTE. + + .. code-block:: bash + + echo " + + + " > inputfile.xml + + VCAbuilder inputfile.xml + phononinfo Si/Si_8_8_8.h5 300.0 + +- A file named ``Si_8_8_8_300K.phononinfo`` is in your current directory. Note that you can specify the temperature. Here we choose 300 K. `The file ``rta.npz`` can then be created with + + .. 
code-block:: bash + + AlmaBTE2OpenBTE Si_8_8_8_300K.phononinfo + +- Using OpenBTE command line interface, the ``material`` may be created with + + .. code-block:: bash + + OpenBTE $'Material:\n model: rta2DSym' + +Interface with Phono3Py +############################################### + +Phono3py_ calculates the bulk thermal conductivity using both the RTA and full scattering operator. Currently, only the former is supported. Once Phono3py is solved, the ``rta.npz`` is created by + + +.. code-block:: bash + + phono3pytoOpenBTE unitcell_name nx ny nz + +where ``unitcell_name`` is the file of your unit cell and ``nx ny nz`` is the reciprical space discretization. + +Here is an example assuming you have a working installation of Phono3py: + +.. code-block:: bash + + git clone https://github.com/phonopy/phono3py.git + + cd phono3py/examples/Si-PBEsol + + phono3py --dim="2 2 2" --sym-fc -c POSCAR-unitcell + + phono3py --dim="2 2 2" --pa="0 1/2 1/2 1/2 0 1/2 1/2 1/2 0" -c POSCAR-unitcell --mesh="8 8 8" --fc3 --fc2 --ts=100 + + Phono3py2OpenBTE POSCAR-unitcell 8 8 8 + +Note that ``rta.npz`` is also created in the case you want to use a RTA model. + + +.. _Deepdish: https://deepdish.readthedocs.io/ +.. _Phono3py: https://phonopy.github.io/phono3py/ +.. _AlmaBTE: https://almabte.bitbucket.io/ +.. _database: https://almabte.bitbucket.io/database/ +.. _aMFP-BTE: https://arxiv.org/abs/2105.08181 +.. _Deepdish: https://deepdish.readthedocs.io/ +.. _`Wu et al.`: https://www.sciencedirect.com/science/article/pii/S0009261416310193?via%3Dihub +.. 
_Phono3py: https://phonopy.github.io/phono3py/ + + + + + diff --git a/docs/source/Plot.rst b/docs/source/plot.rst similarity index 52% rename from docs/source/Plot.rst rename to docs/source/plot.rst index 8cf1134a..cd43d082 100644 --- a/docs/source/Plot.rst +++ b/docs/source/plot.rst @@ -1,7 +1,7 @@ Plot =================================== -The ``Plot`` module helps visualize results, based on the file ``material.h5``, ``geometry.h5`` and ``solver.h5``, which must be in the current directory. +The ``Plot`` module visualizes and analyzes results, reading ``material.npz``, ``geometry.npz`` and ``solver.nzp``. Internal Viewer ################################### @@ -13,23 +13,7 @@ OpenBTE features its own viewer, based on plotly_. This experimental feature can Plot(model='maps',repeat=[2,2,1]) -where ``repeat`` is used to plot the supercell. Nodes, that OpenBTE includes the periodic boundary conditions in translating cell data to node data. - -Here is an example: - -.. raw:: html - - - - - -Note that the external viewer can also be run with - -.. code:: bash - - gui - -where ``repeat=[2,2,1]`` is assumed. +where ``repeat`` is used to plot the supercell. External Viewer @@ -41,8 +25,7 @@ Alternatively, it is possible to write results in the ``vtu`` format Plot(model='vtu',repeat=[2,2,1]) -The created file ``output.vtk`` can be read by Paraview_ - +The created file ``output.vtk`` is compatible with Paraview_ Mode-resolved effective thermal conductivity ############################################ @@ -52,7 +35,6 @@ Once you have calculated the effective thermal conductivity, you may want to int Plot(model='kappa_mode') - Notes/limitations: - kappa_mode works only with the material model ``rta2DSym``. @@ -62,37 +44,6 @@ Notes/limitations: .. image:: _static/kappa_mode.png :width: 600 -Formulation -############################################ - - -The effective thermal conductivity, after interpolation, can be computed as - -.. 
math:: - - \kappa^{\mathrm{eff}}_\mu = C_\mu v_\mu^x \Lambda_\mu^{x,\mathrm{eff}} - -where - -.. math:: - - \Lambda_\mu^{x,\mathrm{eff}}= \frac{L}{\Delta T A_{\mathrm{hot}}}\int_{A_\mathrm{hot}} dS \Delta T_\mu(\mathbf{r}). - - - - - - - - - - - - - - - - diff --git a/docs/source/run.rst b/docs/source/run.rst deleted file mode 100644 index 36b85dcc..00000000 --- a/docs/source/run.rst +++ /dev/null @@ -1,92 +0,0 @@ -Run -=================================== - -OpenBTE can be run either via API or through a properly-formatted ``yaml`` file. - - -API -######################################################################################## - - Assuming you have `rta.h5` in your current directory, create the file ``input.py`` - - .. code-block:: python - - from OpenBTE import Solver,Geometry,Material - - Material(model='rta2DSym',n_phi=48) - - Geometry(lx=100,ly=100,lz=0,step=5,base=[[0,0]],porosity=0.3,save=True,shape='circle') - - Solver(only_fourier=False,max_bte_iter=100,alpha=1,max_bte_error= 1e-4) - - In you shell: - - .. code-block:: bash - - python input.py - - If you have a recent laptop, you probably have multiple cores. You can check it with ``lscpu``. To take advantage of of parallel computing, you may use ``mpirun`` - - .. code-block:: bash - - mpirun -np 4 python input.py - -YAML -######################################################################################## - - - Prepare a ``yaml`` file like the one below, and let's name it ``input.yaml``. Note that there are 1-1 correspondance between the API and the ``yaml`` version. - - .. code-block:: yaml - - --- - Material: - - model: rta2DSym - n_phi: 48 - - Geometry: - - lx: 100 - ly: 100 - lz: 0 - step: 5 - base: [[0,0]] - porosity: 0.3 - shape: circle - - Solver: - - only_fourier: True - max_bte_iter: 100 - alpha: 1 - max_bte_error: 1e-4 - - then, run OpenBTE - - .. code-block:: bash - - OpenBTE input.yaml - - Note that ``input.yaml`` is the default name, so you can omit in this case. 
- -COMMAND LINE -######################################################################################## - - For whe command line version we feed OpenBTE directly with the contents of the ``YAML`` file instead of the file itself. For example: - - .. code-block:: bash - - OpenBTE $'Material:\n model: rta2Sym' - - This is exactly like running ``OpenBTE`` with the yaml file: - - .. code-block:: yaml - - Material: - - model: rta2DSym - n_phi: 48 - - - diff --git a/docs/source/scripts.rst b/docs/source/scripts.rst deleted file mode 100644 index 6236a80f..00000000 --- a/docs/source/scripts.rst +++ /dev/null @@ -1,38 +0,0 @@ - -Post-processing Scripts -========================================= - - -Here is a collection of scripts I use to analyze results. - -Extract thermal conductivity -############################################ - - -.. code-block:: python - - from openbte.utils import * - - kappa_bte = load_data('solver')['kappa'][-1] - kappa_fourier = load_data('solver')['kappa'][0] - - -Extract mode-resolved thermal conductivity -############################################ - -First, you will need to create the ``kappa_mode`` file with - -.. code-block:: python - - from openbte import Plot - from openbte.utils import * - - #rta.npz, solver.npz and material.npz must be in your current directory - Plot(model='kappa_mode',save=True,show=False) - - #this create kappa_mode.rtz - data= load_data('kappa_mode') - - print(data.keys()) - - diff --git a/docs/source/Solver.rst b/docs/source/solver.rst similarity index 65% rename from docs/source/Solver.rst rename to docs/source/solver.rst index b299ef56..81ed224a 100644 --- a/docs/source/Solver.rst +++ b/docs/source/solver.rst @@ -1,20 +1,17 @@ Solver =================================== -Solver reads the files `geometry.h5` and `material.h5` and, after solving the BTE, creates `solver.h5`. Simple example: +Solver reads the files `geometry.npz` and `material.npz` and, after solving the BTE, creates `solver.npz`. Example: .. 
code:: python - Solver() - + Solver(**options) Options ------------------------- - - ``max_bte_iter`` : maximum number of BTE iteration - ``max_bte_error`` : maximum error for the BTE solver (computed for the thermal conductivity) - ``max_fourier_iter`` : maximum number of Fourier iteration - ``max_fourier_error`` : maximum error for the Fourier solver (computed for the thermal conductivity) - ``only_fourier`` : whether to compute only Fourier - - ``multiscale`` : when True (Default is ``False``), ballistic and diffusive phonons are computed more efficiently. diff --git a/openbte/almabte2openbte.py b/openbte/almabte2openbte.py index b4783677..d31c35c8 100644 --- a/openbte/almabte2openbte.py +++ b/openbte/almabte2openbte.py @@ -23,19 +23,11 @@ def almabte2openbte(options_almabte)->'rta': w = tmp[:,5 ] kappa = np.einsum('ki,kj,k,k->ij',v,v,tau,C) - mfp_bulk = np.einsum('qj,q->qj',v,tau) - r = np.linalg.norm(mfp_bulk[:,:2],axis=1) #absolute values of the projection - #I = np.where(r > 1e-10)[0] - #C = C[I] - #tau = tau[I] - #v = v[I] - #w = w[I] - - data = {'C':C,\ - 'tau':tau,\ - 'v':v,\ - 'f':w/2.0/np.pi,\ - 'kappa':kappa} + data = {'heat_capacity':C,\ + 'scattering_time':tau,\ + 'group_velocity':v,\ + 'frequency':w/2.0/np.pi,\ + 'thermal_conductivity':kappa} return utils.create_shared_memory_dict(data) diff --git a/openbte/app.py b/openbte/app.py deleted file mode 100644 index ca51924e..00000000 --- a/openbte/app.py +++ /dev/null @@ -1,247 +0,0 @@ -import plotly.offline as py -import plotly.graph_objs as go -import numpy as np -import plotly.io as pio -import dash -import dash_core_components as dcc -import dash_html_components as html -from threading import Timer -import dash_bootstrap_components as dbc -import flask -from dash.dependencies import Input, Output -import gzip -import pickle -import plotly.express as px -import pandas as pd -import json -import os -from mpi4py import MPI -from openbte import Plot - -comm = MPI.COMM_WORLD - -def load_data(fh): - - if 
os.path.isfile(fh + '.npz'): - with gzip.open(fh + '.npz', 'rb') as f: - - return pickle.load(f) - - -def get_layout(size = []): - - - sl = True - - dim = 2 if len(size) == 2 else 3 - x = 1;y=size[1]/size[0] - z= 1 if dim == 2 else size[2]/size[0] - - axis = dict( - backgroundcolor="rgb(230, 230,230)", - gridcolor="rgb(255, 255, 255)", - zerolinecolor="rgb(255, 255, 255)", - showbackground=False - ) - - xaxis = axis.copy();xaxis['title']=' x [nm]' - yaxis = axis.copy();yaxis['title']=' y [nm]' - zaxis = axis.copy();zaxis['title']=' z [nm]' - - font=dict( - family="Courier New, monospace", - size=12, - #color="#7f7f7f" - color="gray" - ) - - # Update 3D scene options - scene = dict( - aspectratio=dict(x= x, \ - y= y, \ - z= z), - xaxis= xaxis, - yaxis= yaxis, - zaxis= zaxis) - - - #camera - camera = dict( - up=dict(x=0, y=1, z=0), - center=dict(x=0, y=0, z=0), - eye=dict(x=0, y=0, z=1.5) - ) - - - layout = dict(font=font,\ - #title=title,\ - autosize=True,\ - scene=scene,\ - uirevision='static',\ - scene_camera = camera,\ - xaxis_showgrid=False,\ - yaxis_showgrid=False,\ - margin =dict(t=50, b=20, l=20, r=20),\ - paper_bgcolor='rgba(0,0,0,0)',\ - plot_bgcolor='rgba(0,0,0,0)') - - return layout - - -def App(**argv): - - if comm.rank == 0: - #load data-- - #data_tot = argv['bundle'] if 'bundle' in argv.keys() else load_data('bundle') - data_tot = {'sample_1': Plot(model='maps',show=False)} - - - - d_sample = html.Div([ - dcc.Dropdown( - id='samples', - value=0, - style = {}, - clearable=False, - searchable=False - ), - ]) - - - d_variable = html.Div([ - dcc.Dropdown( - id='variables', - value=0, - style= {}, - clearable=False, - searchable=False - ), - ]) - - - external_stylesheets=[dbc.themes.CYBORG] - server = flask.Flask(__name__) # define flask app.server - app = dash.Dash(__name__, external_stylesheets=external_stylesheets,server=server) - - #Init figure--- - fig = go.Figure() - 
fig.update_xaxes(showline=False,gridcolor='rgba(0,0,0,0)',zerolinecolor='rgba(0,0,0,0)') - fig.update_yaxes(showline=False,gridcolor='rgba(0,0,0,0)',zerolinecolor='rgba(0,0,0,0)') - - fig.update_layout( - plot_bgcolor='rgba(0,0,0,0)', - paper_bgcolor='rgba(0,0,0,0)', - font_color='rgba(0,0,0,0)', - ) - #------------- - - fig.update_xaxes(showline=False) - - fig_dcc = dcc.Graph(id='master',figure=fig,style={'left':'15%','width':'70%','height':'70%','bottom':'10%','position':'absolute'}) - - - - - dd_sample = html.Div( children = [d_sample],style = {'left':'1%','height':'10%','width':'25%','bottom':'88%','position':'absolute'}) - dd_variable = html.Div( children = [d_variable],style = {'left':'28%','height':'10%','width':'25%','bottom':'88%','position':'absolute'}) - - hidden = html.Div(id='intermediate-value', style={'display': 'none'}) - - url = dcc.Location(id='url', refresh=True) - - - #img = html.Div(html.Img(src=app.get_asset_url('logo.png'), style={'width':'18%','left':'80%','bottom':'88%','position':'absolute'})) - - #div1 = html.Div(style = {'left':'25%','height':'70%','width':'50%','bottom':'15%',"border":"2px white solid",'position':'absolute'},children=[fig_dcc,dd_sample,dd_variable,hidden,url]); - - #app.layout = html.Div(children = [div1,img],style = {'height': '100vh','width': '100%',"border":"2px white solid"}) - #app.layout = html.Div(children = [fig_dcc,dd_sample,dd_variable,hidden,url ,img],style = {'height': '100vh','width': '100%',"border":"2px white solid"}) - #layout = html.Div(children = [fig_dcc,dd_sample,dd_variable,hidden,url ,img],style = {'height': '100vh','width': '100%','position':'absolute'}) - layout = html.Div(children = [fig_dcc,dd_sample,dd_variable,hidden,url],style = {'height': '100vh','width': '100%','position':'absolute'}) - - #--------------------------------------------------------------------- - - - @app.callback( - Output('master','figure'), - Input('samples','value'), 
Input('variables','value'),Input('intermediate-value','children'),prevent_initial_call=True - ) - def plot(sample,variable,data_tot): - - - key_sample = list(data_tot.keys())[int(sample)] - data = data_tot[key_sample] - name = list(data['variables'].keys())[int(variable)] - variable = data['variables'][name] - elems = np.array(data['elems']) - nodes = np.array(data['nodes']) - node_data = variable['data'] - - - bb = str(round(data['bte'],2))+' W/m/K' if 'bte' in data.keys() else '--' - text = 'Bulk: ' + str(round(data['bulk'],2)) +' W/m/K
Fourier: '\ - + str(round(data['fourier'],2)) + ' W/m/K
BTE:' \ - + bb +'
' + \ - 'Gradient applied along ' + data['direction'] - - - - z = np.zeros(len(nodes)) if np.shape(nodes)[1] == 2 else nodes[:,2] - - plotly_data = go.Mesh3d(x=nodes[:,0], - y=nodes[:,1], - z=z, - colorscale='Viridis', - colorbar_title = '[' + variable['units'] + ']' if len(variable['units']) > 0 else "", - intensity = node_data, - colorbar=dict(len=0.5,x=0.14), - intensitymode='vertex', - i=elems[:,0], - j=elems[:,1], - k=elems[:,2], - name=name, - showscale = True, - visible=True) - - fig = go.Figure(data=plotly_data,layout=get_layout(data['size'])) - - - #fig.add_annotation( - # x=1, - # y=0.05, - # xref='paper', - # showarrow=False, - # yref='paper', - # text=text,align='left') - - - return fig - - - - @app.callback( - [Output('intermediate-value', 'children'),Output('samples','options'),Output('variables','options')], - [Input('url','search')],prevent_initial_call=True - ) - def update_output_div(pathname): - - input_value = pathname[1:] - - variables = data_tot[list(data_tot.keys())[0]]['variables'].keys() - - options_samples=[{'label': s, 'value': str(n)} for n,s in enumerate(data_tot)] - options_variables=[{'label': s, 'value': str(n)} for n,s in enumerate(variables)] - - - - return data_tot,options_samples,options_variables - - - app.layout = layout - if argv.setdefault('show',False): - app.run_server(host='localhost',port=8000) - - return app - - - diff --git a/openbte/bundle_data.py b/openbte/bundle_data.py deleted file mode 100644 index 00d81417..00000000 --- a/openbte/bundle_data.py +++ /dev/null @@ -1,38 +0,0 @@ -from openbte.utils import * - - - -def read_data(**argv): - - data = {} - if len(argv) == 0: - for name in sys.argv[1:]: - tmp = load_data(name[:-4]) - data[name[:-4]] = tmp - else: - for name,value in argv.items(): - tmp = load_data(name) - data[name] = value - - return data - - - - -def main(): - - - data = read_data() - - - save_data('bundle',data) - - - - - - - - - - diff --git a/openbte/elba.yaml b/openbte/elba.yaml deleted file mode 
100644 index 258aef35..00000000 --- a/openbte/elba.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - openbte: - functions: - - almabte2openbte:almabte2openbte - - rta2DSym:rta2DSym - - rta3D:rta3D - - gray2D:gray2D - - geometry:geometry - - first_guess:first_guess - - solve_rta:solve_rta - - kappa_mode:kappa_mode_2DSym - - line_data:plot_line_data - - line_data:compute_line_data - - material:database - - kappa_mode:kappa_mode_3D - - kappa_mode:plot_kappa_mode - - suppression:suppression_2DSym - - suppression:suppression_3D - - suppression:plot_suppression - - suppression:plot_angular_suppression - - helpers:hexagonal_lattice - - helpers:honeycomb_lattice - - helpers:square_lattice - - vtu:vtu - - viewer:plot_results - pipelines: - - kappa2DSym : database,rta2DSym,geometry,first_guess,solve_rta,kappa_mode_2DSym - - kappa2DSym_abte : almabte2openbte,rta2DSym,geometry,first_guess,solve_rta,kappa_mode_2DSym - - kappa3D : database,rta3D,geometry,first_guess,solve_rta,kappa_mode_3D - - kappa3D_external : rta3D,geometry,first_guess,solve_rta,kappa_mode_3D,vtu - - kappa2dGray : gray2D,geometry,first_guess,solve_rta - - hexagonal2DSym : hexagonal_lattice,kappa2DSym - - hexagonal3D : hexagonal_lattice,kappa3D - - - - diff --git a/openbte/first_guess.py b/openbte/first_guess.py index 4396212b..32a45afb 100644 --- a/openbte/first_guess.py +++ b/openbte/first_guess.py @@ -1,5 +1,5 @@ -def first_guess(geometry,material,options_first_guess)->'temperatures': +def first_guess(geometry,material,options_first_guess)->'first_guess': import openbte.fourier as fourier import numpy as np @@ -11,12 +11,12 @@ def first_guess(geometry,material,options_first_guess)->'temperatures': data = None if comm.rank == 0: - argv = {'geometry':geometry,'material':material} #this needs to change + argv = 
{'geometry':geometry,'material':material,'additional_heat_source':options_first_guess.setdefault('additional_heat_source',np.zeros_like(geometry['generation'])),'verbose':options_first_guess.setdefault('verbose',True)} #this needs to change - f = fourier.solve_fourier_single(argv) + f = fourier.solve_fourier_single(**argv) + + data = {'Temperature_Fourier':f['temperature'],'kappa_fourier':np.array([f['meta'][0]]),'Flux_Fourier':f['flux'],'gradT':f['grad'],'Heat_Generation':geometry['generation']} - data = {'data':f['temperature'],'kappa':[f['meta'][0]],'flux':f['flux'],'gradT':f['grad']} - return utils.create_shared_memory_dict(data) diff --git a/openbte/fourier.py b/openbte/fourier.py index ba0f7d6a..c8dca24a 100644 --- a/openbte/fourier.py +++ b/openbte/fourier.py @@ -7,7 +7,6 @@ import scipy.sparse as sp import time from scipy.sparse.linalg import lgmres -#import scikits.umfpack as um import sys import scipy from cachetools import cached,LRUCache @@ -49,7 +48,6 @@ def get_SU(bundle,k): def get_key(ll,kappa): - return (ll,tuple(map(tuple,kappa))) def unpack(data): @@ -71,28 +69,22 @@ def get_kappa_map_from_mat(**argv): return np.array([list(kappa)]*len(argv['geometry']['elems'])) -def solve_fourier_single(argv): +def solve_fourier_single(**argv): """ Solve Fourier with a single set of kappa """ - - #data = None - - #if comm.rank == 0: - mesh = argv['geometry'] if 'kappa_map_external' in argv.keys(): #read external kappa map mesh['elem_kappa_map'] = argv['kappa_map_external'] else: mesh['elem_kappa_map'] = get_kappa_map_from_mat(**argv) - #['kappa_map_external'] = mesh['elem_kappa_map'] kappa = -1 #it means that it will take the map from mesh - F,B,scale = assemble(mesh,kappa) + F,B,scale = assemble(mesh,argv['material'],kappa) - meta,temp,grad = solve_convergence(argv,kappa,B,splu(F),scale=scale) + meta,temp,grad = 
solve_convergence(argv,kappa,B,splu(F),scale=scale,generation=mesh['generation']+argv.setdefault('additional_heat_source',np.zeros_like(mesh['generation']))) kappa_map = get_kappa_map(mesh,kappa) @@ -104,8 +96,7 @@ def solve_fourier_single(argv): n_elems = len(mesh['elems']) if dim == 2:flux = np.append(flux, np.zeros((n_elems,1)), axis=1) - - if argv.setdefault('verbose',False): + if argv.setdefault('verbose',True): fourier_info(meta) data = {'meta':meta,'flux':flux,'temperature':temp,'grad':grad} @@ -168,14 +159,17 @@ def get_decomposed_directions(mesh,kappa_ll): dim = int(mesh['meta'][2]) normal = mesh['face_normals'][ll,0:dim] dist = mesh['dists'][ll,0:dim] - rot = kappa_map[:dim,:dim] - v_orth = np.dot(normal,np.dot(rot,normal))/np.dot(normal,dist) - v_non_orth = np.dot(rot,normal) - dist*v_orth + + if ll in (list(mesh['cold_sides']) + list(mesh['hot_sides'])): + v_orth = np.dot(normal,np.dot(rot,normal))/np.dot(normal,dist) + v_non_orth = np.zeros(dim) + else: + v_orth = np.dot(normal,np.dot(rot,normal))/np.dot(normal,dist) + v_non_orth = np.dot(rot,normal) - dist*v_orth return v_orth,v_non_orth[:dim] - def compute_laplacian(temp,**argv): grad_T = compute_grad(temp,**argv).T @@ -188,92 +182,34 @@ def compute_laplacian(temp,**argv): -@cached(cache=cache_compute_grad_data,key=lambda mesh,dummy:dummy) -def compute_grad_data(mesh,jump): - - #Compute deltas------------------- - rr = [] - for i in mesh['side_per_elem']: - rr.append(i*[0]) - - #this is wrong - for ll in mesh['active_sides'] : - kc1,kc2 = mesh['side_elem_map_vec'][ll] - ind1 = list(mesh['elem_side_map_vec'][kc1]).index(ll) - ind2 = list(mesh['elem_side_map_vec'][kc2]).index(ll) - - delta = 0 - if ll in mesh['periodic_sides']: - delta = mesh['periodic_side_values'][list(mesh['periodic_sides']).index(ll)] - else: delta = 0 - if jump == 0: delta= 0 - - rr[kc1][ind1] = [kc2,kc1, delta] - rr[kc2][ind2] = [kc1,kc2,-delta] - - return rr - - -def compute_grad(temp,**argv): - - mesh = argv['geometry'] - - 
argv.setdefault('jump',True) - rr = compute_grad_data(mesh,argv['jump']) - - diff_temp = [[temp[j[0]]-temp[j[1]]+j[2] for j in f] for f in rr] - - #Add boundary - #if 'TB' in argv.keys(): - # TB = argv['TB'] - # for n,(eb,sb) in enumerate(zip(*(mesh['eb'],mesh['sb']))): - # ind = list(mesh['elem_side_map_vec'][eb]).index(sb) - # diff_temp[eb][ind] = temp[eb]-TB[n] - - gradT = np.array([np.einsum('js,s->j',mesh['weigths'][k,:,:mesh['n_non_boundary_side_per_elem'][k]],np.array(dt)) for k,dt in enumerate(diff_temp)]) - - - return gradT - +def compute_secondary_flux(temp,kappa_map,**argv): + mesh = argv['geometry'] -def compute_secondary_flux(temp,kappa_map,**argv): - - #Compute gradient - gradT = compute_grad(temp,**argv) - #-------------- + gradT = compute_grad_common(temp,mesh) - mesh = argv['geometry'] dim = int(mesh['meta'][2]) n_elems = len(mesh['elems']) - #-----------SAVE some time-------------------------------------- - v_non_orth = {} - for ll in mesh['active_sides']: - (i,j) = mesh['side_elem_map_vec'][ll] - if not i==j: - - kappa_loc = get_kappa(mesh,i,j,ll,kappa_map) - (v_orth,v_non_orth[ll]) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) - - area = mesh['areas'][ll] - normal = mesh['face_normals'][ll,0:dim] - dist = mesh['dists'][ll,0:dim] - #----------------------------------------------------------------- - C = np.zeros(n_elems) for ll in mesh['active_sides']: area = mesh['areas'][ll] (i,j) = mesh['side_elem_map_vec'][ll] + kappa_loc = get_kappa(mesh,i,j,ll,kappa_map) + (v_orth,v_non_orth) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) if not i==j: w = mesh['interp_weigths'][ll] F_ave = (w*gradT[i] + (1.0-w)*gradT[j]) - #tmp = np.dot(F_ave,cache['v_non_orth'][ll])*area - tmp = np.dot(F_ave,v_non_orth[ll])*area + tmp = np.dot(F_ave,v_non_orth)*area C[i] += tmp C[j] -= tmp + #else: + # if ll in (list(mesh['cold_sides'])+list(mesh['hot_sides'])): + # F_ave = gradT[i] + # tmp = np.dot(F_ave,v_non_orth)*area + # C[i] += tmp return 
C/mesh['volumes'],gradT @@ -295,24 +231,20 @@ def compute_diffusive_thermal_conductivity(temp,kappa_map,**argv): kappa_loc = get_kappa(mesh,i,j,ll,kappa_map) (v_orth,v_non_orth) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) - deltaT = temp[i] - temp[j] - 1 - kappa_eff -= v_orth * deltaT * area + if ll in mesh['hot_sides']: #Isothermal + kappa_eff += np.einsum('ij,j,i->',kappa_loc,argv['grad'][i],mesh['face_normals'][ll][:dim])*area #this is because here flux is in + else: + deltaT = temp[i] - temp[j] - 1 #Periodic + kappa_eff -= v_orth * deltaT * area - if 'grad' in argv.keys(): - gradT = argv['grad'] - w = mesh['interp_weigths'][ll] - grad_ave = w*gradT[i] + (1.0-w)*gradT[j] - k_non_orth += np.dot(grad_ave,v_non_orth)/2 * area - area_tot +=area - return (kappa_eff+k_non_orth)*mesh['meta'][1] -def solve_convergence(argv,kappa,B,SU,log=False,scale=[]): +def solve_convergence(argv,kappa,B,SU,log=False,scale=[],generation=None): - argv.setdefault('max_fourier_error',1e-6) - argv.setdefault('max_fourier_iter',10) + argv.setdefault('max_fourier_error',1e-10) + argv.setdefault('max_fourier_iter',20) C = np.zeros_like(B) @@ -324,25 +256,26 @@ def solve_convergence(argv,kappa,B,SU,log=False,scale=[]): dim = int(mesh['meta'][2]) grad = np.zeros((n_elems,dim)) - - n_iter = 0;kappa_old = 0;error = 1 + n_iter = 0;kappa_old = 0;error = 1;C_old = np.zeros(n_elems) while error > argv['max_fourier_error'] and \ n_iter < argv['max_fourier_iter'] : - RHS = B + C + RHS = B + C + generation if len(scale) > 0: RHS *=scale temp = SU.solve(RHS) - temp = temp - (max(temp)+min(temp))/2.0 + #temp = temp - (max(temp)+min(temp))/2.0 + argv['grad'] = grad kappa_eff = compute_diffusive_thermal_conductivity(temp,kappa_map,**argv) - - error = abs((kappa_eff - kappa_old)/kappa_eff) kappa_old = kappa_eff n_iter +=1 + #TODO: check error on residual C,grad = compute_secondary_flux(temp,kappa_map,**argv) + error = (np.linalg.norm(C-C_old))/np.linalg.norm(C) + C_old = C.copy() return 
[kappa_eff,error,n_iter],temp,grad @@ -371,11 +304,18 @@ def get_kappa_map(mesh,kappa): return kappa -@cached(cache=cache_assemble, key=lambda mesh, kappa: hashkey(kappa)) -def assemble(mesh,kappa): +#@cached(cache=cache_assemble, key=lambda mesh, kappa: hashkey(kappa)) +def assemble(mesh,material,kappa): kappa_map = get_kappa_map(mesh,kappa) + #Boundary conductance + if 'boundary_conductance' in material.keys(): + h = material['boundary_conductance'][0]*1e-9 + else: + h = None + #-------------------- + n_elems = len(mesh['elems']) dim = int(mesh['meta'][2]) @@ -388,11 +328,10 @@ def assemble(mesh,kappa): (i,j) = mesh['side_elem_map_vec'][ll] vi = mesh['volumes'][i] vj = mesh['volumes'][j] + kappa_loc = get_kappa(mesh,i,j,ll,kappa_map) if not i == j: - kappa_loc = get_kappa(mesh,i,j,ll,kappa_map) - - (v_orth,dummy) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) + (v_orth,_) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) iff.append(i) jff.append(i) @@ -408,12 +347,29 @@ def assemble(mesh,kappa): dff.append(-v_orth/vj*area) if ll in mesh['periodic_sides']: kk = list(mesh['periodic_sides']).index(ll) + vi = mesh['volumes'][i] B[i] += mesh['periodic_side_values'][kk]*v_orth/vi*area B[j] -= mesh['periodic_side_values'][kk]*v_orth/vj*area + + for value,ll in zip(mesh['fixed_temperature'],mesh['fixed_temperature_sides']): + area = mesh['areas'][ll] + (i,j) = mesh['side_elem_map_vec'][ll] + (v_orth,_) = get_decomposed_directions(mesh,get_key(ll,kappa_loc)) + if h == None: #Dirichlet B.C. 
+ a = v_orth + else: + a = v_orth*h/(h + v_orth) + iff.append(i) + jff.append(i) + dff.append(a/vi*area) + B[i] += value*a/vi*area + + + F = sp.csc_matrix((np.array(dff),(np.array(iff),np.array(jff))),shape = (n_elems,n_elems)) - scale = fix_instability(F,B,scale=False) + scale = fix_instability(F,B,scale=False,floating=(len(mesh['cold_sides'])+len(mesh['hot_sides']))==0) return F,B,scale diff --git a/openbte/full_model.py b/openbte/full_model.py deleted file mode 100644 index a3603254..00000000 --- a/openbte/full_model.py +++ /dev/null @@ -1,118 +0,0 @@ -import sys -import numpy as np -from scipy.sparse.linalg import lsqr -from scipy.sparse.linalg import inv as sinv -from scipy.linalg import inv as inv -from scipy.sparse import csc_matrix -import scipy -from .utils import * -import time - -def energy_conserving(W): - - - bottom = np.sum(np.absolute(W)) - r = np.sum(np.absolute(np.einsum('ij->j',W))) - print('Row check:' + str(r/bottom)) - - c = np.sum(np.absolute(np.einsum('ij->i',W))) - print('Column Check:' + str(c/bottom)) - - nm = np.shape(W)[0] - delta = np.einsum('ij->j',W) - b = -2*np.concatenate((delta,delta)) - A = np.zeros((2*nm,2*nm)) - A[0:nm,0:nm] = nm*np.eye(nm) - A[nm:,nm:] = nm*np.eye(nm) - A[0:nm,nm:] = 1 - A[nm:,0:nm] = 1 - l = np.linalg.solve(A,b) - lr = l[:nm] - lv = l[nm:] - beta = np.zeros((nm,nm)) - for i in range(nm): - for j in range(nm): - beta[i,j] = -(lr[j]+lv[i])/2 - W -= beta - - bottom = np.sum(np.absolute(W)) - r = np.sum(np.absolute(np.einsum('ij->j',W))) - print('') - print(' After:') - print('Row check:' + str(r/bottom)) - c = np.sum(np.absolute(np.einsum('ij->i',W))) - print('Column Check:' + str(c/bottom)) - - return W - -def full(**argv): - - filename = argv.setdefault('input_filename','full.npz') - - print(' ') - print('Importing ' + filename + ' ... ',end= '') - data = load_data('full') - print(' done') - print(' ') - - print('Computing sigmas ... 
',end= '') - v = data['v'] - C = data['C'] - sigma = np.einsum('u,ui->ui',C,v) - - if np.linalg.norm(sigma,axis=0)[2] == 0.0: - dim = 2 - else: - dim = 3 - - - print('... done') - print(' ') - - print('Simmetrizing scattering matrix A ...',end= '') - W = data['W'] - - #Cut zero velocity modes ----- - tmp = np.linalg.norm(sigma,axis=1) - index = (tmp>0).nonzero()[0] - exclude = (tmp==0).nonzero()[0] - sigma = sigma[index] - W = np.delete(W,exclude,0) - W = np.delete(W,exclude,1) - tc = C[index]/sum(C[index]) - print(np.shape(W)) - #----------------------------- - - - W = 0.5*W + 0.5*W.T - nm = W.shape[0] - - print('Making W energy conserving ...',end= '') - W = energy_conserving(W) - print('... done') - print(' ') - print('Computing kappa bulk ...') - - #kappa = np.einsum('ui,uq,qj->ij',sigma,np.linalg.pinv(W),sigma)/data['alpha'] - - k_xx = compute_kappa(W*data['alpha'],sigma[:,0]) - k_yy = compute_kappa(W*data['alpha'],sigma[:,1]) - kappa = np.array([[k_xx,0],[0,k_yy]]) - - - print(kappa) - print(' done') - print(' ') - #---------- - - data = {'tc':tc,'W':W,'sigma':sigma[:,:dim],'kappa':kappa[:dim,:dim],'model':[10],'alpha':data['alpha']} - - return data - - - - - - - - diff --git a/openbte/geometry.py b/openbte/geometry.py index be663a7a..c9eed092 100644 --- a/openbte/geometry.py +++ b/openbte/geometry.py @@ -1,35 +1,37 @@ -def geometry(options_geometry)->'geometry': +import openbte.mesh as mesh +import openbte.mesher as mesher +import openbte.utils as utils +from mpi4py import MPI + +comm = MPI.COMM_WORLD - import openbte.mesh as mesh - import openbte.mesher as mesher - import openbte.utils as utils - from mpi4py import MPI - comm = MPI.COMM_WORLD +def geometry(options_geometry)->'geometry': - options_geometry.setdefault('model','lattice') + options_geometry.setdefault('model','lattice') + options_geometry.setdefault('lx',0) options_geometry.setdefault('ly',options_geometry['lx']) data = None if comm.rank == 0: options_geometry['dmin'] = 0 + 
options_geometry['overlap'] = 0 mesher.Mesher(options_geometry) data = mesh.import_mesh(**options_geometry) - mesh.compute_data(data,**options_geometry) - - return utils.create_shared_memory_dict(data) + mesh.compute_data(data,**options_geometry) + return utils.create_shared_memory_dict(data) def Geometry(**options_geometry): data = geometry(options_geometry) #quick hack if options_geometry.setdefault('save',True) and comm.rank == 0: - save_data(options_geometry.setdefault('output_filename','geometry'),data) + utils.save(options_geometry.setdefault('output_filename','geometry'),data) return data diff --git a/openbte/gray2D b/openbte/gray2D deleted file mode 100644 index e69de29b..00000000 diff --git a/openbte/gray2D.py b/openbte/gray2D.py index 1a701526..70e142e3 100644 --- a/openbte/gray2D.py +++ b/openbte/gray2D.py @@ -11,7 +11,7 @@ def gray2D(options_material)->'material': #Parse options n_phi = options_material.setdefault('n_phi',48) - kappa_bulk = options_material['kappa'] + kappa_bulk = options_material.setdefault('kappa',1) kappa = options_material['kappa']*np.eye(2) mfp_bulk = np.array([options_material['mfp']]) @@ -20,17 +20,18 @@ def gray2D(options_material)->'material': phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True) polar = np.array([np.sin(phi),np.cos(phi)]).T fphi= np.sinc(Dphi/2.0/np.pi) - polar_ave = polar*fphi + polar_ave = polar#*fphi #------------------ - sigma_sampled = np.zeros((1,n_phi,2)) temp_coeff = np.zeros((1,n_phi)) temp_coeff[0,:] = 1/n_phi suppression = np.zeros((1,n_phi,1)) + sigma_sampled = np.zeros((1,n_phi,2)) for p in range(n_phi): - sigma_sampled[0,p] += kappa_bulk/mfp_bulk * polar_ave[p]/n_phi*2 + sigma_sampled[0,p] = kappa_bulk/mfp_bulk * polar_ave[p]/n_phi*2 VMFP = np.einsum('m,qi->mqi',mfp_bulk,polar_ave) + kappa_sampled = np.einsum('mpi,mpj->ij',VMFP,sigma_sampled) tc = temp_coeff/np.sum(temp_coeff) @@ -39,6 +40,9 @@ def gray2D(options_material)->'material': data['sigma'] = sigma_sampled data['kappa'] = 
kappa_sampled data['tc'] = tc + if options_material.setdefault('boundary_conductance',False): + data['boundary_conductance'] =np.pi*kappa_bulk/mfp_bulk + data['r_bulk'] = mfp_bulk data['F'] = VMFP data['VMFP'] = polar_ave diff --git a/openbte/gray2DSym.py b/openbte/gray2DSym.py deleted file mode 100644 index 42aca5b8..00000000 --- a/openbte/gray2DSym.py +++ /dev/null @@ -1,109 +0,0 @@ -import sys -import numpy as np -from .utils import * - - -def gray2DSym(**argv): - - #Import data---- - #kappa_bulk = argv['kappa'] - #kappa = argv['kappa']*np.eye(2) - #mfp_bulk = argv['mfp'] - #n_phi = int(argv.setdefault('n_phi',48)) - #n_mfp = int(argv.setdefault('n_mfp',50)) - #n_theta = int(argv.setdefault('n_theta',48)) - #Dphi = 2*np.pi/n_phi - #phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True) - #polar = np.array([np.sin(phi),np.cos(phi)]).T - #Dtheta = np.pi/n_theta - #theta = np.linspace(Dtheta/2.0,np.pi - Dtheta/2.0,n_theta) - #dtheta = 2.0*np.sin(Dtheta/2.0)*np.sin(theta) - #domega = np.outer(dtheta,Dphi * np.ones(n_phi)) - #ftheta = (1-np.cos(2*theta)*np.sinc(Dtheta/np.pi))/(np.sinc(Dtheta/2/np.pi)*(1-np.cos(2*theta))) - #fphi = np.sinc(Dphi/2.0/np.pi) - #polar_ave = polar*fphi - #azimuthal_ave = np.array([ftheta*np.sin(theta),ftheta*np.sin(theta),np.cos(theta)]).T - #direction_ave = np.einsum('tj,pj->tpj',azimuthal_ave,polar_ave) - #direction_int = np.einsum('ijl,ij->ijl',direction_ave,domega) - #------------------------------------------------------ - - kappa_bulk = argv['kappa'] - kappa = argv['kappa']*np.eye(3) - mfp_bulk = argv['mfp'] - #kappa_bulk = np.eye(3) - n_phi = int(argv.setdefault('n_phi',48)) - n_mfp = int(argv.setdefault('n_mfp',50)) #this is K - n_theta = int(argv.setdefault('n_theta',48)) - nm = n_phi * n_mfp - #Create sampled MFPs - min_mfp = 1e-10 #m - max_mfp = 1e-3 #m - mfp = np.logspace(np.log10(min_mfp),np.log10(max_mfp),n_mfp) - n_mfp = len(mfp) - #Polar Angle--------- - Dphi = 2*np.pi/n_phi - phi = 
np.linspace(0,2.0*np.pi,n_phi,endpoint=False) - #Azimuthal Angle------------------------------ - Dtheta = np.pi/n_theta - theta = np.linspace(Dtheta/2.0,np.pi - Dtheta/2.0,n_theta) - dtheta = 2.0*np.sin(Dtheta/2.0)*np.sin(theta) - domega = np.outer(dtheta,Dphi * np.ones(n_phi)) - - #Compute directions--- - polar = np.array([np.sin(phi),np.cos(phi),np.ones(n_phi)]).T - azimuthal = np.array([np.sin(theta),np.sin(theta),np.cos(theta)]).T - direction = np.einsum('tj,pj->tpj',azimuthal,polar) - - #Compute average--- - ftheta = (1-np.cos(2*theta)*np.sinc(Dtheta/np.pi))/(np.sinc(Dtheta/2/np.pi)*(1-np.cos(2*theta))) - fphi = np.sinc(Dphi/2.0/np.pi) - polar_ave = polar*fphi - azimuthal_ave = np.array([ftheta*np.sin(theta),ftheta*np.sin(theta),np.cos(theta)]).T - direction_ave = np.einsum('tj,pj->tpj',azimuthal_ave,polar_ave) - direction_int = np.einsum('ijl,ij->ijl',direction_ave,domega) - - #----------------- - - #Gray 2D Sym-------- - n_tot = n_phi*n_theta - kappa_directional = np.zeros((1,n_tot,3)) - temp_coeff = np.zeros((1,n_tot)) - dirr = np.zeros((n_tot,3)) - for p in range(n_phi): - for t in range(n_theta): - index = n_phi * t + p - kappa_directional[0,index] += kappa_bulk/mfp_bulk * direction_ave[t,p]/(4.0*np.pi)*3 - temp_coeff[0,index] = domega[t,p] - #----------------- - - #Gray 2D-------- - #n_tot = n_phi - #kappa_directional = np.zeros((1,n_tot,3)) - #temp_coeff = np.zeros((1,n_tot)) - #dirr = np.zeros((n_tot,3)) - #for p in range(n_tot): - # index = p - # kappa_directional[0,p] += kappa_bulk/mfp_bulk * polar_ave[p]*Dphi/(2*np.pi)*2 - # dirr[index] = polar_ave[p] - # temp_coeff[0,index] = Dphi - #----------------- - - - - - tc = temp_coeff/np.sum(temp_coeff) - - #Final---- - return {'tc':tc,\ - 'sigma':kappa_directional[:,:,:2],\ - 'kappa':kappa,\ - 'mfp_average':np.array([mfp_bulk]),\ - 'VMFP':dirr[:,:2],\ - 'mfp_sampled':np.array([mfp_bulk]),\ - 'model':np.array([1]),\ - 'suppression':np.zeros(1),\ - 'phi': phi ,\ - 'directions': polar ,\ - 
'kappam':np.array([kappa_bulk])} - - diff --git a/openbte/gui.py b/openbte/gui.py deleted file mode 100644 index 9ae2582e..00000000 --- a/openbte/gui.py +++ /dev/null @@ -1,7 +0,0 @@ -from openbte import Plot - -def main(): - Plot(model='maps',repeat=[2,2,1]) - - - diff --git a/openbte/helpers.py b/openbte/helpers.py index 55d5b262..da4e819f 100644 --- a/openbte/helpers.py +++ b/openbte/helpers.py @@ -36,6 +36,30 @@ def honeycomb_lattice(options_honeycomb_lattice)->'options_geometry': return {'lx':lx,'ly':ly,'porosity':phi,'step':options_honeycomb_lattice['step'],\ 'lz':options_honeycomb_lattice['lz'],'shape':'circle','base':[[0.0,1.0/3.0],[0.0,-1.0/3.0],[-0.5,-1.0/6.0],[-0.5,1.0/6.0]],'direction':options_honeycomb_lattice.setdefault('direction','x')} +def pore_grid(options_pore_grid)->'options_geometry': + + import numpy as np + + #Import grid--- + grid = np.array(options_pore_grid['grid']) + N = int(np.sqrt(len(grid))) + grid = grid.reshape((N,N)).nonzero() + base = np.zeros((len(grid[0]),2)) + + for n,(x,y) in enumerate(zip(grid[0],grid[1])): + base[n] = [(y+1)/N-1/2/N-0.5,1-((x+1)/N-1/2/N-0.5)] + + phi = len(base)/N/N*options_pore_grid['porosity'] + + l = options_pore_grid['L'] + + if len(base) > 0: + options_pore_grid.update({'model':'lattice','lx':l,'ly':l,'porosity':phi,'step':options_pore_grid['step']*l,'shape':options_pore_grid.setdefault('shape','square'),'base':base}) + else: #Bulk + options_pore_grid.update({'model':'bulk','lx':l,'ly':l,'step':options_pore_grid['step']*l}) + + return options_pore_grid + def square_lattice(options_square_lattice)->'options_geometry': @@ -51,8 +75,10 @@ def square_lattice(options_square_lattice)->'options_geometry': shape = options_square_lattice.setdefault('shape','circle') - return {'lx':lx,'ly':ly,'porosity':porosity,'step':options_square_lattice['step'],\ - 'lz':options_square_lattice.setdefault('lz',0),'shape':shape,'base':[[0.0,0.0]],'direction':options_square_lattice.setdefault('direction','x')} + 
options_square_lattice.update({'lx':lx,'ly':ly,'porosity':porosity,'step':options_square_lattice['step'],\ + 'lz':options_square_lattice.setdefault('lz',0),'shape':shape,'base':[[0.0,0.0]],'direction':options_square_lattice.setdefault('direction','x')}) + + return options_square_lattice diff --git a/openbte/kappa_mode.py b/openbte/kappa_mode.py index 7b5cac3a..33e65990 100644 --- a/openbte/kappa_mode.py +++ b/openbte/kappa_mode.py @@ -1,6 +1,8 @@ def plot_kappa_mode(kappa_mode): + from mpi4py import MPI + comm = MPI.COMM_WORLD if comm.rank == 0: from pubeasy import MakeFigure @@ -18,7 +20,8 @@ def kappa_mode_2DSym(material,solver)->'kappa_mode': import openbte.utils as utils from mpi4py import MPI comm = MPI.COMM_WORLD - data = None + data = None + if comm.rank == 0: mfp_bulk = material['mfp_bulk'] r_bulk = material['r_bulk'] @@ -41,10 +44,11 @@ def kappa_mode_2DSym(material,solver)->'kappa_mode': np.add.at(mfp_nano,np.arange(n_bulk),a2*b1*mfp_nano_sampled[m2,p1]) np.add.at(mfp_nano,np.arange(n_bulk),a2*b2*mfp_nano_sampled[m2,p2]) + kappa_nano = sigma_bulk[:,0]*mfp_nano data = {'kappa_nano':kappa_nano,'mfp_nano':mfp_nano,'kappa_bulk':kappa_bulk,'mfp_bulk':mfp_bulk,'f':material['f']} - + return utils.create_shared_memory_dict(data) diff --git a/openbte/line_data.py b/openbte/line_data.py index 7c538869..c0fb1c3c 100644 --- a/openbte/line_data.py +++ b/openbte/line_data.py @@ -31,6 +31,19 @@ def plot_line_data(line_data): fig.finalize(grid=True,show=True,xlim=[minl,maxl]) +def interpolate_data(solver,geometry,variable,points)->'interpolated_data': + + from matplotlib.tri import LinearTriInterpolator as TriInterpolator + from matplotlib.tri import Triangulation as triangulation + + data_master = utils.extract_variables(solver,geometry) + data_master = utils.expand_variables(data_master,geometry) + utils.get_node_data(data_master,geometry) + data = data_master[variable]['data'] + + tri = triangulation(geometry['nodes'][:,0],geometry['nodes'][:,1],geometry['elems']) + + 
return np.array(TriInterpolator(tri,data)(points[:,0],points[:,1]) ) def compute_line_data(solver,geometry,compute_line_data_options)->'line_data': @@ -54,7 +67,7 @@ def compute_line_data(solver,geometry,compute_line_data_options)->'line_data': p2 = np.array([geometry['size'][0]/2-delta,y]) #-------------------- - data_master = utils.extract_variables(solver) + data_master = utils.extract_variables(solver,geometry) data_master = utils.expand_variables(data_master,geometry) if dof=='nodes': diff --git a/openbte/material.py b/openbte/material.py index 3f01422d..7737f36b 100755 --- a/openbte/material.py +++ b/openbte/material.py @@ -2,25 +2,23 @@ import os import math from .gray2D import * -from .gray2DSym import * from .rta2DSym import * from .rta3D import * from mpi4py import MPI import shutil +import openbte.utils as utils comm = MPI.COMM_WORLD def database(options_database)->'rta': - import openbte.utils as utils data = None + + database_material = options_database['filename'] + prefix = options_database.setdefault('prefix',os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/openbte/materials') - if comm.rank == 0: - database_material = options_database['filename'] - prefix = options_database.setdefault('prefix',os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/openbte/materials') - - filename = prefix + '/' + database_material + filename = prefix + '/' + database_material - data = utils.load_data(filename) + data = utils.load(filename) return utils.create_shared_memory_dict(data) @@ -36,10 +34,11 @@ def Material(**argv): #set up database if not model == 'gray': source = argv.setdefault('source','database') + if source == 'database': rta = database({'filename':argv['filename']}) else : - rta = load_data(argv['filename']) + rta = load(argv['filename']) if model == 'rta2DSym': data = rta2DSym(rta,argv) @@ -68,7 +67,7 @@ def Material(**argv): if argv.setdefault('save',True): if comm.rank == 0: - 
utils.save_data(argv.setdefault('output_filename','material'),data) + utils.save(argv.setdefault('output_filename','material'),data) return data diff --git a/openbte/materials/rta_Si_300.npz b/openbte/materials/rta_Si_300.npz index 2277b491..7dbe182a 100644 Binary files a/openbte/materials/rta_Si_300.npz and b/openbte/materials/rta_Si_300.npz differ diff --git a/openbte/mesh.py b/openbte/mesh.py index b71a9f99..cd6115e9 100644 --- a/openbte/mesh.py +++ b/openbte/mesh.py @@ -20,22 +20,32 @@ comm = MPI.COMM_WORLD def compute_boundary_condition_data(data,**argv): - side_elem_map = data['side_elem_map_vec'] face_normals = data['face_normals'] volumes = data['volumes'] dim = data['dim'] + #Apply gradient + if argv.setdefault('apply_gradient',True): + DeltaT = 1 + else: + DeltaT = 1e-18 + #-------------------- + + direction = data['direction'] + + gradir = 0 + applied_grad = [0,0,0] if direction == 0: gradir = 0 - applied_grad = [1,0,0] + applied_grad = [DeltaT,0,0] if direction == 1: gradir = 1 - applied_grad = [0,1,0] + applied_grad = [0,DeltaT,0] if direction == 2: - applied_grad = [0,0,1] + applied_grad = [0,0,DeltaT] gradir = 2 if gradir == 0: @@ -49,6 +59,7 @@ def compute_boundary_condition_data(data,**argv): if gradir == 2: flux_dir = [0,0,1] length = data['size'][2] + #---------------- delta = 1e-2 nsides = len(data['sides']) @@ -56,13 +67,12 @@ def compute_boundary_condition_data(data,**argv): tmp = list(data['periodic_sides']) + list(data['inactive_sides']) - DeltaT = 1 + for kl,ll in enumerate(tmp) : Ee1,e2 = side_elem_map[ll] normal = face_normals[ll] tmp = np.dot(normal,flux_dir) - if tmp < - delta : side_value[ll] = -DeltaT if tmp > delta : @@ -104,7 +114,6 @@ def compute_boundary_condition_data(data,**argv): B[i,j] = side_value[side[0]] B[j,i] = side_value[side[1]] - if abs(side_value[side[0]]) > 0: pp += ((data['ij'].index([i,j]),side_value[side[0]]),) @@ -123,42 +132,71 @@ def compute_boundary_condition_data(data,**argv): #Get flux sides for thermal 
conductivity calculations-- flux_sides = [] #where flux goes total_area = 0 - area_flux = -1 + #area_flux = -1 + + #Compute area flux-- + if dim == 2 and direction == 0: + area_flux = data['size'][1] + elif dim == 3 and direction == 0: + area_flux = data['size'][1]*data['size'][2] + elif dim == 2 and direction == 1: + area_flux = data['size'][0] + elif dim == 3 and direction == 1: + area_flux = data['size'][0]*data['size'][2] + elif direction == 2: + area_flux = data['size'][0]*data['size'][1] + elif direction == -1: + area_flux = 0 + else: + raise Exception("Can't recongnize combination of dimension and direction of the applied gradient",dim,direction) + #========================== + #From Periodic sides - for ll in data['periodic_sides']: + kappa_mask_thermalizing = np.zeros(len(data['elems'])) + for ll in list(data['periodic_sides']) + list(data['fixed_temperature_sides']): e1,e2 = side_elem_map[ll] normal = data['face_normals'][ll] - tmp = np.abs(np.dot(normal,flux_dir)) - if tmp > delta : #either negative or positive - if abs(normal[0]) == 1: - if dim == 2: - area_flux = data['size'][1] - else: - area_flux = data['size'][1]*data['size'][2] - total_area += data['areas'][ll] - - elif abs(normal[1]) == 1: - if data['dim'] == 2: - area_flux = data['size'][0] - else: - area_flux = data['size'][0]*data['size'][2] - total_area += data['areas'][ll] - - else : #along z - area_flux = data['size'][0]*data['size'][1] - total_area += data['areas'][ll] - + tmp = np.dot(normal,flux_dir) + if tmp == 1: flux_sides.append(ll) + total_area += data['areas'][ll] + #if mode == 'isothermal': + # kappa_mask_thermalizing[e1] = data['areas'][ll] + + #if abs(tmp) > delta : #either negative or positive + # if (mode =='isothermal' and (ll in data['hot_sides'])) or mode =='periodic': #If isothermal we only consider cold contact + # if abs(normal[0]) == 1: + # if dim == 2: + # area_flux = data['size'][1] + # else: + # area_flux = data['size'][1]*data['size'][2] + # total_area += 
data['areas'][ll] + + # elif abs(normal[1]) == 1: + # if data['dim'] == 2: + # area_flux = data['size'][0] + # else: + # area_flux = data['size'][0]*data['size'][2] + # total_area += data['areas'][ll] + + # else : #along z + # area_flux = data['size'][0]*data['size'][1] + # total_area += data['areas'][ll] + + # flux_sides.append(ll) + # if mode == 'isothermal': + # kappa_mask_thermalizing[e1] = data['areas'][ll] + + #------- - - #-------THIS + if argv.setdefault('contact_area','box') == 'box': - kappa_factor = data['size'][gradir]/area_flux + kappa_factor = data['size'][gradir]/area_flux if area_flux > 0 else 0 else: - kappa_factor = data['size'][gradir]/total_area + kappa_factor = data['size'][gradir]/total_area if total_area > 0 else 0 - data['kappa_mask']= -np.array(np.sum(B_with_area_old.todense(),axis=0))[0]*kappa_factor*1e-18 + data['kappa_mask']= (-np.array(np.sum(B_with_area_old,axis=0))[0]-kappa_mask_thermalizing)*kappa_factor*1e-18 data['periodic_side_values'] = np.array([periodic_side_values[ll] for ll in data['periodic_sides']]) data['pp'] = np.array(pp) data['applied_gradient'] = np.array(applied_grad) @@ -168,20 +206,26 @@ def compute_boundary_condition_data(data,**argv): def compute_dists(data): - dists_side = np.zeros((len(data['sides']),3)) side_elem_map = data['side_elem_map_vec'] centroids = data['centroids'] for ll in data['active_sides']: - if not (ll in (list(data['boundary_sides']) + list(data['fixed_sides']))): - elem_1 = side_elem_map[ll][0] - elem_2 = side_elem_map[ll][1] - c1 = centroids[elem_1] + elem_1 = side_elem_map[ll][0] + elem_2 = side_elem_map[ll][1] + c1 = centroids[elem_1] + #if not (ll in (list(data['boundary_sides']) + list(data['fixed_sides']) + list(data['cold_sides']) + list(data['hot_sides']))): + if not (ll in (list(data['boundary_sides']) + list(data['fixed_temperature_sides']))): c2 = get_next_elem_centroid(elem_1,ll,data) dist = c2 - c1 - dists_side[ll] = dist + else: + c2 = data['side_centroids'][ll] + #dist = 
data['face_normals'][ll]*(np.dot(data['face_normals'][ll],c2-c1)) + dist = (c2-c1) + + dists_side[ll] = dist + data['dists'] = np.array(dists_side) @@ -192,7 +236,8 @@ def compute_interpolation_weigths(data): #from here: http://geomalgorithms.com/a05-_intersect-1.html - net_sides=data['active_sides'][~np.isin(data['active_sides'],list(data['boundary_sides'])+list(data['fixed_sides']))] #Get only the nonboundary sides + #net_sides=data['active_sides'][~np.isin(data['active_sides'],list(data['boundary_sides'])+list(data['fixed_sides']) + list(data['hot_sides']) + list(data['cold_sides']) )] #Get only the nonboundary sides + net_sides=data['active_sides'][~np.isin(data['active_sides'],list(data['boundary_sides'])+list(data['fixed_temperature_sides']))] #Get only the nonboundary sides e1 = data['side_elem_map_vec'][net_sides,0] @@ -206,8 +251,6 @@ def compute_interpolation_weigths(data): interp_weigths = np.zeros(len(data['sides'])) interp_weigths[net_sides] = 1+tmp2[net_sides]/tmp[net_sides] #this is the relative distance with the centroid of the second element - - if len(data['boundary_sides']) > 0: interp_weigths[data['boundary_sides']] = 1 @@ -223,13 +266,12 @@ def compute_connecting_matrix(data): side_areas = data['areas'] dim = data['dim'] face_normals = data['face_normals'] - a_sides = data['active_sides'] - b_sides = data['boundary_sides'] - c_sides = data['interface_sides'] + a_sides = data['active_sides'] + b_sides = data['boundary_sides'] + c_sides = data['fixed_temperature_sides'] #Active sides----- - #net_sides=a_sides[~np.isin(a_sides,list(b_sides)+list(c_sides))] - net_sides=a_sides[~np.isin(a_sides,list(b_sides))] + net_sides=a_sides[~np.isin(a_sides,list(b_sides)+list(c_sides))] i2 = side_elem_map[net_sides].flatten() j2 = np.flip(side_elem_map[net_sides],1).flatten() common = np.einsum('u,ui->ui',side_areas[net_sides],face_normals[net_sides,:dim]) @@ -238,42 +280,31 @@ def compute_connecting_matrix(data): k2 = np.zeros((2*t1.shape[0],dim)) 
k2[np.arange(t1.shape[0])*2] = t1 k2[np.arange(t1.shape[0])*2+1] = -t2 - k2 = k2.T - #---------------- + k2 = k2.T + #Compute simple connectivity matrix-- + normals = np.zeros((2*t1.shape[0],dim)) + normals[np.arange(t1.shape[0])*2] = face_normals[net_sides,:dim] + normals[np.arange(t1.shape[0])*2+1] = -face_normals[net_sides,:dim] + data['normals'] = normals + #------------------------------------ #ij - it will need to be removed ij2 = np.zeros((2*t1.shape[0],2)) ij2[np.arange(t1.shape[0])*2 ] = side_elem_map[net_sides] ij2[np.arange(t1.shape[0])*2+1] = np.flip(side_elem_map[net_sides],1) - #------------------------------------- - - #Interface sides---- - #if len(c_sides) >0 : - # inti2 = side_elem_map[c_sides].flatten() - # intj2 = np.flip(side_elem_map[c_sides],1).flatten() - # tmp = np.flip(side_elem_map[c_sides],1).flatten() - # common = np.einsum('u,ui->ui',side_areas[c_sides],face_normals[c_sides,:dim]) - # t1 = common/elem_volumes[side_elem_map[c_sides][:,0],None] - # t2 = common/elem_volumes[side_elem_map[c_sides][:,1],None] - # intk2 = np.zeros((2*t1.shape[0],dim)) - # intk2[np.arange(t1.shape[0])*2] = t1 - # intk2[np.arange(t1.shape[0])*2+1] = -t2 - # intk2 = intk2.T - #else: - inti2 = [] - intj2 = [] - intk2 = [] - - #boundary sides---- + #Boundary sides---- sb2 = a_sides[np.isin(a_sides,b_sides)] eb2 = side_elem_map[sb2][:,0] db2 = face_normals[sb2,:dim].T*side_areas[sb2]/elem_volumes[eb2] #------------- - - data['intk'] = np.asarray(intk2,dtype=np.int64); - data['inti'] = np.asarray(inti2,dtype=np.int64); - data['intj'] = np.asarray(intj2,dtype=np.int64); + + #Isothermal sides---- + sfixed = a_sides[np.isin(a_sides,c_sides)] + efixed = side_elem_map[sfixed][:,0] + dfixed = face_normals[sfixed,:dim].T*side_areas[sfixed]/elem_volumes[efixed] + #------------------ + data['i'] = np.array(i2); data['j'] = np.array(j2); data['k'] = np.array(k2); @@ -281,6 +312,9 @@ def compute_connecting_matrix(data): data['sb'] = sb2; data['db'] = db2; data['ij'] = 
ij2.tolist(); + data['efixed'] = efixed; + data['sfixed'] = sfixed; + data['dfixed'] = dfixed; def get_next_elem_centroid(elem,side,data): @@ -323,19 +357,18 @@ def compute_least_square_weigths(data): dist = dists[ll] kc1 = elems[0] ind1 = list(elem_side_map[kc1]).index(ll) - if not ll in data['boundary_sides'] : + #if not ll in (list(data['boundary_sides'])+list(data['cold_sides']) + list(data['hot_sides'])): + if not ll in (list(data['boundary_sides']) + list(data['fixed_temperature_sides'])): kc2 = elems[1] ind2 = list(elem_side_map[kc2]).index(ll) diff_dist[kc1,ind1] = dist[:dim] diff_dist[kc2,ind2] = -dist[:dim] - else : - dist = side_centroids[ll] - elem_centroids[kc1] - diff_dist[kc1,ind1] = dist[:dim] + #else : + # diff_dist[kc1,ind1] = dist[:dim] #We solve the pinv for a stack of matrices. We do so for each element group index3 = np.where(data['side_per_elem'] == 3)[0] G3 = np.linalg.pinv(diff_dist[index3,:3]) - index4 = np.where(data['side_per_elem'] == 4)[0] G4 = np.linalg.pinv(diff_dist[index4,:4]) @@ -356,7 +389,6 @@ def compute_boundary_connection(data): side_elem_map = data['side_elem_map_vec'] face_normals = data['face_normals'] - bconn = [] for s in data['boundary_sides']: ce = elem_centroids[side_elem_map[s][0]] @@ -371,7 +403,6 @@ def compute_boundary_connection(data): def compute_face_normals(data): - sides = data['sides'] nodes = data['nodes'] dim = data['dim'] @@ -389,7 +420,7 @@ def compute_face_normals(data): v = np.cross(v1,v2) normal = v.T/np.linalg.norm(v,axis=1) - + c = side_centroids - elem_centroids[side_elem_map[:,0]] index = np.where(np.einsum('iu,ui->u',normal,c) < 0)[0] normal[:,index] = -normal[:,index] @@ -401,8 +432,10 @@ def compute_face_normals(data): def import_mesh(**argv): + + heat_source = argv.setdefault('heat_source',[]) data = {} - + elem_region_map = {} region_elem_map = {} @@ -437,6 +470,14 @@ def import_mesh(**argv): elems = [list(np.array(lines[current_line + n][5:],dtype=int)-1) for n in bulk_tags] side_per_elem = 
np.array([len(e) for e in elems]) n_elems = len(elems) + + #Bulk physical regions-- + physical_regions = {} + for n in bulk_tags: + tag = int(lines[current_line + n][3]) + physical_regions.setdefault(blabels[tag],[]).append(n-len(face_tags)) + #----------------------- + boundary_sides = np.array([ sorted(np.array(lines[current_line + n][5:],dtype=int)) for n in face_tags] ) -1 #generate sides and maps--- @@ -460,10 +501,21 @@ def import_mesh(**argv): side_elem_map = { i:[] for i in range(len(sides))} for key, value in elem_side_map.items(): - for v in value: - side_elem_map[v].append(key) + for v in value: + side_elem_map[v].append(key) + #---------------------------------------------------- + #(TO BE IMPROVED). + face_physical_regions = {} + sides_list = sides.tolist() + boundary_sides_list = boundary_sides.tolist() + for n in face_tags: + tag = int(lines[current_line + n][3]) + face_physical_regions.setdefault(blabels[tag],[]).append(sides_list.index(boundary_sides_list[n])) + #--------------------- + + #Build relevant data side_areas = compute_side_areas(nodes,sides,dim) @@ -481,6 +533,22 @@ def import_mesh(**argv): data.update({'side_per_elem':np.array(side_per_elem)}) + #Compute generation rate------------- + generation = np.zeros(n_elems) + for key,value in physical_regions.items(): + if len(key) > 10: + if key[:10] == 'GENERATION': + v = float(key[11:]) + generation[value]=float(key[11:]) + #generation = np.zeros(n_elems) + #kk = 0 + #for g in heat_source: + # if not g == None: + # generation[physical_regions['GENERATION_' + str(kk)]]=g + # kk +=1 + data['generation'] = generation + #------------------------------------ + #match the boundary sides with the global side. 
physical_boundary = {} for n,bs in enumerate(boundary_sides): #match with the boundary side @@ -500,6 +568,8 @@ def import_mesh(**argv): #self.pairs = [] #global (all periodic pairs) side_list.setdefault('Boundary',[]) + side_list.setdefault('Cold',[]) + side_list.setdefault('Hot',[]) side_list.setdefault('Fixed',[]) side_list.setdefault('Interface',[]) side_list.setdefault('Periodic',[]) @@ -565,16 +635,29 @@ def import_mesh(**argv): [side_list['Periodic'].append(i) for i in physical_boundary[contact_1]] [side_list['Inactive'].append(i) for i in physical_boundary[contact_2]] - + + #Consolidate fixed-temperature boundaries + fixed_temperature_sides = [] + fixed_temperature = [] + for key,value in physical_boundary.items(): + if key[:3] == 'ISO': + fixed_temperature_sides += value + fixed_temperature += [float(key[4:])]*len(value) + #-------------------------------- + if 'Boundary' in physical_boundary.keys(): side_list.update({'Boundary':physical_boundary['Boundary']}) - if 'Fixed' in physical_boundary.keys(): side_list.update({'Fixed':physical_boundary['Fixed']}) - if 'Interface' in physical_boundary.keys(): side_list.update({'Interface':physical_boundary['Interface']}) + side_list['Fixed_temperature'] = fixed_temperature_sides - for side in side_list['Boundary'] :# + self.side_list['Hot'] + self.side_list['Cold']: - side_elem_map[side].append(side_elem_map[side][0]) + #if 'Fixed' in physical_boundary.keys(): side_list.update({'Fixed':physical_boundary['Fixed']}) + #if 'Interface' in physical_boundary.keys(): side_list.update({'Interface':physical_boundary['Interface']}) + #if 'Cold' in physical_boundary.keys(): side_list.update({'Cold':physical_boundary['Cold']}) + #if 'Hot' in physical_boundary.keys(): side_list.update({'Hot':physical_boundary['Hot']}) + + #for side in side_list['Boundary'] + side_list['Fixed'] + side_list['Cold'] + side_list['Hot'] : + # side_elem_map[side].append(side_elem_map[side][0]) - for side in side_list['Fixed'] :# + 
self.side_list['Hot'] + self.side_list['Cold']: + for side in side_list['Boundary'] + side_list['Fixed_temperature'] : side_elem_map[side].append(side_elem_map[side][0]) @@ -600,19 +683,28 @@ def import_mesh(**argv): elem_side_map[n].append(-1) #---------------------------------------- - data.update({'active_sides':np.array(side_list['active'])}) data.update({'n_non_boundary_side_per_elem':np.array(n_non_boundary_side_per_elem)}) data.update({'boundary_sides':np.array(side_list['Boundary'])}) + data.update({'cold_sides':np.array(side_list['Cold'])}) + data.update({'hot_sides':np.array(side_list['Hot'])}) data.update({'fixed_sides':np.array(side_list['Fixed'])}) data.update({'periodic_sides':np.array(side_list['Periodic'])}) data.update({'side_periodicity':side_periodicity}) data.update({'areas':np.array(side_areas)}) + data.update({'fixed_temperature':np.array(fixed_temperature)}) + data.update({'fixed_temperature_sides':np.array(fixed_temperature_sides)}) + + #------- + #data.update({'physical_regions':physical_regions}) + #data.update({'face_physical_regions':face_physical_regions}) data.update({'elem_mat_map':np.zeros(len(elems))}) data.update({'dim':dim}) if 'dmin' in argv.keys(): data.update({'dmin':argv['dmin']}) + #if 'overlap' in argv.keys(): + # data.update({'overlap':argv['overlap']}) data.update({'pairs':np.array(pairs)}) data.update({'elem_side_map_vec': np.array([elem_side_map[ll] for ll in range(len(elems))]) }) data.update({'side_elem_map_vec': np.array([side_elem_map[ll] for ll in range(len(sides))]) }) @@ -623,7 +715,7 @@ def import_mesh(**argv): elif direction == 'y': data['direction'] = 1 else: - data['direction'] = 2 + data['direction'] = -1 data['size'] = np.array(size) data['inactive_sides'] = np.array(side_list['Inactive']) @@ -649,6 +741,8 @@ def compute_data(data,**argv): compute_boundary_condition_data(data,**argv) #Some adjustement-- + data.setdefault('dmin',0) + #data.setdefault('overlap',False) data['meta'] = 
np.asarray([len(data['elems']),data['kappa_factor'],data['dim'],len(data['nodes']),len(data['active_sides']),data['direction'],data['dmin']],np.float64) del data['direction'] diff --git a/openbte/mesher.py b/openbte/mesher.py index 1696f631..d18aad6c 100644 --- a/openbte/mesher.py +++ b/openbte/mesher.py @@ -10,14 +10,57 @@ import os import time +def generate_disk(argv): + + strc =''' + +SetFactory("OpenCASCADE"); + +Mesh.CharacteristicLengthMin = STEP; +Mesh.CharacteristicLengthMax = STEP; + +Circle(1) = {0,0,0, R1}; +Circle(2) = {0,0,0, R2}; + +Line Loop(1) = {1}; +Line Loop(2) = {2}; + +Plane Surface(1) = {1}; +Plane Surface(2) = {1,2}; + + +Physical Surface('GENERATION_Q') = {1}; +Physical Surface('Bulk') = {2}; +Physical Line('ISO_0.0') = {2}; + +''' + + + strc = strc.replace('STEP',str(argv['step'])) + strc = strc.replace('R1',str(argv['Rh'])) + strc = strc.replace('R2',str(argv['R'])) + strc = strc.replace('Q',str(argv['heat_source'])) + + store = open('mesh.geo', 'w+') + + store.write(strc) + + store.close() + with open(os.devnull, 'w') as devnull: + output = subprocess.check_output("gmsh -optimize_netgen -format msh2 -2 mesh.geo -o mesh.msh".split(), stderr=devnull) + + + def generate_bulk_2D(argv): direction = argv.setdefault('direction','x') + argv.setdefault('boundary',['Periodic','Periodic','Periodic']) + argv['heat_source']=[] frame = generate_frame(**argv) Lx = float(argv['lx']) - Ly = float(argv['ly']) + Ly = float(argv.setdefault('ly',Lx)) mesh_ext = argv['step'] points = [] @@ -51,27 +94,33 @@ def generate_bulk_2D(argv): strc = r'''Physical Surface('Matrix') = {10};'''+'\n' store.write(strc) + bs = [] - if argv.setdefault('Periodic',[True,True,True])[1] : + if argv['boundary'][1] == 'Periodic': strc = r'''Physical Line('Periodic_1') = {1};''' + '\n' store.write(strc) strc = r'''Physical Line('Periodic_2') = {3};''' + '\n' store.write(strc) + strc = 'Periodic Line{1}={-3};\n' + store.write(strc) else: if direction=='y': - strc = r'''Physical 
Line('Cold') = {2};''' + '\n' + strc = r'''Physical Line('Cold') = {3};''' + '\n' store.write(strc) - strc = r'''Physical Line('Hot') = {4};''' +'\n' + strc = r'''Physical Line('Hot') = {1};''' +'\n' store.write(strc) else: bs.append(1) bs.append(3) + - if argv.setdefault('Periodic',[True,True,True])[0] : + if argv['boundary'][0] =='Periodic': strc = r'''Physical Line('Periodic_3') = {2};''' + '\n' store.write(strc) strc = r'''Physical Line('Periodic_4') = {4};''' +'\n' store.write(strc) + strc = 'Periodic Line{2}={-4};\n' + store.write(strc) else: if direction=='x': strc = r'''Physical Line('Hot') = {2};''' + '\n' @@ -94,11 +143,6 @@ def generate_bulk_2D(argv): store.write(strc) - strc = 'Periodic Line{1}={-3};\n' - store.write(strc) - strc = 'Periodic Line{2}={-4};\n' - store.write(strc) - if argv.setdefault('structured',False): store.write('Transfinite Surface {10};\n') store.write('Recombine Surface {10};\n') @@ -114,10 +158,11 @@ class Mesher(object): def __init__(self,argv): - argv['dmin'] = 0 + #argv['dmin'] = 0.0 model = argv.setdefault('model','lattice') if model == 'lattice': + argv.setdefault('ly',argv['lx']) #create polygons----- self.add_symmetry(argv) shapes = get_shape(argv) @@ -127,8 +172,10 @@ def __init__(self,argv): polygons = [translate_shape(shapes[n],i,**argv) for n,i in enumerate(argv['base'])] argv.update({'polygons':np.array(polygons,dtype=object)}) + repeat_merge_scale(argv) + elif model == 'custom': repeat_merge_scale(argv) @@ -146,15 +193,17 @@ def __init__(self,argv): self.add_symmetry(argv) self.add_polygons(argv) repeat_merge_scale(argv) + + elif model =='disk': + return generate_disk(argv) #--------------------- #if not argv.setdefault('only_geo',False): if argv.setdefault('lz',0) == 0: - self.generate_mesh_2D(argv) + self.generate_mesh_2D_new(argv) else: self.generate_mesh_3D(argv) - def generate_bulk_3D(self,argv): @@ -336,8 +385,6 @@ def add_symmetry(self,argv): - - def create_loop(self,loops,line_list,store): #create external 
loop @@ -352,11 +399,256 @@ def create_loop(self,loops,line_list,store): return strc + + def generate_mesh_2D_new(self,argv): + + #Create polygons + #polygons = unary_union([Polygon(poly) for poly in argv['polygons']]) + polygons = [Polygon(poly) for poly in argv['polygons']] + polygons_union = unary_union(polygons) + + #Create frame + frame = Polygon(generate_frame(**argv)) + + #Creat heat source-- + heat_source = argv.setdefault('heat_source',len(argv['polygons'])*[None]) + + + step = argv['step'] + lx = argv['lx'] + ly = argv['ly'] + self.lx = lx + self.ly = ly + + #Init data + points = [] + lines = [] + loops = 1000 + ss = 1 + pore_wall = [] + bulk_surface = [] + #inclusions = [] + #--------- + + #Start File-- + store = open('mesh.geo', 'w+') + store.write('h='+str(step) + ';\n') + store.write('lx='+str(lx) + ';\n') + store.write('ly='+str(ly) + ';\n') + + #polygons = frame.intersection(polygons) + heat_source_surface = [] + heat_source_lines = [] + internal_loops = [] + kg = 0 + for n,polygon in enumerate(polygons): + + polygon_intersected = frame.intersection(polygon) + pp = list(polygon_intersected.exterior.coords)[:-1] + cross = abs(polygon.area - polygon_intersected.area)/polygon.area >1e-4 + + if not heat_source[n] == None: #Heat source + line_list = self.create_line_list(pp,points,lines,store) + heat_source_lines += line_list + loops +=1 + self.create_loop(loops,line_list,store) + self.create_surface_old([loops],ss,store) + heat_source_surface.append(str(ss)) + strc = r'''Physical Surface('GENERATION_''' + str(heat_source[n])+ "') = {" + str(heat_source_surface[kg]) + '''};\n''';store.write(strc) + ss += 1 + kg += 1 + if not cross: + internal_loops.append(loops) + + + if heat_source[n] == None and not cross: #Internal pore + line_list = self.create_line_list(pp,points,lines,store) + loops +=1 + self.create_loop(loops,line_list,store) + internal_loops.append(loops) + + + #---------------------------------------------------- + #Create bulk region-- + diff 
= frame.difference(polygons_union) + pp = list(diff.exterior.coords)[:-1] + line_list = self.create_line_list(pp,points,lines,store) + loops +=1 + self.create_loop(loops,line_list,store) + self.create_surface_old([loops] + internal_loops,ss,store) + #bulk_surface = heat_source_surface + [ss] + bulk_surface = [ss] + ss += 1 + #------------------------------------------------------ + + strc = r'''Physical Surface('Matrix') = {''' + for n,s in enumerate(bulk_surface): + strc += str(s) + if n == len(bulk_surface)-1: + strc += '};\n' + else: + strc += ',' + store.write(strc) + + left = [] + right = [] + upper = [] + lower = [] + + pul = np.array([-self.lx/2.0,self.ly/2.0]) + pur = np.array([self.lx/2.0,self.ly/2.0]) + pll = np.array([-self.lx/2.0,-self.ly/2.0]) + plr = np.array([self.lx/2.0,-self.ly/2.0]) + + + delta = 1e-12 + pore_wall = [] + for l,line in enumerate(lines): + + pl = (np.array(points[line[0]])+np.array(points[line[1]]))/2.0 + + is_on_boundary = True + if self.compute_line_point_distance(pul,pur,pl) < delta: + upper.append(l+1) + is_on_boundary = False + + if self.compute_line_point_distance(pll,plr,pl) < delta: + lower.append(l+1) + is_on_boundary = False + + if self.compute_line_point_distance(plr,pur,pl) < delta: + left.append(l+1) + is_on_boundary = False + + if self.compute_line_point_distance(pll,pul,pl) < delta: + right.append(l+1) + is_on_boundary = False + + if is_on_boundary and (not l+1 in heat_source_lines): + pore_wall.append(l+1) + + + + #Setting the temperature--- + boundary_bc = argv.setdefault('boundary',['Periodic','Periodic','Periodic']) + direction = argv.setdefault('direction','x') + additional_boundary = [] + boundary_conditions = {} + if direction == 'x': + + #bc along x + if boundary_bc[0] == 'Isothermal': + boundary_conditions['ISO_-0.5'] = left + boundary_conditions['ISO_0.5'] = right + elif boundary_bc[0] == 'Periodic': + boundary_conditions['Periodic_1'] = left + boundary_conditions['Periodic_2'] = right + elif boundary_bc[0] 
== 'Diffuse': + additional_boundary += left + right + else: + raise Exception('No applied gradient direction recognized') + + + #bc along y + if boundary_bc[1] == 'Isothermal': + boundary_conditions['ISO_0.0'] = lower + boundary_conditions['ISO_0.0'] += upper + elif boundary_bc[1] == 'Periodic': + boundary_conditions['Periodic_3'] = upper + boundary_conditions['Periodic_4'] = lower + elif boundary_bc[1] == 'Diffuse': + additional_boundary += upper + lower + else: + raise Exception('No applied gradient direction recognized') + + + elif direction == 'y': + + #bc along x + if boundary_bc[0] == 'Isothermal': + boundary_conditions['ISO_0.0'] = left + boundary_conditions['ISO_0.0'] = right + elif boundary_bc[0] == 'Periodic': + boundary_conditions['Periodic_1'] = left + boundary_conditions['Periodic_2'] = right + elif boundary_bc[0] == 'Diffuse': + additional_boundary += left + right + else: + raise Exception('No applied gradient direction recognized') + + + #bc along y + if boundary_bc[1] == 'Isothermal': + boundary_conditions['ISO_-0.5'] = lower + boundary_conditions['ISO_0.5'] = upper + elif boundary_bc[1] == 'Periodic': + boundary_conditions['Periodic_3'] = upper + boundary_conditions['Periodic_4'] = lower + elif boundary_bc[1] == 'Diffuse': + additional_boundary += lower + upper + else: + raise Exception('No applied gradient direction recognized') + + elif direction == '-1': + if boundary_bc[0] == 'Isothermal': + boundary_conditions['ISO_0.0'] = left + boundary_conditions['ISO_0.0'] += right + if boundary_bc[1] == 'Isothermal': + boundary_conditions['ISO_0.0'] += lower + boundary_conditions['ISO_0.0'] += upper + + else: + raise Exception('No applied gradient direction recognized') + #------------------------- + + #Write to file + for key, value in boundary_conditions.items(): + store.write('Physical Line("' + key + '") = {' + ','.join([str(s) for s in value]) + '};') + + + + #Thermalizing Boundaries--- + interface_surfaces = [] + boundary_surfaces = pore_wall + 
additional_boundary + + #Interface surface + if len(boundary_surfaces)> 0: + strc = r'''Physical Line('Boundary') = {''' + for r,region in enumerate(boundary_surfaces) : + strc += str(region) + if r == len(boundary_surfaces)-1: + strc += '};\n' + store.write(strc) + else : + strc += ',' + + + if len(interface_surfaces)> 0: + strc = r'''Physical Line('Interface') = {''' + for r,region in enumerate(interface_surfaces) : + strc += str(region) + if r == len(interface_surfaces)-1: + strc += '};\n' + store.write(strc) + else : + strc += ',' + + +#------------------------------------------------------- + store.close() + + with open(os.devnull, 'w') as devnull: + output = subprocess.check_output("gmsh -format msh2 -2 mesh.geo -o mesh.msh".split(), stderr=devnull) + + def generate_mesh_2D(self,argv): polygons = argv['polygons'] + mesh_ext = argv['step'] - + + heat_source = argv.setdefault('heat_source',len(polygons)*[None]) + #Create the frame frame = generate_frame(**argv) Frame = Polygon(frame) @@ -382,15 +674,13 @@ def generate_mesh_2D(self,argv): delta = 1e-2 #------------------------- - - bulk = Frame.difference(unary_union(polypores)) if not (isinstance(bulk, shapely.geometry.multipolygon.MultiPolygon)): bulk = [bulk] bulk_surface = [] inclusions = [] - + for r,region in enumerate(bulk): pp = list(region.exterior.coords)[:-1] @@ -401,45 +691,37 @@ def generate_mesh_2D(self,argv): self.create_loop(loops,line_list,store) #Create internal loops------------- - a = 0 + iloop = [] + pore_lines = [] for interior in region.interiors: - a +=1 pp = list(interior.coords)[:-1] line_list = self.create_line_list(pp,points,lines,store) + pore_lines.append(line_list) loops +=1 + iloop.append(loops) local_loops.append(loops) self.create_loop(loops,line_list,store) #--------------------------------- ss +=1 bulk_surface.append(ss) self.create_surface_old(local_loops,ss,store) + ss +=1 #Create Inclusion surfaces-------- - if argv.setdefault('inclusion',False): - - for poly in polygons: - 
thin = Polygon(poly).intersection(Frame) - - #Points------- - pp = list(thin.exterior.coords)[:-1] - line_list = create_line_list(pp,points,lines,store) - loops +=1 - self.create_loop(loops,line_list,store) - ss +=1 - create_surface([loops],ss,store) - inclusions.append(ss) - - strc = r'''Physical Surface('Inclusion') = {''' - for n,s in enumerate(inclusions): - strc += str(s) - if n == len(inclusions)-1: - strc += '};\n' - else: - strc += ',' - store.write(strc) + #if argv.setdefault('inclusion',False): + kg = 0 + pore_wall2 = [] + for ng,g in enumerate(heat_source): + if not g == None: + self.create_surface_old([iloop[argv['corr'][ng]]],ss,store) + strc = r'''Physical Surface('GENERATION_''' + str(kg)+ "') = {" + str(ss) + '''};\n''' + kg += 1 + store.write(strc) + ss +=1 + else: + pore_wall2 += pore_lines[argv['corr'][ng]] #------------------------------- - strc = r'''Physical Surface('Matrix') = {''' for n,s in enumerate(bulk_surface): strc += str(s) @@ -484,10 +766,17 @@ def generate_mesh_2D(self,argv): hot.append(l+1) is_on_boundary = False - if is_on_boundary: pore_wall.append(l+1) + + #Rationale: pore_wall trackes all the surfaces/lines that are NOT on the boundary. 
+ #pore_wall2, on the other side, tracks all the surfaces/lines that are actually pores (as opposed to heat source) + #In some cases, however, + #print(pore_wall) + #print(pore_wall2) + #quit() + additional_boundary = [] argv.setdefault('Periodic',[True,True,True]) if argv['Periodic'][0]: @@ -527,12 +816,13 @@ def generate_mesh_2D(self,argv): additional_boundary.append(k) #Collect Wall - if argv.setdefault('inclusion',False): - interface_surfaces = pore_wall - boundary_surfaces = additional_boundary - else: - boundary_surfaces = pore_wall + additional_boundary - interface_surfaces = [] + #if argv.setdefault('inclusion',False): + # interface_surfaces = pore_wall + # boundary_surfaces = additional_boundary + #else: + + interface_surfaces = [] + boundary_surfaces = pore_wall2 + additional_boundary #Interface surface if len(boundary_surfaces)> 0: @@ -703,13 +993,12 @@ def get_points_from_surface(self,s1): def apply_periodic_mesh(self): - self.argv.setdefault('Periodic',[True,True,False]) #Find periodic surfaces---- self.periodic = {} for s1 in range(len(self.surfaces)): for s2 in range(s1+1,len(self.surfaces)): (a,per) = self.isperiodic(s1,s2) - if not a == None and self.argv['Periodic'][a]: + if not a == None and self.argv['Boundary'][a]=='Periodic': corr = {} pts1,ind1 = self.get_points_from_surface(s1) @@ -962,35 +1251,29 @@ def line_exists_ordered_old(self,l,lines): def create_line_list(self,pp,points,lines,store): - #Eliminate unncessesary points-- - - - #------------------------------ - p_list = [] - for p in pp: + for k,p in enumerate(pp): tmp = self.already_included_old(points,p,p_list) if tmp == -1: points.append(p) p_list.append(len(points)-1) + point = p + store.write( 'Point('+str(len(points)-len(p_list)+k) +') = {' + str(point[0]/self.lx) +'*lx,'+ str(point[1]/self.ly)+'*ly,0,h};\n') else: p_list.append(tmp) - for k,p in enumerate(p_list): - point = points[-len(p_list)+k] - store.write( 'Point('+str(len(points)-len(p_list)+k) +') = {' + 
str(point[0]/self.lx) +'*lx,'+ str(point[1]/self.ly)+'*ly,0,h};\n') - - + #for k,p in enumerate(p_list): + # point = points[-len(p_list)+k] + # store.write( 'Point('+str(len(points)-len(p_list)+k) +') = {' + str(point[0]/self.lx) +'*lx,'+ str(point[1]/self.ly)+'*ly,0,h};\n') line_list = [] for l in range(len(p_list)): p1 = p_list[l] p2 = p_list[(l+1)%len(p_list)] if not p1 == p2: - #f 1 == 1: tmp = self.line_exists_ordered_old([p1,p2],lines) - if tmp == 0 : #craete line + if tmp == 0 : #create line lines.append([p1,p2]) store.write( 'Line('+str(len(lines)) +') = {' + str(p1) +','+ str(p2)+'};\n') line_list.append(len(lines)) diff --git a/openbte/mfp2D.py b/openbte/mfp2D.py deleted file mode 100644 index 5bfdd0d8..00000000 --- a/openbte/mfp2D.py +++ /dev/null @@ -1,162 +0,0 @@ -import sys -import numpy as np -import scipy -from mpi4py import MPI -import time -import os -from openbte.utils import * - - -comm = MPI.COMM_WORLD - -def get_linear_indexes(mfp,value): - - - n = len(mfp) - - if value < mfp[0]: - aj = (value-mfp[0])/(mfp[1]-mfp[0]) - return 0,1-aj,1,aj - elif value > mfp[-1]: - aj = (value-mfp[n-2])/(mfp[n-1]-mfp[n-2]) - return n-2,1-aj,n-1,aj - else: - for m in range(n-1): - if (value <= mfp[m+1]) and (value >= mfp[m]) : - aj = (value-mfp[m])/(mfp[m+1]-mfp[m]) - return m,1-aj,m+1,aj - - -def mfp2D(**argv): - - #load data - if argv.setdefault('read_from_file',True): - data = load_data('mfp') - Kacc = data['Kacc'] - mfp_bulk = data['mfp'] - else: - mfp_bulk = argv['mfp'] - Kacc = argv['Kacc'] - - #Get the distribution - kappa_bulk = Kacc[-1] - kappa_bulk = np.zeros_like(Kacc) - kappa_bulk[0] = Kacc[0] - for n in range(len(Kacc)-1): - kappa_bulk[n+1] = Kacc[n+1]-Kacc[n] - #------------------------------- - - #pruning-- - I = np.where(mfp_bulk > 0) - kappa_bulk = kappa_bulk[I] - mfp_bulk = mfp_bulk[I] - kappa= np.eye(3) * np.sum(kappa_bulk) - - #Get options---- - n_phi = int(argv.setdefault('n_phi',48)) - - - #Create sampled MFPs - n_mfp_bulk = len(mfp_bulk) - 
if argv.setdefault('interpolation',True): - n_mfp = int(argv.setdefault('n_mfp',50)) - mfp_sampled = np.logspace(min([-2,np.log10(min(mfp_bulk)*0.99)]),np.log10(max(mfp_bulk)*1.01),n_mfp)#min MFP = 1e-2 nm - else: - mfp_sampled = mfp_bulk - n_mfp = len(mfp_bulk) - - nm = n_phi * n_mfp - #Polar Angle--------- - Dphi = 2*np.pi/n_phi - phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True) - #phi = np.linspace(0,2.0*np.pi,n_phi,endpoint=False) - polar = np.array([np.sin(phi),np.cos(phi)]).T - fphi= np.sinc(Dphi/2.0/np.pi) - polar_ave = polar*fphi - #-------------------- - - n_mfp_bulk = len(mfp_bulk) - n_mfp = argv.setdefault('n_mfp',100) - - mfp = np.logspace(np.log10(max([min(mfp_bulk),1e-11])),np.log10(max(mfp_bulk)*1.01),n_mfp) - - n_mfp = len(mfp) - - temp_coeff_p,temp_coeff = np.zeros((2,n_mfp,n_phi)) - kappa_directional_p,kappa_directional = np.zeros((2,n_mfp,n_phi,3)) - #suppression_p,suppression = np.zeros((2,n_mfp,n_phi,n_mfp_bulk)) - - - - kdp = np.zeros((n_mfp,n_phi,2)) - kd = np.zeros((n_mfp,n_phi,2)) - tcp = np.zeros((n_mfp,n_phi)) - tc = np.zeros((n_mfp,n_phi)) - p = np.arange(n_phi) - - g1 = kappa_bulk/mfp_bulk - g2 = kappa_bulk/mfp_bulk/mfp_bulk - - - n_tot = n_mfp_bulk - - block = n_tot//comm.size - rr = range(block*comm.rank,n_tot) if comm.rank == comm.size-1 else range(block*comm.rank,block*(comm.rank+1)) - - for m in rr: - - (m1,a1,m2,a2) = get_linear_indexes(mfp,mfp_bulk[m]) - - tmp = g1[m]*polar_ave - kdp[m1] += a1*tmp - kdp[m2] += a2*tmp - - #Temperature - tmp = g2[m]*Dphi - tcp[m1] += a1*tmp - tcp[m2] += a2*tmp - - #Suppression - #tmp = dirr[t,:,0]/mfp_bulk[m] - #sp[m1,:,m] += a1 * tmp - #sp[m2,:,m] += a2 * tmp - - comm.Allreduce([kdp,MPI.DOUBLE],[kd,MPI.DOUBLE],op=MPI.SUM) - comm.Allreduce([tcp,MPI.DOUBLE],[tc,MPI.DOUBLE],op=MPI.SUM) - #comm.Allreduce([sp,MPI.DOUBLE],[s,MPI.DOUBLE],op=MPI.SUM) - - kd *= 2/n_phi - #s *= 3*2/4.0/np.pi - tc /= np.sum(tc) - - #Test for bulk--- - if comm.rank == 0: - kappa_bulk = np.zeros((2,2)) - for 
m in range(n_mfp): - for p in range(n_phi): - kappa_bulk += mfp[m]*np.outer(kd[m,p],polar_ave[p,:2]) - - #Compute RHS average for multiscale modeling--- - rhs_average = mfp_sampled*mfp_sampled/2 - #a = np.zeros((3,3)) - #for i in range(len(polar_ave)): - # a += np.outer(polar_ave[i],polar_ave[i])/2/np.pi*2*np.pi/n_phi - #rhs_average = mfp_sampled*mfp_sampled - #rhs_average *= a[0,0] - #------------------------------------------------ - - #Final---- - return {'tc':tc,\ - 'sigma':kd,\ - 'kappa':kappa,\ - 'mfp_average':rhs_average*1e18,\ - 'VMFP':polar_ave[:,:2],\ - 'mfp_sampled':mfp,\ - 'phi':phi,\ - 'directions': polar ,\ - 'sampling': np.array([n_phi,n_mfp]),\ - 'model':np.array([4]),\ - 'suppression':np.zeros(1),\ - 'kappam':kappa_bulk} - - diff --git a/openbte/mfp2DSym.py b/openbte/mfp2DSym.py deleted file mode 100644 index c175bf74..00000000 --- a/openbte/mfp2DSym.py +++ /dev/null @@ -1,173 +0,0 @@ -import sys -import numpy as np -import scipy -from mpi4py import MPI -import time -import os -from openbte.utils import * - - -comm = MPI.COMM_WORLD - -def get_linear_indexes(mfp,value): - - - n = len(mfp) - - if value < mfp[0]: - aj = (value-mfp[0])/(mfp[1]-mfp[0]) - return 0,1-aj,1,aj - elif value > mfp[-1]: - aj = (value-mfp[n-2])/(mfp[n-1]-mfp[n-2]) - return n-2,1-aj,n-1,aj - else: - for m in range(n-1): - if (value <= mfp[m+1]) and (value >= mfp[m]) : - aj = (value-mfp[m])/(mfp[m+1]-mfp[m]) - return m,1-aj,m+1,aj - - - - -def mfp2DSym(**argv): - - #load data - - data = load_data('mfp') - - Kacc = data['Kacc'] - mfp_bulk = data['mfp'] - - kappa_bulk = Kacc[-1] - kappa_bulk = np.zeros_like(Kacc) - kappa_bulk[0] = Kacc[0] - for n in range(len(Kacc)-1): - kappa_bulk[n+1] = Kacc[n+1]-Kacc[n] - - #pruning-- - I = np.where(mfp_bulk > 0) - kappa_bulk = kappa_bulk[I] - mfp_bulk = mfp_bulk[I] - kappa= np.eye(3) * np.sum(kappa_bulk) - - #Get options---- - n_phi = int(argv.setdefault('n_phi',48)) - n_mfp = int(argv.setdefault('n_mfp',50)) - n_theta = 
int(argv.setdefault('n_theta',24)) - - nm = n_phi * n_mfp - - #Create sampled MFPs - n_mfp_bulk = len(mfp_bulk) - mfp_sampled = np.logspace(min([-2,np.log10(min(mfp_bulk)*0.99)]),np.log10(max(mfp_bulk)*1.01),n_mfp)#min MFP = 1e-2 nm - - #Polar Angle--------- - Dphi = 2*np.pi/n_phi - phi = np.linspace(0,2.0*np.pi,n_phi,endpoint=False) - #-------------------- - - #Azimuthal Angle------------------------------ - Dtheta = np.pi/n_theta/2.0 - theta = np.linspace(Dtheta/2.0,np.pi/2.0 - Dtheta/2.0,n_theta) - dtheta = 2.0*np.sin(Dtheta/2.0)*np.sin(theta) - domega = np.outer(dtheta,Dphi * np.ones(n_phi)) - - #Compute directions--- - polar = np.array([np.sin(phi),np.cos(phi),np.zeros(n_phi)]).T - azimuthal = np.array([np.sin(theta),np.sin(theta),np.zeros(n_theta)]).T - direction = np.einsum('ij,kj->ikj',azimuthal,polar) - - #Compute average--- - ftheta = (1-np.cos(2*theta)*np.sinc(Dtheta/np.pi))/(np.sinc(Dtheta/2/np.pi)*(1-np.cos(2*theta))) - fphi= np.sinc(Dphi/2.0/np.pi) - polar_ave = polar*fphi - azimuthal_ave = np.array([ftheta*np.sin(theta),ftheta*np.sin(theta),np.zeros(n_theta)]).T - direction_ave = np.einsum('ij,kj->ikj',azimuthal_ave,polar_ave) - direction_int = np.einsum('ijl,ij->ijl',direction_ave,domega) - #------------------------------------------------------ - - n_mfp_bulk = len(mfp_bulk) - n_mfp = argv.setdefault('n_mfp',100) - - mfp = np.logspace(np.log10(max([min(mfp_bulk),1e-11])),np.log10(max(mfp_bulk)*1.01),n_mfp) - - n_mfp = len(mfp) - - temp_coeff_p,temp_coeff = np.zeros((2,n_mfp,n_phi)) - kappa_directional_p,kappa_directional = np.zeros((2,n_mfp,n_phi,3)) - #suppression_p,suppression = np.zeros((2,n_mfp,n_phi,n_mfp_bulk)) - - - dirr = np.einsum('tpi,tp->tpi',direction_ave[:,:,:2],domega) - - - kdp = np.zeros((n_mfp,n_phi,2)) - kd = np.zeros((n_mfp,n_phi,2)) - tcp = np.zeros((n_mfp,n_phi)) - tc = np.zeros((n_mfp,n_phi)) - p = np.arange(n_phi) - - - g1 = kappa_bulk/mfp_bulk - g2 = kappa_bulk/mfp_bulk/mfp_bulk - g3 = ftheta*np.sin(theta) - n_tot = 
n_mfp_bulk - - block = n_tot//comm.size - rr = range(block*comm.rank,n_tot) if comm.rank == comm.size-1 else range(block*comm.rank,block*(comm.rank+1)) - - #change to mfp - for t in range(n_theta): - - for m in rr: - - (m1,a1,m2,a2) = get_linear_indexes(mfp,mfp_bulk[m]*g3[t]) - - tmp = g1[m]*dirr[t] - kdp[m1] += a1*tmp - kdp[m2] += a2*tmp - - #Temperature - tmp = g2[m]*domega[t] - tcp[m1] += a1*tmp - tcp[m2] += a2*tmp - - - comm.Allreduce([kdp,MPI.DOUBLE],[kd,MPI.DOUBLE],op=MPI.SUM) - comm.Allreduce([tcp,MPI.DOUBLE],[tc,MPI.DOUBLE],op=MPI.SUM) - - kd *= 2*3/4.0/np.pi - tc /= np.sum(tc) - - - #Test for bulk--- - - if comm.rank == 0: - kappa_bulk = np.zeros((2,2)) - for m in range(n_mfp): - for p in range(n_phi): - kappa_bulk += mfp[m]*np.outer(kd[m,p],polar_ave[p,:2]) - - - #replicate bulk values--- - - rhs_average = mfp_sampled*mfp_sampled/2 - a = np.zeros((3,3)) - for i in range(len(polar_ave)): - a += np.outer(polar_ave[i],polar_ave[i])/2/np.pi*2*np.pi/n_phi - rhs_average = mfp_sampled*mfp_sampled - rhs_average *= a[0,0] - - #Final---- - return {'tc':tc,\ - 'sigma':kd,\ - 'kappa':kappa,\ - 'mfp_average':rhs_average*1e18,\ - 'VMFP':polar_ave[:,:2],\ - 'mfp_sampled':mfp,\ - 'sampling': np.array([n_phi,n_theta,n_mfp]),\ - 'model':np.array([5]),\ - 'suppression':np.zeros(1),\ - 'kappam':kappa_bulk} - - diff --git a/openbte/mfp3D.py b/openbte/mfp3D.py deleted file mode 100644 index b9b390d3..00000000 --- a/openbte/mfp3D.py +++ /dev/null @@ -1,164 +0,0 @@ -import sys -import numpy as np -import scipy -from .utils import * -from mpi4py import MPI - -comm = MPI.COMM_WORLD - - - -def get_linear_indexes(mfp,value): - - - n = len(mfp) - - if value < mfp[0]: - aj = (value-mfp[0])/(mfp[1]-mfp[0]) - return 0,1-aj,1,aj - elif value > mfp[-1]: - aj = (value-mfp[n-2])/(mfp[n-1]-mfp[n-2]) - return n-2,1-aj,n-1,aj - else: - for m in range(n-1): - if (value <= mfp[m+1]) and (value >= mfp[m]) : - aj = (value-mfp[m])/(mfp[m+1]-mfp[m]) - return m,1-aj,m+1,aj - - - - -def mfp3D(**argv): 
- - #load data - if argv.setdefault('read_from_file',True): - data = load_data('mfp') - Kacc = data['Kacc'] - mfp_bulk = data['mfp'] - else: - mfp_bulk = argv['mfp'] - Kacc = argv['Kacc'] - - #Get the distribution - kappa_bulk = Kacc[-1] - kappa_bulk = np.zeros_like(Kacc) - kappa_bulk[0] = Kacc[0] - - - - #Polar Angle----- - n_phi = int(argv.setdefault('n_phi',48)); Dphi = 2.0*np.pi/n_phi - #phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True) - phi = np.linspace(0,2.0*np.pi,n_phi,endpoint=False) - #-------------------- - #Azimuthal Angle------------------------------ - n_theta = int( argv.setdefault('n_theta',24)) - sym = 1 - - Dtheta = np.pi/n_theta - theta = np.linspace(Dtheta/2.0,np.pi - Dtheta/2.0,n_theta) - dtheta = 2.0*np.sin(Dtheta/2.0)*np.sin(theta) - - #Compute directions--- - polar = np.array([np.sin(phi),np.cos(phi),np.ones(n_phi)]).T - azimuthal = np.array([np.sin(theta),np.sin(theta),np.cos(theta)]).T - direction = np.einsum('ij,kj->ikj',azimuthal,polar) - - ftheta = (1-np.cos(2*theta)*np.sinc(Dtheta/np.pi))/(np.sinc(Dtheta/2/np.pi)*(1-np.cos(2*theta))) - fphi= np.sinc(Dphi/2.0/np.pi) - polar_ave = np.array([fphi*np.ones(n_phi),fphi*np.ones(n_phi),np.ones(n_phi)]).T - azimuthal_ave = np.array([ftheta,ftheta,np.cos(Dtheta/2)*np.ones(n_theta)]).T - - direction_ave = np.multiply(np.einsum('ij,kj->ikj',azimuthal_ave,polar_ave),direction) - domega = np.outer(dtheta, Dphi * np.ones(n_phi)) - - n_mfp = argv.setdefault('n_mfp',50) - #Total numbre of momentum discretization---- - nm = n_phi * n_mfp * n_theta - #------------------------------------------ - - - n_mfp_bulk = len(mfp_bulk) - - if len(mfp_bulk) == n_mfp: - mfp_sampled = mfp_bulk - else: - mfp_sampled = np.logspace(min([-2,np.log10(min(mfp_bulk)*0.99)]),np.log10(max(mfp_bulk)*1.01),n_mfp)#min MFP = 1e-2 nm - - - n_mfp = len(mfp_sampled) - temp_coeff = np.zeros(nm) - kappa_directional = np.zeros((n_mfp,n_theta*n_phi,3)) - kdp = np.zeros((n_mfp,n_theta*n_phi,3)) - kd = 
np.zeros((n_mfp,n_theta*n_phi,3)) - - #suppression = np.zeros((n_mfp,n_phi*n_theta,n_mfp_bulk)) - temp_coeff = np.zeros((n_mfp,n_theta*n_phi)) - tcp = np.zeros((n_mfp,n_theta*n_phi)) - tc = np.zeros((n_mfp,n_theta*n_phi)) - - g1 = kappa_bulk/mfp_bulk/mfp_bulk*sym - g2 = kappa_bulk/mfp_bulk*sym - - n_angles = n_theta*n_phi - dirr = sym*np.einsum('tpi,tp->tpi',direction_ave,domega).reshape((n_angles,3)) - domega = domega.reshape((n_angles)) - - block = n_mfp_bulk//comm.size - rr = range(block*comm.rank,n_mfp_bulk) if comm.rank == comm.size-1 else range(block*comm.rank,block*(comm.rank+1)) - - mfp_sampled_log = np.log10(mfp_sampled) - for m in rr: - - if argv.setdefault('log_interpolation',False): - (m1,a1,m2,a2) = get_linear_indexes(mfp_sampled_log,np.log10(mfp_bulk[m])) - else: - (m1,a1,m2,a2) = get_linear_indexes(mfp_sampled,mfp_bulk[m]) - - kdp[m1] += a1 * g2[m]*dirr - kdp[m2] += a2 * g2[m]*dirr - tcp[m1] += a1 * g1[m]*domega - tcp[m2] += a2 * g1[m]*domega - - - comm.Allreduce([kdp,MPI.DOUBLE],[kd,MPI.DOUBLE],op=MPI.SUM) - comm.Allreduce([tcp,MPI.DOUBLE],[tc,MPI.DOUBLE],op=MPI.SUM) - - - #---- - kd *= 3/(4.0*np.pi) - tc /= np.sum(tc) - #---- - - direction = direction_ave.reshape((n_theta * n_phi,3)) - F = np.einsum('m,dj->mdj',mfp_sampled,direction) - - rhs_average = mfp_sampled*mfp_sampled/3 - thermal_conductance = 3*mfp_sampled/2 - #----------------------------- - - #replicate bulk values--- - kappa = np.zeros((3,3)) - for t in range(n_theta): - for p in range(n_phi): - for m in range(n_mfp): - index = t * n_phi + p - tmp = kd[m,index] - kappa += np.outer(tmp,direction_ave[t,p])*mfp_sampled[m] - - - - return {'tc':tc,\ - 'VMFP':direction,\ - 'sigma':kd,\ - 'kappa':kappa,\ - 'ang_coeff':domega.reshape(n_phi*n_theta)/4/np.pi,\ - 'model':np.array([6]),\ - 'mfp_average':rhs_average*1e18,\ - 'thermal_conductance':thermal_conductance*1e9,\ - 'mfp_sampled':mfp_sampled,\ - 'suppression':np.zeros(1),\ - 'kappam':np.zeros(1),\ - 'mfp_bulk':mfp_bulk} - - diff --git 
a/openbte/mg2DSym.py b/openbte/mg2DSym.py deleted file mode 100644 index e0898e9f..00000000 --- a/openbte/mg2DSym.py +++ /dev/null @@ -1,146 +0,0 @@ -import sys -import numpy as np -import scipy -from .utils import * -import time - - - - -def rta_no_interpolation(**argv): - - - data = load_data(argv.setdefault('basename','rta')) - - mfp_0 = 1e-10 - mfp_max = 1e-4 - mfp_bulk = np.einsum('ki,k->ki',data['v'],data['tau']) #the minimum MFP is calculated on the real MFP - I = np.where(np.linalg.norm(mfp_bulk,axis=1) > mfp_0)[0] - tau = data['tau'][I] - v = data['v'][I] - C = data['C'][I] - f = np.divide(np.ones_like(tau),tau, out=np.zeros_like(tau), where=tau!=0) - kappam = np.einsum('u,u,u,u->u',tau,C,v[:,0],v[:,0]) - Wdiag = C*f - Jc = np.einsum('k,ki->ki',C,v[:,:2]) - nm = len(Wdiag) - - return { 'Wdiag':Wdiag,\ - 'sigma':Jc,\ - 'kappa':data['kappa'],\ - 'model':np.array([11])} - - -def mg2DSym(**argv): - - if not argv.setdefault('interpolation',True): - - return rta_no_interpolation(**argv) - - #Import data---- - #Get options---- - n_phi = int(argv.setdefault('n_phi',48)) - n_mfp = int(argv.setdefault('n_mfp',50)) - n_theta = 48 - - nm = n_phi * n_mfp - - #Create sampled MFPs - #Polar Angle--------- - Dphi = 2*np.pi/n_phi - phi = np.linspace(Dphi/2.0,2.0*np.pi-Dphi/2.0,n_phi,endpoint=True) - #phi = np.linspace(0,2.0*np.pi,n_phi,endpoint=False) - polar_ave = np.array([np.sin(phi),np.cos(phi),np.zeros(n_phi)]).T - - - #Import data----------- - data = load_data(argv.setdefault('filename','rta')) - - #small cut on MFPs - mfp_0 = 1e-10 - mfp_bulk = np.einsum('ki,k->ki',data['v'],data['tau']) #the minimum MFP is calculated on the real MFP - I = np.where(np.linalg.norm(mfp_bulk,axis=1) > mfp_0)[0] - tau = data['tau'][I] - v = data['v'][I] - C = data['C'][I] - f = np.divide(np.ones_like(tau),tau, out=np.zeros_like(tau), where=tau!=0) - kappam = np.einsum('u,u,u,u->u',tau,C,v[:,0],v[:,0]) - tc = C*f - tc /= tc.sum() - Jc = np.einsum('k,ki->ki',C,v[:,:2]) - nm = len(tc) - 
mfp_bulk = np.einsum('ki,k->ki',v[:,:2],tau) #here we have the 2D one - r = np.linalg.norm(mfp_bulk,axis=1) - phi_bulk = np.array([np.arctan2(m[0],m[1]) for m in mfp_bulk]) - phi_bulk[np.where(phi_bulk < 0) ] = 2*np.pi + phi_bulk[np.where(phi_bulk <0)] - kappa = data['kappa'] - - mfp_max = argv.setdefault('mfp_max',np.max(mfp_bulk)*10) - mfp_min = argv.setdefault('mfp_min',mfp_0) - mfp_sampled = np.logspace(np.log10(mfp_min)*1.01,np.log10(mfp_max),n_mfp,endpoint=True)#min MFP = 1e-1 nm - - - #----------------------- - n_mfp_bulk = len(mfp_bulk) - - temp_coeff = np.zeros((n_mfp,n_phi)) - kappa_directional = np.zeros((n_mfp,n_phi,2)) - - lo = 0 - - #Interpolation in the MFPs - r_cut = r[r>mfp_0] - tc_cut = tc[r>mfp_0] - Jc_cut = Jc[r>mfp_0] - phi_cut = phi_bulk[r>mfp_0] - - #Interpolation in the MFPs - a1,a2,m1,m2 = fast_interpolation(r_cut,mfp_sampled) - - #Interpolation in phi--- - b1,b2,p1,p2 = fast_interpolation(phi_cut,phi,bound='periodic') - - #CHECKED - np.add.at(temp_coeff,(m1, p1),a1*b1*tc_cut) - np.add.at(temp_coeff,(m1, p2),a1*b2*tc_cut) - np.add.at(temp_coeff,(m2, p1),a2*b1*tc_cut) - np.add.at(temp_coeff,(m2, p2),a2*b2*tc_cut) - - #CHECKED - np.add.at(kappa_directional,(m1, p1),Jc_cut*(a1*b1)[:,np.newaxis]) - np.add.at(kappa_directional,(m1, p2),Jc_cut*(a1*b2)[:,np.newaxis]) - np.add.at(kappa_directional,(m2, p1),Jc_cut*(a2*b1)[:,np.newaxis]) - np.add.at(kappa_directional,(m2, p2),Jc_cut*(a2*b2)[:,np.newaxis]) - - lo_cut = np.sum(tc[r'material': polar_ave = np.array([np.sin(phi),np.cos(phi)]).T #Compute mfp_bulk-- - tau = rta['tau'] - v = rta['v'] - C = rta['C'] + tau = rta['scattering_time'] + v = rta['group_velocity'] + C = rta['heat_capacity'] sigma = np.einsum('k,ki->ki',C,v[:,:2]) mfp_bulk = np.einsum('ki,k->ki',v[:,:2],tau) #here we have the 2D one f = np.divide(np.ones_like(tau),tau, out=np.zeros_like(tau), where=tau!=0) @@ -32,11 +32,11 @@ def rta2DSym(rta,options_material)->'material': phi_bulk = phi_bulk[I] Wdiag = Wdiag[I] sigma = sigma[I] - 
#------------------------ + #Sampling - mfp_max = np.max(r_bulk)*1.1 - mfp_min = np.min(r_bulk)*0.9 + mfp_max = options_material.setdefault('mfp_max',np.max(r_bulk)*1.1) + mfp_min = options_material.setdefault('mfp_min',np.min(r_bulk)*0.9) mfp_sampled = np.logspace(np.log10(mfp_min)*1.01,np.log10(mfp_max),n_mfp,endpoint=True) #----------------- @@ -53,9 +53,6 @@ def rta2DSym(rta,options_material)->'material': np.add.at(Wdiag_sampled,(m2, p1),a2*b1*Wdiag) np.add.at(Wdiag_sampled,(m2, p2),a2*b2*Wdiag) - #lo_cut = np.sum(tc[r'material': Wdiag_inv = np.divide(1, Wdiag_sampled, out=np.zeros_like(Wdiag_sampled), where=Wdiag_sampled!=0) kappa_sampled = np.einsum('mqi,mq,mqj->ij',sigma_sampled,Wdiag_inv,sigma_sampled) - #print(kappa_sampled) - #kappa_sampled = np.einsum('mqi,mqj->ij',VMFP,sigma_sampled)/rta['alpha'][0][0] #Second method - #VMFP = np.einsum('m,qj->mqj',mfp_sampled,polar_ave[:,:2]).reshape(nm,2) data = {} data['sigma'] = sigma_sampled - data['kappa'] = kappa_sampled data['tc'] = Wdiag_sampled/np.sum(Wdiag_sampled) + data['kappa'] = kappa_sampled data['r_bulk'] = r_bulk data['F'] = VMFP data['VMFP'] = polar_ave @@ -81,7 +75,8 @@ def rta2DSym(rta,options_material)->'material': data['mfp_bulk'] = mfp_bulk data['mfp_sampled'] = mfp_sampled data['sigma_bulk'] = sigma - data['f'] = rta['f'] + data['f'] = rta['frequency'][I] + return utils.create_shared_memory_dict(data) diff --git a/openbte/rta2mfp.py b/openbte/rta2mfp.py deleted file mode 100644 index 88c5bc1b..00000000 --- a/openbte/rta2mfp.py +++ /dev/null @@ -1,50 +0,0 @@ -import numpy as np -from numpy import inf -import sys -from .utils import * -import sys - - -def main(): - data = load_data(sys.argv[1][:-4]) - - kappam = np.einsum('u,u,u,u->u',data['tau'],data['C'],data['v'][:,0],data['v'][:,0]) - mfp_bulk = np.einsum('ki,k->ki',data['v'],data['tau']) - - mfp = np.array([np.linalg.norm(m) for m in mfp_bulk]) - - I = np.argsort(mfp) - - kappa = kappam[I] - mfp = mfp[I] - Kacc = np.zeros_like(kappa) - - for 
n,k in enumerate(kappa): - if n == 0: - Kacc[n] = k - else: - Kacc[n] = k + Kacc[n-1] - - #select only meaningful MFPs - a = np.where(Kacc < Kacc[-1]*1e-5)[0][-1] - Kacc = Kacc[a:] - mfp = mfp[a:] - - #DownSamplimg - N = 100 - - mfp_sampled = np.logspace(np.log10(min(mfp)),np.log10(max(mfp)),N,endpoint=True) - mfp_sampled[-1]= mfp[-1] - - - Kacc_d = np.zeros(N) - for n,m in enumerate(mfp_sampled): - Kacc_d[n] = Kacc[np.where(mfp >= m)[0][0]] - - Kacc_d[-1] = Kacc[-1] - save_data('mfp',{'Kacc':np.array(Kacc_d),'mfp':np.array(mfp_sampled)}) - - - - - diff --git a/openbte/shape.py b/openbte/shape.py index ca4e634f..c7b5e018 100755 --- a/openbte/shape.py +++ b/openbte/shape.py @@ -49,6 +49,10 @@ def get_shape(argv): if shape == 'square': shapes.append(np.array(make_polygon(4,area))) + if shape == 'rectangle': + shape_factor = argv.setdefault('shape_factor',1) + shapes.append(np.array(make_rectangle(area,shape_factor))) + if shape == 'triangle': shapes.append(np.array(make_polygon(3,area))) diff --git a/openbte/solve_rta.py b/openbte/solve_rta.py index c188ef25..b93760b0 100644 --- a/openbte/solve_rta.py +++ b/openbte/solve_rta.py @@ -1,21 +1,7 @@ -def solve_rta(geometry,material,temperatures,options_solve_rta)->'solver': - - def get_m(Master,data): - Master.data = data - return Master - - def get_boundary(RHS,eb,n_elems): - - if len(eb) > 0: - RHS_tmp = np.zeros(n_elems) - np.add.at(RHS_tmp,eb,RHS) - else: - return np.zeros(n_elems) - - return RHS_tmp +def solve_rta(geometry,material,first_guess,options_solve_rta)->'solver': import numpy as np @@ -30,44 +16,41 @@ def get_boundary(RHS,eb,n_elems): from cachetools import cached,LRUCache from cachetools.keys import hashkey import scipy.sparse.linalg as spla + from openbte.first_guess import first_guess as fourier comm = MPI.COMM_WORLD - + #Options verbose = options_solve_rta.setdefault('verbose',True) max_bte_iter = options_solve_rta.setdefault('max_bte_iter',20) max_bte_error = 
options_solve_rta.setdefault('max_bte_error',1e-3) keep_lu = options_solve_rta.setdefault('keep_lu',True) method = options_solve_rta.setdefault('method','direct') - verbose = options_solve_rta.setdefault('verbose',False) + verbose = options_solve_rta.setdefault('verbose',False) #------------- Nlu = 1e5 if keep_lu else 0 cache_compute_lu = LRUCache(maxsize=Nlu) #------------ - X = temperatures['data'] - kappa_fourier = temperatures['kappa'][0] + X = first_guess['Temperature_Fourier'] + kappa_fourier = first_guess['kappa_fourier'][0] - - - #T_mat = fdata['Temperature_Fourier'] - jnp.einsum('qi,ci->qc',F,gradT) - n_elems = len(geometry['elems']) mfp = material['mfp_sampled']*1e9 - sigma = material['sigma']*1e9 dim = int(geometry['meta'][2]) F = material['VMFP'] - #Get discretization---- + + scaling_factor = np.linalg.norm(sigma[0,0])/mfp/np.linalg.norm(F[0])*1e-18 n_serial,n_parallel = sigma.shape[:2] block = n_parallel//comm.size rr = range(block*comm.rank,n_parallel) if comm.rank == comm.size-1 else range(block*comm.rank,block*(comm.rank+1)) - if comm.rank == 0 and ['verbose']: + if comm.rank == 0 and verbose: print(flush=True) - print(' Iter Thermal Conductivity [W/m/K] Error ''',flush=True) + print(' Iter Thermal Conductivity [W/m/K] Residual ''',flush=True) print(colored(' -----------------------------------------------------------','green'),flush=True) if len(geometry['db']) > 0: @@ -76,19 +59,30 @@ def get_boundary(RHS,eb,n_elems): Gbp2 = Gb.clip(min=0) tmp = Gb.clip(max=0).sum(axis=0).sum(axis=0) tot = np.divide(1, tmp, out=np.zeros_like(tmp), where=tmp!=0) - #data = {'GG': np.einsum('mqs,s->mqs',Gbp2,tot),'tot':tot} data = {'tot':tot} - #del tot, Gbp2 del Gbp2 else: data = None if comm.size > 1: data = utils.create_shared_memory_dict(data) - #GG = data['GG'] tot = data['tot'] else: - #GG = data['GG'] tot = data['tot'] + + def get_m(Master,data): + Master.data = data + return Master + + def get_boundary(RHS): + + if len(geometry['eb']) > 0: + RHS_tmp = 
np.zeros(n_elems) + np.add.at(RHS_tmp,geometry['eb'],RHS) + else: + return np.zeros(n_elems) + + return RHS_tmp + Gbp_new = np.einsum('mqj,js->mqs',material['sigma'][:,rr,:],geometry['db'],optimize=True).clip(min=0) GG_new = np.einsum('mqs,s->mqs',Gbp_new,tot) @@ -100,6 +94,7 @@ def get_boundary(RHS,eb,n_elems): np.add.at(D.T,geometry['i'],Gp.T) DeltaT = X.copy() + #---boundary---------------------- if len(geometry['db']) > 0: #boundary tmp = np.einsum('rj,jn->rn',material['VMFP'][rr],geometry['db'],optimize=True) @@ -107,16 +102,25 @@ def get_boundary(RHS,eb,n_elems): np.add.at(D.T,geometry['eb'],Gbp2.T) TB = DeltaT[geometry['eb']] + #---Thermalizing---------------------- + + RHS_isothermal = np.zeros((len(rr),n_elems)) + tmp = np.einsum('rj,jn->rn',material['VMFP'][rr],geometry['dfixed'],optimize=True) + np.add.at(D.T,geometry['efixed'],tmp.clip(min=0).T) + np.add.at(RHS_isothermal.T,geometry['efixed'],(tmp.clip(max=0)*geometry['fixed_temperature']).T) + #------------------------------------ + #Periodic--- - sss = np.asarray(geometry['pp'][:,0],dtype=int) P = np.zeros((len(rr),n_elems)) - np.add.at(P.T,geometry['i'][sss],-(geometry['pp'][:,1]*Gm[:,sss]).T) + if len(geometry['pp']) > 0: + sss = np.asarray(geometry['pp'][:,0],dtype=int) + np.add.at(P.T,geometry['i'][sss],-(geometry['pp'][:,1]*Gm[:,sss]).T) del G,Gp #---------------------------------------------------- kappa_vec = [kappa_fourier] kappa_old = kappa_vec[-1] - error = 1 + error = 1e6 kk = 0 kappa_tot = np.zeros(1) @@ -137,63 +141,88 @@ def compute_lu(n,m): A = get_m(Master,data[conversion]) return sp.linalg.splu(A) - def solve_iterative(n,m,B,X0): + def solve_iterative(n,m,B,X0): #Faster for larger structures data = np.concatenate((mfp[m]*Gm[n],mfp[m]*D[n]+np.ones(n_elems))) A = get_m(Master,data[conversion]) - return spla.lgmres(A,B,x0=X0)[0] + #TODO: This needs to be improved + B_res = geometry['generation'][np.newaxis,np.newaxis,:]+np.einsum('m,qc->mqc',mfp,P) + RHS_isothermal + ap = 
np.array([np.sum(np.square(B_res[0]))]) + a = np.array([0.0]) + comm.Allreduce([ap,MPI.DOUBLE],[a,MPI.DOUBLE],op=MPI.SUM) + B_res = np.sqrt(a[0]) + + #--------------------------------- + + def residual(m,n,q,x): + """Compute Residual""" + return mfp[m]*np.einsum('c,c->c',D[n],x) + mfp[m]*utils.sparse_dense_product(i,j,Gm[n],x) + x - geometry['generation']-mfp[m]*(P[n]+ RHS_isothermal[n]) + + + DeltaT_old = np.zeros_like(DeltaT) + Boundary_old = np.zeros((len(rr),len(geometry['eb']))) while kk max_bte_error: #Multiscale scheme----------------------------- - DeltaTp = np.zeros_like(DeltaT) - TBp = np.zeros_like(TB) - Jp = np.zeros_like(J) + DeltaTp = np.zeros_like(DeltaT) + r,rp = np.zeros((2,n_elems)) + divJ,divJp = np.zeros((2,n_elems)) + TBp = np.zeros_like(TB) + Jp = np.zeros_like(J) gradDeltaT = utils.compute_grad_common(DeltaT,geometry) - #kappa_bal,kappa_balp = np.zeros((2,n_parallel)) - - - RHS = -np.einsum('c,nc->nc',TB,Gbm2) if len(geometry['db']) > 0 else np.zeros((n_parallel,0)) + Boundary = -np.einsum('c,nc->nc',TB,Gbm2) if len(geometry['db']) > 0 else np.zeros((n_parallel,0)) for n,q in enumerate(rr): for m in range(n_serial): #---------------------------------------- - B = DeltaT + mfp[m]*(P[n] + get_boundary(RHS[n],geometry['eb'],n_elems)) + B = DeltaT + mfp[m]*(P[n] + get_boundary(Boundary[n]) + RHS_isothermal[n]) + geometry['generation'] + if method =='direct': - X = compute_lu(n,m).solve(B) + X = compute_lu(n,m).solve(B) else: X = solve_iterative(n,m,B,X) + rp += residual(m,n,q,X)-DeltaT_old - mfp[m]*get_boundary(Boundary_old[n]) + kappap[m,q] = np.dot(geometry['kappa_mask'],X-DeltaT) DeltaTp += X*material['tc'][m,q] if len(geometry['eb']) > 0: - #np.add.at(TBp,np.arange(geometry['eb'].shape[0]),-X[geometry['eb']]*GG[m,q]) np.add.at(TBp,np.arange(geometry['eb'].shape[0]),-X[geometry['eb']]*GG_new[m,n]) Jp += np.einsum('c,j->cj',X,sigma[m,q,0:dim])*1e-9 - DeltaT_old = DeltaT.copy() - + DeltaT_old = DeltaT.copy() + Boundary_old = Boundary.copy() 
comm.Barrier() comm.Allreduce([DeltaTp,MPI.DOUBLE],[DeltaT,MPI.DOUBLE],op=MPI.SUM) comm.Allreduce([Jp,MPI.DOUBLE],[J,MPI.DOUBLE],op=MPI.SUM) comm.Allreduce([kappap,MPI.DOUBLE],[kappa,MPI.DOUBLE],op=MPI.SUM) + comm.Allreduce([rp,MPI.DOUBLE],[r,MPI.DOUBLE],op=MPI.SUM) comm.Allreduce([TBp,MPI.DOUBLE],[TB,MPI.DOUBLE],op=MPI.SUM) - kappa_totp = np.array([np.einsum('mq,mq->',sigma[:,rr,int(geometry['meta'][-2])],kappa[:,rr])]) + kappa_totp = np.array([np.einsum('mq,mq->',sigma[:,rr,int(geometry['meta'][5])],kappa[:,rr])]) comm.Allreduce([kappa_totp,MPI.DOUBLE],[kappa_tot,MPI.DOUBLE],op=MPI.SUM) - + if kk == 0: + R0 = np.linalg.norm(r) + error = 1 + else: + error = np.linalg.norm(r)/R0 + kk +=1 - error = abs(kappa_old-kappa_tot[0])/abs(kappa_tot[0]) kappa_old = kappa_tot[0] kappa_vec.append(kappa_tot[0]) if verbose and comm.rank == 0: - print('{0:8d} {1:24.4E} {2:22.4E}'.format(kk,kappa_vec[-1],error),flush=True) + print('{0:8d} {1:24.4E} {2:26.4E}'.format(kk,kappa_vec[-1],error),flush=True) + + comm.Barrier() + + comm.Barrier() if verbose and comm.rank == 0: print(colored(' -----------------------------------------------------------','green'),flush=True) @@ -204,12 +233,17 @@ def solve_iterative(n,m,B,X0): #Add a dummy dimesion-- if dim == 2:J = np.append(J, np.zeros((n_elems,1)), axis=1) - data = {'Temperature_BTE':DeltaT,'Flux_BTE':J,'mfp_nano_sampled':kappa,'kappa':kappa_tot,'Temperature_Fourier':temperatures['data'],'Flux_Fourier':temperatures['flux'],'kappa_fourier':temperatures['kappa']} + data = {'Temperature_BTE':DeltaT,'Flux_BTE':J,'mfp_nano_sampled':kappa,'kappa':kappa_tot} + + comm.Barrier() #Compute vorticity--- if options_solve_rta.setdefault('compute_vorticity',False): - data.update({'vorticity_BTE' :compute_vorticity(geometry,J)['vorticity']}) - data.update({'vorticity_Fourier':compute_vorticity(geometry,temperatures['flux'])['vorticity']}) + tmp2 = None + if comm.rank == 0: + tmp2 = 
{'vorticity_BTE':utils.compute_vorticity(geometry,J)['vorticity'],'vorticity_Fourier':utils.compute_vorticity(geometry,first_guess['Flux_Fourier'])['vorticity']} + tmp = comm.bcast(tmp2,root=0) + data.update(comm.bcast(tmp)) #------------------- comm.Barrier() return data diff --git a/openbte/solver.py b/openbte/solver.py index 157070e4..fc1937d2 100644 --- a/openbte/solver.py +++ b/openbte/solver.py @@ -36,7 +36,7 @@ def print_bulk_info(mat,mesh): print(colored(' Bulk Thermal Conductivity (zz)[W/m/K]: ','green')+ str(round(kappa[2,2],2)),flush=True) dirr = ['x','y','z'] - print(colored(' Applied Thermal Gradient along : ','green')+ dirr[int(mesh['meta'][-2])],flush=True) + print(colored(' Applied Thermal Gradient along : ','green')+ dirr[int(mesh['meta'][5])],flush=True) def print_options(**argv): @@ -50,7 +50,7 @@ def print_options(**argv): def print_logo(): - v = pkg_resources.require("OpenBTE")[0].version + #v = pkg_resources.require("OpenBTE")[0].version print(' ',flush=True) print(colored(r''' ___ ____ _____ _____ ''','green'),flush=True) print(colored(r''' / _ \ _ __ ___ _ __ | __ )_ _| ____|''','green'),flush=True) @@ -58,7 +58,7 @@ def print_logo(): print(colored(r''' | |_| | |_) | __/ | | | |_) || | | |___ ''','green'),flush=True) print(colored(r''' \___/| .__/ \___|_| |_|____/ |_| |_____|''','green'),flush=True) print(colored(r''' |_| ''','green'),flush=True) - print(colored(r''' v. ''' + str(v) ,'blue'),flush=True) + #print(colored(r''' v. 
''' + str(v) ,'blue'),flush=True) print(flush=True) print(' GENERAL INFO',flush=True) print(colored(' -----------------------------------------------------------','green'),flush=True) @@ -70,7 +70,6 @@ def print_logo(): print(flush=True) - def Solver(**argv): #COMMON OPTIONS-------------------------------------- @@ -78,8 +77,8 @@ def Solver(**argv): only_fourier = argv.setdefault('only_fourier',False) #--------------------------------------------------- - geometry = argv['geometry'] if 'geometry' in argv.keys() else utils.load_shared('geometry') - material = argv['material'] if 'material' in argv.keys() else utils.load_shared('material') + geometry = argv['geometry'] if 'geometry' in argv.keys() else utils.load('geometry') + material = argv['material'] if 'material' in argv.keys() else utils.load('material') #Print relevant options----------------------------- if comm.rank == 0 : @@ -93,15 +92,16 @@ def Solver(**argv): #--------------------------------------------------- #Solve fourier-- - X = first_guess(geometry,material,{}) + output = first_guess(geometry,material,{'verbose':verbose}) #Solve bte-- if not only_fourier: - output = solve_rta(geometry,material,X,argv) - + output_rta = solve_rta(geometry,material,output,argv) + output.update(output_rta) + #prepare_data(argv) if comm.rank == 0 and argv.setdefault('save',True): - utils.save_data(argv.setdefault('filename','solver'),output) + utils.save(argv.setdefault('filename','solver'),output) if argv['verbose'] and comm.rank == 0: print(' ',flush=True) diff --git a/openbte/suppression.py b/openbte/suppression.py index c38f396d..915d1da7 100644 --- a/openbte/suppression.py +++ b/openbte/suppression.py @@ -7,115 +7,7 @@ comm = MPI.COMM_WORLD -def suppression_3D(solver,material,options_suppression)->'suppression': - - data = None - if comm.rank == 0: - - #m1 = options_suppression.setdefault('m',0) - #l1 = options_suppression.setdefault('l',3) - - phi = material['phi'] - mfp_sampled = material['mfp_sampled'] - theta = 
material['theta'] - n_mfp = len(mfp_sampled) - n_phi = len(phi) - n_theta = len(theta) - - ##--------------- - #l2 = int(n_phi/2)-l1 - #l3 = int(n_phi/2)+l1 - #l4 = int(n_phi) -l1 - - #Mesh-- - - #----------- - mfp_0 = 1e-10 - mfp = np.log10(mfp_sampled) - mfp -= np.min(mfp) - mfp +=1 - #------------ - - #R, P = np.meshgrid(mfp, phi) - #X, Y = R*np.cos(P+np.pi/2), R*np.sin(P+np.pi/2) - #---------- - mfp_nano = solver['mfp_nano_sampled'].reshape((n_mfp,n_theta,n_phi)) - - - S = np.divide(mfp_nano,material['F'][:,:,:,0],\ - out=np.zeros_like(mfp_nano), where=material['F'][:,:,:,0]!=0)*1e9 - - - #S = np.divide(mfp_nano,np.linalg.norm(material['F'][:,:,:],axis=3),\ - # out=np.zeros_like(mfp_nano), where=material['F'][:,:,:,0]!=0)*1e9 - - - #S = np.log10(S.clip(min=1e-5)) - #print(S[30,24,:]) - #fig, ax = plt.subplots(subplot_kw={'projection': 'polar'}) - #ax.plot(phi, S[10,24,:]) - #plt.show() - #quit() - - #S -= np.min(S) - #S += 1 - - THETA, PHI = np.meshgrid(theta, phi) - - #R = np.absolute(mfp_nano[20].T)*1e18 - R = np.absolute(S[20].T) - #R = np.ones_like(THETA) - - X = R * np.sin(PHI) * np.sin(THETA) - Y = R * np.cos(PHI) * np.sin(THETA) - Z = R * np.cos(THETA) - - fig = plt.figure() - ax = fig.add_subplot(1,1,1, projection='3d') - plot = ax.plot_surface( - X, Y, Z, rstride=1, cstride=1, cmap=plt.get_cmap('jet'), - linewidth=0, antialiased=False, alpha=0.5) - - v = [0.2,0.2,0.01] - #ax.set_xlim(-150, 150) - #ax.set_ylim(-150, 150) - #ax.set_zlim(-150, 150) - ax.set_xlim(-v[0], v[0]) - ax.set_ylim(-v[1], v[1]) - ax.set_zlim(-v[2], v[2]) - - plt.show() - - - - print(S.shape) - quit() - - Sm = np.mean(S,axis=0) - - k = 0.5484670176525239 - - plt.plot(Sm) - plt.plot([0,len(Sm)],[k,k]) - - plt.show() - #print(Sm.shape) - quit() - - - #Sm = 0.5*(np.mean(S[l1:l2,m1:] + S[l3:l4,m1:],axis=0)) - #Sm = 0.5*(np.mean(S[l1:l2,m1:] + S[l3:l4,m1:],axis=0)) - - #Cut unnecessary data - S[0 :l1] = 0 - S[l2:l3] = 0 - S[l4: ] = 0 - S[:,:m1] = 0 - - data = 
{'S':S,'X':X,'Y':Y,'mfp':mfp_sampled,'Sm':Sm} - - -def suppression_2DSym(solver,material,options_suppression)->'suppression': +def suppression(solver,material,options_suppression)->'suppression': data = None if comm.rank == 0: @@ -133,8 +25,6 @@ def suppression_2DSym(solver,material,options_suppression)->'suppression': l3 = int(n_phi/2)+l1 l4 = int(n_phi) -l1 - #Mesh-- - #----------- mfp_0 = 1e-10 mfp = np.log10(mfp_sampled) @@ -159,34 +49,16 @@ def suppression_2DSym(solver,material,options_suppression)->'suppression': S[l4: ] = 0 S[:,:m1] = 0 - - data = {'S':S,'X':X,'Y':Y,'mfp':mfp_sampled,'Sm':Sm} - - return utils.create_shared_memory_dict(data) + if options_suppression['show']: + plt.plot(mfp_sampled,Sm) + plt.xscale('log') + plt.show() -def plot_suppression(suppression): - from pubeasy import MakeFigure - - Sm = suppression['Sm'] - mfp = suppression['mfp'] - fig = MakeFigure() - fig.add_plot(mfp,Sm,color='k') - fig.add_labels('MFP [nm]','Suppression') - fig.finalize(grid=True,xscale='log',write = True,show=True) - + data = {'S':S,'X':X,'Y':Y,'mfp':mfp_sampled,'Sm':Sm} -def plot_angular_suppression(suppression): + return utils.create_shared_memory_dict(data) - S = suppression['S'] - X = suppression['X'] - Y = suppression['Y'] - axs = plt.subplot(111,projection='3d') - axs.set_xlabel('X') - axs.set_ylabel('Y') - axs.plot_surface(X, Y, S,antialiased=True,cmap='viridis', edgecolor='none',vmin=0,vmax=0.5) - axs.set_zlim([0,1]) - plt.show() diff --git a/openbte/utils.py b/openbte/utils.py index 4f099253..c92a3e6d 100755 --- a/openbte/utils.py +++ b/openbte/utils.py @@ -15,9 +15,9 @@ import time import functools -def compute_grad_common(data,geometry): +def compute_grad_common(data,geometry,**argv): """ Compute grad via least-square method """ - jump = True + jump = argv.setdefault('jump',True) #Compute deltas------------------- rr = [] for i in geometry['side_per_elem']: @@ -39,24 +39,33 @@ def compute_grad_common(data,geometry): rr[kc1][ind1] = [kc2,kc1, delta] 
rr[kc2][ind2] = [kc1,kc2,-delta] + #if ll in geometry['hot_sides']: + # rr[kc1][ind1] = [-0.5,kc1, 0] + + #if ll in geometry['cold_sides']: + # rr[kc1][ind1] = [-1.5,kc1, 0] + diff_data = [[data[j[0]]-data[j[1]]+j[2] for j in f] for f in rr] + #diff_data = [[(data[j[0]] if j[0] >= 0 else (j[0]+1))-data[j[1]]+j[2] for j in f] for f in rr] return np.array([np.einsum('js,s->j',geometry['weigths'][k,:,:geometry['n_non_boundary_side_per_elem'][k]],np.array(dt)) for k,dt in enumerate(diff_data)]) -def fix_instability(F,B,scale=True): +def fix_instability(F,B,scale=True,floating=True): n_elems = F.shape[0] if scale: scale = 1/F.max(axis=0).toarray()[0] else: scale = np.ones(n_elems) - n = np.random.randint(n_elems) - scale[n] = 0 + #n = np.random.randint(n_elems) + #if floating: scale[n] = 0 F.data = F.data * scale[F.indices] - F[n,n] = 1 - B[n] = 0 + + #if floating: + # F[n,n] = 1 + # B[n] = 0 return scale @@ -70,36 +79,52 @@ def fix_instability(F,B,scale=True): def extract_variables(solver,geometry): #Create variables from data-- + tags = solver.keys() variables = {} - variables['Temperature_BTE'] = {'data':solver['Temperature_BTE'],'units':'K','increment':-geometry['applied_gradient']} - variables['Temperature_Fourier'] = {'data':solver['Temperature_Fourier'],'units':'K','increment':-geometry['applied_gradient']} - variables['Flux_BTE'] = {'data':solver['Flux_BTE'],'units':'W/m/m','increment':[0,0,0]} - variables['Flux_Fourier'] = {'data':solver['Flux_Fourier'],'units':'W/m/m','increment':[0,0,0]} - if 'vorticity_BTE' in solver.keys(): + if 'Temperature_BTE' in tags: + variables['Temperature_BTE'] = {'data':solver['Temperature_BTE'],'units':'K','increment':-geometry['applied_gradient']} + variables['Flux_BTE'] = {'data':solver['Flux_BTE'],'units':'W/m/m','increment':[0,0,0]} + if 'Temperature_Fourier' in tags: + variables['Temperature_Fourier'] = {'data':solver['Temperature_Fourier'],'units':'K','increment':-geometry['applied_gradient']} + variables['Flux_Fourier'] = 
{'data':solver['Flux_Fourier'],'units':'W/m/m','increment':[0,0,0]} + variables['Heat_Generation'] = {'data':solver['Heat_Generation'],'units':'W/m/m','increment':[0,0,0]} + if 'vorticity_BTE' in tags: variables['vorticity_BTE'] = {'data':solver['vorticity_BTE'] ,'units':'W/m/m/m','increment':[0,0,0]} - if 'vorticity_Fourier' in solver.keys(): + if 'vorticity_Fourier' in tags: variables['vorticity_Fourier'] = {'data':solver['vorticity_Fourier'],'units':'W/m/m/m','increment':[0,0,0]} - return variables +def compute_laplacian(data,geometry): + + n = len(data) + laplacian = np.zeros((n,2,2)) + + grad = compute_grad_common(data,geometry) + + laplacian[:,0,:] = compute_grad_common(grad[:,0],geometry,jump=False) + laplacian[:,1,:] = compute_grad_common(grad[:,1],geometry,jump=False) + + return laplacian + + def compute_vorticity(geometry,J): """Compute vorticity (only for 2D cases)""" - data = None - if comm.rank == 0: - vorticity = np.zeros((len(geometry['elems']),3)) - grad_x = compute_grad_common(J[:,0],geometry) - grad_y = compute_grad_common(J[:,1],geometry) - #Defines only for 2D - vorticity[:,2] = grad_y[:,0]-grad_x[:,1] - data = {'vorticity':vorticity*1e9} #W/m/m/m + #data = None + #if comm.rank == 0: + vorticity = np.zeros((len(geometry['elems']),3)) + grad_x = compute_grad_common(J[:,0],geometry) + grad_y = compute_grad_common(J[:,1],geometry) + #Defines only for 2D + vorticity[:,2] = grad_y[:,0]-grad_x[:,1] + data = {'vorticity':vorticity*1e9} #W/m/m/m + + return data - return create_shared_memory_dict(data) + #return create_shared_memory_dict(data) - - def expand_variables(data,geometry): @@ -183,23 +208,13 @@ def fast_interpolation(fine,coarse,bound=False,scale='linear') : +def make_rectangle(area,aspect_ratio): + Lx = np.sqrt(area/aspect_ratio) + Ly = aspect_ratio*Lx -def periodic_kernel(x1, x2, p,l,variance): - return variance*np.exp(-2/l/l * np.sin(np.pi*abs(x1-x2)/p) ** 2) - -def gram_matrix(xs,p,l,variance): - return 
[[periodic_kernel(x1,x2,p,l,variance) for x2 in xs] for x1 in xs] + return [[-Lx/2,-Ly/2],[-Lx/2,Ly/2],[Lx/2,Ly/2],[Lx/2,-Ly/2]] -def generate_random_interface(p,l,variance,scale): - - xs = np.arange(-p/2, p/2,p/200) - mean = [0 for x in xs] - gram = gram_matrix(xs,p,l,variance) - ys = np.random.multivariate_normal(mean, gram)*scale - f = interpolate.interp1d(xs, ys,fill_value='extrapolate') - - return f def make_polygon(Na,A): @@ -288,30 +303,24 @@ def already_included_old(all_points,new_point,p_list): return line_list - - - -def save_data(fh, namedict): +def save(fh, namedict): if comm.rank == 0: with gzip.GzipFile(fh + '.npz', 'w') as f: pickle.dump(namedict, f,protocol=pickle.HIGHEST_PROTOCOL) -def load(fh): - - return {f:load_data(f) for f in fh} - -def load_data(fh): +def load(filename): if comm.rank == 0: - if os.path.isfile(fh + '.npz'): - with gzip.open(fh + '.npz', 'rb') as f: - return pickle.load(f) - print("Can't load " + fh) - quit() - return -1 - comm.Barrier() + try: + with gzip.open(filename + '.npz', 'rb') as f: + data = pickle.load(f) + except ValueError: + print("We could not find the file: ",filename) + else: data = None + + return create_shared_memory_dict(data) @@ -356,7 +365,6 @@ def find_elem(mesh,p,guess): def generate_frame(**argv): - argv.setdefault('bounding_box',[0,0,0]) Lx = float(argv.setdefault('lx',argv['bounding_box'][0])) Ly = float(argv.setdefault('ly',argv['bounding_box'][1])) @@ -385,9 +393,11 @@ def translate_shape(s,b,**argv): return out + def repeat_merge_scale(argv): polygons = argv['polygons'] + argv.setdefault('heat_source',[None]*len(polygons)) lx = argv['lx'] ly = argv['ly'] @@ -413,67 +423,80 @@ def repeat_merge_scale(argv): frame = Polygon([[-dx/2,-dy/2],[-dx/2,dy/2],[dx/2,dy/2],[dx/2,-dy/2]]) #--------------------- - #Store only the intersecting polygons + + #Create artificial base in case of 'custom' polygon + if not 'base' in argv.keys(): + base = [] + for p in polygons: + base.append(np.mean(p,axis=0)) + 
argv['base'] = base + #----------------------------- + extended_base = [] - final = [] - #stretch base: - #base = [[tmp[0]*scale[0],tmp[1]*scale[1]] for tmp in argv['base']] + #final = [] + new_poly = [] + corr1 = {} + heat_source = [] for pp,poly in enumerate(polygons): for kp in range(len(pbc)): - new_base = [argv['base'][pp][i]+pbc[kp][i] for i in range(2)] - - if not new_base in extended_base: #periodicity takes back to original shape + new_base = [argv['base'][pp][i]+pbc[kp][i] for i in range(2)] + #if not new_base in extended_base: #periodicity takes back to original shape tmp = [[ p[0] + pbc[kp][0],p[1] + pbc[kp][1] ] for p in poly] p1 = Polygon(tmp) if p1.intersects(frame): extended_base.append(new_base) - - thin = Polygon(p1).intersection(frame) - if isinstance(thin, shapely.geometry.multipolygon.MultiPolygon): - tmp = list(thin) - for t in tmp: - final.append(t) - else: - final.append(p1) - - - #plot_shapes(final,frame) - #Create bulk surface---get only the exterior to avoid holes - #[plt.plot(*p.exterior.xy) for p in final] - #plt.plot(*frame.exterior.xy) - #plt.gca().set_aspect('equal') - #plt.gca().axis('off') - #plt.show() - - MP = MultiPolygon(final) - - conso = unary_union(MP) - - new_poly = [] - if isinstance(conso, shapely.geometry.multipolygon.MultiPolygon): - for i in conso: - new_poly.append(list(i.exterior.coords)) - else: - new_poly.append(list(conso.exterior.coords)) + #corr1[len(extended_base)-1] = pp + + #thin = Polygon(p1).intersection(frame) + #if isinstance(thin, shapely.geometry.multipolygon.MultiPolygon): + # tmp = list(thin) + # for t in tmp: + # final.append(t) + #else: + #final.append(p1) + new_poly.append(list(p1.exterior.coords)) + heat_source += [argv['heat_source'][pp]] + #argv['heat_source'] = [argv['heat_source'][c] for c in corr] + + argv['heat_source'] = heat_source + #MP = MultiPolygon(final) + + #conso = unary_union(MP) + + #new_poly = [] + #if isinstance(conso, shapely.geometry.multipolygon.MultiPolygon): + # for i in 
conso: + # new_poly.append(list(i.exterior.coords)) + #else: + # new_poly.append(list(conso.exterior.coords)) #Find the closest centroids-- - extended_base = np.array(extended_base) - tmp = np.zeros_like(extended_base) - for n,p in enumerate(new_poly): - index = np.argmin(np.linalg.norm(extended_base - np.mean(p,axis=0)[np.newaxis,...],axis=1)) - tmp[n] = extended_base[index] - extended_base = tmp.copy() - #--------------------------------- + #extended_base = np.array(extended_base) #this is the original one but repeated + #tmp = np.zeros_like(extended_base) + #corr = [] + #for n,p in enumerate(new_poly): + # index = np.argmin(np.linalg.norm(extended_base - np.mean(p,axis=0)[np.newaxis,...],axis=1)) + # corr.append(corr1[index]) + # tmp[n] = extended_base[index] + #extended_base = tmp.copy() + #argv['heat_source'] = [argv['heat_source'][c] for c in corr] + #----- + + #original_number = len(final) + #final_number = len(new_poly) + #overlap = final_number < original_number + overlap = False #cut redundant points - if argv.setdefault('cut_redundant_point',False): - new_poly_2 = [] + #This cut points that are very close to each other + n_cut = 0 + new_poly2 = [] for poly in new_poly: N = len(poly) tmp = [] @@ -482,16 +505,14 @@ def repeat_merge_scale(argv): p2 = poly[(n+1)%N] if np.linalg.norm(np.array(p1)-np.array(p2)) >1e-4: tmp.append(p1) + else: + n_cut +=1 new_poly2.append(tmp) new_poly = new_poly2.copy() - #cut redundant points - discard = 0 - while discard > 0: - - discard = 0 - new_poly_2 = [] - for gg,poly in enumerate(new_poly): + discard = 0 + new_poly_2 = [] + for gg,poly in enumerate(new_poly): N = len(poly) tmp = [] for n in range(N): @@ -504,12 +525,13 @@ def repeat_merge_scale(argv): else: discard += 1 new_poly_2.append(tmp) - new_poly = new_poly_2.copy() - + new_poly = new_poly_2.copy() + #---------------------------- - dmin = check_distances(final) - #dmin = check_distances(new_poly) + #dmin = check_distances(final) + dmin = 0 + 
#scale----------------------- if argv.setdefault('relative',True): @@ -530,16 +552,11 @@ def repeat_merge_scale(argv): argv['polygons'] = polygons argv['dmin'] = dmin - - - - - + argv['overlap'] = int(overlap) def check_distances(new_poly): - #Merge pores is they are too close Pores = [ Polygon(p) for p in new_poly] dmin = 1e4 @@ -577,7 +594,6 @@ def check_distances(new_poly): #print('left',p1) dmin = d - #print(dmin) return dmin @@ -624,15 +640,6 @@ def get_linear_indexes(mfp,value,scale,extent): return i,ai,j,aj -def load_shared(filename): - - data = None - if comm.rank == 0: - data = load_data(filename) - data = create_shared_memory_dict(data) - - return data - def shared_array(value): @@ -668,6 +675,8 @@ def compute_spherical(mfp_bulk): return r,phi_bulk,theta_bulk + + def create_shared_memory_dict(varss): dtype = [np.int32,np.int64,np.float32,np.float64] @@ -676,13 +685,9 @@ def create_shared_memory_dict(varss): if comm.Get_rank() == 0: var_meta = {} for var,value in varss.items(): - if callable(value) or type(value) == str or type(value) == int or type(value) == float: - var_meta[var] = [None,None,None,None,False] - continue - if type(value) == list: - value = np.array(value) - - #Check types + if callable(value) or type(value) == str or type(value) == int or type(value) == float or type(value) == list or type(value) == dict or type(value) == bool: + var_meta[var] = [None,None,None,None,False] + continue if value.dtype == np.int32: data_type = 0 itemsize = MPI.INT32_T.Get_size() @@ -718,7 +723,7 @@ def create_shared_memory_dict(varss): dict_output[var] = output else: if comm.rank == 0: - dict_output[var] = varss[var] + dict_output[var] = varss[var] del varss comm.Barrier() diff --git a/openbte/viewer.py b/openbte/viewer.py index d16efdd3..9ddae32e 100644 --- a/openbte/viewer.py +++ b/openbte/viewer.py @@ -37,7 +37,7 @@ def plot_results(solver,geometry,material,options_maps): dim = int(geometry['meta'][2]) - data = utils.extract_variables(solver) + data = 
utils.extract_variables(solver,geometry) data = utils.expand_variables(data,geometry) #this is needed to get the component-wise data for plotly @@ -166,8 +166,8 @@ def plot_results(solver,geometry,material,options_maps): fig.update_layout(scene_camera=camera) fig.update_layout(updatemenus=updatemenus) - #if argv.setdefault('write_html',False): - # fig.write_html("plotly.html") + if options_maps.setdefault('write_html',False): + fig.write_html("plotly.html") #plotly.io.write_json(fig,'ff.json') # plotly.io.to_image(fig,format='png') diff --git a/recipe/Dockerfile b/recipe/Dockerfile deleted file mode 100644 index 4d1a3572..00000000 --- a/recipe/Dockerfile +++ /dev/null @@ -1,49 +0,0 @@ -FROM continuumio/anaconda3 -MAINTAINER Giuseppe Romano - -RUN apt-get update - -RUN apt-get install -y vim - -RUN conda update -n base -c defaults conda - -RUN apt-get install -y build-essential libopenmpi-dev libgmsh-dev - -RUN wget https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz - -RUN tar -xzf openmpi-4.0.3.tar.gz - -RUN cd openmpi-4.0.3 && ./configure --prefix=/usr/local/openmpi && make all && make install && rm -f openmpi-4.0.3.tar.gz - -COPY requirements.txt . -RUN pip install -r requirements.txt - -COPY openbte-2.47.tar.gz . 
-RUN pip install --no-cache openbte-2.47.tar.gz -RUN rm openbte-2.47.tar.gz - - -RUN useradd -ms /bin/bash openbte - -ENV OMPI_MCA_btl_vader_single_copy_mechanism none - -ENV OMPI_MCA_btl_base_warn_component_unused 0 - -ENV MPLCONFIGDIR /tmp/matplotlib - -RUN mkdir /workspace - -USER openbte - -WORKDIR /home/openbte - -#RUN load openbte - -EXPOSE 8050 - - - - - - - diff --git a/recipe/README b/recipe/README deleted file mode 100644 index 7d23ed63..00000000 --- a/recipe/README +++ /dev/null @@ -1 +0,0 @@ -This directory contains setup files for Docker and Conda diff --git a/recipe/build_conda.sh b/recipe/build_conda.sh deleted file mode 100755 index 069e7c89..00000000 --- a/recipe/build_conda.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -conda config --set anaconda_upload no -#cd ../ -#sudo python setup.py sdist -#cd recipe -#conda build --python=3.7 -c conda-forge meta.yaml -conda build --python=3.7 --numpy=1.14 -c conda-forge -c ostrokach-forge meta.yaml -conda build purge diff --git a/recipe/build_docker.sh b/recipe/build_docker.sh deleted file mode 100755 index e4ac26db..00000000 --- a/recipe/build_docker.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - - -docker build -t romanodev/openbte . -docker push romanodev/openbte diff --git a/recipe/meta.yaml b/recipe/meta.yaml deleted file mode 100644 index 29419901..00000000 --- a/recipe/meta.yaml +++ /dev/null @@ -1,58 +0,0 @@ - -package: - name: "openbte" - version: "2.8" - -source: - url: ../dist/openbte-2.8.tar.gz -build: - - number: 1 - noarch: python - script: "{{ PYTHON }} -m pip install . 
--no-deps --ignore-installed -vvv " - -requirements: - host: - - mpi4py - - numpy - - pip - - plotly - - python >=3.6 - - pyvtk - - scipy - - gmsh - - shapely - - twisted - - pandas - - termcolor - - unittest2 - run: - - future - - mpi4py - - plotly - - numpy - - python >=3.6 - - pyvtk - - scipy - - shapely - - termcolor - - unittest2 - - gmsh - - pyqt - -test: - imports: - - openbte -about: - home: The package home page - license: GPLv3 - license_family: GPL3 - license_file: - summary: Boltzmann Transport Equation for Phonons - doc_url: - dev_url: - description: For Windows you'll need to install MSMPI. Install with conda install -c conda-forge -c gromano openbte - -extra: - recipe-maintainers: - - romanodev diff --git a/recipe/ref.png b/recipe/ref.png deleted file mode 100644 index ac47ee2e..00000000 Binary files a/recipe/ref.png and /dev/null differ diff --git a/recipe/requirements.txt b/recipe/requirements.txt deleted file mode 100644 index d144af30..00000000 --- a/recipe/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -scikit-optimize -gmsh diff --git a/setup.cfg b/setup.cfg deleted file mode 100755 index adf5ed72..00000000 --- a/setup.cfg +++ /dev/null @@ -1,7 +0,0 @@ -[bdist_wheel] -universal = 1 - -[egg_info] -tag_build = -tag_date = 0 - diff --git a/setup.py b/setup.py index 0ef90e0e..dede1fce 100755 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ import os setup(name='openbte', - version='2.49', + version='2.71.0', description='Boltzmann Transport Equation for Phonons', author='Giuseppe Romano', author_email='romanog@mit.edu', @@ -11,6 +11,7 @@ install_requires=['shapely', 'pandas', 'docutils==0.16', + 'numpy', 'dash', 'unittest2', 'nbsphinx', @@ -20,7 +21,6 @@ 'alabaster', 'cachetools', 'mpi4py', - 'numpy', 'scipy', 'dash_core_components', 'dash_html_components', @@ -33,15 +33,10 @@ ], license='GPLv3',\ packages = ['openbte'], - package_data = {'openbte':['materials/*.npz','efunx.yaml']}, + package_data = {'openbte':['materials/*.npz']}, + 
include_package_data=True, entry_points = { 'console_scripts': ['AlmaBTE2OpenBTE=openbte.almabte2openbte:main',\ - 'AlmaBTE2OpenBTE2=openbte.almabte2openbte_new:main',\ - 'Phono3py2OpenBTE=openbte.phono3py2openbte:main',\ - 'rta2mfp=openbte.rta2mfp:main',\ - 'bundle_data=openbte.bundle_data:main',\ - 'gui=openbte.gui:main',\ - 'app=openbte.app:App',\ - 'OpenBTE=openbte.openbte:main'], + 'Phono3py2OpenBTE=openbte.phono3py2openbte:main']\ }, - zip_safe=False) + zip_safe=True) diff --git a/tests/solver.npz b/tests/solver.npz deleted file mode 100644 index e66d0b7f..00000000 Binary files a/tests/solver.npz and /dev/null differ diff --git a/tests/solver_2DSym.npz b/tests/solver_2DSym.npz index 4aeb8550..d06bed76 100644 Binary files a/tests/solver_2DSym.npz and b/tests/solver_2DSym.npz differ diff --git a/tests/solver_3D.npz b/tests/solver_3D.npz deleted file mode 100644 index 120acdaf..00000000 Binary files a/tests/solver_3D.npz and /dev/null differ diff --git a/tests/solver_disk.npz b/tests/solver_disk.npz new file mode 100644 index 00000000..f6091b56 Binary files /dev/null and b/tests/solver_disk.npz differ diff --git a/tests/solver_disk.py b/tests/solver_disk.py new file mode 100644 index 00000000..5957569f Binary files /dev/null and b/tests/solver_disk.py differ diff --git a/tests/test_rta.py b/tests/test_rta.py index 22bcecfd..014953f1 100644 --- a/tests/test_rta.py +++ b/tests/test_rta.py @@ -3,29 +3,27 @@ from mpi4py import MPI comm = MPI.COMM_WORLD +mat = Material(source='database',filename='rta_Si_300',model='rta2DSym',save=False) class TestClass(object): def test_rta2DSym(self): - mat = Material(source='database',filename='rta_Si_300',model='rta2DSym',save=False) geo = Geometry(model='lattice',lx = 10,ly = 10, step = 1, base = [[0,0]],porosity=0.2,shape='square',save=False,delete_gmsh_files=True) - sol = Solver(geometry = geo,material=mat,save=True) + sol = Solver(geometry = geo,material=mat,save=False) if comm.rank == 0: - assert 
np.allclose(sol['kappa'],load_data('solver_2DSym')['kappa'],rtol=1e-3) + assert np.allclose(sol['kappa'],load('solver_2DSym')['kappa'],rtol=1e-3) - def test_rta3D(self): - - mat = Material(source='database',filename='rta_Si_300',model='rta3D',save=False,n_theta=12,n_mfp=30,n_phi=24) - geo = Geometry(model='lattice',lx = 10,ly = 10, lz=2,step = 2, base = [[0,0]],porosity=0.2,shape='square',save=False,delete_gmsh_files=True) - sol = Solver(geometry = geo,material=mat,save=False,max_bte_iter=10) - if comm.rank == 0: - assert np.allclose(sol['kappa'],load_data('solver_3D')['kappa'],rtol=1e-2) + def test_disk(self): + geo = Geometry(model='disk',Rh=1,R=10,step=1,heat_source=0.5,save=False) + sol = Solver(geometry = geo,material=mat,save=False) + if comm.rank == 0: + assert np.allclose(sol['Temperature_BTE'],load('solver_disk')['Temperature_BTE'],rtol=1e-3)