diff --git a/.gitignore b/.gitignore index f59b5da737..c5b402d8e4 100644 --- a/.gitignore +++ b/.gitignore @@ -309,3 +309,5 @@ perun_results/ bench_data/ my_dev_stuff/ docs/source/autoapi + +.DS_Store diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e09599c26e..ec6effe97d 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,28 +1,13 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required version: 2 -# Set the version of Python and other tools you might need build: os: ubuntu-22.04 tools: - python: "3.11" - apt_packages: - - pandoc - - libopenmpi-dev - -# Build documentation in the docs/ directory with Sphinx -sphinx: - configuration: doc/source/conf.py + python: "3.12" -# We recommend specifying your dependencies to enable reproducible builds: -# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - - method: pip - path: . - extra_requirements: - - docs + - requirements: doc/requirements.txt + +mkdocs: + configuration: mkdocs.yml diff --git a/.talismanrc b/.talismanrc new file mode 100644 index 0000000000..0f8f34e332 --- /dev/null +++ b/.talismanrc @@ -0,0 +1,27 @@ +allowed_patterns: +- 'uses: [A-Za-z-\/]+@[\w\d]+ # v\d+\.\d+\.\d+' +fileignoreconfig: +- filename: doc/api/heat/core/communication.md + checksum: bc3c94844563914d8699c1402a4abde3299fd116770d7ef5e1c070a033f4eacf +- filename: doc/api/heat/core/random.md + checksum: c1eb4ea8d8435c712639e949296411aff4789c2d89751fd2951019bbe6bfc1da +- filename: doc/api/heat/core/tiling.md + checksum: a64c827bbf08bd0f61470d3352ee9e711b23521aea8944854c34c7c00c393f33 +- filename: doc/api/heat/core/io.md + checksum: f5289cd5507487a1cfd432a038083c18b53b30b338a2a6187116b1ee3d821536 +- filename: doc/api/heat/core/exponential.md + checksum: 2a840d5c6bb43caada5ed7b4aa92739fdfa9523ce50fc31f606ddc1dd00bcddf +- filename: doc/api/heat/core/factories.md + checksum: 3be4e1f2ec3ffc1fc55bca4a082eff60cb2426917814dcb0c6789b60f5995c27 +- filename: doc/api/heat/core/linalg/basics.md + checksum: 59448ee5640ea8007856ce844e68faf195571ba99ec4588ed9a20cf310c58d1f +- filename: doc/api/heat/core/dndarray.md + checksum: 904826a791036d03728861a1d432faa95f3d4f9a358f678baea4ada7fddb789f +- filename: doc/api/heat/graph/laplacian.md + checksum: d8d4ec48750ae7c5d34e11ff7e6a79157f87a220accd57f4d8fa4d1a82254605 +- filename: doc/api/heat/sparse/factories.md + checksum: 7f57c6834ad98632f9a5e599c9fb9ec927c391fa8d44041d8a6efb1806c6a98e +- filename: doc/api/heat/optim/dp_optimizer.md + checksum: 47492db3eb665a09e16b9346487925554f2c96d020f3d8a0d2180a5f0b5d511b +- filename: doc/api/heat/optim/index.md + checksum: 0965bdff7bc43743551b0043720b525cc0992247ed4840a69228d4fe28ebb812 diff --git a/doc/api/heat/classification/index.md b/doc/api/heat/classification/index.md new file mode 100644 index 0000000000..ffbd417180 --- /dev/null +++ b/doc/api/heat/classification/index.md @@ -0,0 +1,8 @@ +Module heat.classification +========================== +Provides classification algorithms.
+ +Sub-modules +----------- +* heat.classification.kneighborsclassifier +* heat.classification.tests diff --git a/doc/api/heat/classification/kneighborsclassifier.md b/doc/api/heat/classification/kneighborsclassifier.md new file mode 100644 index 0000000000..19f81b4b01 --- /dev/null +++ b/doc/api/heat/classification/kneighborsclassifier.md @@ -0,0 +1,75 @@ +Module heat.classification.kneighborsclassifier +=============================================== +Implements the k-nearest neighbors (kNN) classifier + +Classes +------- + +`KNeighborsClassifier(n_neighbors: int = 5, effective_metric_: Callable = None)` +: Implementation of the k-nearest-neighbors Algorithm [1]. + + This algorithm predicts labels for data vectors by using a labeled training dataset as reference. The input vector + to be predicted is compared to the training vectors by calculating the Euclidean distance to each of them. A + majority vote among the labels of the k nearest, i.e. closest, training vectors determines the + predicted class. + + Parameters + ---------- + n_neighbors : int, optional, default: 5 + Number of neighbours to consider when choosing the label. + effective_metric_ : Callable, optional + The distance function used to identify the nearest neighbors, defaults to the Euclidean distance. + + References + ---------- + [1] T. Cover and P. Hart, "Nearest Neighbor Pattern Classification," in IEEE Transactions on Information Theory, + vol. 13, no. 1, pp. 21-27, January 1967, doi: 10.1109/TIT.1967.1053964. + + ### Ancestors (in MRO) + + * heat.core.base.BaseEstimator + * heat.core.base.ClassificationMixin + + ### Static methods + + `one_hot_encoding(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` + : One-hot-encodes the passed vector or single-column matrix. + + Parameters + ---------- + x : DNDarray + The data to be encoded. + + ### Methods + + `fit(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray)` + : Fit the k-nearest neighbors classifier from the training dataset. + + Parameters + ---------- + x : DNDarray + Labeled training vectors used for comparison in predictions, Shape=(n_samples, n_features). + y : DNDarray + Corresponding labels for the training feature vectors. Must have the same number of samples as ``x``. + Shape=(n_samples) if integral labels or Shape=(n_samples, n_classes) if one-hot-encoded. + + Raises + ------ + TypeError + If ``x`` or ``y`` are not DNDarrays. + ValueError + If ``x`` and ``y`` shapes mismatch or are not two-dimensional matrices. + + Examples + -------- + >>> samples = ht.rand(10, 3) + >>> labels = ht.randint(0, 2, (10,)) + >>> knn = KNeighborsClassifier(n_neighbors=1) + >>> knn.fit(samples, labels) + + `predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` + : Predict the class labels for the provided data. + + Parameters + ---------- + x : DNDarray + The test samples.
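+ + Examples + -------- + The following doctest is an illustrative sketch (random data and hypothetical label values, not captured from a Heat run): + + >>> samples = ht.rand(10, 3) + >>> labels = ht.randint(0, 2, (10,)) + >>> knn = KNeighborsClassifier(n_neighbors=1) + >>> knn.fit(samples, labels) + >>> predictions = knn.predict(ht.rand(3, 3))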
diff --git a/doc/api/heat/classification/tests/index.md b/doc/api/heat/classification/tests/index.md new file mode 100644 index 0000000000..a6b2cd2802 --- /dev/null +++ b/doc/api/heat/classification/tests/index.md @@ -0,0 +1,6 @@ +Module heat.classification.tests +================================ + +Sub-modules +----------- +* heat.classification.tests.test_knn diff --git a/doc/api/heat/classification/tests/test_knn.md b/doc/api/heat/classification/tests/test_knn.md new file mode 100644 index 0000000000..dd88e53920 --- /dev/null +++ b/doc/api/heat/classification/tests/test_knn.md @@ -0,0 +1,63 @@ +Module heat.classification.tests.test_knn +========================================= + +Classes +------- + +`TestKNN(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_exception(self)` + : + + `test_fit_one_hot(self)` + : + + `test_split_none(self)` + : + + `test_split_zero(self)` + : + + `test_utility(self)` + : diff --git a/doc/api/heat/cli.md b/doc/api/heat/cli.md new file mode 100644 index 0000000000..b9e14115f3 --- /dev/null +++ b/doc/api/heat/cli.md @@ -0,0 +1,12 @@ +Module heat.cli +=============== +Heat command line interface module. + +Functions +--------- + +`cli() ‑> None` +: Command line interface entrypoint. + +`plaform_info()` +: Print the current software stack being used by heat, including available devices. 
diff --git a/doc/api/heat/cluster/batchparallelclustering.md b/doc/api/heat/cluster/batchparallelclustering.md new file mode 100644 index 0000000000..233de3c0b6 --- /dev/null +++ b/doc/api/heat/cluster/batchparallelclustering.md @@ -0,0 +1,82 @@ +Module heat.cluster.batchparallelclustering +=========================================== +Module implementing some clustering algorithms that work in parallel on batches of data. + +Classes +------- + +`BatchParallelKMeans(n_clusters: int = 8, init: str = 'k-means++', max_iter: int = 300, tol: float = 0.0001, random_state: int = None, n_procs_to_merge: int = None)` +: Batch-parallel K-Means clustering algorithm from Ref. [1]. + The input must be a ``DNDarray`` of shape `(n_samples, n_features)`, with split=0 (i.e. split along the sample axis). + This method performs K-Means clustering on each batch (i.e. on each process-local chunk) of data individually and in parallel. + After that, all centroids from the local K-Means are gathered and another instance of K-Means is performed on them in order to determine the final centroids. + To improve the scalability of this approach on a large number of processes, this procedure can be applied in a hierarchical manner using the parameter `n_procs_to_merge`. + + Attributes + ---------- + n_clusters : int + The number of clusters to form as well as the number of centroids to generate. + init : str + Method for initialization for local and global k-means: + - ‘k-means++’ : selects initial cluster centers for the clustering in a smart way to speed up convergence [2]. + - ‘random’: choose k observations (rows) at random from data for the initial centroids. (Not implemented yet) + max_iter : int + Maximum number of iterations of the local/global k-means algorithms. + tol : float + Relative tolerance with regards to inertia to declare convergence, both for local and global k-means. + random_state : int + Determines random number generation for centroid initialization. + n_procs_to_merge : int + Number of processes to merge after each iteration of the local k-means. If None, all processes are merged after each iteration. + + + References + ---------- + [1] Rasim M. Alguliyev, Ramiz M. Aliguliyev, Lyudmila V. Sukhostat, Parallel batch k-means for Big data clustering, Computers & Industrial Engineering, Volume 152 (2021). https://doi.org/10.1016/j.cie.2020.107023. + + ### Ancestors (in MRO) + + * heat.cluster.batchparallelclustering._BatchParallelKCluster + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator + +`BatchParallelKMedians(n_clusters: int = 8, init: str = 'k-medians++', max_iter: int = 300, tol: float = 0.0001, random_state: int = None, n_procs_to_merge: int = None)` +: Batch-parallel K-Medians clustering algorithm, in analogy to the K-Means algorithm from Ref. [1]. + This requires data to be given as DNDarray of shape (n_samples, n_features) with split=0 (i.e. split along the sample axis). + The idea of the method is to perform the classical K-Medians on each batch of data (i.e. on each process-local chunk of data) individually and in parallel. + After that, all centroids from the local K-Medians are gathered and another instance of K-Medians is performed on them in order to determine the final centroids. + To improve the scalability of this approach on a large number of processes, this procedure can be applied in a hierarchical manner using the parameter n_procs_to_merge.
+ + Attributes + ---------- + n_clusters : int + The number of clusters to form as well as the number of centroids to generate. + init : str + Method for initialization for local and global k-medians: + - ‘k-medians++’ : selects initial cluster centers for the clustering in a smart way to speed up convergence [2]. + - ‘random’: choose k observations (rows) at random from data for the initial centroids. (Not implemented yet) + max_iter : int + Maximum number of iterations of the local/global k-medians algorithms. + tol : float + Relative tolerance with regards to inertia to declare convergence, both for local and global k-medians. + random_state : int + Determines random number generation for centroid initialization. + n_procs_to_merge : int + Number of processes to merge after each iteration of the local k-medians. If None, all processes are merged after each iteration. + + + References + ---------- + [1] Rasim M. Alguliyev, Ramiz M. Aliguliyev, Lyudmila V. Sukhostat, Parallel batch k-means for Big data clustering, Computers & Industrial Engineering, Volume 152 (2021). https://doi.org/10.1016/j.cie.2020.107023. + + ### Ancestors (in MRO) + + * heat.cluster.batchparallelclustering._BatchParallelKCluster + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator diff --git a/doc/api/heat/cluster/index.md b/doc/api/heat/cluster/index.md new file mode 100644 index 0000000000..4f26f04966 --- /dev/null +++ b/doc/api/heat/cluster/index.md @@ -0,0 +1,12 @@ +Module heat.cluster +=================== +Add the clustering functions to the ht.cluster namespace + +Sub-modules +----------- +* heat.cluster.batchparallelclustering +* heat.cluster.kmeans +* heat.cluster.kmedians +* heat.cluster.kmedoids +* heat.cluster.spectral +* heat.cluster.tests diff --git a/doc/api/heat/cluster/kmeans.md b/doc/api/heat/cluster/kmeans.md new file mode 100644 index 0000000000..3d1f1390c6 --- /dev/null +++ b/doc/api/heat/cluster/kmeans.md @@ -0,0 +1,69 @@ +Module heat.cluster.kmeans +========================== +Module implementing the k-means algorithm + +Classes +------- + +`KMeans(n_clusters: int = 8, init: str | heat.core.dndarray.DNDarray = 'random', max_iter: int = 300, tol: float = 0.0001, random_state: int | None = None)` +: K-Means clustering algorithm. An implementation of Lloyd's algorithm [1]. + + Attributes + ---------- + n_clusters : int + The number of clusters to form as well as the number of centroids to generate. + init : str or DNDarray + Method for initialization: + + - ‘k-means++’ : selects initial cluster centers for the clustering in a smart way to speed up convergence [2]. + - ‘random’: choose k observations (rows) at random from data for the initial centroids. + - 'batchparallel': initialize by using the batch parallel algorithm (see BatchParallelKMeans for more information). + - DNDarray: it should be of shape (n_clusters, n_features) and gives the initial centers. + max_iter : int + Maximum number of iterations of the k-means algorithm for a single run. + tol : float + Relative tolerance with regards to inertia to declare convergence. + random_state : int + Determines random number generation for centroid initialization. + + Notes + ----- + The average complexity is given by :math:`O(k \cdot n \cdot T)`, where :math:`k` is the number of clusters, :math:`n` the number of samples, and :math:`T` the number of iterations. + In practice, the k-means algorithm is very fast, but it may fall into local minima. That is why it can be useful + to restart it several times.
If the algorithm stops before fully converging (because of ``tol`` or ``max_iter``), + ``labels_`` and ``cluster_centers_`` will not be consistent, i.e. the ``cluster_centers_`` will not be the means of the + points in each cluster. Also, the estimator will reassign ``labels_`` after the last iteration to make ``labels_`` + consistent with predict on the training set. + + References + ---------- + [1] Lloyd, Stuart P., "Least squares quantization in PCM", IEEE Transactions on Information Theory, 28 (2), pp. + 129–137, 1982. + + [2] Arthur, D., Vassilvitskii, S., "k-means++: The Advantages of Careful Seeding", Proceedings of the Eighteenth + Annual ACM-SIAM Symposium on Discrete Algorithms, Society for Industrial and Applied Mathematics + Philadelphia, PA, USA. pp. 1027–1035, 2007. + + ### Ancestors (in MRO) + + * heat.cluster._kcluster._KCluster + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator + + ### Methods + + `fit(self, x: heat.core.dndarray.DNDarray, oversampling: float = 2, iter_multiplier: float = 1) ‑> ~self` + : Computes the centroids of a k-means clustering. Reduce the values of the parameters 'oversampling' + and 'iter_multiplier' to speed up the computation, if necessary. However, if the values are too low, the + initialization of cluster centers might fail and raise a corresponding ValueError. + + Parameters + ---------- + x : DNDarray + Training instances to cluster. Shape = (n_samples, n_features) + + oversampling : float + oversampling factor used for the k-means|| initialization of centroids + + iter_multiplier : float + factor that increases the number of iterations used in the initialization of centroids diff --git a/doc/api/heat/cluster/kmedians.md b/doc/api/heat/cluster/kmedians.md new file mode 100644 index 0000000000..9b16da831a --- /dev/null +++ b/doc/api/heat/cluster/kmedians.md @@ -0,0 +1,54 @@ +Module heat.cluster.kmedians +============================ +Module implementing the k-medians algorithm + +Classes +------- + +`KMedians(n_clusters: int = 8, init: str | heat.core.dndarray.DNDarray = 'random', max_iter: int = 300, tol: float = 0.0001, random_state: int = None)` +: K-Medians clustering algorithm [1]. + Uses the Manhattan (City-block, :math:`L_1`) metric for distance calculations + + Parameters + ---------- + n_clusters : int, optional, default: 8 + The number of clusters to form as well as the number of centroids to generate. + init : str or DNDarray, default: ‘random’ + Method for initialization: + + - ‘k-medians++’ : selects initial cluster centers for the clustering in a smart way to speed up convergence [2]. + - ‘random’: choose k observations (rows) at random from data for the initial centroids. + - 'batchparallel': initialize by using the batch parallel algorithm (see BatchParallelKMedians for more information). + - DNDarray: gives the initial centers, should be of Shape = (n_clusters, n_features) + max_iter : int, default: 300 + Maximum number of iterations of the k-means algorithm for a single run. + tol : float, default: 1e-4 + Relative tolerance with regards to inertia to declare convergence. + random_state : int + Determines random number generation for centroid initialization. + + References + ---------- + [1] Hakimi, S., and O. Kariv. "An algorithmic approach to network location problems II: The p-medians." SIAM Journal on Applied Mathematics 37.3 (1979): 539-560.
+ + ### Ancestors (in MRO) + + * heat.cluster._kcluster._KCluster + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator + + ### Methods + + `fit(self, x: heat.core.dndarray.DNDarray, oversampling: float = 2, iter_multiplier: float = 1)` + : Computes the centroids of a k-medians clustering. + + Parameters + ---------- + x : DNDarray + Training instances to cluster. Shape = (n_samples, n_features) + + oversampling : float + oversampling factor used in the k-means|| initialization of centroids + + iter_multiplier : float + factor that increases the number of iterations used in the initialization of centroids diff --git a/doc/api/heat/cluster/kmedoids.md b/doc/api/heat/cluster/kmedoids.md new file mode 100644 index 0000000000..c7255be12f --- /dev/null +++ b/doc/api/heat/cluster/kmedoids.md @@ -0,0 +1,51 @@ +Module heat.cluster.kmedoids +============================ +Module implementing the k-medoids algorithm + +Classes +------- + +`KMedoids(n_clusters: int = 8, init: str | heat.core.dndarray.DNDarray = 'random', max_iter: int = 300, random_state: int = None)` +: K-Medoids with the Manhattan distance as fixed metric, calculating the median of the assigned cluster points as the new cluster center + and snapping the centroid to the nearest data point afterwards. + This is not the original implementation of k-medoids using PAM as originally proposed in [1]. + + Parameters + ---------- + n_clusters : int, optional, default: 8 + The number of clusters to form as well as the number of centroids to generate. + init : str or DNDarray, default: ‘random’ + Method for initialization: + + - ‘k-medoids++’ : selects initial cluster centers for the clustering in a smart way to speed up convergence [2]. + - ‘random’: choose k observations (rows) at random from data for the initial centroids. + - DNDarray: gives the initial centers, should be of Shape = (n_clusters, n_features) + max_iter : int, default: 300 + Maximum number of iterations of the algorithm for a single run. + random_state : int + Determines random number generation for centroid initialization. + + References + ---------- + [1] Kaufman, L. and Rousseeuw, P.J. (1987), Clustering by means of Medoids, in Statistical Data Analysis Based on the L1 Norm and Related Methods, edited by Y. Dodge, North-Holland, 405-416. + + ### Ancestors (in MRO) + + * heat.cluster._kcluster._KCluster + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator + + ### Methods + + `fit(self, x: heat.core.dndarray.DNDarray, oversampling: float = 2, iter_multiplier: float = 1)` + : Computes the centroids of a k-medoids clustering. + + Parameters + ---------- + x : DNDarray + Training instances to cluster.
Shape = (n_samples, n_features) + oversampling : float + oversampling factor used in the k-means|| initialization of centroids + + iter_multiplier : float + factor that increases the number of iterations used in the initialization of centroids diff --git a/doc/api/heat/cluster/spectral.md b/doc/api/heat/cluster/spectral.md new file mode 100644 index 0000000000..8f33331050 --- /dev/null +++ b/doc/api/heat/cluster/spectral.md @@ -0,0 +1,77 @@ +Module heat.cluster.spectral +============================ +Module for Spectral Clustering, a graph-based machine learning algorithm + +Classes +------- + +`Spectral(n_clusters: int = None, gamma: float = 1.0, metric: str = 'rbf', laplacian: str = 'fully_connected', threshold: float = 1.0, boundary: str = 'upper', n_lanczos: int = 300, assign_labels: str = 'kmeans', **params)` +: Spectral clustering + + Attributes + ---------- + n_clusters : int + Number of clusters to fit + gamma : float + Kernel coefficient sigma for 'rbf', ignored for metric='euclidean' + metric : string + How to construct the similarity matrix. + + - 'rbf' : construct the similarity matrix using a radial basis function (RBF) kernel. + - 'euclidean' : construct the similarity matrix using only the Euclidean distance. + laplacian : str + How to calculate the graph Laplacian (affinity) + Currently supported : 'fully_connected', 'eNeighbour' + threshold : float + Threshold for affinity matrix if laplacian='eNeighbour' + Ignored for laplacian='fully_connected' + boundary : str + How to interpret threshold: 'upper', 'lower' + Ignored for laplacian='fully_connected' + n_lanczos : int + Number of Lanczos iterations for the eigenvalue decomposition + assign_labels: str + The strategy to use to assign labels in the embedding space. + **params: dict + Parameter dictionary for the assign_labels estimator + + ### Ancestors (in MRO) + + * heat.core.base.ClusteringMixin + * heat.core.base.BaseEstimator + + ### Instance variables + + `labels_: heat.core.dndarray.DNDarray` + : Returns labels of each point. + + ### Methods + + `fit(self, x: heat.core.dndarray.DNDarray)` + : Clusters dataset X via spectral embedding. + Computes the low-dim representation by calculating the eigenspectrum (eigenvalues and eigenvectors) of the graph + Laplacian from the similarity matrix and fits the eigenvectors that correspond to the k lowest eigenvalues with + a separate clustering algorithm (currently only kmeans is supported). Similarity metrics for adjacency + calculations are supported via spatial.distance. The eigenvalues and eigenvectors are computed by reducing the + Laplacian via Lanczos iterations and using the torch eigenvalue solver on this smaller matrix. If other + eigenvalue decomposition methods become supported, this will be expanded. + + Parameters + ---------- + x : DNDarray + Training instances to cluster. Shape = (n_samples, n_features) + + `predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` + : Return the label each sample in X belongs to. + X is transformed to the low-dim representation by calculating the eigenspectrum (eigenvalues and eigenvectors) of + the graph Laplacian from the similarity matrix. Inference of labels is done by extraction of the closest + centroid of the n_clusters eigenvectors from the previously fitted clustering algorithm (kmeans). + + Parameters + ---------- + x : DNDarray + New data to predict. Shape = (n_samples, n_features) + + Warning + ------- + Caution: Calculation of the low-dim representation requires some time!
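+ + Examples + -------- + An end-to-end sketch; the data shapes and parameter values below are illustrative assumptions, not captured from a Heat run: + + >>> import heat as ht + >>> data = ht.rand(100, 3, split=0) + >>> spectral = ht.cluster.Spectral(n_clusters=2, metric='rbf', n_lanczos=20) + >>> spectral.fit(data) + >>> labels = spectral.predict(data)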
diff --git a/doc/api/heat/cluster/tests/index.md b/doc/api/heat/cluster/tests/index.md new file mode 100644 index 0000000000..d0971c033a --- /dev/null +++ b/doc/api/heat/cluster/tests/index.md @@ -0,0 +1,10 @@ +Module heat.cluster.tests +========================= + +Sub-modules +----------- +* heat.cluster.tests.test_batchparallelclustering +* heat.cluster.tests.test_kmeans +* heat.cluster.tests.test_kmedians +* heat.cluster.tests.test_kmedoids +* heat.cluster.tests.test_spectral diff --git a/doc/api/heat/cluster/tests/test_batchparallelclustering.md b/doc/api/heat/cluster/tests/test_batchparallelclustering.md new file mode 100644 index 0000000000..0924c0b6dd --- /dev/null +++ b/doc/api/heat/cluster/tests/test_batchparallelclustering.md @@ -0,0 +1,112 @@ +Module heat.cluster.tests.test_batchparallelclustering +====================================================== + +Classes +------- + +`TestAuxiliaryFunctions(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_BatchParallelKClustering(self)` + : + + `test_initialize_plus_plus(self)` + : + + `test_kmex(self)` + : + +`TestBatchParallelKCluster(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. 
Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_clusterer(self)` + : + + `test_get_and_set_params(self)` + : + + `test_if_errors_thrown(self)` + : + + `test_spherical_clusters(self)` + : diff --git a/doc/api/heat/cluster/tests/test_kmeans.md b/doc/api/heat/cluster/tests/test_kmeans.md new file mode 100644 index 0000000000..22af444f76 --- /dev/null +++ b/doc/api/heat/cluster/tests/test_kmeans.md @@ -0,0 +1,63 @@ +Module heat.cluster.tests.test_kmeans +===================================== + +Classes +------- + +`TestKMeans(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. 
+ + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_clusterer(self)` + : + + `test_exceptions(self)` + : + + `test_fit_iris_unsplit(self)` + : + + `test_get_and_set_params(self)` + : + + `test_spherical_clusters(self)` + : diff --git a/doc/api/heat/cluster/tests/test_kmedians.md b/doc/api/heat/cluster/tests/test_kmedians.md new file mode 100644 index 0000000000..ff92eac757 --- /dev/null +++ b/doc/api/heat/cluster/tests/test_kmedians.md @@ -0,0 +1,63 @@ +Module heat.cluster.tests.test_kmedians +======================================= + +Classes +------- + +`TestKMedians(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_clusterer(self)` + : + + `test_exceptions(self)` + : + + `test_fit_iris_unsplit(self)` + : + + `test_get_and_set_params(self)` + : + + `test_spherical_clusters(self)` + : diff --git a/doc/api/heat/cluster/tests/test_kmedoids.md b/doc/api/heat/cluster/tests/test_kmedoids.md new file mode 100644 index 0000000000..6b215b2a76 --- /dev/null +++ b/doc/api/heat/cluster/tests/test_kmedoids.md @@ -0,0 +1,63 @@ +Module heat.cluster.tests.test_kmedoids +======================================= + +Classes +------- + +`TestKMeans(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_clusterer(self)` + : + + `test_exceptions(self)` + : + + `test_fit_iris_unsplit(self)` + : + + `test_get_and_set_params(self)` + : + + `test_spherical_clusters(self)` + : diff --git a/doc/api/heat/cluster/tests/test_spectral.md b/doc/api/heat/cluster/tests/test_spectral.md new file mode 100644 index 0000000000..ca54a11beb --- /dev/null +++ b/doc/api/heat/cluster/tests/test_spectral.md @@ -0,0 +1,57 @@ +Module heat.cluster.tests.test_spectral +======================================= + +Classes +------- + +`TestSpectral(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. 
+ * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_clusterer(self)` + : + + `test_fit_iris(self)` + : + + `test_get_and_set_params(self)` + : diff --git a/doc/api/heat/core/arithmetics.md b/doc/api/heat/core/arithmetics.md new file mode 100644 index 0000000000..cef16e7ad4 --- /dev/null +++ b/doc/api/heat/core/arithmetics.md @@ -0,0 +1,1209 @@ +Module heat.core.arithmetics +============================ +Arithmetic functions for DNDarrays + +Functions +--------- + +`add(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Element-wise addition of values from two operands, commutative. + Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose + elements are to be added as arguments and returns a ``DNDarray`` containing the results of + element-wise addition of ``t1`` and ``t2``. + + Parameters + ---------- + t1: DNDarray or scalar + The first operand involved in the addition + t2: DNDarray or scalar + The second operand involved in the addition + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` array + will be set to the added value. Elsewhere, the `out` array will retain its original value. If + an uninitialized `out` array is created via the default `out=None`, locations within it where the + condition is False will remain uninitialized. If distributed, the split axis (after broadcasting + if required) must match that of the `out` array. + + Examples + -------- + >>> import heat as ht + >>> ht.add(1.0, 4.0) + DNDarray(5., dtype=ht.float32, device=cpu:0, split=None) + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> ht.add(T1, T2) + DNDarray([[3., 4.], + [5., 6.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> ht.add(T1, s) + DNDarray([[3., 4.], + [5., 6.]], dtype=ht.float32, device=cpu:0, split=None) + +`bitwise_and(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Compute the bitwise AND of two :class:`~heat.core.dndarray.DNDarray` ``t1`` and ``t2`` + element-wise. Only integer and boolean types are handled. If ``t1.shape!=t2.shape``, they must + be broadcastable to a common shape (which becomes the shape of the output) + + Parameters + ---------- + t1: DNDarray or scalar + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs.
At locations where the condition is True, the `out` + array will be set to the result of the operation. Elsewhere, the `out` array will retain its original + value. If an uninitialized `out` array is created via the default `out=None`, locations + within it where the condition is False will remain uninitialized. If distributed, the split + axis (after broadcasting if required) must match that of the `out` array. + + Examples + -------- + >>> ht.bitwise_and(13, 17) + DNDarray(1, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_and(14, 13) + DNDarray(12, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_and(ht.array([14, 3]), 13) + DNDarray([12, 1], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_and(ht.array([11, 7]), ht.array([4, 25])) + DNDarray([0, 1], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_and(ht.array([2, 5, 255]), ht.array([3, 14, 16])) + DNDarray([ 2, 4, 16], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_and(ht.array([True, True]), ht.array([False, True])) + DNDarray([False, True], dtype=ht.bool, device=cpu:0, split=None) + +`bitwise_not(a: DNDarray, /, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray` +: Computes the bitwise NOT of the given input :class:`~heat.core.dndarray.DNDarray`. The input + array must be of integral or Boolean types. For boolean arrays, it computes the logical NOT. + ``bitwise_not`` is an alias for ``invert``. + + Parameters + ---------- + a: DNDarray + The input array to invert. Must be of integral or Boolean types + out : DNDarray, optional + Alternative output array in which to place the result. It must have the same shape as the + expected output. The dtype of the output will be the one of the input array, unless it is + logical, in which case it will be cast to int8. If not provided or None, a freshly + allocated array is returned. + + Examples + -------- + >>> ht.invert(ht.array([13], dtype=ht.uint8)) + DNDarray([242], dtype=ht.uint8, device=cpu:0, split=None) + >>> ht.bitwise_not(ht.array([-1, -2, 3], dtype=ht.int8)) + DNDarray([ 0, 1, -4], dtype=ht.int8, device=cpu:0, split=None) + +`bitwise_or(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Compute the bitwise OR of two :class:`~heat.core.dndarray.DNDarray` ``t1`` and ``t2`` + element-wise. Only integer and boolean types are handled. If ``t1.shape!=t2.shape``, they must + be broadcastable to a common shape (which becomes the shape of the output) + + Parameters + ---------- + t1: DNDarray or scalar + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` + array will be set to the result of the operation. Elsewhere, the `out` array will retain its original + value. If an uninitialized `out` array is created via the default `out=None`, locations + within it where the condition is False will remain uninitialized. If distributed, the split + axis (after broadcasting if required) must match that of the `out` array.
+ + Examples + -------- + >>> ht.bitwise_or(13, 16) + DNDarray(29, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_or(32, 2) + DNDarray(34, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_or(ht.array([33, 4]), 1) + DNDarray([33, 5], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_or(ht.array([33, 4]), ht.array([1, 2])) + DNDarray([33, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_or(ht.array([2, 5, 255]), ht.array([4, 4, 4])) + DNDarray([ 6, 5, 255], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_or(ht.array([2, 5, 255, 2147483647], dtype=ht.int32), + ht.array([4, 4, 4, 2147483647], dtype=ht.int32)) + DNDarray([ 6, 5, 255, 2147483647], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.bitwise_or(ht.array([True, True]), ht.array([False, True])) + DNDarray([True, True], dtype=ht.bool, device=cpu:0, split=None) + +`bitwise_xor(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Compute the bitwise XOR of two arrays ``t1`` and ``t2`` element-wise. + Only integer and boolean types are handled. If ``t1.shape!=t2.shape``, they must be + broadcastable to a common shape (which becomes the shape of the output). + + Parameters + ---------- + t1: DNDarray or scalar + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` + array will be set to the result of the operation. Elsewhere, the `out` array will retain its original + value. If an uninitialized `out` array is created via the default `out=None`, locations + within it where the condition is False will remain uninitialized. If distributed, the split + axis (after broadcasting if required) must match that of the `out` array. + + Examples + -------- + >>> ht.bitwise_xor(13, 17) + DNDarray(28, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_xor(31, 5) + DNDarray(26, dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_xor(ht.array([31, 3]), 5) + DNDarray([26, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_xor(ht.array([31, 3]), ht.array([5, 6])) + DNDarray([26, 5], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.bitwise_xor(ht.array([True, True]), ht.array([False, True])) + DNDarray([ True, False], dtype=ht.bool, device=cpu:0, split=None) + +`copysign(a: DNDarray, b: Union[DNDarray, float, int], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Create a new floating-point tensor with the magnitude of 'a' and the sign of 'b', element-wise + + Parameters + ---------- + a: DNDarray + The input array + b: DNDarray or Number + value(s) whose signbit(s) are applied to the magnitudes in 'a' + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` + array will be set to the result of the operation. Elsewhere, the `out` array will retain its original + value.
If an uninitialized `out` array is created via the default `out=None`, locations + within it where the condition is False will remain uninitialized. If distributed, the split + axis (after broadcasting if required) must match that of the `out` array. + + Examples + -------- + >>> ht.copysign(ht.array([3, 2, -8, -2, 4]), 1) + DNDarray([3, 2, 8, 2, 4], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.copysign(ht.array([3.0, 2.0, -8.0, -2.0, 4.0]), ht.array([1.0, -1.0, 1.0, -1.0, 1.0])) + DNDarray([ 3., -2., 8., -2., 4.], dtype=ht.float32, device=cpu:0, split=None) + +`cumprod(a: DNDarray, axis: int, dtype: datatype = None, out=None) ‑> heat.core.dndarray.DNDarray` +: Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : DNDarray + Input array. + axis : int + Axis along which the cumulative product is computed. + dtype : datatype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the datatype of ``a``, unless ``a`` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : DNDarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Examples + -------- + >>> a = ht.full((3, 3), 2) + >>> ht.cumprod(a, 0) + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + +`cumproduct(a: DNDarray, axis: int, dtype: datatype = None, out=None) ‑> heat.core.dndarray.DNDarray` +: Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : DNDarray + Input array. + axis : int + Axis along which the cumulative product is computed. + dtype : datatype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the datatype of ``a``, unless ``a`` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : DNDarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Examples + -------- + >>> a = ht.full((3, 3), 2) + >>> ht.cumprod(a, 0) + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + +`cumsum(a: DNDarray, axis: int, dtype: datatype = None, out=None) ‑> heat.core.dndarray.DNDarray` +: Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : DNDarray + Input array. + axis : int + Axis along which the cumulative sum is computed. + dtype : datatype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the datatype of ``a``, unless ``a`` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : DNDarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. 
+ + Examples + -------- + >>> a = ht.ones((3, 3)) + >>> ht.cumsum(a, 0) + DNDarray([[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]], dtype=ht.float32, device=cpu:0, split=None) + +`diff(a: DNDarray, n: int = 1, axis: int = -1, prepend: Union[int, float, DNDarray] = None, append: Union[int, float, DNDarray] = None) ‑> heat.core.dndarray.DNDarray` +: Calculate the n-th discrete difference along the given axis. + The first difference is given by ``out[i]=a[i+1]-a[i]`` along the given axis, higher differences + are calculated by using diff recursively. The shape of the output is the same as ``a`` except + along axis where the dimension is smaller by ``n``. The datatype of the output is the same as + the datatype of the difference between any two elements of ``a``. The split does not change. The + output array is balanced. + + Parameters + ---------- + a : DNDarray + Input array + n : int, optional + The number of times values are differenced. If zero, the input is returned as-is. + ``n=2`` is equivalent to ``diff(diff(a))`` + axis : int, optional + The axis along which the difference is taken, default is the last axis. + prepend : Union[int, float, DNDarray] + Value to prepend along axis prior to performing the difference. + Scalar values are expanded to arrays with length 1 in the direction of axis and + the shape of the input array along all other axes. Otherwise the dimension and + shape must match ``a`` except along axis. + append : Union[int, float, DNDarray] + Values to append along axis prior to performing the difference. + Scalar values are expanded to arrays with length 1 in the direction of axis and + the shape of the input array along all other axes. Otherwise the dimension and + shape must match ``a`` except along axis. + +`div(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Element-wise true division of values of operand ``t1`` by values of operand ``t2`` (i.e. ``t1/t2``). + Operation is not commutative. + + Parameters + ---------- + t1: DNDarray or scalar + The first operand whose values are divided. + t2: DNDarray or scalar + The second operand by whose values is divided. + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` array + will be set to the divided value. Elsewhere, the `out` array will retain its original value. If + an uninitialized `out` array is created via the default `out=None`, locations within it where the + condition is False will remain uninitialized. If distributed, the split axis (after broadcasting + if required) must match that of the `out` array.
+ + Examples + -------- + >>> ht.div(2.0, 2.0) + DNDarray(1., dtype=ht.float32, device=cpu:0, split=None) + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> ht.div(T1, T2) + DNDarray([[0.5000, 1.0000], + [1.5000, 2.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> ht.div(s, T1) + DNDarray([[2.0000, 1.0000], + [0.6667, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + +`divide(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray` +: Element-wise true division of values of operand ``t1`` by values of operand ``t2`` (i.e. ``t1/t2``). + Operation is not commutative. + + Parameters + ---------- + t1: DNDarray or scalar + The first operand whose values are divided. + t2: DNDarray or scalar + The second operand by whose values is divided. + out: DNDarray, optional + The output array. It must have a shape that the inputs broadcast to and matching split axis. + If not provided, a freshly allocated array is returned. + where: DNDarray, optional + Condition to broadcast over the inputs. At locations where the condition is True, the `out` array + will be set to the divided value. Elsewhere, the `out` array will retain its original value. If + an uninitialized `out` array is created via the default `out=None`, locations within it where the + condition is False will remain uninitialized. If distributed, the split axis (after broadcasting + if required) must match that of the `out` array. + + Examples + -------- + >>> ht.div(2.0, 2.0) + DNDarray(1., dtype=ht.float32, device=cpu:0, split=None) + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> ht.div(T1, T2) + DNDarray([[0.5000, 1.0000], + [1.5000, 2.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> ht.div(s, T1) + DNDarray([[2.0000, 1.0000], + [0.6667, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + +`divmod(t1: Union[DNDarray, float], t2: Union[DNDarray, float], out1: DNDarray = None, out2: DNDarray = None, /, out: Tuple[DNDarray, DNDarray] = (None, None), *, where: Union[bool, DNDarray] = True) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]` +: Element-wise division remainder and quotient from an integer division of values of operand + ``t1`` by values of operand ``t2`` (i.e. C Library function divmod). Result has the same sign as the + dividend ``t1``. Operation is not commutative. + + Parameters + ---------- + t1: DNDarray or scalar + The first operand whose values are divided (may be floats) + t2: DNDarray or scalar + The second operand by whose values is divided (may be floats) + out1: DNDarray, optional + The output array for the quotient. It must have a shape that the inputs broadcast to and + matching split axis. + If not provided, a freshly allocated array is returned. If provided, it must be of the same + shape as the expected output. Only one of out1 and out can be provided. + out2: DNDarray, optional + The output array for the remainder. It must have a shape that the inputs broadcast to and + matching split axis. + If not provided, a freshly allocated array is returned. If provided, it must be of the same + shape as the expected output. Only one of out2 and out can be provided. + out: tuple of two DNDarrays, optional + Tuple of two output arrays (quotient, remainder), respectively. Both must have a shape that + the inputs broadcast to and matching split axis.
+        If not provided, freshly allocated arrays are returned. If provided, they must be of the
+        same shape as the expected output. out1 and out2 cannot be used at the same time.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out1`
+        array will be set to the quotient value and the `out2` array will be set to the remainder
+        value. Elsewhere, the `out1` and `out2` arrays will retain their original values. If
+        uninitialized `out1` and `out2` arrays are created via the defaults `out1=None` and
+        `out2=None`, locations within them where the condition is False will remain uninitialized.
+        If distributed, the split axis (after broadcasting if required) must match that of the
+        `out1` and `out2` arrays.
+
+    Examples
+    --------
+    >>> ht.divmod(2.0, 2.0)
+    (DNDarray(1., dtype=ht.float32, device=cpu:0, split=None), DNDarray(0., dtype=ht.float32, device=cpu:0, split=None))
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.divmod(T1, T2)
+    (DNDarray([[0., 1.],
+               [1., 2.]], dtype=ht.float32, device=cpu:0, split=None), DNDarray([[1., 0.],
+               [1., 0.]], dtype=ht.float32, device=cpu:0, split=None))
+    >>> s = 2.0
+    >>> ht.divmod(s, T1)
+    (DNDarray([[2., 1.],
+               [0., 0.]], dtype=ht.float32, device=cpu:0, split=None), DNDarray([[0., 0.],
+               [2., 2.]], dtype=ht.float32, device=cpu:0, split=None))
+
+`floor_divide(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise floor division of value(s) of operand ``t1`` by value(s) of operand ``t2``
+    (i.e. ``t1//t2``), not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values are divided
+    t2: DNDarray or scalar
+        The second operand by whose values is divided
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the divided value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> T1 = ht.float32([[1.7, 2.0], [1.9, 4.2]])
+    >>> ht.floordiv(T1, 1)
+    DNDarray([[1., 2.],
+              [1., 4.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2 = ht.float32([1.5, 2.5])
+    >>> ht.floordiv(T1, T2)
+    DNDarray([[1., 0.],
+              [1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`floordiv(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise floor division of value(s) of operand ``t1`` by value(s) of operand ``t2``
+    (i.e. ``t1//t2``), not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values are divided
+    t2: DNDarray or scalar
+        The second operand by whose values is divided
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the divided value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> T1 = ht.float32([[1.7, 2.0], [1.9, 4.2]])
+    >>> ht.floordiv(T1, 1)
+    DNDarray([[1., 2.],
+              [1., 4.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2 = ht.float32([1.5, 2.5])
+    >>> ht.floordiv(T1, T2)
+    DNDarray([[1., 0.],
+              [1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`fmod(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise division remainder of values of operand ``t1`` by values of operand ``t2`` (i.e.
+    the C library function fmod).
+    The result has the same sign as the dividend ``t1``. Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values are divided (may be floats)
+    t2: DNDarray or scalar
+        The second operand by whose values is divided (may be floats)
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned. If provided, it must be of the same
+        shape as the expected output.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the divided value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.fmod(2.0, 2.0)
+    DNDarray(0., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.fmod(T1, T2)
+    DNDarray([[1., 0.],
+              [1., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = 2.0
+    >>> ht.fmod(s, T1)
+    DNDarray([[0., 0.],
+              [2., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`gcd(a: DNDarray, b: DNDarray, /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Returns the greatest common divisor of |a| and |b| element-wise.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The first input array, must be of integer type
+    b: DNDarray
+        The second input array, must be of integer type
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the result value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
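+
+    Examples
+    --------
+    The sign of the inputs is ignored; a minimal sketch (the example values here are
+    illustrative additions):
+
+    >>> ht.gcd(ht.array([-12, 18]), ht.array([8, -27]))
+    DNDarray([4, 9], dtype=ht.int64, device=cpu:0, split=None)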
+    >>> import heat as ht
+    >>> T1 = ht.int(ht.ones(3)) * 9
+    >>> T2 = ht.arange(3) + 1
+    >>> ht.gcd(T1, T2)
+    DNDarray([1, 1, 3], dtype=ht.int32, device=cpu:0, split=None)
+
+`hypot(t1: DNDarray, t2: DNDarray, /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Given the 'legs' of a right triangle, return its hypotenuse. Equivalent to
+    :math:`\sqrt{a^2 + b^2}`, element-wise.
+
+    Parameters
+    ----------
+    t1: DNDarray
+        The first input array
+    t2: DNDarray
+        The second input array
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the result value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> a = ht.array([2.0])
+    >>> b = ht.array([1.0, 3.0, 3.0])
+    >>> ht.hypot(a, b)
+    DNDarray([2.2361, 3.6056, 3.6056], dtype=ht.float32, device=cpu:0, split=None)
+
+`invert(a: DNDarray, /, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the bitwise NOT of the given input :class:`~heat.core.dndarray.DNDarray`. The input
+    array must be of integral or Boolean type. For Boolean arrays, it computes the logical NOT.
+    Bitwise_not is an alias for invert.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The input array to invert. Must be of integral or Boolean type
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output. The dtype of the output will be the one of the input array, unless it is
+        logical, in which case it will be cast to int8. If not provided or None, a
+        freshly-allocated array is returned.
+
+    Examples
+    --------
+    >>> ht.invert(ht.array([13], dtype=ht.uint8))
+    DNDarray([242], dtype=ht.uint8, device=cpu:0, split=None)
+    >>> ht.bitwise_not(ht.array([-1, -2, 3], dtype=ht.int8))
+    DNDarray([ 0,  1, -4], dtype=ht.int8, device=cpu:0, split=None)
+
+`lcm(a: DNDarray, b: DNDarray, /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Returns the lowest common multiple of |a| and |b| element-wise.
+
+    Parameters
+    ----------
+    a: DNDarray or scalar
+        The first input (array), must be of integer type
+    b: DNDarray or scalar
+        The second input (array), must be of integer type
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the result value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
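+
+    Examples
+    --------
+    As with :func:`gcd`, the sign of the inputs is ignored; a minimal sketch (the example
+    values here are illustrative additions):
+
+    >>> ht.lcm(ht.array([-4, 6]), ht.array([6, -9]))
+    DNDarray([12, 18], dtype=ht.int64, device=cpu:0, split=None)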
+    >>> a = ht.array([6, 12, 15])
+    >>> b = ht.array([3, 4, 5])
+    >>> ht.lcm(a, b)
+    DNDarray([ 6, 12, 15], dtype=ht.int64, device=cpu:0, split=None)
+    >>> s = 2
+    >>> ht.lcm(s, a)
+    DNDarray([ 6, 12, 30], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.lcm(b, s)
+    DNDarray([ 6,  4, 10], dtype=ht.int64, device=cpu:0, split=None)
+
+`left_shift(t1: DNDarray, t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Shift the bits of an integer to the left.
+
+    Parameters
+    ----------
+    t1: DNDarray
+        Input array
+    t2: DNDarray or float
+        Integer number of zero bits to add
+    out: DNDarray, optional
+        Output array for the result. Must have the same shape as the expected output. The dtype of
+        the output will be the one of the input array, unless it is logical, in which case it will
+        be cast to int8. If not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the shifted value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.left_shift(ht.array([1, 2, 3]), 1)
+    DNDarray([2, 4, 6], dtype=ht.int64, device=cpu:0, split=None)
+
+`mod(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise division remainder of values of operand ``t1`` by values of operand ``t2`` (i.e.
+    ``t1%t2``). The result has the same sign as the divisor ``t2``.
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values are divided
+    t2: DNDarray or scalar
+        The second operand by whose values is divided
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the divided value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.remainder(2, 2)
+    DNDarray(0, dtype=ht.int64, device=cpu:0, split=None)
+    >>> T1 = ht.int32([[1, 2], [3, 4]])
+    >>> T2 = ht.int32([[2, 2], [2, 2]])
+    >>> ht.remainder(T1, T2)
+    DNDarray([[1, 0],
+              [1, 0]], dtype=ht.int32, device=cpu:0, split=None)
+    >>> s = 2
+    >>> ht.remainder(s, T1)
+    DNDarray([[0, 0],
+              [2, 2]], dtype=ht.int32, device=cpu:0, split=None)
+
+`mul(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise multiplication (NOT matrix multiplication) of values from two operands, commutative.
+    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
+    multiplied as arguments.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand involved in the multiplication
+    t2: DNDarray or scalar
+        The second operand involved in the multiplication
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis. If not provided or
+        None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out` array
+        will be set to the multiplied value. Elsewhere, the `out` array will retain its original value. If
+        an uninitialized `out` array is created via the default `out=None`, locations within it where the
+        condition is False will remain uninitialized. If distributed, the split axis (after broadcasting
+        if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.mul(2.0, 4.0)
+    DNDarray(8., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> s = 3.0
+    >>> ht.mul(T1, s)
+    DNDarray([[ 3.,  6.],
+              [ 9., 12.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.mul(T1, T2)
+    DNDarray([[2., 4.],
+              [6., 8.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`multiply(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise multiplication (NOT matrix multiplication) of values from two operands, commutative.
+    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
+    multiplied as arguments.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand involved in the multiplication
+    t2: DNDarray or scalar
+        The second operand involved in the multiplication
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis. If not provided or
+        None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out` array
+        will be set to the multiplied value. Elsewhere, the `out` array will retain its original value. If
+        an uninitialized `out` array is created via the default `out=None`, locations within it where the
+        condition is False will remain uninitialized. If distributed, the split axis (after broadcasting
+        if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.mul(2.0, 4.0)
+    DNDarray(8., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> s = 3.0
+    >>> ht.mul(T1, s)
+    DNDarray([[ 3.,  6.],
+              [ 9., 12.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.mul(T1, T2)
+    DNDarray([[2., 4.],
+              [6., 8.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`nan_to_num(a: DNDarray, nan: float = 0.0, posinf: float = None, neginf: float = None, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Replaces NaNs, positive infinity values, and negative infinity values in the input ``a`` with the
+    values specified by nan, posinf, and neginf, respectively.
By default, NaNs are replaced with
+    zero, positive infinity is replaced with the greatest finite value representable by the input's
+    dtype, and negative infinity is replaced with the least finite value representable by the
+    input's dtype.
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    nan : float, optional
+        Value to be used to replace NaNs. Default value is 0.0.
+    posinf : float, optional
+        Value to replace positive infinity values with. If None, positive infinity values are
+        replaced with the greatest finite value of the input's dtype. Default value is None.
+    neginf : float, optional
+        Value to replace negative infinity values with. If None, negative infinity values are
+        replaced with the least finite value of the input's dtype. Default value is None.
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output, but the datatype of the output values will be cast if necessary.
+
+    Examples
+    --------
+    >>> x = ht.array([float("nan"), float("inf"), -float("inf")])
+    >>> ht.nan_to_num(x)
+    DNDarray([ 0.0000e+00,  3.4028e+38, -3.4028e+38], dtype=ht.float32, device=cpu:0, split=None)
+
+`nanprod(a: DNDarray, axis: Union[int, Tuple[int, ...]] = None, out: DNDarray = None, keepdims: bool = None) ‑> heat.core.dndarray.DNDarray`
+: Return the product of array elements over a given axis, treating Not a Numbers (NaNs) as one.
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis or axes along which a product is performed. The default, ``axis=None``, will calculate
+        the product of all the elements in the input array. If axis is negative it counts from the
+        last to the first axis.
+        If axis is a tuple of ints, a product is performed on all of the axes specified in the tuple
+        instead of a single axis or all the axes as before.
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output, but the datatype of the output values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions
+        with size one. With this option, the result will broadcast correctly against the input array.
+
+    Examples
+    --------
+    >>> ht.nanprod(ht.array([4.0, ht.nan]))
+    DNDarray(4., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.nanprod(ht.array([[1.0, ht.nan], [3.0, 4.0]]))
+    DNDarray(12., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.nanprod(ht.array([[1.0, ht.nan], [ht.nan, 4.0]]), axis=1)
+    DNDarray([ 1.,  4.], dtype=ht.float32, device=cpu:0, split=None)
+
+`nansum(a: DNDarray, axis: Union[int, Tuple[int, ...]] = None, out: DNDarray = None, keepdims: bool = None) ‑> heat.core.dndarray.DNDarray`
+: Sum of array elements over a given axis, treating Not a Numbers (NaNs) as zero. Returns an array
+    with the same shape as ``a`` with the specified axis removed, e.g.
+    ``a.shape=(1, 2, 3)`` => ``ht.nansum(a, axis=1).shape=(1, 3)``
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis along which a sum is performed. The default, ``axis=None``, will sum all of the
+        elements of the input array. If ``axis`` is negative it counts from the last to the first
+        axis. If ``axis`` is a tuple of ints, a sum is performed on all of the axes specified in the
+        tuple instead of a single axis or all the axes as before.
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output, but the datatype of the output values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions
+        with size one. With this option, the result will broadcast correctly against the input
+        array.
+
+    Examples
+    --------
+    >>> ht.nansum(ht.ones(2))
+    DNDarray(2., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.nansum(ht.ones((3, 3)))
+    DNDarray(9., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.nansum(ht.ones((3, 3)).astype(ht.int))
+    DNDarray(9, dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.nansum(ht.ones((3, 2, 1)), axis=-3)
+    DNDarray([[3.],
+              [3.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`neg(a: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Element-wise negation of `a`.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The input array.
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to.
+
+    Examples
+    --------
+    >>> ht.neg(ht.array([-1, 1]))
+    DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None)
+    >>> -ht.array([-1.0, 1.0])
+    DNDarray([ 1., -1.], dtype=ht.float32, device=cpu:0, split=None)
+
+`negative(a: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Element-wise negation of `a`.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The input array.
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to.
+
+    Examples
+    --------
+    >>> ht.neg(ht.array([-1, 1]))
+    DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None)
+    >>> -ht.array([-1.0, 1.0])
+    DNDarray([ 1., -1.], dtype=ht.float32, device=cpu:0, split=None)
+
+`pos(a: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Element-wise positive of `a`.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The input array.
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to.
+
+    Notes
+    -----
+    Equivalent to ``a.copy()``.
+
+    Examples
+    --------
+    >>> ht.pos(ht.array([-1, 1]))
+    DNDarray([-1,  1], dtype=ht.int64, device=cpu:0, split=None)
+    >>> +ht.array([-1.0, 1.0])
+    DNDarray([-1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
+
+`positive(a: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Element-wise positive of `a`.
+
+    Parameters
+    ----------
+    a: DNDarray
+        The input array.
+    out: DNDarray, optional
+        The output array. It must have a shape that the inputs broadcast to.
+
+    Notes
+    -----
+    Equivalent to ``a.copy()``.
+
+    Examples
+    --------
+    >>> ht.pos(ht.array([-1, 1]))
+    DNDarray([-1,  1], dtype=ht.int64, device=cpu:0, split=None)
+    >>> +ht.array([-1.0, 1.0])
+    DNDarray([-1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
+
+`pow(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise power function of values of operand ``t1`` to the power of values of operand
+    ``t2`` (i.e. ``t1**t2``).
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values represent the base
+    t2: DNDarray or scalar
+        The second operand whose values represent the exponent
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis.
If
+        not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the exponentiated value. Elsewhere, the `out` array will retain its
+        original value. If an uninitialized `out` array is created via the default `out=None`,
+        locations within it where the condition is False will remain uninitialized. If distributed,
+        the split axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.pow(3.0, 2.0)
+    DNDarray(9., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[3, 3], [2, 2]])
+    >>> ht.pow(T1, T2)
+    DNDarray([[ 1.,  8.],
+              [ 9., 16.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = 3.0
+    >>> ht.pow(T1, s)
+    DNDarray([[ 1.,  8.],
+              [27., 64.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`power(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise power function of values of operand ``t1`` to the power of values of operand
+    ``t2`` (i.e. ``t1**t2``).
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values represent the base
+    t2: DNDarray or scalar
+        The second operand whose values represent the exponent
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis. If
+        not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the exponentiated value. Elsewhere, the `out` array will retain its
+        original value. If an uninitialized `out` array is created via the default `out=None`,
+        locations within it where the condition is False will remain uninitialized. If distributed,
+        the split axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.pow(3.0, 2.0)
+    DNDarray(9., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[3, 3], [2, 2]])
+    >>> ht.pow(T1, T2)
+    DNDarray([[ 1.,  8.],
+              [ 9., 16.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = 3.0
+    >>> ht.pow(T1, s)
+    DNDarray([[ 1.,  8.],
+              [27., 64.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`prod(a: DNDarray, axis: Union[int, Tuple[int, ...]] = None, out: DNDarray = None, keepdims: bool = None) ‑> heat.core.dndarray.DNDarray`
+: Return the product of array elements over a given axis in the form of a DNDarray shaped like
+    ``a`` but with the specified axis removed.
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis or axes along which a product is performed. The default, ``axis=None``, will calculate
+        the product of all the elements in the input array. If axis is negative it counts from the
+        last to the first axis. If axis is a tuple of ints, a product is performed on all of the
+        axes specified in the tuple instead of a single axis or all the axes as before.
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output, but the datatype of the output values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions
+        with size one. With this option, the result will broadcast correctly against the input
+        array.
+
+    Examples
+    --------
+    >>> ht.prod(ht.array([1.0, 2.0]))
+    DNDarray(2., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.prod(ht.array([[1.0, 2.0], [3.0, 4.0]]))
+    DNDarray(24., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.prod(ht.array([[1.0, 2.0], [3.0, 4.0]]), axis=1)
+    DNDarray([ 2., 12.], dtype=ht.float32, device=cpu:0, split=None)
+
+`remainder(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise division remainder of values of operand ``t1`` by values of operand ``t2`` (i.e.
+    ``t1%t2``). The result has the same sign as the divisor ``t2``.
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand whose values are divided
+    t2: DNDarray or scalar
+        The second operand by whose values is divided
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis.
+        If not provided, a freshly allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the divided value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.remainder(2, 2)
+    DNDarray(0, dtype=ht.int64, device=cpu:0, split=None)
+    >>> T1 = ht.int32([[1, 2], [3, 4]])
+    >>> T2 = ht.int32([[2, 2], [2, 2]])
+    >>> ht.remainder(T1, T2)
+    DNDarray([[1, 0],
+              [1, 0]], dtype=ht.int32, device=cpu:0, split=None)
+    >>> s = 2
+    >>> ht.remainder(s, T1)
+    DNDarray([[0, 0],
+              [2, 2]], dtype=ht.int32, device=cpu:0, split=None)
+
+`right_shift(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Shift the bits of an integer to the right.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        Input array
+    t2: DNDarray or scalar
+        Integer number of bits to remove
+    out: DNDarray, optional
+        Output array for the result. Must have the same shape as the expected output. The dtype of
+        the output will be the one of the input array, unless it is logical, in which case it will
+        be cast to int8. If not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the shifted value. Elsewhere, the `out` array will retain its original
+        value. If an uninitialized `out` array is created via the default `out=None`, locations
+        within it where the condition is False will remain uninitialized. If distributed, the split
+        axis (after broadcasting if required) must match that of the `out` array.
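+
+    Examples
+    --------
+    The shift amounts may themselves be given as an array; a minimal sketch (the example
+    values here are illustrative additions):
+
+    >>> ht.right_shift(ht.array([16, 17, 18]), ht.array([1, 2, 3]))
+    DNDarray([8, 4, 2], dtype=ht.int64, device=cpu:0, split=None)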
+    >>> ht.right_shift(ht.array([1, 2, 3]), 1)
+    DNDarray([0, 1, 1], dtype=ht.int64, device=cpu:0, split=None)
+
+`sub(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise subtraction of values of operand ``t2`` from values of operand ``t1`` (i.e.
+    ``t1-t2``).
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand from which values are subtracted
+    t2: DNDarray or scalar
+        The second operand whose values are subtracted
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis. If
+        not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the subtracted value. Elsewhere, the `out` array will retain its
+        original value. If an uninitialized `out` array is created via the default `out=None`,
+        locations within it where the condition is False will remain uninitialized. If distributed,
+        the split axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.sub(4.0, 1.0)
+    DNDarray(3., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.sub(T1, T2)
+    DNDarray([[-1.,  0.],
+              [ 1.,  2.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = 2.0
+    >>> ht.sub(s, T1)
+    DNDarray([[ 1.,  0.],
+              [-1., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`subtract(t1: Union[DNDarray, float], t2: Union[DNDarray, float], /, out: Optional[DNDarray] = None, *, where: Union[bool, DNDarray] = True) ‑> heat.core.dndarray.DNDarray`
+: Element-wise subtraction of values of operand ``t2`` from values of operand ``t1`` (i.e.
+    ``t1-t2``).
+    Operation is not commutative.
+
+    Parameters
+    ----------
+    t1: DNDarray or scalar
+        The first operand from which values are subtracted
+    t2: DNDarray or scalar
+        The second operand whose values are subtracted
+    out: DNDarray, optional
+        Output array. It must have a shape that the inputs broadcast to and matching split axis. If
+        not provided or None, a freshly-allocated array is returned.
+    where: DNDarray, optional
+        Condition to broadcast over the inputs. At locations where the condition is True, the `out`
+        array will be set to the subtracted value. Elsewhere, the `out` array will retain its
+        original value. If an uninitialized `out` array is created via the default `out=None`,
+        locations within it where the condition is False will remain uninitialized. If distributed,
+        the split axis (after broadcasting if required) must match that of the `out` array.
+
+    Examples
+    --------
+    >>> ht.sub(4.0, 1.0)
+    DNDarray(3., dtype=ht.float32, device=cpu:0, split=None)
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[2, 2], [2, 2]])
+    >>> ht.sub(T1, T2)
+    DNDarray([[-1.,  0.],
+              [ 1.,  2.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = 2.0
+    >>> ht.sub(s, T1)
+    DNDarray([[ 1.,  0.],
+              [-1., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`sum(a: DNDarray, axis: Union[int, Tuple[int, ...]] = None, out: DNDarray = None, keepdims: bool = None) ‑> heat.core.dndarray.DNDarray`
+: Sum of array elements over a given axis. Returns an array with the same shape as ``a`` with the
+    specified axis removed, e.g.
+    ``a.shape=(1, 2, 3)`` => ``ht.sum(a, axis=1).shape=(1, 3)``
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis along which a sum is performed. The default, ``axis=None``, will sum all of the
+        elements of the input array. If ``axis`` is negative it counts from the last to the first
+        axis. If ``axis`` is a tuple of ints, a sum is performed on all of the axes specified in the
+        tuple instead of a single axis or all the axes as before.
+    out : DNDarray, optional
+        Alternative output array in which to place the result. It must have the same shape as the
+        expected output, but the datatype of the output values will be cast if necessary.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions
+        with size one. With this option, the result will broadcast correctly against the input
+        array.
+
+    Examples
+    --------
+    >>> ht.sum(ht.ones(2))
+    DNDarray(2., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.sum(ht.ones((3, 3)))
+    DNDarray(9., dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.sum(ht.ones((3, 3)).astype(ht.int))
+    DNDarray(9, dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.sum(ht.ones((3, 2, 1)), axis=-3)
+    DNDarray([[3.],
+              [3.]], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/base.md b/doc/api/heat/core/base.md
new file mode 100644
index 0000000000..a8e97cf445
--- /dev/null
+++ b/doc/api/heat/core/base.md
@@ -0,0 +1,238 @@
+Module heat.core.base
+=====================
+Provides mixins for high-level algorithms, e.g. classifiers or clustering algorithms.
+
+Functions
+---------
+
+`is_classifier(estimator: object) ‑> bool`
+: Return ``True`` if the given estimator is a classifier, ``False`` otherwise.
+
+    Parameters
+    ----------
+    estimator : object
+        Estimator object to test.
+
+`is_clusterer(estimator: object) ‑> bool`
+: Return ``True`` if the given estimator is a clusterer, ``False`` otherwise.
+
+    Parameters
+    ----------
+    estimator : object
+        Estimator object to test.
+
+`is_estimator(estimator: object) ‑> bool`
+: Return ``True`` if the given estimator is an estimator, ``False`` otherwise.
+
+    Parameters
+    ----------
+    estimator : object
+        Estimator object to test.
+
+`is_regressor(estimator: object) ‑> bool`
+: Return ``True`` if the given estimator is a regressor, ``False`` otherwise.
+
+    Parameters
+    ----------
+    estimator : object
+        Estimator object to test.
+
+`is_transformer(estimator: object) ‑> bool`
+: Return ``True`` if the given estimator is a transformer, ``False`` otherwise.
+
+    Parameters
+    ----------
+    estimator : object
+        Estimator object to test.
+
+Classes
+-------
+
+`BaseEstimator()`
+: Abstract base class for all estimators, i.e. parametrized analysis algorithms, in Heat. Can be used as a mixin.
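+
+    A minimal sketch of the shared parameter interface, shown here with the
+    ``KNeighborsClassifier`` descendant (the keys returned by ``get_params`` mirror the
+    estimator's constructor arguments; the exact values shown are illustrative):
+
+    >>> from heat.classification.kneighborsclassifier import KNeighborsClassifier
+    >>> knn = KNeighborsClassifier(n_neighbors=3)
+    >>> knn.get_params()["n_neighbors"]
+    3
+    >>> knn.set_params(n_neighbors=5).get_params()["n_neighbors"]
+    5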
+
+    ### Descendants
+
+    * heat.classification.kneighborsclassifier.KNeighborsClassifier
+    * heat.cluster._kcluster._KCluster
+    * heat.cluster.batchparallelclustering._BatchParallelKCluster
+    * heat.cluster.spectral.Spectral
+    * heat.decomposition.dmd.DMD
+    * heat.decomposition.dmd.DMDc
+    * heat.decomposition.pca.IncrementalPCA
+    * heat.decomposition.pca.PCA
+    * heat.naive_bayes.gaussianNB.GaussianNB
+    * heat.preprocessing.preprocessing.MaxAbsScaler
+    * heat.preprocessing.preprocessing.MinMaxScaler
+    * heat.preprocessing.preprocessing.Normalizer
+    * heat.preprocessing.preprocessing.RobustScaler
+    * heat.preprocessing.preprocessing.StandardScaler
+    * heat.regression.lasso.Lasso
+
+    ### Methods
+
+    `get_params(self, deep: bool = True) ‑> Dict[str, object]`
+    : Get parameters for this estimator.
+
+        Parameters
+        ----------
+        deep : bool, default: True
+            If ``True``, will return the parameters for this estimator and contained sub-objects that are estimators.
+
+    `set_params(self, **params: Dict[str, object]) ‑> ~self`
+    : Set the parameters of this estimator. The method works on simple estimators as well as on nested objects
+        (such as pipelines). The latter have to be nested dictionaries.
+
+        Parameters
+        ----------
+        **params : dict[str, object]
+            Estimator parameters to be set.
+
+`ClassificationMixin()`
+: Mixin for all classifiers in Heat.
+
+    ### Descendants
+
+    * heat.classification.kneighborsclassifier.KNeighborsClassifier
+    * heat.naive_bayes.gaussianNB.GaussianNB
+
+    ### Methods
+
+    `fit(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray)`
+    : Fits the classification model.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training instances to train on. Shape = (n_samples, n_features)
+        y : DNDarray
+            Class values to fit. Shape = (n_samples, )
+
+    `fit_predict(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Fits the model and returns the classes for each input sample.
+        Convenience method; equivalent to calling :func:`fit` followed by :func:`predict`.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data to be predicted. Shape = (n_samples, n_features)
+        y : DNDarray
+            Class values to fit. Shape = (n_samples, )
+
+    `predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Predicts the class labels for each sample.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Values to predict the classes for. Shape = (n_samples, n_features)
+
+`ClusteringMixin()`
+: Clustering mixin for all clusterers in Heat.
+
+    ### Descendants
+
+    * heat.cluster._kcluster._KCluster
+    * heat.cluster.batchparallelclustering._BatchParallelKCluster
+    * heat.cluster.spectral.Spectral
+
+    ### Methods
+
+    `fit(self, x: heat.core.dndarray.DNDarray)`
+    : Computes the clustering.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training instances to cluster. Shape = (n_samples, n_features)
+
+    `fit_predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Computes the clusters and returns the predicted cluster assignment for each sample,
+        i.e. the index of the cluster each sample belongs to.
+        Convenience method; equivalent to calling :func:`fit` followed by :func:`predict`.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data to be clustered. Shape = (n_samples, n_features)
+
+`RegressionMixin()`
+: Mixin for all regression estimators in Heat.
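+
+    A hedged usage sketch of the shared interface (assuming the ``Lasso`` descendant can be
+    constructed with its default arguments; the data shapes follow the ``fit`` documentation
+    below):
+
+    >>> import heat as ht
+    >>> from heat.regression.lasso import Lasso
+    >>> x = ht.random.rand(10, 3)
+    >>> y = ht.random.rand(10)
+    >>> estimator = Lasso()
+    >>> estimator.fit(x, y)
+    >>> predictions = estimator.predict(x)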
+
+    ### Descendants
+
+    * heat.decomposition.dmd.DMD
+    * heat.decomposition.dmd.DMDc
+    * heat.regression.lasso.Lasso
+
+    ### Methods
+
+    `fit(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray)`
+    : Fits the regression model.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training instances to train on. Shape = (n_samples, n_features)
+        y : DNDarray
+            Continuous values to fit. Shape = (n_samples,)
+
+    `fit_predict(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Fits the model and returns regression predictions for each input sample.
+        Convenience method; equivalent to calling :func:`fit` followed by :func:`predict`.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data to be predicted. Shape = (n_samples, n_features)
+        y : DNDarray
+            Continuous values to fit. Shape = (n_samples,)
+
+    `predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Predicts the continuous labels for each sample.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Values to let the model predict. Shape = (n_samples, n_features)
+
+`TransformMixin()`
+: Mixin for all transformations in Heat.
+
+    ### Descendants
+
+    * heat.decomposition.pca.IncrementalPCA
+    * heat.decomposition.pca.PCA
+    * heat.preprocessing.preprocessing.MaxAbsScaler
+    * heat.preprocessing.preprocessing.MinMaxScaler
+    * heat.preprocessing.preprocessing.Normalizer
+    * heat.preprocessing.preprocessing.RobustScaler
+    * heat.preprocessing.preprocessing.StandardScaler
+
+    ### Methods
+
+    `fit(self, x: heat.core.dndarray.DNDarray)`
+    : Fits the transformation model.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training instances to train on. Shape = (n_samples, n_features)
+
+    `fit_transform(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Fits the model and returns the transformed data for each input sample.
+        Convenience method; equivalent to calling :func:`fit` followed by :func:`transform`.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data to be transformed. Shape = (n_samples, n_features)
+
+    `transform(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Transforms the input data.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Values to transform. Shape = (n_samples, n_features)
diff --git a/doc/api/heat/core/communication.md b/doc/api/heat/core/communication.md
new file mode 100644
index 0000000000..8a05015464
--- /dev/null
+++ b/doc/api/heat/core/communication.md
@@ -0,0 +1,426 @@
+Module heat.core.communication
+==============================
+Module implementing the communication layer of HeAT
+
+Functions
+---------
+
+`get_comm() ‑> heat.core.communication.Communication`
+: Retrieves the currently globally set default communication.
+
+`sanitize_comm(comm: Optional[Communication]) ‑> heat.core.communication.Communication`
+: Sanitizes a communication object, i.e. checks whether it is an instance of
+    :class:`heat.core.communication.Communication`; if ``None`` is passed, the currently set
+    default communication is used.
+
+    Parameters
+    ----------
+    comm : Communication
+        The comm to be sanitized
+
+    Raises
+    ------
+    TypeError
+        If the given communication is not the proper type
+
+`use_comm(comm: Communication = None)`
+: Sets the globally used default communicator.
+
+    Parameters
+    ----------
+    comm : Communication or None
+        The communication to be set
+
+Classes
+-------
+
+`Communication()`
+: Base class for Communications (intended for other backends)
+
+    ### Descendants
+
+    * heat.core.communication.MPICommunication
+
+    ### Static methods
+
+    `is_distributed() ‑> NotImplementedError`
+    : Whether or not the Communication is distributed
+
+    ### Methods
+
+    `chunk(self, shape, split) ‑> NotImplementedError`
+    : Calculates the chunk of data that will be assigned to this compute node given a global data shape and a split
+      axis. Returns ``(offset, local_shape, slices)``: the offset in the split dimension, the resulting local shape if the
+      global input shape is chunked on the split axis and the chunk slices with respect to the given shape
+
+      Parameters
+      ----------
+      shape : Tuple[int,...]
+          The global shape of the data to be split
+      split : int
+          The axis along which to chunk the data
+
+`MPICommunication(handle=)`
+: Class encapsulating all MPI Communication
+
+    Parameters
+    ----------
+    handle: MPI.Communicator
+        Handle for the mpi4py Communicator
+
+    ### Ancestors (in MRO)
+
+    * heat.core.communication.Communication
+
+    ### Class variables
+
+    `COUNT_LIMIT`
+    :
+
+    ### Static methods
+
+    `as_buffer(obj: torch.Tensor, counts: Optional[Tuple[int]] = None, displs: Optional[Tuple[int]] = None, is_contiguous: Optional[bool] = None) ‑> List[mpi4py.MPI.buffer | Tuple[int, int] | mpi4py.MPI.Datatype]`
+    : Converts a passed ``torch.Tensor`` into a memory buffer object with associated number of elements and MPI data type.
+
+      Parameters
+      ----------
+      obj : torch.Tensor
+          The object to be converted into a buffer representation.
+      counts : Tuple[int,...], optional
+          Optional counts arguments for variable MPI-calls (e.g. Alltoallv)
+      displs : Tuple[int,...], optional
+          Optional displacements arguments for variable MPI-calls (e.g. Alltoallv)
+      is_contiguous: bool, optional
+          Optional information on global contiguity of the memory-distributed object.
+
+    `as_mpi_memory(obj: torch.Tensor) ‑> mpi4py.MPI.buffer`
+    : Converts the passed ``torch.Tensor`` into an MPI compatible memory view.
+
+      Parameters
+      ----------
+      obj : torch.Tensor
+          The tensor to be converted into a MPI memory view.
+
+    `mpi_type_and_elements_of(obj: Union[DNDarray, torch.Tensor], counts: Optional[Tuple[int]], displs: Tuple[int], is_contiguous: Optional[bool]) ‑> Tuple[mpi4py.MPI.Datatype, Tuple[int, ...]]`
+    : Determines the MPI data type and number of respective elements for the given tensor (:class:`~heat.core.dndarray.DNDarray`
+      or ``torch.Tensor``). In case the tensor is contiguous in memory, a native MPI data type can be used.
+      Otherwise, a derived data type is automatically constructed using the storage information of the passed object.
+
+      Parameters
+      ----------
+      obj : DNDarray or torch.Tensor
+          The object for which to construct the MPI data type and number of elements
+      counts : Tuple[ints,...], optional
+          Optional counts arguments for variable MPI-calls (e.g. Alltoallv)
+      displs : Tuple[ints,...], optional
+          Optional displacements arguments for variable MPI-calls (e.g. Alltoallv)
+      is_contiguous: bool
+          Information on global contiguity of the memory-distributed object. If `None`, it will be set to local contiguity via ``torch.Tensor.is_contiguous()``.
+      # ToDo: The option to explicitly specify the counts and displacements to be sent still needs proper implementation
+
+    `mpi_type_of(dtype: torch.dtype) ‑> mpi4py.MPI.Datatype`
+    : Determines the MPI Datatype from the torch dtype.
+ + Parameters + ---------- + dtype : torch.dtype + PyTorch data type + + ### Methods + + `Allgather(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], recv_axis: int = 0)` + : Allgather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB) -> None + + Gather to All. + + Gather data from all processes and broadcast the combined data to all + other processes. + + `Allgatherv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], recv_axis: int = 0)` + : Allgatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV) -> None + + Gather to All Vector. + + Gather data from all processes and send it to all other processes + providing different amounts of data and displacements. + + `Allreduce(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = )` + : Allreduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None + + Reduce to All. + + `Alltoall(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], send_axis: int = 0, recv_axis: int = None)` + : Alltoall(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB) -> None + + All to All Scatter/Gather. + + Send data to all processes and recv data from all processes. + + `Alltoallv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], send_axis: int = 0, recv_axis: int = None)` + : Alltoallv(self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV) -> None + + All to All Scatter/Gather Vector. + + Send data to all processes and recv data from all processes + providing different amounts of data and displacements. + + `Alltoallw(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any])` + : Alltoallw(self, sendbuf: BufSpecW | InPlace, recvbuf: BufSpecW) -> None + + All to All Scatter/Gather General. + + Send/recv data to/from all processes allowing the specification of + different counts, displacements, and datatypes for each dest/source. + + `Bcast(self, buf: Union[DNDarray, torch.Tensor, Any], root: int = 0) ‑> None` + : Bcast(self, buf: BufSpec, root: int = 0) -> None + + Broadcast data from one process to all other processes. + + `Bsend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0)` + : Bsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None + + Blocking send in buffered mode. + + `Exscan(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = )` + : Exscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None + + Exclusive Scan. + + `Free(self) ‑> None` + : Free a communicator. + + `Gather(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None)` + : Gather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, root: int = 0) -> None + + Gather data to one process from all other processes. + + `Gatherv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None)` + : Gatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, root: int = 0) -> None + + Gather Vector. + + Gather data to one process from all other processes + providing different amounts of data and displacements. 
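+
+      A minimal usage sketch (an illustrative addition, assuming equally sized chunks,
+      ``torch`` tensors as buffers as described in ``as_buffer`` above, and ``ht.get_comm()``
+      for obtaining the default communicator):
+
+      >>> import torch
+      >>> import heat as ht
+      >>> comm = ht.get_comm()
+      >>> send = torch.full((2,), float(comm.rank))
+      >>> recv = torch.empty(2 * comm.size)
+      >>> comm.Gatherv(send, recv, root=0)
+      >>> recv  # on rank 0: each rank's value repeated twice; undefined elsewhere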
+
+    `Iallgather(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], recv_axis: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Iallgather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB) -> Request
+
+      Nonblocking Gather to All.
+
+    `Iallgatherv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], recv_axis: int = 0)`
+    : Iallgatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV) -> Request
+
+      Nonblocking Gather to All Vector.
+
+    `Iallreduce(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = ) ‑> heat.core.communication.MPIRequest`
+    : Iallreduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request
+
+      Nonblocking Reduce to All.
+
+    `Ialltoall(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], send_axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Ialltoall(self, sendbuf: BufSpecB | InPlace, recvbuf: BufSpecB) -> Request
+
+      Nonblocking All to All Scatter/Gather.
+
+    `Ialltoallv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], send_axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Ialltoallv(self, sendbuf: BufSpecV | InPlace, recvbuf: BufSpecV) -> Request
+
+      Nonblocking All to All Scatter/Gather Vector.
+
+    `Ibcast(self, buf: Union[DNDarray, torch.Tensor, Any], root: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Ibcast(self, buf: BufSpec, root: int = 0) -> Request
+
+      Nonblocking Broadcast.
+
+    `Ibsend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Ibsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request
+
+      Nonblocking send in buffered mode.
+
+    `Iexscan(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = ) ‑> heat.core.communication.MPIRequest`
+    : Iexscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request
+
+      Nonblocking Exclusive Scan.
+
+    `Igather(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Igather(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecB | None, root: int = 0) -> Request
+
+      Nonblocking Gather.
+
+    `Igatherv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Igatherv(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpecV | None, root: int = 0) -> Request
+
+      Nonblocking Gather Vector.
+
+    `Irecv(self, buf: Union[DNDarray, torch.Tensor, Any], source: int = -1, tag: int = -1) ‑> heat.core.communication.MPIRequest`
+    : Irecv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG) -> Request
+
+      Nonblocking receive.
+
+    `Ireduce(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = , root: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Ireduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, op: Op = SUM, root: int = 0) -> Request
+
+      Nonblocking Reduce to Root.
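+
+      A hedged sketch of the nonblocking pattern (an illustrative addition; the returned
+      ``MPIRequest`` must be completed with ``Wait`` before ``recv`` is read):
+
+      >>> import torch
+      >>> import heat as ht
+      >>> comm = ht.get_comm()
+      >>> send = torch.ones(3)
+      >>> recv = torch.empty(3)
+      >>> req = comm.Ireduce(send, recv, root=0)
+      >>> req.Wait()
+      >>> recv  # on rank 0: every entry equals the number of processes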
+
+    `Irsend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Irsend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request
+
+      Nonblocking send in ready mode.
+
+    `Iscan(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = ) ‑> heat.core.communication.MPIRequest`
+    : Iscan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> Request
+
+      Nonblocking Inclusive Scan.
+
+    `Iscatter(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Iscatter(self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, root: int = 0) -> Request
+
+      Nonblocking Scatter.
+
+    `Iscatterv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None) ‑> heat.core.communication.MPIRequest`
+    : Iscatterv(self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, root: int = 0) -> Request
+
+      Nonblocking Scatter Vector.
+
+    `Isend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Isend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request
+
+      Nonblocking send.
+
+    `Issend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0) ‑> heat.core.communication.MPIRequest`
+    : Issend(self, buf: BufSpec, dest: int, tag: int = 0) -> Request
+
+      Nonblocking send in synchronous mode.
+
+    `Recv(self, buf: Union[DNDarray, torch.Tensor, Any], source: int = -1, tag: int = -1, status: MPI.Status = None)`
+    : Recv(self, buf: BufSpec, source: int = ANY_SOURCE, tag: int = ANY_TAG, status: Status | None = None) -> None
+
+      Blocking receive.
+
+      .. note:: This function blocks until the message is received.
+
+    `Reduce(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = , root: int = 0)`
+    : Reduce(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec | None, op: Op = SUM, root: int = 0) -> None
+
+      Reduce to Root.
+
+    `Rsend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0)`
+    : Rsend(self, buf: BufSpec, dest: int, tag: int = 0) -> None
+
+      Blocking send in ready mode.
+
+    `Scan(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], op: MPI.Op = )`
+    : Scan(self, sendbuf: BufSpec | InPlace, recvbuf: BufSpec, op: Op = SUM) -> None
+
+      Inclusive Scan.
+
+    `Scatter(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None)`
+    : Scatter(self, sendbuf: BufSpecB | None, recvbuf: BufSpec | InPlace, root: int = 0) -> None
+
+      Scatter data from one process to all other processes.
+
+    `Scatterv(self, sendbuf: Union[DNDarray, torch.Tensor, Any], recvbuf: Union[DNDarray, torch.Tensor, Any], root: int = 0, axis: int = 0, recv_axis: int = None)`
+    : Scatterv(self, sendbuf: BufSpecV | None, recvbuf: BufSpec | InPlace, root: int = 0) -> None
+
+      Scatter Vector.
+
+      Scatter data from one process to all other processes
+      providing different amounts of data and displacements.
+
+    `Send(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0)`
+    : Send(self, buf: BufSpec, dest: int, tag: int = 0) -> None
+
+      Blocking send.
+
+      .. note:: This function may block until the message is received.
+ Whether `Send` blocks or not depends on several factors and is + implementation dependent. + + `Split(self, color: int = 0, key: int = 0) ‑> heat.core.communication.MPICommunication` + : Split communicator by color and key. + + Parameters + ---------- + color : int, optional + Determines the new communicator for a process. + key: int, optional + Ordering within the new communicator. + + `Ssend(self, buf: Union[DNDarray, torch.Tensor, Any], dest: int, tag: int = 0)` + : Ssend(self, buf: BufSpec, dest: int, tag: int = 0) -> None + + Blocking send in synchronous mode. + + `alltoall_recvbuffer(self, obj: torch.Tensor) ‑> List[mpi4py.MPI.buffer | Tuple[int, int] | mpi4py.MPI.Datatype]` + : Converts a passed ``torch.Tensor`` into a memory buffer object with associated number of elements and MPI data type. + XXX: might not work for all MPI stacks. Might require multiple type commits or so + + Parameters + ---------- + obj: torch.Tensor + The object to be transformed into a custom MPI datatype + + `alltoall_sendbuffer(self, obj: torch.Tensor) ‑> List[mpi4py.MPI.buffer | Tuple[int, int] | mpi4py.MPI.Datatype]` + : Converts a passed ``torch.Tensor`` into a memory buffer object with associated number of elements and MPI data type. + XXX: might not work for all MPI stacks. Might require multiple type commits or so + + Parameters + ---------- + obj: torch.Tensor + The object to be transformed into a custom MPI datatype + + `chunk(self, shape: Tuple[int], split: int, rank: int = None, w_size: int = None, sparse: bool = False) ‑> Tuple[int, Tuple[int], Tuple[slice]]` + : Calculates the chunk of data that will be assigned to this compute node given a global data shape and a split + axis. + Returns ``(offset, local_shape, slices)``: the offset in the split dimension, the resulting local shape if the + global input shape is chunked on the split axis and the chunk slices with respect to the given shape + + Parameters + ---------- + shape : Tuple[int,...] + The global shape of the data to be split + split : int + The axis along which to chunk the data + rank : int, optional + Process for which the chunking is calculated for, defaults to ``self.rank``. + Intended for creating chunk maps without communication + w_size : int, optional + The MPI world size, defaults to ``self.size``. + Intended for creating chunk maps without communication + sparse : bool, optional + Specifies whether the array is a sparse matrix + + `counts_displs_shape(self, shape: Tuple[int], axis: int) ‑> Tuple[Tuple[int], Tuple[int], Tuple[int]]` + : Calculates the item counts, displacements and output shape for a variable sized all-to-all MPI-call (e.g. + ``MPI_Alltoallv``). The passed shape is regularly chunk along the given axis and for all nodes. + + Parameters + ---------- + shape : Tuple[int,...] + The object for which to calculate the chunking. + axis : int + The axis along which the chunking is performed. + + `is_distributed(self) ‑> bool` + : Determines whether the communicator is distributed, i.e. handles more than one node. + +`MPIRequest(handle, sendbuf: Union[DNDarray, torch.Tensor, Any] = None, recvbuf: Union[DNDarray, torch.Tensor, Any] = None, tensor: torch.Tensor = None, permutation: Tuple[int, ...] 
= None)` +: Represents a handle on a non-blocking operation + + Parameters + ---------- + handle: MPI.Communicator + Handle for the mpi4py Communicator + sendbuf: DNDarray or torch.Tensor or Any + The buffer for the data to be send + recvbuf: DNDarray or torch.Tensor or Any + The buffer to the receive data + tensor: torch.Tensor + Internal Data + permutation: Tuple[int,...] + Permutation of the tensor axes + + ### Methods + + `Wait(self, status: MPI.Status = None)` + : Waits for an MPI request to complete diff --git a/doc/api/heat/core/complex_math.md b/doc/api/heat/core/complex_math.md new file mode 100644 index 0000000000..3d35c34131 --- /dev/null +++ b/doc/api/heat/core/complex_math.md @@ -0,0 +1,81 @@ +Module heat.core.complex_math +============================= +Complex numbers module. + +Functions +--------- + +`angle(x: heat.core.dndarray.DNDarray, deg: bool = False, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Calculate the element-wise angle of the complex argument. + + Parameters + ---------- + x : DNDarray + Input array for which to compute the angle. + deg : bool, optional + Return the angle in degrees (True) or radiands (False). + out : DNDarray, optional + Output array with the angles. + + Examples + -------- + >>> ht.angle(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])) + DNDarray([ 0.0000, 1.5708, 0.7854, 2.3562, -0.7854], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.angle(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j]), deg=True) + DNDarray([ 0., 90., 45., 135., -45.], dtype=ht.float32, device=cpu:0, split=None) + +`conj(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Compute the complex conjugate, element-wise. + + Parameters + ---------- + x : DNDarray + Input array for which to compute the complex conjugate. + out : DNDarray, optional + Output array with the complex conjugates. + + Examples + -------- + >>> ht.conjugate(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])) + DNDarray([ (1-0j), -1j, (1-1j), (-2-2j), (3+3j)], dtype=ht.complex64, device=cpu:0, split=None) + +`conjugate(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Compute the complex conjugate, element-wise. + + Parameters + ---------- + x : DNDarray + Input array for which to compute the complex conjugate. + out : DNDarray, optional + Output array with the complex conjugates. + + Examples + -------- + >>> ht.conjugate(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])) + DNDarray([ (1-0j), -1j, (1-1j), (-2-2j), (3+3j)], dtype=ht.complex64, device=cpu:0, split=None) + +`imag(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Return the imaginary part of the complex argument. The returned DNDarray and the input DNDarray share the same underlying storage. + + Parameters + ---------- + x : DNDarray + Input array for which the imaginary part is returned. + + Examples + -------- + >>> ht.imag(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])) + DNDarray([ 0., 1., 1., 2., -3.], dtype=ht.float32, device=cpu:0, split=None) + +`real(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Return the real part of the complex argument. The returned DNDarray and the input DNDarray share the same underlying storage. + + Parameters + ---------- + x : DNDarray + Input array for which the real part is returned. 
+ + Examples + -------- + >>> ht.real(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j])) + DNDarray([ 1., 0., 1., -2., 3.], dtype=ht.float32, device=cpu:0, split=None) diff --git a/doc/api/heat/core/constants.md b/doc/api/heat/core/constants.md new file mode 100644 index 0000000000..da0e519689 --- /dev/null +++ b/doc/api/heat/core/constants.md @@ -0,0 +1,33 @@ +Module heat.core.constants +========================== +Constants module. + +Variables +--------- + +`Euler` +: Euler's number, Euler's constant (:math:`e`). + +`Inf` +: IEEE 754 floating point representation of (positive) infinity (:math:`\infty`). + +`Infinity` +: IEEE 754 floating point representation of (positive) infinity (:math:`\infty`). + +`Infty` +: IEEE 754 floating point representation of (positive) infinity (:math:`\infty`). + +`NaN` +: IEEE 754 floating point representation of Not a Number (NaN). + +`e` +: Euler's number, Euler's constant (:math:`e`). + +`inf` +: IEEE 754 floating point representation of (positive) infinity (:math:`\infty`). + +`nan` +: IEEE 754 floating point representation of Not a Number (NaN). + +`pi` +: Archimedes' constant (:math:`\pi`). diff --git a/doc/api/heat/core/devices.md b/doc/api/heat/core/devices.md new file mode 100644 index 0000000000..2f09f9eefe --- /dev/null +++ b/doc/api/heat/core/devices.md @@ -0,0 +1,81 @@ +Module heat.core.devices +======================== +handle different devices. Current options: CPU (default), GPU + +Variables +--------- + +`cpu` +: The standard CPU Device + + Examples + -------- + >>> ht.cpu + device(cpu:0) + >>> ht.ones((2, 3), device=ht.cpu) + DNDarray([[1., 1., 1.], + [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + +Functions +--------- + +`get_device() ‑> heat.core.devices.Device` +: Retrieves the currently globally set default :class:`~heat.core.device.Device`. + +`sanitize_device(device: Optional[Union[str, Device]] = None) ‑> heat.core.devices.Device` +: Sanitizes a device or device identifier, i.e. checks whether it is already an instance of :class:`~heat.core.device.Device` or a string with + known device identifier and maps it to a proper :class:`~heat.core.device.Device`. + + Parameters + ---------- + device : str or Device, optional + The device to be sanitized + + Raises + ------ + ValueError + If the given device id is not recognized + +`use_device(device: Optional[Union[str, Device]] = None) ‑> None` +: Sets the globally used default :class:`~heat.core.device.Device`. + + Parameters + ---------- + device : str or Device + The device to be set + +Classes +------- + +`Device(device_type: str, device_id: int, torch_device: str)` +: Implements a compute device. Heat can run computations on different compute devices or backends. + A device describes the device type and id on which said computation should be carried out. + + Parameters + ---------- + device_type : str + Represents Heat's device name + device_id : int + The device id + torch_device : str + The corresponding PyTorch device type + + Examples + -------- + >>> ht.Device("cpu", 0, "cpu:0") + device(cpu:0) + >>> ht.Device("gpu", 0, "cuda:0") + device(gpu:0) + >>> ht.Device("gpu", 0, "mps:0") # on Apple M1/M2 + device(gpu:0) + + ### Instance variables + + `device_id: int` + : Return the identification number of :class:`~heat.core.device.Device`. + + `device_type: str` + : Return the type of :class:`~heat.core.device.Device` as a string. + + `torch_device: str` + : Return the type and id of :class:`~heat.core.device.Device` as a PyTorch device string object. 
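+
+    Examples
+    --------
+    A short sketch of switching and querying the global default device
+    (a CPU-only setup is assumed here; ``"gpu"`` requires a CUDA- or
+    MPS-enabled PyTorch build):
+
+    >>> import heat as ht
+    >>> ht.use_device("cpu")
+    >>> ht.get_device()
+    device(cpu:0)
+    >>> ht.ones((2, 3)).device
+    device(cpu:0)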
diff --git a/doc/api/heat/core/dndarray.md b/doc/api/heat/core/dndarray.md
new file mode 100644
index 0000000000..972dd5d5a8
--- /dev/null
+++ b/doc/api/heat/core/dndarray.md
@@ -0,0 +1,3929 @@
+Module heat.core.dndarray
+=========================
+Provides HeAT's core data structure, the DNDarray, a distributed n-dimensional array.
+
+Classes
+-------
+
+`DNDarray(array: torch.Tensor, gshape: Tuple[int, ...], dtype: datatype, split: Union[int, None], device: Device, comm: Communication, balanced: bool)`
+: Distributed N-Dimensional array. The core element of HeAT. It is composed of
+  PyTorch tensors local to each process.
+
+    Parameters
+    ----------
+    array : torch.Tensor
+        Local array elements
+    gshape : Tuple[int,...]
+        The global shape of the array
+    dtype : datatype
+        The datatype of the array
+    split : int or None
+        The axis on which the array is divided between processes
+    device : Device
+        The device on which the local arrays reside (CPU or GPU)
+    comm : Communication
+        The communications object for sending and receiving data
+    balanced : bool or None
+        Describes whether the data are evenly distributed across processes.
+        If this information is not available (``self.balanced is None``), it
+        can be gathered via the :func:`is_balanced()` method (requires communication).
+
+    ### Instance variables
+
+    `T: heat.core.dndarray.DNDarray`
+    : The transposed array, i.e. the array with its dimensions reversed.
+
+    `array_with_halos: torch.Tensor`
+    : Fetch halos of size ``halo_size`` from neighboring ranks and save them in ``self.halo_next``/``self.halo_prev``
+      in case they are not already stored. If ``halo_size`` differs from the size of already stored halos,
+      they are overwritten.
+
+    `balanced: bool`
+    : Boolean value indicating if the DNDarray is balanced between the MPI processes
+
+    `comm: Communication`
+    : The :class:`~heat.core.communication.Communication` of the ``DNDarray``
+
+    `device: Device`
+    : The :class:`~heat.core.devices.Device` of the ``DNDarray``
+
+    `dtype: datatype`
+    : The :class:`~heat.core.types.datatype` of the ``DNDarray``
+
+    `gnbytes: int`
+    : Returns the number of bytes consumed by the global ``DNDarray``
+
+        Note
+        ----
+        Does not include memory consumed by non-element attributes of the ``DNDarray`` object.
+
+    `gnumel: int`
+    : Returns the number of total elements of the ``DNDarray``
+
+    `gshape: Tuple`
+    : Returns the global shape of the ``DNDarray`` across all processes
+
+    `halo_next: torch.Tensor`
+    : Returns the halo of the next process
+
+    `halo_prev: torch.Tensor`
+    : Returns the halo of the previous process
+
+    `imag: DNDarray`
+    : Return the imaginary part of the ``DNDarray``.
+
+    `larray: torch.Tensor`
+    : Returns the underlying process-local ``torch.Tensor`` of the ``DNDarray``
+
+    `lloc: Union[DNDarray, None]`
+    : Local item setter and getter, i.e. this function operates on a local
+      level and only on the PyTorch tensors composing the :class:`DNDarray`.
+      This function uses the LocalIndex class. As getter, it returns a ``DNDarray``
+      with the indices selected at a *local* level
+
+        Parameters
+        ----------
+        key : int or slice or Tuple[int,...]
+            Indices of the desired data.
+        value : scalar, optional
+            All types compatible with pytorch tensors; if none given then this is a getter function
+
+        Examples
+        --------
+        >>> a = ht.zeros((4, 5), split=0)
+        DNDarray([[0., 0., 0., 0., 0.],
+                  [0., 0., 0., 0., 0.],
+                  [0., 0., 0., 0., 0.],
+                  [0., 0., 0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=0)
+        >>> a.lloc[1, 0:4]
+        (1/2) tensor([0., 0., 0., 0.])
+        (2/2) tensor([0., 0., 0., 0.])
+        >>> a.lloc[1, 0:4] = torch.arange(1, 5)
+        >>> a
+        DNDarray([[0., 0., 0., 0., 0.],
+                  [1., 2., 3., 4., 0.],
+                  [0., 0., 0., 0., 0.],
+                  [1., 2., 3., 4., 0.]], dtype=ht.float32, device=cpu:0, split=0)
+
+    `lnbytes: int`
+    : Returns the number of bytes consumed by the local ``torch.Tensor``
+
+        Note
+        ----
+        Does not include memory consumed by non-element attributes of the ``DNDarray`` object.
+
+    `lnumel: int`
+    : Number of elements of the ``DNDarray`` on each process
+
+    `lshape: Tuple[int]`
+    : Returns the shape of the ``DNDarray`` on each node
+
+    `lshape_map: torch.Tensor`
+    : Returns the lshape map. If it hasn't been previously created then it will be created here.
+
+    `nbytes: int`
+    : Returns the number of bytes consumed by the global tensor. Equivalent to property gnbytes.
+
+        Note
+        ----
+        Does not include memory consumed by non-element attributes of the ``DNDarray`` object.
+
+    `ndim: int`
+    : Number of dimensions of the ``DNDarray``
+
+    `real: DNDarray`
+    : Return the real part of the ``DNDarray``.
+
+    `shape: Tuple[int]`
+    : Returns the shape of the ``DNDarray`` as a whole
+
+    `size: int`
+    : Number of total elements of the ``DNDarray``
+
+    `split: int`
+    : Returns the axis on which the ``DNDarray`` is split
+
+    `stride: Tuple[int]`
+    : Returns the steps in each dimension when traversing a ``DNDarray``. torch-like usage: ``self.stride()``
+
+    `strides: Tuple[int]`
+    : Returns bytes to step in each dimension when traversing a ``DNDarray``. numpy-like usage: ``self.strides()``
+
+    ### Methods
+
+    `abs(self, out=None, dtype=None)`
+    : Returns a :class:`~heat.core.dndarray.DNDarray` containing the element-wise absolute values of the input array ``x``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The array for which to compute the absolute value.
+        out : DNDarray, optional
+            A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+            If not provided or ``None``, a freshly-allocated array is returned.
+        dtype : datatype, optional
+            Determines the data type of the output array. The values are cast to this type with potential loss of
+            precision.
+
+        Raises
+        ------
+        TypeError
+            If dtype is not a heat type.
+
+    `absolute(self, out=None, dtype=None)`
+    : Calculate the absolute value element-wise.
+      :func:`abs` is a shorthand for this function.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The array for which to compute the absolute value.
+        out : DNDarray, optional
+            A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+            If not provided or ``None``, a freshly-allocated array is returned.
+        dtype : datatype, optional
+            Determines the data type of the output array. The values are cast to this type with potential loss of
+            precision.
+
+    `acos(self, out=None)`
+    : Compute the trigonometric arccos, element-wise.
+      Result is a ``DNDarray`` of the same shape as ``x``.
+      Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arccos`` is a reference to it.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The array for which to compute the inverse cosine.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+
+        Examples
+        --------
+        >>> ht.arccos(ht.array([-1.0, -0.0, 0.83]))
+        DNDarray([3.1416, 1.5708, 0.5917], dtype=ht.float32, device=cpu:0, split=None)
+
+    `add_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+    : Element-wise in-place addition of values of two operands.
+      Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise adds the
+      element(s) of the second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) in-place,
+      i.e. the element(s) of `t1` are overwritten by the results of element-wise addition of `t1` and
+      `t2`.
+      Can be called as a DNDarray method or with the symbol `+=`.
+
+        Parameters
+        ----------
+        t1 : DNDarray
+            The first operand involved in the addition
+        t2 : DNDarray or scalar
+            The second operand involved in the addition
+
+        Raises
+        ------
+        ValueError
+            If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+            underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+        TypeError
+            If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+            corresponding out-of-place operation may work, for the in-place version the requirements
+            are stricter, because the data type of `t1` does not change.
+
+        Examples
+        --------
+        >>> import heat as ht
+        >>> T1 = ht.float32([[1, 2], [3, 4]])
+        >>> T2 = ht.float32([[2, 2], [2, 2]])
+        >>> T1 += T2
+        >>> T1
+        DNDarray([[3., 4.],
+                  [5., 6.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> T2
+        DNDarray([[2., 2.],
+                  [2., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> s = 2.0
+        >>> T2.add_(s)
+        DNDarray([[4., 4.],
+                  [4., 4.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> T2
+        DNDarray([[4., 4.],
+                  [4., 4.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> s
+        2.0
+
+    `all(self, axis=None, out=None, keepdims=False)`
+    : Test whether all array elements along a given axis evaluate to ``True``.
+      A new boolean or :class:`~heat.core.dndarray.DNDarray` is returned unless out is specified, in which case a
+      reference to ``out`` is returned.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input array or object that can be converted to an array.
+        axis : None or int or Tuple[int,...], optional
+            Axis or axes along which a logical AND reduction is performed. The default (``axis=None``) is to perform a
+            logical AND over all the dimensions of the input array. ``axis`` may be negative, in which case it counts
+            from the last to the first axis.
+        out : DNDarray, optional
+            Alternate output array in which to place the result. It must have the same shape as the expected output
+            and its type is preserved.
+        keepdims : bool, optional
+            If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+            With this option, the result will broadcast correctly against the original array.
+
+        Examples
+        --------
+        >>> x = ht.random.randn(4, 5)
+        >>> x
+        DNDarray([[ 0.7199,  1.3718,  1.5008,  0.3435,  1.2884],
+                  [ 0.1532, -0.0968,  0.3739,  1.7843,  0.5614],
+                  [ 1.1522,  1.9076,  1.7638,  0.4110, -0.2803],
+                  [-0.5475, -0.0271,  0.8564, -1.5870,  1.3108]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> y = x < 0.5
+        >>> y
+        DNDarray([[False, False, False,  True, False],
+                  [ True,  True,  True, False, False],
+                  [False, False, False,  True,  True],
+                  [ True,  True, False,  True, False]], dtype=ht.bool, device=cpu:0, split=None)
+        >>> ht.all(y)
+        DNDarray([False], dtype=ht.bool, device=cpu:0, split=None)
+        >>> ht.all(y, axis=0)
+        DNDarray([False, False, False, False, False], dtype=ht.bool, device=cpu:0, split=None)
+        >>> ht.all(x, axis=1)
+        DNDarray([True, True, True, True], dtype=ht.bool, device=cpu:0, split=None)
+        >>> out = ht.zeros(5)
+        >>> ht.all(y, axis=0, out=out)
+        DNDarray([False, False, False, False, False], dtype=ht.float32, device=cpu:0, split=None)
+        >>> out
+        DNDarray([False, False, False, False, False], dtype=ht.float32, device=cpu:0, split=None)
+
+    `allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False)`
+    : Test whether this array and ``other`` are element-wise equal within a tolerance.
+      Returns ``True`` if ``|self - other| <= atol + rtol * |other|`` holds for all elements
+      of the two arrays, ``False`` otherwise.
+
+        Parameters
+        ----------
+        other : DNDarray
+            Array to compare against.
+        rtol : float, optional
+            Relative tolerance.
+        atol : float, optional
+            Absolute tolerance.
+        equal_nan : bool, optional
+            If ``True``, NaNs are treated as equal when they occur at the same positions in both arrays.
+
+        Examples
+        --------
+        >>> a = ht.float32([[2, 2], [2, 2]])
+        >>> a.allclose(a)
+        True
+        >>> b = ht.float32([[2.00005, 2.00005], [2.00005, 2.00005]])
+        >>> a.allclose(b)
+        False
+        >>> a.allclose(b, atol=1e-04)
+        True
+
+    `any(self, axis=None, out=None, keepdims=False)`
+    : Returns a :class:`~heat.core.dndarray.DNDarray` containing the result of the test whether any array elements along a
+      given axis evaluate to ``True``.
+      The returned array is one dimensional unless axis is not ``None``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input tensor
+        axis : int, optional
+            Axis along which a logic OR reduction is performed. With ``axis=None``, the logical OR is performed over all
+            dimensions of the array.
+        out : DNDarray, optional
+            Alternative output tensor in which to place the result. It must have the same shape as the expected output.
+            The output is an array with ``datatype=bool``.
+        keepdims : bool, optional
+            If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+            With this option, the result will broadcast correctly against the original array.
+
+        Examples
+        --------
+        >>> x = ht.float32([[0.3, 0, 0.5]])
+        >>> x.any()
+        DNDarray([True], dtype=ht.bool, device=cpu:0, split=None)
+        >>> x.any(axis=0)
+        DNDarray([ True, False,  True], dtype=ht.bool, device=cpu:0, split=None)
+        >>> x.any(axis=1)
+        DNDarray([True], dtype=ht.bool, device=cpu:0, split=None)
+        >>> y = ht.int32([[0, 0, 1], [0, 0, 0]])
+        >>> res = ht.zeros(3, dtype=ht.bool)
+        >>> y.any(axis=0, out=res)
+        DNDarray([False, False,  True], dtype=ht.bool, device=cpu:0, split=None)
+        >>> res
+        DNDarray([False, False,  True], dtype=ht.bool, device=cpu:0, split=None)
+
+    `argmax(self, axis=None, out=None, **kwargs)`
+    : Returns an array of the indices of the maximum values along an axis. It has the same shape as ``x.shape`` with the
+      dimension along axis removed.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input array.
+        axis : int, optional
+            By default, the index is into the flattened array, otherwise along the specified axis.
+        out : DNDarray, optional
+            If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype.
+        **kwargs
+            Extra keyword arguments
+
+        Examples
+        --------
+        >>> a = ht.random.randn(3, 3)
+        >>> a
+        DNDarray([[ 1.0661,  0.7036, -2.0908],
+                  [-0.7534, -0.4986, -0.7751],
+                  [-0.4815,  1.9436,  0.6400]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> ht.argmax(a)
+        DNDarray([7], dtype=ht.int64, device=cpu:0, split=None)
+        >>> ht.argmax(a, axis=0)
+        DNDarray([0, 2, 2], dtype=ht.int64, device=cpu:0, split=None)
+        >>> ht.argmax(a, axis=1)
+        DNDarray([0, 1, 1], dtype=ht.int64, device=cpu:0, split=None)
+
+    `argmin(self, axis=None, out=None, **kwargs)`
+    : Returns an array of the indices of the minimum values along an axis. It has the same shape as ``x.shape`` with the
+      dimension along axis removed.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input array.
+        axis : int, optional
+            By default, the index is into the flattened array, otherwise along the specified axis.
+        out : DNDarray, optional
+            If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype.
+        **kwargs
+            Extra keyword arguments
+
+        Examples
+        --------
+        >>> a = ht.random.randn(3, 3)
+        >>> a
+        DNDarray([[ 1.0661,  0.7036, -2.0908],
+                  [-0.7534, -0.4986, -0.7751],
+                  [-0.4815,  1.9436,  0.6400]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> ht.argmin(a)
+        DNDarray([2], dtype=ht.int64, device=cpu:0, split=None)
+        >>> ht.argmin(a, axis=0)
+        DNDarray([1, 1, 0], dtype=ht.int64, device=cpu:0, split=None)
+        >>> ht.argmin(a, axis=1)
+        DNDarray([2, 2, 0], dtype=ht.int64, device=cpu:0, split=None)
+
+    `asin(self, out=None)`
+    : Compute the trigonometric arcsin, element-wise.
+      Result is a ``DNDarray`` of the same shape as ``x``.
+      Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arcsin`` is a reference to it.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The array for which to compute the inverse sine.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+
+        Examples
+        --------
+        >>> ht.arcsin(ht.array([-1.0, -0.0, 0.83]))
+        DNDarray([-1.5708, -0.0000,  0.9791], dtype=ht.float32, device=cpu:0, split=None)
+
+    `astype(self, dtype, copy=True) ‑> heat.core.dndarray.DNDarray`
+    : Returns a casted version of this array.
+      The casted array is a new array of the same shape but with the given type. If ``copy`` is
+      ``False``, the cast is performed in-place and this array is returned.
+
+        Parameters
+        ----------
+        dtype : datatype
+            Heat type to which the array is cast
+        copy : bool, optional
+            By default the operation returns a copy of this array. If copy is set to ``False`` the cast is performed
+            in-place and this array is returned
+
+    `atan(self, out=None)`
+    : Compute the trigonometric arctan, element-wise.
+      Result is a ``DNDarray`` of the same shape as ``x``.
+      If ``out`` was provided, ``arctan`` is a reference to it.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The array for which to compute the inverse tangent.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+
+        Examples
+        --------
+        >>> ht.arctan(ht.arange(-6, 7, 2))
+        DNDarray([-1.4056, -1.3258, -1.1071,  0.0000,  1.1071,  1.3258,  1.4056], dtype=ht.float32, device=cpu:0, split=None)
+
+    `atan2(self, x2)`
+    : Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
+      Returns a new ``DNDarray`` with the signed angles in radians between vector (``x2``,``x1``) and vector (1,0)
+
+        Parameters
+        ----------
+        x1 : DNDarray
+            y-coordinates
+        x2 : DNDarray
+            x-coordinates. If ``x1.shape!=x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output).
+
+        Examples
+        --------
+        >>> x = ht.array([-1, +1, +1, -1])
+        >>> y = ht.array([-1, -1, +1, +1])
+        >>> ht.arctan2(y, x) * 180 / ht.pi
+        DNDarray([-135.0000,  -45.0000,   45.0000,  135.0000], dtype=ht.float64, device=cpu:0, split=None)
+
+    `average(self, axis=None, weights=None, returned=False)`
+    : Compute the weighted average along the specified axis.
+
+      If ``returned=True``, return a tuple with the average as the first element and the sum
+      of the weights as the second element. ``sum_of_weights`` is of the same type as ``average``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Array containing data to be averaged.
+        axis : None or int or Tuple[int,...], optional
+            Axis or axes along which to average ``x``. The default,
+            ``axis=None``, will average over all of the elements of the input array.
+            If axis is negative it counts from the last to the first axis.
+            Support for ``axis`` given as a tuple of ints is not yet implemented (see Issue #351).
+        weights : DNDarray, optional
+            An array of weights associated with the values in ``x``. Each value in
+            ``x`` contributes to the average according to its associated weight.
+            The weights array can either be 1D (in which case its length must be
+            the size of ``x`` along the given axis) or of the same shape as ``x``.
+            If ``weights=None``, then all data in ``x`` are assumed to have a
+            weight equal to one; the result is then equivalent to :func:`mean`.
+        returned : bool, optional
+            If ``True``, the tuple ``(average, sum_of_weights)``
+            is returned, otherwise only the average is returned.
+            If ``weights=None``, ``sum_of_weights`` is equivalent to the number of
+            elements over which the average is taken.
+
+        Raises
+        ------
+        ZeroDivisionError
+            When all weights along axis are zero.
+        TypeError
+            When the length of 1D weights is not the same as the shape of ``x``
+            along axis.
+
+        Examples
+        --------
+        >>> data = ht.arange(1, 5, dtype=float)
+        >>> data
+        DNDarray([1., 2., 3., 4.], dtype=ht.float32, device=cpu:0, split=None)
+        >>> ht.average(data)
+        DNDarray(2.5000, dtype=ht.float32, device=cpu:0, split=None)
+        >>> ht.average(ht.arange(1, 11, dtype=float), weights=ht.arange(10, 0, -1))
+        DNDarray([4.], dtype=ht.float64, device=cpu:0, split=None)
+        >>> data = ht.array([[0, 1], [2, 3], [4, 5]], dtype=float, split=1)
+        >>> weights = ht.array([1.0 / 4, 3.0 / 4])
+        >>> ht.average(data, axis=1, weights=weights)
+        DNDarray([0.7500, 2.7500, 4.7500], dtype=ht.float32, device=cpu:0, split=None)
+        >>> ht.average(data, weights=weights)
+        Traceback (most recent call last):
+            ...
+        TypeError: Axis must be specified when shapes of x and weights differ.
+
+    `balance(self, copy=False)`
+    : Out-of-place balance function. More information on the meaning of balance can be found in
+      :func:`DNDarray.balance_`.
+
+        Parameters
+        ----------
+        array : DNDarray
+            the DNDarray to be balanced
+        copy : bool, optional
+            Whether the DNDarray should be copied before being balanced. If ``False`` (default), the
+            original array is balanced and returned. If ``True``, a balanced copy of the array is
+            returned. Default: False
+
+    `balance_(self) ‑> heat.core.dndarray.DNDarray`
+    : Function for balancing a :class:`DNDarray` between all nodes. To determine if this is needed use the :func:`is_balanced()` function.
+      If the ``DNDarray`` is already balanced this function will do nothing. This function modifies the ``DNDarray``
+      itself and will not return anything.
+
+        Examples
+        --------
+        >>> a = ht.zeros((10, 2), split=0)
+        >>> a[:, 0] = ht.arange(10)
+        >>> b = a[3:]
+        [0/2] tensor([[3., 0.]])
+        [1/2] tensor([[4., 0.],
+                      [5., 0.],
+                      [6., 0.]])
+        [2/2] tensor([[7., 0.],
+                      [8., 0.],
+                      [9., 0.]])
+        >>> print(b.gshape, b.lshape)
+        [0/2] (7, 2) (1, 2)
+        [1/2] (7, 2) (3, 2)
+        [2/2] (7, 2) (3, 2)
+        >>> b.balance_()
+        >>> b
+        [0/2] tensor([[3., 0.],
+                      [4., 0.],
+                      [5., 0.]])
+        [1/2] tensor([[6., 0.],
+                      [7., 0.]])
+        [2/2] tensor([[8., 0.],
+                      [9., 0.]])
+        >>> print(b.gshape, b.lshape)
+        [0/2] (7, 2) (3, 2)
+        [1/2] (7, 2) (2, 2)
+        [2/2] (7, 2) (2, 2)
+
+    `bitwise_and_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+    : Bitwise AND of two operands computed element-wise and in-place.
+      Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the
+      bitwise AND with the corresponding element(s) of the second operand (scalar or
+      :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by
+      the results of element-wise bitwise AND of `t1` and `t2`.
+      Can be called as a DNDarray method or with the symbol `&=`. Only integer and boolean types are
+      handled.
+ + Parameters + ---------- + t1: DNDarray + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(13) + >>> T2 = ht.array(17) + >>> T1 &= T2 + >>> T1 + DNDarray(1, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 + DNDarray(17, dtype=ht.int64, device=cpu:0, split=None) + >>> T3 = ht.array(22) + >>> T2.bitwise_and_(T3) + DNDarray(16, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 + DNDarray(16, dtype=ht.int64, device=cpu:0, split=None) + >>> T4 = ht.array([14, 3]) + >>> s = 29 + >>> T4 &= s + >>> T4 + DNDarray([12, 1], dtype=ht.int64, device=cpu:0, split=None) + >>> s + 29 + >>> T5 = ht.array([2, 5, 255]) + >>> T6 = ht.array([3, 14, 16]) + >>> T5 &= T6 + >>> T5 + DNDarray([ 2, 4, 16], dtype=ht.int64, device=cpu:0, split=None) + >>> T7 = ht.array([True, True]) + >>> T8 = ht.array([False, True]) + >>> T7 &= T8 + >>> T7 + DNDarray([False, True], dtype=ht.bool, device=cpu:0, split=None) + + `bitwise_not_(t: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Computes the bitwise NOT of the given input :class:`~heat.core.dndarray.DNDarray` in-place. The + elements of the input array must be of integer or Boolean types. For boolean arrays, it computes + the logical NOT. + Can only be called as a DNDarray method. `bitwise_not_` is an alias for `invert_`. + + Parameters + ---------- + t: DNDarray + The input array to invert. Must be of integral or Boolean types + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(13, dtype=ht.uint8) + >>> T1.invert_() + DNDarray(242, dtype=ht.uint8, device=cpu:0, split=None) + >>> T1 + DNDarray(242, dtype=ht.uint8, device=cpu:0, split=None) + >>> T2 = ht.array([-1, -2, 3], dtype=ht.int8) + >>> T2.invert_() + DNDarray([ 0, 1, -4], dtype=ht.int8, device=cpu:0, split=None) + >>> T2 + DNDarray([ 0, 1, -4], dtype=ht.int8, device=cpu:0, split=None) + >>> T3 = ht.array([[True, True], [False, True]]) + >>> T3.invert_() + DNDarray([[False, False], + [ True, False]], dtype=ht.bool, device=cpu:0, split=None) + >>> T3 + DNDarray([[False, False], + [ True, False]], dtype=ht.bool, device=cpu:0, split=None) + + `bitwise_or_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Bitwise OR of two operands computed element-wise and in-place. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the + bitwise OR with the corresponding element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise bitwise OR of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `|=`. Only integer and boolean types are + handled. 
+ + Parameters + ---------- + t1: DNDarray + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(13) + >>> T2 = ht.array(16) + >>> T1 |= T2 + >>> T1 + DNDarray(29, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 + DNDarray(16, dtype=ht.int64, device=cpu:0, split=None) + >>> T3 = ht.array([33, 4]) + >>> s = 1 + >>> T3.bitwise_or_(s) + DNDarray([33, 5], dtype=ht.int64, device=cpu:0, split=None) + >>> T3 + DNDarray([33, 5], dtype=ht.int64, device=cpu:0, split=None) + >>> s + 1 + >>> T4 = ht.array([2, 5, 255]) + >>> T5 = ht.array([4, 4, 4]) + >>> T4 |= T5 + >>> T4 + DNDarray([ 6, 5, 255], dtype=ht.int64, device=cpu:0, split=None) + >>> T6 = ht.array([True, True]) + >>> T7 = ht.array([False, True]) + >>> T6 |= T7 + >>> T6 + DNDarray([True, True], dtype=ht.bool, device=cpu:0, split=None) + + `bitwise_xor_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Bitwise XOR of two operands computed element-wise and in-place. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the + bitwise XOR with the corresponding element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise bitwise XOR of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `^=`. Only integer and boolean types are + handled. + + Parameters + ---------- + t1: DNDarray + The first operand involved in the operation + t2: DNDarray or scalar + The second operand involved in the operation + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(13) + >>> T2 = ht.array(17) + >>> T1 ^= T2 + >>> T1 + DNDarray(28, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 + DNDarray(17, dtype=ht.int64, device=cpu:0, split=None) + >>> T3 = ht.array([31, 3]) + >>> s = 5 + >>> T3.bitwise_xor_(s) + DNDarray([26, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> T3 + DNDarray([26, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> s + 5 + >>> T4 = ht.array([31, 3, 255]) + >>> T5 = ht.array([5, 6, 4]) + >>> T4 ^= T5 + >>> T4 + DNDarray([ 26, 5, 251], dtype=ht.int64, device=cpu:0, split=None) + >>> T6 = ht.array([True, True]) + >>> T7 = ht.array([False, True]) + >>> T6 ^= T7 + >>> T6 + DNDarray([ True, False], dtype=ht.bool, device=cpu:0, split=None) + + `ceil(self, out=None)` + : Return the ceil of the input, element-wise. Result is a :class:`~heat.core.dndarray.DNDarray` of the same shape as + ``x``. 
+      The ceil of the scalar ``x`` is the smallest integer i, such that ``i>=x``. It is often denoted as
+      :math:`\lceil x \rceil`.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The value for which to compute the ceiled values.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+
+        Examples
+        --------
+        >>> import heat as ht
+        >>> ht.ceil(ht.arange(-2.0, 2.0, 0.4))
+        DNDarray([-2., -1., -1., -0., -0.,  0.,  1.,  1.,  2.,  2.], dtype=ht.float32, device=cpu:0, split=None)
+
+    `clip(self, a_min, a_max, out=None)`
+    : Returns a :class:`~heat.core.dndarray.DNDarray` with the elements of this array, but where values
+      ``< a_min`` are replaced with ``a_min``, and those ``> a_max`` with ``a_max``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Array containing elements to clip.
+        a_min : scalar or None
+            Minimum value. If ``None``, clipping is not performed on the lower interval edge. Not more than one of
+            ``a_min`` and ``a_max`` may be ``None``.
+        a_max : scalar or None
+            Maximum value. If ``None``, clipping is not performed on the upper interval edge. Not more than one of
+            ``a_min`` and ``a_max`` may be ``None``.
+        out : DNDarray, optional
+            The results will be placed in this array. It may be the input array for in-place clipping. ``out`` must be of
+            the right shape to hold the output. Its type is preserved.
+
+        Raises
+        ------
+        ValueError
+            If both ``a_min`` and ``a_max`` are ``None``.
+
+    `collect(arr, target_rank=0)`
+    : A function collecting a distributed DNDarray to one rank, chosen by the `target_rank` variable.
+      It is a specific case of the ``redistribute_`` method.
+
+        Parameters
+        ----------
+        arr : DNDarray
+            The DNDarray to be collected.
+        target_rank : int, optional
+            The rank to which the DNDarray will be collected. Default: 0.
+
+        Raises
+        ------
+        TypeError
+            If the target rank is not an integer.
+        ValueError
+            If the target rank is out of bounds.
+
+        Examples
+        --------
+        >>> st = ht.ones((50, 81, 67), split=2)
+        >>> print(st.lshape)
+        [0/2] (50, 81, 23)
+        [1/2] (50, 81, 22)
+        [2/2] (50, 81, 22)
+        >>> collected_st = collect(st)
+        >>> print(collected_st.lshape)
+        [0/2] (50, 81, 67)
+        [1/2] (50, 81, 0)
+        [2/2] (50, 81, 0)
+        >>> collected_st = collect(collected_st, 1)
+        >>> print(collected_st.lshape)
+        [0/2] (50, 81, 0)
+        [1/2] (50, 81, 67)
+        [2/2] (50, 81, 0)
+
+    `collect_(self, target_rank: Optional[int] = 0) ‑> None`
+    : A method collecting a distributed DNDarray to one MPI rank, chosen by the `target_rank` variable.
+      It is a specific case of the ``redistribute_`` method.
+
+        Parameters
+        ----------
+        target_rank : int, optional
+            The rank to which the DNDarray will be collected. Default: 0.
+
+        Raises
+        ------
+        TypeError
+            If the target rank is not an integer.
+        ValueError
+            If the target rank is out of bounds.
+
+        Examples
+        --------
+        >>> st = ht.ones((50, 81, 67), split=2)
+        >>> print(st.lshape)
+        [0/2] (50, 81, 23)
+        [1/2] (50, 81, 22)
+        [2/2] (50, 81, 22)
+        >>> st.collect_()
+        >>> print(st.lshape)
+        [0/2] (50, 81, 67)
+        [1/2] (50, 81, 0)
+        [2/2] (50, 81, 0)
+        >>> st.collect_(1)
+        >>> print(st.lshape)
+        [0/2] (50, 81, 0)
+        [1/2] (50, 81, 67)
+        [2/2] (50, 81, 0)
+
+    `conj(self, out=None)`
+    : Compute the complex conjugate, element-wise.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input array for which to compute the complex conjugate.
+        out : DNDarray, optional
+            Output array with the complex conjugates.
+
+        Examples
+        --------
+        >>> ht.conjugate(ht.array([1.0, 1.0j, 1 + 1j, -2 + 2j, 3 - 3j]))
+        DNDarray([ (1-0j),     -1j,  (1-1j), (-2-2j),  (3+3j)], dtype=ht.complex64, device=cpu:0, split=None)
+
+    `copy(self)`
+    : Return a deep copy of the given object.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input array to be copied.
+
+        Examples
+        --------
+        >>> a = ht.array([1, 2, 3])
+        >>> b = ht.copy(a)
+        >>> b
+        DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+        >>> a[0] = 4
+        >>> a
+        DNDarray([4, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+        >>> b
+        DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+
+    `copysign_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+    : In-place version of the element-wise operation 'copysign'.
+      The magnitudes of the element(s) of 't1' are kept but the sign(s) are adopted from the
+      element(s) of 't2'.
+      Can only be called as a DNDarray method.
+
+        Parameters
+        ----------
+        t1 : DNDarray
+            The input array.
+            Entries must be of type float.
+        t2 : DNDarray or scalar
+            value(s) whose signbit(s) are applied to the magnitudes in 't1'
+
+        Raises
+        ------
+        ValueError
+            If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+            underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+        TypeError
+            At the moment, the operation only works for DNDarrays whose elements are floats and are not
+            complex. This is due to the fact that it relies on the PyTorch function 'copysign_', which
+            does not work if the entries of 't1' are integers. The case when 't1' contains floats and
+            't2' contains integers works in PyTorch but has not been implemented properly in Heat yet.
+
+        Examples
+        --------
+        >>> import heat as ht
+        >>> T1 = ht.array([3.0, 2.0, -8.0, -2.0, 4.0])
+        >>> s = 2.0
+        >>> T1.copysign_(s)
+        DNDarray([3., 2., 8., 2., 4.], dtype=ht.float32, device=cpu:0, split=None)
+        >>> T1
+        DNDarray([3., 2., 8., 2., 4.], dtype=ht.float32, device=cpu:0, split=None)
+        >>> s
+        2.0
+        >>> T2 = ht.array([[1.0, -1.0], [1.0, -1.0]])
+        >>> T3 = ht.array([-5.0, 2.0])
+        >>> T2.copysign_(T3)
+        DNDarray([[-1.,  1.],
+                  [-1.,  1.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> T2
+        DNDarray([[-1.,  1.],
+                  [-1.,  1.]], dtype=ht.float32, device=cpu:0, split=None)
+        >>> T3
+        DNDarray([-5.,  2.], dtype=ht.float32, device=cpu:0, split=None)
+
+    `cos(self, out=None)`
+    : Return the trigonometric cosine, element-wise.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The value for which to compute the trigonometric cosine.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+
+        Examples
+        --------
+        >>> ht.cos(ht.arange(-6, 7, 2))
+        DNDarray([ 0.9602, -0.6536, -0.4161,  1.0000, -0.4161, -0.6536,  0.9602], dtype=ht.float32, device=cpu:0, split=None)
+
+    `cosh(self, out=None)`
+    : Compute the hyperbolic cosine, element-wise.
+      Result is a ``DNDarray`` of the same shape as ``x``.
+      If ``out`` was provided, ``cosh`` is a reference to it.
+
+        Parameters
+        ----------
+        x : DNDarray
+            The value for which to compute the hyperbolic cosine.
+        out : DNDarray, optional
+            A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+            or set to ``None``, a fresh array is allocated.
+ + Examples + -------- + >>> ht.cosh(ht.arange(-6, 7, 2)) + DNDarray([201.7156, 27.3082, 3.7622, 1.0000, 3.7622, 27.3082, 201.7156], dtype=ht.float32, device=cpu:0, split=None) + + `counts_displs(self) ‑> Tuple[Tuple[int], Tuple[int]]` + : Returns actual counts (number of items per process) and displacements (offsets) of the DNDarray. + Does not assume load balance. + + `cpu(self) ‑> heat.core.dndarray.DNDarray` + : Returns a copy of this object in main memory. If this object is already in main memory, then no copy is + performed and the original object is returned. + + `create_lshape_map(self, force_check: bool = False) ‑> torch.Tensor` + : Generate a 'map' of the lshapes of the data on all processes. + Units are ``(process rank, lshape)`` + + Parameters + ---------- + force_check : bool, optional + if False (default) and the lshape map has already been created, use the previous + result. Otherwise, create the lshape_map + + `create_partition_interface(self)` + : Create a partition interface in line with the DPPY proposal. This is subject to change. + The intention of this to facilitate the usage of a general format for the referencing of + distributed datasets. + + An example of the output and shape is shown below. + + __partitioned__ = { + 'shape': (27, 3, 2), + 'partition_tiling': (4, 1, 1), + 'partitions': { + (0, 0, 0): { + 'start': (0, 0, 0), + 'shape': (7, 3, 2), + 'data': tensor([...], dtype=torch.int32), + 'location': [0], + 'dtype': torch.int32, + 'device': 'cpu' + }, + (1, 0, 0): { + 'start': (7, 0, 0), + 'shape': (7, 3, 2), + 'data': None, + 'location': [1], + 'dtype': torch.int32, + 'device': 'cpu' + }, + (2, 0, 0): { + 'start': (14, 0, 0), + 'shape': (7, 3, 2), + 'data': None, + 'location': [2], + 'dtype': torch.int32, + 'device': 'cpu' + }, + (3, 0, 0): { + 'start': (21, 0, 0), + 'shape': (6, 3, 2), + 'data': None, + 'location': [3], + 'dtype': torch.int32, + 'device': 'cpu' + } + }, + 'locals': [(rank, 0, 0)], + 'get': lambda x: x, + } + + Returns + ------- + dictionary containing the partition interface as shown above. + + `cumprod_(t: DNDarray, axis: int) ‑> heat.core.dndarray.DNDarray` + : Return the cumulative product of elements along a given axis in-place. + Can only be called as a DNDarray method. + + Parameters + ---------- + t: DNDarray + Input array. + axis: int + Axis along which the cumulative product is computed. + + Examples + -------- + >>> import heat as ht + >>> T = ht.full((3, 3), 2) + >>> T.cumprod_(0) + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T.cumproduct_(1) + DNDarray([[ 2., 4., 8.], + [ 4., 16., 64.], + [ 8., 64., 512.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T + DNDarray([[ 2., 4., 8.], + [ 4., 16., 64.], + [ 8., 64., 512.]], dtype=ht.float32, device=cpu:0, split=None) + + `cumproduct_(t: DNDarray, axis: int) ‑> heat.core.dndarray.DNDarray` + : Return the cumulative product of elements along a given axis in-place. + Can only be called as a DNDarray method. + + Parameters + ---------- + t: DNDarray + Input array. + axis: int + Axis along which the cumulative product is computed. 
+ + Examples + -------- + >>> import heat as ht + >>> T = ht.full((3, 3), 2) + >>> T.cumprod_(0) + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T + DNDarray([[2., 2., 2.], + [4., 4., 4.], + [8., 8., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T.cumproduct_(1) + DNDarray([[ 2., 4., 8.], + [ 4., 16., 64.], + [ 8., 64., 512.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T + DNDarray([[ 2., 4., 8.], + [ 4., 16., 64.], + [ 8., 64., 512.]], dtype=ht.float32, device=cpu:0, split=None) + + `cumsum_(t: DNDarray, axis: int) ‑> heat.core.dndarray.DNDarray` + : Return the cumulative sum of the elements along a given axis in-place. + Can only be called as a DNDarray method. + + Parameters + ---------- + t: DNDarray + Input array. + axis: int + Axis along which the cumulative sum is computed. + + Examples + -------- + >>> import heat as ht + >>> T = ht.ones((3, 3)) + >>> T.cumsum_(0) + DNDarray([[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T + DNDarray([[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]], dtype=ht.float32, device=cpu:0, split=None) + + `div_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place true division of values of two operands. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise divides its + element(s) by the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise division of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `/=`. `divide_` is an alias for `div_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided. + t2: DNDarray or scalar + The second operand by whose values is divided. + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Example + --------- + >>> import heat as ht + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> T1 /= T2 + >>> T1 + DNDarray([[0.5000, 1.0000], + [1.5000, 2.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[2., 2.], + [2., 2.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> T2.div_(s) + DNDarray([[1., 1.], + [1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1., 1.], + [1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s + 2.0 + >>> v = ht.int32([-1, 2]) + >>> T2.divide_(v) + DNDarray([[-1.0000, 0.5000], + [-1.0000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[-1.0000, 0.5000], + [-1.0000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `divide_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place true division of values of two operands. 
+ Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise divides its + element(s) by the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise division of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `/=`. `divide_` is an alias for `div_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided. + t2: DNDarray or scalar + The second operand by whose values is divided. + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Example + --------- + >>> import heat as ht + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> T1 /= T2 + >>> T1 + DNDarray([[0.5000, 1.0000], + [1.5000, 2.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[2., 2.], + [2., 2.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> T2.div_(s) + DNDarray([[1., 1.], + [1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1., 1.], + [1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s + 2.0 + >>> v = ht.int32([-1, 2]) + >>> T2.divide_(v) + DNDarray([[-1.0000, 0.5000], + [-1.0000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[-1.0000, 0.5000], + [-1.0000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `exp(self, out=None)` + : Calculate the exponential of all elements in the input array. + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + + Parameters + ---------- + x : DNDarray + The array for which to compute the exponential. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.exp(ht.arange(5)) + DNDarray([ 1.0000, 2.7183, 7.3891, 20.0855, 54.5981], dtype=ht.float32, device=cpu:0, split=None) + + `exp2(self, out=None)` + : Calculate the exponential of two of all elements in the input array (:math:`2^x`). + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + + Parameters + ---------- + x : DNDarray + The array for which to compute the exponential of two. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.exp2(ht.arange(5)) + DNDarray([ 1., 2., 4., 8., 16.], dtype=ht.float32, device=cpu:0, split=None) + + `expand_dims(self, axis)` + : Expand the shape of an array. + Insert a new axis that will appear at the axis position in the expanded array shape. + + Parameters + ---------- + a : DNDarray + Input array to be expanded. + axis : int + Position in the expanded axes where the new axis is placed. + + Raises + ------ + ValueError + If `axis` is not consistent with the available dimensions. 
+ + Examples + -------- + >>> x = ht.array([1, 2]) + >>> x.shape + (2,) + >>> y = ht.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + >>> y = ht.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + `expm1(self, out=None)` + : Calculate :math:`exp(x) - 1` for all elements in the array. + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + + Parameters + ---------- + x : DNDarray + The array for which to compute the exponential. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.expm1(ht.arange(5)) + 1.0 + DNDarray([ 1.0000, 2.7183, 7.3891, 20.0855, 54.5981], dtype=ht.float64, device=cpu:0, split=None) + + `fabs(self, out=None)` + : Calculate the absolute value element-wise and return floating-point class:`~heat.core.dndarray.DNDarray`. + This function exists besides ``abs==absolute`` since it will be needed in case complex numbers will be introduced + in the future. + + Parameters + ---------- + x : DNDarray + The array for which the compute the absolute value. + out : DNDarray, optional + A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. + If not provided or ``None``, a freshly-allocated array is returned. + + `fill_diagonal(self, value: float) ‑> heat.core.dndarray.DNDarray` + : Fill the main diagonal of a 2D :class:`DNDarray`. + This function modifies the input tensor in-place, and returns the input array. + + Parameters + ---------- + value : float + The value to be placed in the ``DNDarrays`` main diagonal + + `flatten(self)` + : Flattens an array into one dimension. + + Parameters + ---------- + a : DNDarray + Array to collapse + + Warning + ---------- + If `a.split>0`, the array must be redistributed along the first axis (see :func:`resplit`). + + + See Also + -------- + :func:`ravel` + + Examples + -------- + >>> a = ht.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + >>> ht.flatten(a) + DNDarray([1, 2, 3, 4, 5, 6, 7, 8], dtype=ht.int64, device=cpu:0, split=None) + + `floor(self, out=None)` + : Return the floor of the input, element-wise. + The floor of the scalar ``x`` is the largest integer i, such that ``i<=x``. + It is often denoted as :math:`\lfloor x \rfloor`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the floored values. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to ``None``, a fresh :class:`~heat.core.dndarray.DNDarray` is allocated. + + Examples + -------- + >>> import heat as ht + >>> ht.floor(ht.arange(-2.0, 2.0, 0.4)) + DNDarray([-2., -2., -2., -1., -1., 0., 0., 0., 1., 1.], dtype=ht.float32, device=cpu:0, split=None) + + `floor_divide_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place floor division of values of two operands. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise divides its + element(s) by the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, then rounds down the result to the next + integer, i.e. the element(s) of `t1` are overwritten by the results of element-wise floor + division of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `//=`. 
`floor_divide_` is an alias for + `floordiv_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided + t2: DNDarray or scalar + The second operand by whose values is divided + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.float32([[1.7, 2.0], [1.9, 4.2]]) + >>> s = 1 + >>> T1 //= s + >>> T1 + DNDarray([[1., 2.], + [1., 4.]], dtype=ht.float64, device=cpu:0, split=None) + >>> s + 1 + >>> T2 = ht.float32([[1.5, 2.5], [1.0, 1.3]]) + >>> T1.floordiv_(T2) + DNDarray([[0., 0.], + [1., 3.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([[0., 0.], + [1., 3.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1.5000, 2.5000], + [1.0000, 1.3000]], dtype=ht.float32, device=cpu:0, split=None) + >>> v = ht.int32([-1, 2]) + >>> T1.floor_divide_(v) + DNDarray([[-0., 0.], + [-1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([[-0., 0.], + [-1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `floordiv_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place floor division of values of two operands. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise divides its + element(s) by the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, then rounds down the result to the next + integer, i.e. the element(s) of `t1` are overwritten by the results of element-wise floor + division of `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `//=`. `floor_divide_` is an alias for + `floordiv_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided + t2: DNDarray or scalar + The second operand by whose values is divided + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. 
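+
+    Notes
+    -----
+    "Rounding down" means rounding toward negative infinity, not toward zero;
+    a minimal sketch with one negative operand (values illustrative):
+
+    >>> ht.array([-5.0]).floordiv_(2)
+    DNDarray([-3.], dtype=ht.float32, device=cpu:0, split=None)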
+ + Examples + -------- + >>> import heat as ht + >>> T1 = ht.float32([[1.7, 2.0], [1.9, 4.2]]) + >>> s = 1 + >>> T1 //= s + >>> T1 + DNDarray([[1., 2.], + [1., 4.]], dtype=ht.float64, device=cpu:0, split=None) + >>> s + 1 + >>> T2 = ht.float32([[1.5, 2.5], [1.0, 1.3]]) + >>> T1.floordiv_(T2) + DNDarray([[0., 0.], + [1., 3.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([[0., 0.], + [1., 3.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1.5000, 2.5000], + [1.0000, 1.3000]], dtype=ht.float32, device=cpu:0, split=None) + >>> v = ht.int32([-1, 2]) + >>> T1.floor_divide_(v) + DNDarray([[-0., 0.], + [-1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([[-0., 0.], + [-1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `fmod_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : In-place computation of element-wise division remainder of values of operand `t1` by values of + operand `t2` (i.e. C Library function fmod). The result has the same sign as the dividend `t1`. + Can only be called as a DNDarray method. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided + t2: DNDarray or scalar + The second operand by whose values is divided (may be floats) + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(2) + >>> T1.fmod_(T1) + >>> T1 + DNDarray(0, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 = ht.float32([[1, 2], [3, 4]]) + >>> T3 = ht.int32([[2, 2], [2, 2]]) + >>> T2.fmod_(T3) + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T3 + DNDarray([[2, 2], + [2, 2]], dtype=ht.int32, device=cpu:0, split=None) + >>> s = -3 + >>> T3.fmod_(s) + DNDarray([[2, 2], + [2, 2]], dtype=ht.int32, device=cpu:0, split=None) + >>> T3 + DNDarray([[2, 2], + [2, 2]], dtype=ht.int32, device=cpu:0, split=None) + >>> s + -3 + + `gcd_(t1: DNDarray, t2: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Returns the greatest common divisor of |t1| and |t2| element-wise and in-place. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the + greatest common divisor with the corresponding element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise gcd of `t1` and `t2`. + Can only be called as a DNDarray method. + + Parameters + ---------- + t1: DNDarray + The first input array, must be of integer type + t2: DNDarray + The second input array, must be of integer type + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. 
Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.int(ht.ones(3)) * 9 + >>> T2 = ht.arange(3) + 1 + >>> T1.gcd_(T2) + DNDarray([1, 1, 3], dtype=ht.int32, device=cpu:0, split=None) + >>> T1 + DNDarray([1, 1, 3], dtype=ht.int32, device=cpu:0, split=None) + >>> T2 + DNDarray([1, 2, 3], dtype=ht.int32, device=cpu:0, split=None) + >>> s = 2 + >>> T2.gcd_(2) + DNDarray([1, 2, 1], dtype=ht.int32, device=cpu:0, split=None) + >>> T2 + DNDarray([1, 2, 1], dtype=ht.int32, device=cpu:0, split=None) + >>> s + 2 + + `get_halo(self, halo_size: int, prev: bool = True, next: bool = True) ‑> torch.Tensor` + : Fetch halos of size ``halo_size`` from neighboring ranks and save them in ``self.halo_next/self.halo_prev``. + + Parameters + ---------- + halo_size : int + Size of the halo. + prev : bool, optional + If True, fetch the halo from the previous rank. Default: True. + next : bool, optional + If True, fetch the halo from the next rank. Default: True. + + `hypot_(t1: DNDarray, t2: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Given the 'legs' of a right triangle, return its hypotenuse in-place of the first input. + Equivalent to :math:`sqrt(a^2 + b^2)`, element-wise. + Can only be called as a DNDarray method. + + Parameters + ---------- + t1: DNDarray + The first input array + t2: DNDarray + the second input array + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array([1.0, 3.0, 3.0]) + >>> T2 = ht.array(2.0) + >>> T1.hypot_(T2) + DNDarray([2.2361, 3.6056, 3.6056], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([2.2361, 3.6056, 3.6056], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray(2., dtype=ht.float32, device=cpu:0, split=None) + + `invert_(t: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Computes the bitwise NOT of the given input :class:`~heat.core.dndarray.DNDarray` in-place. The + elements of the input array must be of integer or Boolean types. For boolean arrays, it computes + the logical NOT. + Can only be called as a DNDarray method. `bitwise_not_` is an alias for `invert_`. + + Parameters + ---------- + t: DNDarray + The input array to invert. 
Must be of integral or Boolean types.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> T1 = ht.array(13, dtype=ht.uint8)
+    >>> T1.invert_()
+    DNDarray(242, dtype=ht.uint8, device=cpu:0, split=None)
+    >>> T1
+    DNDarray(242, dtype=ht.uint8, device=cpu:0, split=None)
+    >>> T2 = ht.array([-1, -2, 3], dtype=ht.int8)
+    >>> T2.invert_()
+    DNDarray([ 0,  1, -4], dtype=ht.int8, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([ 0,  1, -4], dtype=ht.int8, device=cpu:0, split=None)
+    >>> T3 = ht.array([[True, True], [False, True]])
+    >>> T3.invert_()
+    DNDarray([[False, False],
+              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> T3
+    DNDarray([[False, False],
+              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
+
+`is_balanced(self, force_check: bool = False) ‑> bool`
+:   Determine if ``self`` is distributed evenly (or as evenly as possible) across all processes.
+    This is equivalent to returning ``self.balanced``. If no information
+    is available (``self.balanced = None``), the balanced status will be
+    assessed via collective communication.
+
+    Parameters
+    ----------
+    force_check : bool, optional
+        If True, the balanced status of the ``DNDarray`` will be assessed via
+        collective communication in any case.
+
+`is_distributed(self) ‑> bool`
+:   Determines whether the data of this ``DNDarray`` is distributed across multiple processes.
+
+`isclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False)`
+:   Returns a boolean :class:`~heat.core.dndarray.DNDarray`, with elements ``True`` where ``x`` and ``y`` are equal
+    within the given tolerance. If both ``x`` and ``y`` are scalars, returns a single boolean value.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array to compare.
+    y : DNDarray
+        Input array to compare.
+    rtol : float
+        The relative tolerance parameter.
+    atol : float
+        The absolute tolerance parameter.
+    equal_nan : bool
+        Whether to compare NaN’s as equal. If ``True``, NaN’s in x will be considered equal to NaN’s in y in the output
+        array.
+
+`item(self)`
+:   Returns the only element of a 1-element :class:`DNDarray`.
+    Mirror of the pytorch command by the same name. If the size of the ``DNDarray`` is >1 element, then a ``ValueError`` is
+    raised (by pytorch).
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.zeros((1))
+    >>> x.item()
+    0.0
+
+`kurtosis(x, axis=None, unbiased=True, Fischer=True)`
+:   Compute the kurtosis (Fisher or Pearson) of a dataset.
+
+    Kurtosis is the fourth central moment divided by the square of the variance.
+    With Fisher's definition, 3.0 is subtracted from the result so that a normal
+    distribution yields 0.0.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array containing the data.
+    axis : None or int, optional
+        Axis along which the kurtosis is computed. The default, ``axis=None``,
+        computes the kurtosis over the flattened array.
+    unbiased : bool, optional
+        If ``True`` (default), the calculation is corrected for statistical bias.
+    Fischer : bool, optional
+        If ``True`` (default), Fisher's definition is used (normal ==> 0.0).
+        If ``False``, Pearson's definition is used (normal ==> 3.0).
+
+`lcm_(t1: DNDarray, t2: Union[DNDarray, int]) ‑> heat.core.dndarray.DNDarray`
+:   Returns the lowest common multiple of |t1| and |t2| element-wise and in-place.
+    Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the
+    lowest common multiple with the corresponding element(s) of the second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by
+    the results of element-wise lcm of the absolute values of `t1` and `t2`.
+    Can only be called as a DNDarray method.
+
+    Parameters
+    ----------
+    t1: DNDarray
+        The first input array, must be of integer type
+    t2: DNDarray or scalar
+        The second input array, must be of integer type
+
+    Raises
+    ------
+    ValueError
+        If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+        underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+    TypeError
+        If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+        corresponding out-of-place operation may work, for the in-place version the requirements
+        are stricter, because the data type of `t1` does not change.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> T1 = ht.array([6, 12, 15])
+    >>> T2 = ht.array([3, 4, 5])
+    >>> T1.lcm_(T2)
+    DNDarray([ 6, 12, 15], dtype=ht.int64, device=cpu:0, split=None)
+    >>> T1
+    DNDarray([ 6, 12, 15], dtype=ht.int64, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([3, 4, 5], dtype=ht.int64, device=cpu:0, split=None)
+    >>> s = 2
+    >>> T2.lcm_(s)
+    DNDarray([ 6,  4, 10], dtype=ht.int64, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([ 6,  4, 10], dtype=ht.int64, device=cpu:0, split=None)
+    >>> s
+    2
+
+`left_shift_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+:   In-place version of `left_shift`.
+    Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise shifts the bits
+    of each element in-place that many positions to the left as the element(s) of the second operand
+    (scalar or :class:`~heat.core.dndarray.DNDarray`) indicate, i.e. 
the element(s) of `t1` are + overwritten by the results of element-wise bitwise left shift of `t1` for `t2` positions. + Can be called as a DNDarray method or with the symbol `<<=`. Only works for inputs with integer + elements. + + Parameters + ---------- + t1: DNDarray + Input array + t2: DNDarray or float + Integer number of zero bits to add + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array([1, 2, 3]) + >>> s = 1 + >>> T1.left_shift_(s) + DNDarray([2, 4, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> T1 + DNDarray([2, 4, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> s + 1 + >>> T2 = ht.array([-1, 1, 0]) + >>> T1 <<= T2 + >>> T1 + DNDarray([0, 8, 6], dtype=ht.int64, device=cpu:0, split=None) + >>> T2 + DNDarray([-1, 1, 0], dtype=ht.int64, device=cpu:0, split=None) + + `log(self, out=None)` + : Natural logarithm, element-wise. + The natural logarithm is the inverse of the exponential function, so that :math:`log(exp(x)) = x`. The natural + logarithm is logarithm in base e. Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log(ht.arange(5)) + DNDarray([ -inf, 0.0000, 0.6931, 1.0986, 1.3863], dtype=ht.float32, device=cpu:0, split=None) + + `log10(self, out=None)` + : Compute the logarithm to the base 10 (:math:`log_{10}(x)`), element-wise. + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log10(ht.arange(5)) + DNDarray([ -inf, 0.0000, 0.3010, 0.4771, 0.6021], dtype=ht.float32, device=cpu:0, split=None) + + `log1p(self, out=None)` + : Return the natural logarithm of one plus the input array, element-wise. + Result is a :class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log1p(ht.arange(5)) + DNDarray([0.0000, 0.6931, 1.0986, 1.3863, 1.6094], dtype=ht.float32, device=cpu:0, split=None) + + `log2(self, out=None)` + : Compute the logarithm to the base 2 (:math:`log_2(x)`), element-wise. 
+
+    Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+    Negative input elements are returned as :abbr:`NaN (Not a Number)`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the logarithm.
+    out : DNDarray, optional
+        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+        or set to :keyword:`None`, a fresh array is allocated.
+
+    Examples
+    --------
+    >>> ht.log2(ht.arange(5))
+    DNDarray([  -inf, 0.0000, 1.0000, 1.5850, 2.0000], dtype=ht.float32, device=cpu:0, split=None)
+
+`max(x, axis=None, out=None, keepdims=None)`
+:   Return the maximum along a given axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis or axes along which to operate. By default, flattened input is used.
+        If this is a tuple of ints, the maximum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : Tuple[DNDarray, DNDarray], optional
+        Tuple of two output arrays ``(max, max_indices)``. Must be of the same shape and buffer length as the expected
+        output.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result will broadcast correctly against the original array.
+
+    Examples
+    --------
+    >>> a = ht.float32([[1, 2, 3],
+    ...                 [4, 5, 6],
+    ...                 [7, 8, 9],
+    ...                 [10, 11, 12]])
+    >>> ht.max(a)
+    DNDarray([12.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.max(a, axis=0)
+    DNDarray([10., 11., 12.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.max(a, axis=1)
+    DNDarray([ 3.,  6.,  9., 12.], dtype=ht.float32, device=cpu:0, split=None)
+
+`mean(x, axis=None)`
+:   Calculates and returns the mean of a ``DNDarray``.
+    If an axis is given, the mean will be taken in that direction.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Values for which the mean is calculated.
+        The dtype of ``x`` must be a float.
+    axis : None or int or iterable
+        Axis along which the mean is taken. Default ``None`` calculates the mean of all data items.
+
+
+    Notes
+    -----
+    Split semantics when axis is an integer:
+
+    - if ``axis == x.split``, then ``mean(x).split = None``
+
+    - if ``axis > x.split``, then ``mean(x).split = x.split``
+
+    - if ``axis < x.split``, then ``mean(x).split = x.split - 1``
+
+    Examples
+    --------
+    >>> a = ht.random.randn(1, 3)
+    >>> a
+    DNDarray([[-0.1164,  1.0446, -0.4093]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a)
+    DNDarray(0.1730, dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[-1.0585,  0.7541, -1.1011,  0.5009],
+              [-1.3575,  0.3344,  0.4506,  0.7379],
+              [-0.4337, -0.6516, -1.3690, -0.8772],
+              [ 0.6929, -1.0989, -0.9961,  0.3547]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, 1)
+    DNDarray([-0.2262,  0.0413, -0.8328, -0.2619], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, 0)
+    DNDarray([-0.5392, -0.1655, -0.7539,  0.1791], dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[-0.1441,  0.5016,  0.8907,  0.6318],
+              [-1.1690, -1.2657,  1.4840, -0.1014],
+              [ 0.4133,  1.4168,  1.3499,  1.0340],
+              [-0.9236, -0.7535, -0.2466, -0.9703]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, (0, 1))
+    DNDarray(0.1342, dtype=ht.float32, device=cpu:0, split=None)
+
+`median(x, axis=None, keepdims=False, sketched=False, sketch_size=1.0)`
+:   Compute the median of the data along the specified axis.
+    Returns the median of the ``DNDarray`` elements.
+    By default, the "true" median of the entire data set is computed; however, the argument
+    `sketched` allows switching to a faster but less accurate version that computes
+    the median only on a random subset of the data set ("sketch").
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input tensor
+    axis : int, or None, optional
+        Axis along which the median is computed. Default is ``None``, i.e.,
+        the median is computed along a flattened version of the ``DNDarray``.
+    keepdims : bool, optional
+        If True, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result can broadcast correctly against the original array ``x``.
+    sketched : bool, optional
+        If True, the median is computed on a random subset of the data set ("sketch").
+        This is faster but less accurate. Default is False. The size of the sketch is controlled by the argument `sketch_size`.
+    sketch_size : float, optional
+        The size of the sketch as a fraction of the data set size. Default is `1./n_proc` where `n_proc` is the number of MPI processes, e.g. `n_proc = MPI.COMM_WORLD.size`. Must be in the range (0, 1).
+        Ignored if ``sketched=False``.
+
+`min(self, axis=None, out=None, keepdims=None)`
+:   Return the minimum along a given axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...]
+        Axis or axes along which to operate. By default, flattened input is used.
+        If this is a tuple of ints, the minimum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : Tuple[DNDarray, DNDarray], optional
+        Tuple of two output arrays ``(min, min_indices)``. Must be of the same shape and buffer length as the expected
+        output.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result will broadcast correctly against the original array.
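+
+    Notes
+    -----
+    As the examples below show, a full reduction (``axis=None``) returns a
+    one-element ``DNDarray`` rather than a Python scalar; :func:`item` extracts
+    the scalar value, e.g. (values illustrative):
+
+    >>> ht.min(ht.float32([[1, 2], [3, 4]])).item()
+    1.0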
+ + + Examples + -------- + >>> a = ht.float32([ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + [10, 11, 12] + ]) + >>> ht.min(a) + DNDarray([1.], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.min(a, axis=0) + DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.min(a, axis=1) + DNDarray([ 1., 4., 7., 10.], dtype=ht.float32, device=cpu:0, split=None) + + `mod_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place division remainder of values of two operands. The result has the same sign + as the divisor. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the + modulo regarding the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise `t1` modulo `t2`. + Can be called as a DNDarray method or with the symbol `%=`. `mod_` is an alias for `remainder_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided + t2: DNDarray or scalar + The second operand by whose values is divided + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(2) + >>> T1 %= T1 + >>> T1 + DNDarray(0, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 = ht.float32([[1, 2], [3, 4]]) + >>> T3 = ht.int32([[2, 2], [2, 2]]) + >>> T2.mod_(T3) + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T3 + DNDarray([[2, 2], + [2, 2]], dtype=ht.int32, device=cpu:0, split=None) + >>> s = -3 + >>> T3.remainder_(s) + DNDarray([[-1, -1], + [-1, -1]], dtype=ht.int32, device=cpu:0, split=None) + >>> T3 + DNDarray([[-1, -1], + [-1, -1]], dtype=ht.int32, device=cpu:0, split=None) + >>> s + -3 + + `modf(self, out=None)` + : Return the fractional and integral parts of a :class:`~heat.core.dndarray.DNDarray`, element-wise. + The fractional and integral parts are negative if the given number is negative. + + Parameters + ---------- + x : DNDarray + Input array + out : Tuple[DNDarray, DNDarray], optional + A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. + If not provided or ``None``, a freshly-allocated array is returned. + + Raises + ------ + TypeError + if ``x`` is not a :class:`~heat.core.dndarray.DNDarray` + TypeError + if ``out`` is not None or a tuple of :class:`~heat.core.dndarray.DNDarray` + ValueError + if ``out`` is a tuple of length unqual 2 + + Examples + -------- + >>> import heat as ht + >>> ht.modf(ht.arange(-2.0, 2.0, 0.4)) + (DNDarray([ 0.0000, -0.6000, -0.2000, -0.8000, -0.4000, 0.0000, 0.4000, 0.8000, 0.2000, 0.6000], dtype=ht.float32, device=cpu:0, split=None), DNDarray([-2., -1., -1., -0., -0., 0., 0., 0., 1., 1.], dtype=ht.float32, device=cpu:0, split=None)) + + `mul_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place multiplication of values of two operands. 
+ Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise multiplies the + element(s) of the second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) in-place, + i.e. the element(s) of `t1` are overwritten by the results of element-wise multiplication of + `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `*=`. `multiply_` is an alias for `mul_`. + + Parameters + ---------- + t1: DNDarray + The first operand involved in the multiplication. + t2: DNDarray or scalar + The second operand involved in the multiplication. + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> T1 *= T2 + >>> T1 + DNDarray([[2., 4.], + [6., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[2., 2.], + [2., 2.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> T2.mul_(s) + DNDarray([[4., 4.], + [4., 4.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[4., 4.], + [4., 4.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s + 2.0 + >>> v = ht.int32([-1, 2]) + >>> T2.multiply_(v) + DNDarray([[-4., 8.], + [-4., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[-4., 8.], + [-4., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `multiply_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place multiplication of values of two operands. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise multiplies the + element(s) of the second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) in-place, + i.e. the element(s) of `t1` are overwritten by the results of element-wise multiplication of + `t1` and `t2`. + Can be called as a DNDarray method or with the symbol `*=`. `multiply_` is an alias for `mul_`. + + Parameters + ---------- + t1: DNDarray + The first operand involved in the multiplication. + t2: DNDarray or scalar + The second operand involved in the multiplication. + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. 
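+
+    Notes
+    -----
+    ``t2`` is broadcast against ``t1`` in the usual way, but because ``t1`` is
+    overwritten in place, the broadcast result may not exceed the shape of
+    ``t1`` (see the row-vector example below).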
+ + Examples + -------- + >>> import heat as ht + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[2, 2], [2, 2]]) + >>> T1 *= T2 + >>> T1 + DNDarray([[2., 4.], + [6., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[2., 2.], + [2., 2.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = 2.0 + >>> T2.mul_(s) + DNDarray([[4., 4.], + [4., 4.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[4., 4.], + [4., 4.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s + 2.0 + >>> v = ht.int32([-1, 2]) + >>> T2.multiply_(v) + DNDarray([[-4., 8.], + [-4., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[-4., 8.], + [-4., 8.]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-1, 2], dtype=ht.int32, device=cpu:0, split=None) + + `nan_to_num_(t: DNDarray, nan: float = 0.0, posinf: float = None, neginf: float = None) ‑> heat.core.dndarray.DNDarray` + : Replaces NaNs, positive infinity values, and negative infinity values in the input 't' in-place + with the values specified by nan, posinf, and neginf, respectively. By default, NaNs are + replaced with zero, positive infinity is replaced with the greatest finite value representable + by input's dtype, and negative infinity is replaced with the least finite value representable by + input's dtype. + Can only be called as a DNDarray method. + + Parameters + ---------- + t: DNDarray + Input array. + nan: float, optional + Value to be used to replace NaNs. Default value is 0.0. + posinf: float, optional + Value to replace positive infinity values with. If None, positive infinity values are + replaced with the greatest finite value of the input's dtype. Default value is None. + neginf: float, optional + Value to replace negative infinity values with. If None, negative infinity values are + replaced with the greatest negative finite value of the input's dtype. Default value is + None. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array([float("nan"), float("inf"), -float("inf")]) + >>> T1.nan_to_num_() + DNDarray([ 0.0000e+00, 3.4028e+38, -3.4028e+38], dtype=ht.float32, device=cpu:0, split=None) + >>> T1 + DNDarray([ 0.0000e+00, 3.4028e+38, -3.4028e+38], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 = ht.array([1, 2, 3, ht.nan, ht.inf, -ht.inf]) + >>> T2.nan_to_num_(nan=0, posinf=1, neginf=-1) + DNDarray([ 1., 2., 3., 0., 1., -1.], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([ 1., 2., 3., 0., 1., -1.], dtype=ht.float32, device=cpu:0, split=None) + + `neg_(t: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place negation of `t`. + Can only be called as a DNDarray method. `negative_` is an alias for `neg_`. + + Parameter + ---------- + t: DNDarray + The input array + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array([-1, 1]) + >>> T1.neg_() + DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None) + >>> T1 + DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None) + >>> T2 = ht.array([[-1.0, 2.5], [4.0, 0.0]]) + >>> T2.neg_() + DNDarray([[ 1.0000, -2.5000], + [-4.0000, -0.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[ 1.0000, -2.5000], + [-4.0000, -0.0000]], dtype=ht.float32, device=cpu:0, split=None) + + `negative_(t: DNDarray) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place negation of `t`. + Can only be called as a DNDarray method. `negative_` is an alias for `neg_`. 
+ + Parameter + ---------- + t: DNDarray + The input array + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array([-1, 1]) + >>> T1.neg_() + DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None) + >>> T1 + DNDarray([ 1, -1], dtype=ht.int64, device=cpu:0, split=None) + >>> T2 = ht.array([[-1.0, 2.5], [4.0, 0.0]]) + >>> T2.neg_() + DNDarray([[ 1.0000, -2.5000], + [-4.0000, -0.0000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[ 1.0000, -2.5000], + [-4.0000, -0.0000]], dtype=ht.float32, device=cpu:0, split=None) + + `nonzero(self)` + : Return a :class:`~heat.core.dndarray.DNDarray` containing the indices of the elements that are non-zero.. (using ``torch.nonzero``) + If ``x`` is split then the result is split in the 0th dimension. However, this :class:`~heat.core.dndarray.DNDarray` + can be UNBALANCED as it contains the indices of the non-zero elements on each node. + Returns an array with one entry for each dimension of ``x``, containing the indices of the non-zero elements in that dimension. + The values in ``x`` are always tested and returned in row-major, C-style order. + The corresponding non-zero values can be obtained with: ``x[nonzero(x)]``. + + Parameters + ---------- + x: DNDarray + Input array + + Examples + -------- + >>> import heat as ht + >>> x = ht.array([[3, 0, 0], [0, 4, 1], [0, 6, 0]], split=0) + >>> ht.nonzero(x) + DNDarray([[0, 0], + [1, 1], + [1, 2], + [2, 1]], dtype=ht.int64, device=cpu:0, split=0) + >>> y = ht.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], split=0) + >>> y > 3 + DNDarray([[False, False, False], + [ True, True, True], + [ True, True, True]], dtype=ht.bool, device=cpu:0, split=0) + >>> ht.nonzero(y > 3) + DNDarray([[1, 0], + [1, 1], + [1, 2], + [2, 0], + [2, 1], + [2, 2]], dtype=ht.int64, device=cpu:0, split=0) + >>> y[ht.nonzero(y > 3)] + DNDarray([4, 5, 6, 7, 8, 9], dtype=ht.int64, device=cpu:0, split=0) + + `norm(self)` + : Return the vector or matrix norm of an array. + + Parameters + ---------- + x : DNDarray + Input vector + axis : int, tuple, optional + Axes along which to compute the norm. If an integer, vector norm is used. If a 2-tuple, matrix norm is used. + If `None`, it is inferred from the dimension of the array. Default: `None` + keepdims : bool, optional + Retains the reduced dimension when `True`. Default: `False` + ord : int, float, inf, -inf, 'fro', 'nuc' + The norm order to compute. See Notes + + See Also + -------- + vector_norm + Computes the vector norm of an array. + matrix_norm + Computes the matrix norm of an array. 
+
+    Notes
+    -----
+    The following norms are supported:
+
+    =====  ============================  ==========================
+    ord    norm for matrices             norm for vectors
+    =====  ============================  ==========================
+    None   Frobenius norm                L2-norm (Euclidean)
+    'fro'  Frobenius norm                --
+    'nuc'  nuclear norm                  --
+    inf    max(sum(abs(x), axis=1))      max(abs(x))
+    -inf   min(sum(abs(x), axis=1))      min(abs(x))
+    0      --                            sum(x != 0)
+    1      max(sum(abs(x), axis=0))      L1-norm (Manhattan)
+    -1     min(sum(abs(x), axis=0))      1./sum(1./abs(x))
+    2      --                            L2-norm (Euclidean)
+    -2     --                            1./sqrt(sum(1./abs(x)**2))
+    other  --                            sum(abs(x)**ord)**(1./ord)
+    =====  ============================  ==========================
+
+    The following matrix norms are currently **not** supported:
+
+    =====  ============================
+    ord    norm for matrices
+    =====  ============================
+    2      largest singular value
+    -2     smallest singular value
+    =====  ============================
+
+    Raises
+    ------
+    ValueError
+        If 'axis' has more than 2 elements
+
+    Examples
+    --------
+    >>> from heat import linalg as LA
+    >>> a = ht.arange(9, dtype=ht.float) - 4
+    >>> a
+    DNDarray([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> b = a.reshape((3, 3))
+    >>> b
+    DNDarray([[-4., -3., -2.],
+              [-1.,  0.,  1.],
+              [ 2.,  3.,  4.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, ord="fro")
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, float("inf"))
+    DNDarray([4.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, ht.inf)
+    DNDarray([9.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -ht.inf)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, -ht.inf)
+    DNDarray([2.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 1)
+    DNDarray([20.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, 1)
+    DNDarray([7.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -1)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, -1)
+    DNDarray([6.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 2)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -2)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 3)
+    DNDarray([5.8480], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -3)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> c = ht.array([[ 1,  2,  3],
+    ...               [-1,  1,  4]])
+    >>> LA.norm(c, axis=0)
+    DNDarray([1.4142, 2.2361, 5.0000], dtype=ht.float64, device=cpu:0, split=None)
+    >>> LA.norm(c, axis=1)
+    DNDarray([3.7417, 4.2426], dtype=ht.float64, device=cpu:0, split=None)
+    >>> LA.norm(c, axis=1, ord=1)
+    DNDarray([6., 6.], dtype=ht.float64, device=cpu:0, split=None)
+    >>> m = ht.arange(8).reshape(2, 2, 2)
+    >>> LA.norm(m, axis=(1, 2))
+    DNDarray([ 3.7417, 11.2250], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
+    (DNDarray(3.7417, dtype=ht.float32, device=cpu:0, split=None), DNDarray(11.2250, dtype=ht.float32, device=cpu:0, split=None))
+
+`numpy(self) ‑> numpy.ndarray`
+:   Returns a copy of the :class:`DNDarray` as a numpy ndarray. If the ``DNDarray`` resides on the GPU, the underlying data will be copied to the CPU first. 
+
+    If the ``DNDarray`` is distributed, an MPI Allgather operation will be performed before converting to np.ndarray, i.e. each MPI process will end up holding a copy of the entire array in memory. Make sure process memory is sufficient!
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> T1 = ht.random.randn(10, 8)
+    >>> T1.numpy()
+
+`pow_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+:   Element-wise in-place exponentiation.
+    Takes the element(s) of the first operand (:class:`~heat.core.dndarray.DNDarray`) element-wise
+    to the power of the corresponding element(s) of the second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by
+    the results of element-wise exponentiation of `t1` and `t2`.
+    Can be called as a DNDarray method or with the symbol `**=`. `power_` is an alias for `pow_`.
+
+    Parameters
+    ----------
+    t1: DNDarray
+        The first operand whose values represent the base
+    t2: DNDarray or scalar
+        The second operand whose values represent the exponent
+
+    Raises
+    ------
+    ValueError
+        If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+        underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+    TypeError
+        If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+        corresponding out-of-place operation may work, for the in-place version the requirements
+        are stricter, because the data type of `t1` does not change.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> T1 = ht.float32([[1, 2], [3, 4]])
+    >>> T2 = ht.float32([[3, 3], [2, 2]])
+    >>> T1 **= T2
+    >>> T1
+    DNDarray([[ 1.,  8.],
+              [ 9., 16.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([[3., 3.],
+              [2., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s = -1.0
+    >>> T2.pow_(s)
+    DNDarray([[0.3333, 0.3333],
+              [0.5000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([[0.3333, 0.3333],
+              [0.5000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> s
+    -1.0
+    >>> v = ht.int32([-3, 2])
+    >>> T2.power_(v)
+    DNDarray([[27.0000,  0.1111],
+              [ 8.0000,  0.2500]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> T2
+    DNDarray([[27.0000,  0.1111],
+              [ 8.0000,  0.2500]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> v
+    DNDarray([-3,  2], dtype=ht.int32, device=cpu:0, split=None)
+
+`power_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+:   Element-wise in-place exponentiation.
+    Takes the element(s) of the first operand (:class:`~heat.core.dndarray.DNDarray`) element-wise
+    to the power of the corresponding element(s) of the second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by
+    the results of element-wise exponentiation of `t1` and `t2`.
+    Can be called as a DNDarray method or with the symbol `**=`. `power_` is an alias for `pow_`.
+
+    Parameters
+    ----------
+    t1: DNDarray
+        The first operand whose values represent the base
+    t2: DNDarray or scalar
+        The second operand whose values represent the exponent
+
+    Raises
+    ------
+    ValueError
+        If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+        underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+    TypeError
+        If the data type of `t2` cannot be cast to the data type of `t1`. 
Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.float32([[1, 2], [3, 4]]) + >>> T2 = ht.float32([[3, 3], [2, 2]]) + >>> T1 **= T2 + >>> T1 + DNDarray([[ 1., 8.], + [ 9., 16.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[3., 3.], + [2., 2.]], dtype=ht.float32, device=cpu:0, split=None) + >>> s = -1.0 + >>> T2.pow_(s) + DNDarray([[0.3333, 0.3333], + [0.5000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[0.3333, 0.3333], + [0.5000, 0.5000]], dtype=ht.float32, device=cpu:0, split=None) + >>> s + -1.0 + >>> v = ht.int32([-3, 2]) + >>> T2.power_(v) + DNDarray([[27.0000, 0.1111], + [ 8.0000, 0.2500]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[27.0000, 0.1111], + [ 8.0000, 0.2500]], dtype=ht.float32, device=cpu:0, split=None) + >>> v + DNDarray([-3, 2], dtype=ht.int32, device=cpu:0, split=None) + + `prod(self, axis=None, out=None, keepdims=None)` + : Return the product of array elements over a given axis in form of a DNDarray shaped as a but + with the specified axis removed. + + Parameters + ---------- + a : DNDarray + Input array. + axis : None or int or Tuple[int,...], optional + Axis or axes along which a product is performed. The default, ``axis=None``, will calculate + the product of all the elements in the input array. If axis is negative it counts from the + last to the first axis. If axis is a tuple of ints, a product is performed on all of the + axes specified in the tuple instead of a single axis or all the axes as before. + out : DNDarray, optional + Alternative output array in which to place the result. It must have the same shape as the + expected output, but the datatype of the output values will be cast if necessary. + keepdims : bool, optional + If this is set to ``True``, the axes which are reduced are left in the result as dimensions + with size one. With this option, the result will broadcast correctly against the input + array. + + Examples + -------- + >>> ht.prod(ht.array([1.0, 2.0])) + DNDarray(2., dtype=ht.float32, device=cpu:0, split=None) + >>> ht.prod(ht.array([ + [1.,2.], + [3.,4.]])) + DNDarray(24., dtype=ht.float32, device=cpu:0, split=None) + >>> ht.prod(ht.array([ + [1.,2.], + [3.,4.] + ]), axis=1) + DNDarray([ 2., 12.], dtype=ht.float32, device=cpu:0, split=None) + + `ravel(self)` + : Flattens the ``DNDarray``. + + See Also + -------- + :func:`~heat.core.manipulations.ravel` + + Examples + -------- + >>> a = ht.ones((2, 3), split=0) + >>> b = a.ravel() + >>> a[0, 0] = 4 + >>> b + DNDarray([4., 1., 1., 1., 1., 1.], dtype=ht.float32, device=cpu:0, split=0) + + `redistribute(arr, lshape_map=None, target_map=None)` + : Redistributes the data of the :class:`DNDarray` *along the split axis* to match the given target map. + This function does not modify the non-split dimensions of the ``DNDarray``. + This is an abstraction and extension of the balance function. + + Parameters + ---------- + arr: DNDarray + DNDarray to redistribute + lshape_map : torch.Tensor, optional + The current lshape of processes. + Units are ``[rank, lshape]``. + target_map : torch.Tensor, optional + The desired distribution across the processes. + Units are ``[rank, target lshape]``. 
+ Note: the only important parts of the target map are the values along the split axis, + values which are not along this axis are there to mimic the shape of the ``lshape_map``. + + Examples + -------- + >>> st = ht.ones((50, 81, 67), split=2) + >>> target_map = torch.zeros((st.comm.size, 3), dtype=torch.int64) + >>> target_map[0, 2] = 67 + >>> print(target_map) + [0/2] tensor([[ 0, 0, 67], + [0/2] [ 0, 0, 0], + [0/2] [ 0, 0, 0]], dtype=torch.int32) + [1/2] tensor([[ 0, 0, 67], + [1/2] [ 0, 0, 0], + [1/2] [ 0, 0, 0]], dtype=torch.int32) + [2/2] tensor([[ 0, 0, 67], + [2/2] [ 0, 0, 0], + [2/2] [ 0, 0, 0]], dtype=torch.int32) + >>> print(st.lshape) + [0/2] (50, 81, 23) + [1/2] (50, 81, 22) + [2/2] (50, 81, 22) + >>> ht.redistribute_(st, target_map=target_map) + >>> print(st.lshape) + [0/2] (50, 81, 67) + [1/2] (50, 81, 0) + [2/2] (50, 81, 0) + + `redistribute_(self, lshape_map: Optional[torch.Tensor] = None, target_map: Optional[torch.Tensor] = None)` + : Redistributes the data of the :class:`DNDarray` *along the split axis* to match the given target map. + This function does not modify the non-split dimensions of the ``DNDarray``. + This is an abstraction and extension of the balance function. + + Parameters + ---------- + lshape_map : torch.Tensor, optional + The current lshape of processes. + Units are ``[rank, lshape]``. + target_map : torch.Tensor, optional + The desired distribution across the processes. + Units are ``[rank, target lshape]``. + Note: the only important parts of the target map are the values along the split axis, + values which are not along this axis are there to mimic the shape of the ``lshape_map``. + + Examples + -------- + >>> st = ht.ones((50, 81, 67), split=2) + >>> target_map = torch.zeros((st.comm.size, 3), dtype=torch.int64) + >>> target_map[0, 2] = 67 + >>> print(target_map) + [0/2] tensor([[ 0, 0, 67], + [0/2] [ 0, 0, 0], + [0/2] [ 0, 0, 0]], dtype=torch.int32) + [1/2] tensor([[ 0, 0, 67], + [1/2] [ 0, 0, 0], + [1/2] [ 0, 0, 0]], dtype=torch.int32) + [2/2] tensor([[ 0, 0, 67], + [2/2] [ 0, 0, 0], + [2/2] [ 0, 0, 0]], dtype=torch.int32) + >>> print(st.lshape) + [0/2] (50, 81, 23) + [1/2] (50, 81, 22) + [2/2] (50, 81, 22) + >>> st.redistribute_(target_map=target_map) + >>> print(st.lshape) + [0/2] (50, 81, 67) + [1/2] (50, 81, 0) + [2/2] (50, 81, 0) + + `remainder_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : Element-wise in-place division remainder of values of two operands. The result has the same sign + as the divisor. + Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise computes the + modulo regarding the element(s) of the second operand (scalar or + :class:`~heat.core.dndarray.DNDarray`) in-place, i.e. the element(s) of `t1` are overwritten by + the results of element-wise `t1` modulo `t2`. + Can be called as a DNDarray method or with the symbol `%=`. `mod_` is an alias for `remainder_`. + + Parameters + ---------- + t1: DNDarray + The first operand whose values are divided + t2: DNDarray or scalar + The second operand by whose values is divided + + Raises + ------ + ValueError + If both inputs are DNDarrays that do not have the same split axis and the shapes of their + underlying torch.tensors differ, s.t. we cannot process them directly without resplitting. + TypeError + If the data type of `t2` cannot be cast to the data type of `t1`. 
Although the + corresponding out-of-place operation may work, for the in-place version the requirements + are stricter, because the data type of `t1` does not change. + + Examples + -------- + >>> import heat as ht + >>> T1 = ht.array(2) + >>> T1 %= T1 + >>> T1 + DNDarray(0, dtype=ht.int64, device=cpu:0, split=None) + >>> T2 = ht.float32([[1, 2], [3, 4]]) + >>> T3 = ht.int32([[2, 2], [2, 2]]) + >>> T2.mod_(T3) + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T2 + DNDarray([[1., 0.], + [1., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> T3 + DNDarray([[2, 2], + [2, 2]], dtype=ht.int32, device=cpu:0, split=None) + >>> s = -3 + >>> T3.remainder_(s) + DNDarray([[-1, -1], + [-1, -1]], dtype=ht.int32, device=cpu:0, split=None) + >>> T3 + DNDarray([[-1, -1], + [-1, -1]], dtype=ht.int32, device=cpu:0, split=None) + >>> s + -3 + + `reshape(self, *shape, **kwargs)` + : Returns an array with the same data and number of elements as `a`, but with the specified shape. + + Parameters + ---------- + a : DNDarray + The input array + shape : Union[int, Tuple[int,...]] + Shape of the new array. Must be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. + new_split : int, optional + The distribution axis of the reshaped array. If `new_split` is not provided, the reshaped array will have: + - the same split axis as the input array, if the original dimensionality is unchanged; + - split axis 0, if the number of dimensions is modified by reshaping. + **kwargs + Extra keyword arguments. + + Raises + ------ + ValueError + If the number of elements in the new shape is inconsistent with the input data. + + Notes + ----- + `reshape()` might require significant communication among processes. Communication is minimized if the input array is distributed along axis 0, i.e. `a.split == 0`. 
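+    When the input is split along a different axis, it can therefore pay off to
+    redistribute it once before reshaping; a minimal sketch (shapes illustrative):
+
+    >>> a = ht.random.rand(4, 6, split=1)
+    >>> b = ht.resplit(a, 0).reshape(8, 3)  # redistribute once, then reshape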
+ + See Also + -------- + :func:`ravel` + + Examples + -------- + >>> a = ht.zeros((3, 4)) + >>> ht.reshape(a, (4, 3)) + DNDarray([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> a = ht.linspace(0, 14, 8, split=0) + >>> ht.reshape(a, (2, 4)) + (1/2) tensor([[0., 2., 4., 6.]]) + (2/2) tensor([[ 8., 10., 12., 14.]]) + # 3-dim array, distributed along axis 1 + >>> a = ht.random.rand(2, 3, 4, split=1) + >>> a + DNDarray([[[0.5525, 0.5434, 0.9477, 0.9503], + [0.4165, 0.3924, 0.3310, 0.3935], + [0.1008, 0.1750, 0.9030, 0.8579]], + + [[0.0680, 0.4944, 0.4114, 0.6669], + [0.6423, 0.2625, 0.5413, 0.2225], + [0.0197, 0.5079, 0.4739, 0.4387]]], dtype=ht.float32, device=cpu:0, split=1) + >>> a.reshape(-1, 3) # reshape to 2-dim array: split axis will be set to 0 + DNDarray([[0.5525, 0.5434, 0.9477], + [0.9503, 0.4165, 0.3924], + [0.3310, 0.3935, 0.1008], + [0.1750, 0.9030, 0.8579], + [0.0680, 0.4944, 0.4114], + [0.6669, 0.6423, 0.2625], + [0.5413, 0.2225, 0.0197], + [0.5079, 0.4739, 0.4387]], dtype=ht.float32, device=cpu:0, split=0) + >>> a.reshape(2, 3, 2, 2, new_split=1) # reshape to 4-dim array, specify distribution axis + DNDarray([[[[0.5525, 0.5434], + [0.9477, 0.9503]], + + [[0.4165, 0.3924], + [0.3310, 0.3935]], + + [[0.1008, 0.1750], + [0.9030, 0.8579]]], + + + [[[0.0680, 0.4944], + [0.4114, 0.6669]], + + [[0.6423, 0.2625], + [0.5413, 0.2225]], + + [[0.0197, 0.5079], + [0.4739, 0.4387]]]], dtype=ht.float32, device=cpu:0, split=1) + + `resplit(self, axis=None)` + : Out-of-place redistribution of the content of the `DNDarray`. Allows to "unsplit" (i.e. gather) all values from all + nodes, as well as to define a new axis along which the array is split without changes to the values. + + Parameters + ---------- + arr : DNDarray + The array from which to resplit + axis : int or None + The new split axis, `None` denotes gathering, an int will set the new split axis + + Warning + ---------- + This operation might involve a significant communication overhead. Use it sparingly and preferably for + small arrays. + + Examples + -------- + >>> a = ht.zeros( + ... ( + ... 4, + ... 5, + ... ), + ... split=0, + ... ) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> b = resplit(a, None) + >>> b.split + None + >>> b.lshape + (0/2) (4, 5) + (1/2) (4, 5) + >>> a = ht.zeros( + ... ( + ... 4, + ... 5, + ... ), + ... split=0, + ... ) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> b = resplit(a, 1) + >>> b.split + 1 + >>> b.lshape + (0/2) (4, 3) + (1/2) (4, 2) + + `resplit_(self, axis: int = None)` + : In-place option for resplitting a :class:`DNDarray`. + + Parameters + ---------- + axis : int + The new split axis, ``None`` denotes gathering, an int will set the new split axis + + Examples + -------- + >>> a = ht.zeros( + ... ( + ... 4, + ... 5, + ... ), + ... split=0, + ... ) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> ht.resplit_(a, None) + >>> a.split + None + >>> a.lshape + (0/2) (4, 5) + (1/2) (4, 5) + >>> a = ht.zeros( + ... ( + ... 4, + ... 5, + ... ), + ... split=0, + ... ) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> ht.resplit_(a, 1) + >>> a.split + 1 + >>> a.lshape + (0/2) (4, 3) + (1/2) (4, 2) + + `right_shift_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray` + : In-place version of `right_shift`. 
+ Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise shifts the bits
+ of each element in-place to the right by as many positions as the element(s) of the second
+ operand (scalar or :class:`~heat.core.dndarray.DNDarray`) indicate, i.e. the element(s) of `t1`
+ are overwritten by the results of element-wise bitwise right shift of `t1` by `t2` positions.
+ Can be called as a DNDarray method or with the symbol `>>=`. Only works for inputs with integer
+ elements.
+
+ Parameters
+ ----------
+ t1: DNDarray
+ Input array
+ t2: DNDarray or float
+ Integer number of zero bits to remove
+
+ Raises
+ ------
+ ValueError
+ If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+ underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+ TypeError
+ If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+ corresponding out-of-place operation may work, for the in-place version the requirements
+ are stricter, because the data type of `t1` does not change.
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> T1 = ht.array([1, 2, 32])
+ >>> s = 1
+ >>> T1.right_shift_(s)
+ DNDarray([ 0, 1, 16], dtype=ht.int64, device=cpu:0, split=None)
+ >>> T1
+ DNDarray([ 0, 1, 16], dtype=ht.int64, device=cpu:0, split=None)
+ >>> s
+ 1
+ >>> T2 = ht.array([2, -3, 2])
+ >>> T1 >>= T2
+ >>> T1
+ DNDarray([0, 0, 4], dtype=ht.int64, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([ 2, -3, 2], dtype=ht.int64, device=cpu:0, split=None)
+
+ `rot90(self, k=1, axis=(0, 1))`
+ : Rotate an array by 90 degrees in the plane specified by `axes`.
+ Rotation direction is from the first towards the second axis.
+
+ Parameters
+ ----------
+ m : DNDarray
+ Array of two or more dimensions.
+ k : integer
+ Number of times the array is rotated by 90 degrees.
+ axes : (2,) Sequence[int, int]
+ The array is rotated in the plane defined by the axes.
+ Axes must be different.
+
+ Raises
+ ------
+ ValueError
+ If `len(axes) != 2`.
+ ValueError
+ If the axes are the same.
+ ValueError
+ If axes are out of range.
+
+ Notes
+ -----
+ - ``rot90(m, k=1, axes=(1,0))`` is the reverse of ``rot90(m, k=1, axes=(0,1))``.
+
+ - ``rot90(m, k=1, axes=(1,0))`` is equivalent to ``rot90(m, k=-1, axes=(0,1))``.
+
+ May change the split axis on distributed tensors.
+
+ Examples
+ --------
+ >>> m = ht.array([[1, 2], [3, 4]], dtype=ht.int)
+ >>> m
+ DNDarray([[1, 2],
+ [3, 4]], dtype=ht.int32, device=cpu:0, split=None)
+ >>> ht.rot90(m)
+ DNDarray([[2, 4],
+ [1, 3]], dtype=ht.int32, device=cpu:0, split=None)
+ >>> ht.rot90(m, 2)
+ DNDarray([[4, 3],
+ [2, 1]], dtype=ht.int32, device=cpu:0, split=None)
+ >>> m = ht.arange(8).reshape((2, 2, 2))
+ >>> ht.rot90(m, 1, (1, 2))
+ DNDarray([[[1, 3],
+ [0, 2]],
+
+ [[5, 7],
+ [4, 6]]], dtype=ht.int32, device=cpu:0, split=None)
+
+ `round(self, decimals=0, out=None, dtype=None)`
+ : Calculate the rounded value element-wise.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the rounded values.
+ decimals: int, optional
+ Number of decimal places to round to.
+ If decimals is negative, it specifies the number of positions to the left of the decimal point.
+ out : DNDarray, optional
+ A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+ If not provided or ``None``, a freshly-allocated array is returned.
+ dtype : datatype, optional
+ Determines the data type of the output array. The values are cast to this type with potential loss of
+ precision.
+
+ Raises
+ ------
+ TypeError
+ If dtype is not a heat data type.
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> ht.round(ht.arange(-2.0, 2.0, 0.4))
+ DNDarray([-2., -2., -1., -1., -0., 0., 0., 1., 1., 2.], dtype=ht.float32, device=cpu:0, split=None)
+
+ `save(self, path, *args, **kwargs)`
+ : Attempts to save data from a :class:`~heat.core.dndarray.DNDarray` to disk. An auto-detection based on the file
+ format extension is performed.
+
+ Parameters
+ ----------
+ data : DNDarray
+ The array holding the data to be stored
+ path : str
+ Path to the file to be stored.
+ args : list, optional
+ Additional options passed to the particular functions.
+ kwargs : dict, optional
+ Additional options passed to the particular functions.
+
+ Raises
+ ------
+ ValueError
+ If the file extension is not understood or known.
+ RuntimeError
+ If the optional dependency for a file extension is not available.
+
+ Examples
+ --------
+ >>> x = ht.arange(100, split=0)
+ >>> ht.save(x, "data.h5", "DATA", mode="a")
+
+ `save_hdf5(self, path, dataset, mode='w', **kwargs)`
+ : Saves ``data`` to an HDF5 file. Attempts to utilize parallel I/O if possible.
+
+ Parameters
+ ----------
+ data : DNDarray
+ The data to be saved on disk.
+ path : str
+ Path to the HDF5 file to be written.
+ dataset : str
+ Name of the dataset the data is saved to.
+ mode : str, optional
+ File access mode, one of ``'w', 'a', 'r+'``.
+ kwargs : dict, optional
+ Additional arguments passed to the created dataset.
+
+ Raises
+ ------
+ TypeError
+ If any of the input parameters are not of correct type.
+ ValueError
+ If the access mode is not understood.
+
+ Examples
+ --------
+ >>> x = ht.arange(100, split=0)
+ >>> ht.save_hdf5(x, "data.h5", dataset="DATA")
+
+ `save_netcdf(self, path, variable, mode='w', **kwargs)`
+ : Saves data to a netCDF4 file. Attempts to utilize parallel I/O if possible.
+
+ Parameters
+ ----------
+ data : DNDarray
+ The data to be saved on disk.
+ path : str
+ Path to the netCDF4 file to be written.
+ variable : str
+ Name of the variable the data is saved to.
+ mode : str, optional
+ File access mode, one of ``'w', 'a', 'r+'``.
+ dimension_names : list or tuple or string
+ Specifies the netCDF Dimensions used by the variable. Ignored if the variable already exists.
+ is_unlimited : bool, optional
+ If True, every dimension created for this variable (i.e. one that does not already exist) is unlimited. Already
+ existing limited dimensions cannot be changed to unlimited and vice versa.
+ file_slices : integer iterable, slice, ellipsis or bool
+ Keys used to slice the netCDF Variable, as given in the nc.utils._StartCountStride method.
+ kwargs : dict, optional
+ Additional arguments passed to the created dataset.
+
+ Raises
+ ------
+ TypeError
+ If any of the input parameters are not of correct type.
+ ValueError
+ If the access mode is not understood or if the number of dimension names does not match the number of
+ dimensions.
+
+ Examples
+ --------
+ >>> x = ht.arange(100, split=0)
+ >>> ht.save_netcdf(x, "data.nc", variable="DATA")
+
+ `sin(self, out=None)`
+ : Compute the trigonometric sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``sin`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the trigonometric sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sin(ht.arange(-6, 7, 2))
+ DNDarray([ 0.2794, 0.7568, -0.9093, 0.0000, 0.9093, -0.7568, -0.2794], dtype=ht.float32, device=cpu:0, split=None)
+
+ `sinh(self, out=None)`
+ : Compute the hyperbolic sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``sinh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the hyperbolic sine.
+ out : DNDarray or None, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sinh(ht.arange(-6, 7, 2))
+ DNDarray([-201.7132, -27.2899, -3.6269, 0.0000, 3.6269, 27.2899, 201.7132], dtype=ht.float32, device=cpu:0, split=None)
+
+ `skew(self, axis=None, unbiased=True)`
+ : Compute the sample skewness of a data set.
+
+ Parameters
+ ----------
+ x : ht.DNDarray
+ Input array
+ axis : NoneType or Int
+ Axis along which skewness is calculated. Default is to compute over the whole array `x`.
+ unbiased : Bool
+ If True (default), the calculations are corrected for bias.
+
+ Warnings
+ --------
+ UserWarning: Depending on the axis given and the split configuration, a UserWarning may be thrown during this function as data is transferred between processes.
+
+ `sqrt(self, out=None)`
+ : Return the non-negative square-root of a tensor element-wise.
+ Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+ Negative input elements are returned as :abbr:`NaN (Not a Number)`.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the square-roots.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sqrt(ht.arange(5))
+ DNDarray([0.0000, 1.0000, 1.4142, 1.7321, 2.0000], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.sqrt(ht.arange(-5, 0))
+ DNDarray([nan, nan, nan, nan, nan], dtype=ht.float32, device=cpu:0, split=None)
+
+ `square(self, out=None)`
+ : Return a new tensor with the squares of the elements of input.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the squares.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> a = ht.random.rand(4)
+ >>> a
+ DNDarray([0.8654, 0.1432, 0.9164, 0.6179], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.square(a)
+ DNDarray([0.7488, 0.0205, 0.8397, 0.3818], dtype=ht.float32, device=cpu:0, split=None)
+
+ `squeeze(self, axis=None)`
+ : Remove single-element entries from the shape of a `DNDarray`.
+ Returns the input array, but with all or a subset (indicated by `axis`) of the dimensions of length 1 removed.
+ Split semantics: see Notes below.
+
+ Parameters
+ ----------
+ x : DNDarray
+ Input data.
+ axis : None or int or Tuple[int,...], optional
+ Selects a subset of the single-element entries in the shape.
+ If axis is `None`, all single-element entries will be removed from the shape.
+
+ Raises
+ ------
+ ValueError
+ If an axis is selected whose shape entry is greater than one.
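+
+ A defensive pattern for the ``ValueError`` above (a sketch; ``ax`` is an illustrative variable name, not part of the API):
+ >>> a = ht.random.randn(1, 3, 1, 5)
+ >>> ax = 1
+ >>> b = a.squeeze(ax) if a.shape[ax] == 1 else a  # only squeeze axes of length 1
+ >>> b.shape
+ (1, 3, 1, 5)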
+
+ Notes
+ -----
+ Split semantics: a distributed DNDarray will keep its original split dimension after "squeezing",
+ which, depending on the squeeze axis, may result in a lower numerical `split` value (see Examples).
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> a = ht.random.randn(1, 3, 1, 5)
+ >>> a
+ DNDarray([[[[-0.2604, 1.3512, 0.1175, 0.4197, 1.3590]],
+ [[-0.2777, -1.1029, 0.0697, -1.3074, -1.1931]],
+ [[-0.4512, -1.2348, -1.1479, -0.0242, 0.4050]]]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> a.shape
+ (1, 3, 1, 5)
+ >>> ht.squeeze(a).shape
+ (3, 5)
+ >>> ht.squeeze(a)
+ DNDarray([[-0.2604, 1.3512, 0.1175, 0.4197, 1.3590],
+ [-0.2777, -1.1029, 0.0697, -1.3074, -1.1931],
+ [-0.4512, -1.2348, -1.1479, -0.0242, 0.4050]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.squeeze(a, axis=0).shape
+ (3, 1, 5)
+ >>> ht.squeeze(a, axis=-2).shape
+ (1, 3, 5)
+ >>> ht.squeeze(a, axis=1).shape
+ Traceback (most recent call last):
+ ...
+ ValueError: Dimension along axis 1 is not 1 for shape (1, 3, 1, 5)
+ >>> x = ht.zeros((10, 1, 12, 13), split=2)
+ >>> x.shape
+ (10, 1, 12, 13)
+ >>> x.split
+ 2
+ >>> x.squeeze().shape
+ (10, 12, 13)
+ >>> x.squeeze().split
+ 1
+
+ `std(self, axis=None, ddof=0, **kwargs)`
+ : Calculates the standard deviation of a ``DNDarray``, with the Bessel correction applied if ``ddof=1``.
+ If an axis is given, the standard deviation will be taken in that direction.
+
+ Parameters
+ ----------
+ x : DNDarray
+ Array for which the std is calculated.
+ The datatype of ``x`` must be a float.
+ axis : None or int or iterable
+ Axis which the std is taken in. Default ``None`` calculates the std of all data items.
+ ddof : int, optional
+ Delta Degrees of Freedom: the denominator implicitly used in the calculation is N - ddof, where N
+ represents the number of elements. If ``ddof=1``, the Bessel correction will be applied.
+ Setting ``ddof>1`` raises a ``NotImplementedError``.
+ **kwargs
+ Extra keyword arguments
+
+ Examples
+ --------
+ >>> a = ht.random.randn(1, 3)
+ >>> a
+ DNDarray([[ 0.5714, 0.0048, -0.2942]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.std(a)
+ DNDarray(0.3590, dtype=ht.float32, device=cpu:0, split=None)
+ >>> a = ht.random.randn(4, 4)
+ >>> a
+ DNDarray([[ 0.8488, 1.2225, 1.2498, -1.4592],
+ [-0.5820, -0.3928, 0.1509, -0.0174],
+ [ 0.6426, -1.8149, 0.1369, 0.0042],
+ [-0.6043, -0.0523, -1.6653, 0.6631]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.std(a, 1, ddof=1)
+ DNDarray([1.2961, 0.3362, 1.0739, 0.9820], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.std(a, 1)
+ DNDarray([1.1224, 0.2912, 0.9300, 0.8504], dtype=ht.float32, device=cpu:0, split=None)
+
+ `sub_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+ : Element-wise in-place subtraction of values of two operands.
+ Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise subtracts the
+ element(s) of the second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) in-place,
+ i.e. the element(s) of `t1` are overwritten by the results of element-wise subtraction of `t2`
+ from `t1`.
+ Can be called as a DNDarray method or with the symbol `-=`. `subtract_` is an alias for `sub_`.
+
+ Parameters
+ ----------
+ t1: DNDarray
+ The first operand involved in the subtraction
+ t2: DNDarray or scalar
+ The second operand involved in the subtraction
+
+ Raises
+ ------
+ ValueError
+ If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+ underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+ TypeError
+ If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+ corresponding out-of-place operation may work, for the in-place version the requirements
+ are stricter, because the data type of `t1` does not change.
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> T1 = ht.float32([[1, 2], [3, 4]])
+ >>> T2 = ht.float32([[2, 2], [2, 2]])
+ >>> T1 -= T2
+ >>> T1
+ DNDarray([[-1., 0.],
+ [ 1., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[2., 2.],
+ [2., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> s = 2.0
+ >>> ht.sub_(T2, s)
+ DNDarray([[0., 0.],
+ [0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[0., 0.],
+ [0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> s
+ 2.0
+ >>> v = ht.int32([-3, 2])
+ >>> T2.subtract_(v)
+ DNDarray([[ 3., -2.],
+ [ 3., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[ 3., -2.],
+ [ 3., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> v
+ DNDarray([-3, 2], dtype=ht.int32, device=cpu:0, split=None)
+
+ `subtract_(t1: DNDarray, t2: Union[DNDarray, float]) ‑> heat.core.dndarray.DNDarray`
+ : Element-wise in-place subtraction of values of two operands.
+ Takes the first operand (:class:`~heat.core.dndarray.DNDarray`) and element-wise subtracts the
+ element(s) of the second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) in-place,
+ i.e. the element(s) of `t1` are overwritten by the results of element-wise subtraction of `t2`
+ from `t1`.
+ Can be called as a DNDarray method or with the symbol `-=`. `subtract_` is an alias for `sub_`.
+
+ Parameters
+ ----------
+ t1: DNDarray
+ The first operand involved in the subtraction
+ t2: DNDarray or scalar
+ The second operand involved in the subtraction
+
+ Raises
+ ------
+ ValueError
+ If both inputs are DNDarrays that do not have the same split axis and the shapes of their
+ underlying torch.tensors differ, s.t. we cannot process them directly without resplitting.
+ TypeError
+ If the data type of `t2` cannot be cast to the data type of `t1`. Although the
+ corresponding out-of-place operation may work, for the in-place version the requirements
+ are stricter, because the data type of `t1` does not change.
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> T1 = ht.float32([[1, 2], [3, 4]])
+ >>> T2 = ht.float32([[2, 2], [2, 2]])
+ >>> T1 -= T2
+ >>> T1
+ DNDarray([[-1., 0.],
+ [ 1., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[2., 2.],
+ [2., 2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> s = 2.0
+ >>> ht.sub_(T2, s)
+ DNDarray([[0., 0.],
+ [0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[0., 0.],
+ [0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> s
+ 2.0
+ >>> v = ht.int32([-3, 2])
+ >>> T2.subtract_(v)
+ DNDarray([[ 3., -2.],
+ [ 3., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> T2
+ DNDarray([[ 3., -2.],
+ [ 3., -2.]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> v
+ DNDarray([-3, 2], dtype=ht.int32, device=cpu:0, split=None)
+
+ `sum(self, axis=None, out=None, keepdims=None)`
+ :
+
+ `swapaxes(self, axis1, axis2)`
+ : Interchanges two axes of an array.
+
+ Parameters
+ ----------
+ x : DNDarray
+ Input array.
+ axis1 : int
+ First axis.
+ axis2 : int
+ Second axis.
+
+ See Also
+ --------
+ :func:`~heat.core.linalg.basics.transpose`
+ Permute the dimensions of an array.
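+
+ As a quick cross-check (a sketch, not a documented contract): swapping two axes matches a `transpose` whose axis permutation exchanges exactly those two entries.
+ >>> x = ht.zeros((2, 3, 4))
+ >>> ht.swapaxes(x, 0, 2).shape == ht.transpose(x, [2, 1, 0]).shape
+ True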
+ + Examples + -------- + >>> x = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) + >>> ht.swapaxes(x, 0, 1) + DNDarray([[[0, 1], + [4, 5]], + [[2, 3], + [6, 7]]], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.swapaxes(x, 0, 2) + DNDarray([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]], dtype=ht.int64, device=cpu:0, split=None) + + `tan(self, out=None)` + : Compute tangent element-wise. + Result is a ``DNDarray`` of the same shape as ``x``. + Equivalent to :func:`sin`/:func:`cos` element-wise. If ``out`` was provided, ``tan`` is a reference to it. + + + Parameters + ---------- + x : DNDarray + The value for which to compute the trigonometric tangent. + out : DNDarray or None, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to ``None``, a fresh array is allocated. + + Examples + -------- + >>> ht.tan(ht.arange(-6, 7, 2)) + DNDarray([ 0.2910, -1.1578, 2.1850, 0.0000, -2.1850, 1.1578, -0.2910], dtype=ht.float32, device=cpu:0, split=None) + + `tanh(self, out=None)` + : Compute the hyperbolic tangent, element-wise. + Result is a ``DNDarray`` of the same shape as ``x``. + If ``out`` was provided, ``tanh`` is a reference to it. + + Parameters + ---------- + x : DNDarray + The value for which to compute the hyperbolic tangent. + out : DNDarray or None, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to ``None``, a fresh array is allocated. + + Examples + -------- + >>> ht.tanh(ht.arange(-6, 7, 2)) + DNDarray([-1.0000, -0.9993, -0.9640, 0.0000, 0.9640, 0.9993, 1.0000], dtype=ht.float32, device=cpu:0, split=None) + + `to_sparse_csc(array: DNDarray) ‑> heat.sparse.dcsx_matrix.DCSC_matrix` + : Convert the distributed array to a sparse DCSC_matrix representation. + + Parameters + ---------- + array : DNDarray + The distributed array to be converted to a sparse DCSC_matrix. + + Returns + ------- + DCSC_matrix + A sparse DCSC_matrix representation of the input DNDarray. + + Examples + -------- + >>> dense_array = ht.array([[1, 0, 0], [0, 0, 2], [0, 3, 0]]) + >>> dense_array.to_sparse_csc() + (indptr: tensor([0, 1, 2, 3]), indices: tensor([0, 2, 1]), data: tensor([1, 3, 2]), dtype=ht.int64, device=cpu:0, split=None) + + `to_sparse_csr(array: DNDarray) ‑> heat.sparse.dcsx_matrix.DCSR_matrix` + : Convert the distributed array to a sparse DCSR_matrix representation. + + Parameters + ---------- + array : DNDarray + The distributed array to be converted to a sparse DCSR_matrix. + + Returns + ------- + DCSR_matrix + A sparse DCSR_matrix representation of the input DNDarray. + + Examples + -------- + >>> dense_array = ht.array([[1, 0, 0], [0, 0, 2], [0, 3, 0]]) + >>> dense_array.to_sparse_csr() + (indptr: tensor([0, 1, 2, 3]), indices: tensor([0, 2, 1]), data: tensor([1, 2, 3]), dtype=ht.int64, device=cpu:0, split=None) + + `tolist(self, keepsplit: bool = False) ‑> List` + : Return a copy of the local array data as a (nested) Python list. For scalars, a standard Python number is returned. + + Parameters + ---------- + keepsplit: bool + Whether the list should be returned locally or globally. 
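+
+ Note (inferred from the `keepsplit` description, not stated explicitly above): with the default ``keepsplit=False`` the complete, gathered list is produced, so this can be memory-intensive for large distributed arrays.
+ >>> a = ht.arange(4, split=0)
+ >>> a.tolist()  # the full list, regardless of how `a` is distributed
+ [0, 1, 2, 3]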
+ + Examples + -------- + >>> a = ht.array([[0, 1], [2, 3]]) + >>> a.tolist() + [[0, 1], [2, 3]] + + >>> a = ht.array([[0, 1], [2, 3]], split=0) + >>> a.tolist() + [[0, 1], [2, 3]] + + >>> a = ht.array([[0, 1], [2, 3]], split=1) + >>> a.tolist(keepsplit=True) + (1/2) [[0], [2]] + (2/2) [[1], [3]] + + `trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None)` + : Return the sum along diagonals of the array + + If `a` is 2D, the sum along its diagonal with the given offset is returned, i.e. the sum of + elements a[i, i+offset] for all i. + + If `a` has more than two dimensions, then the axes specified by `axis1` and `axis2` are used + to determine the 2D-sub-DNDarrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken + offset : int, optional + Offsets of the diagonal from the main diagonal. Can be both positive and negative. Defaults to 0. + axis1: int, optional + Axis to be used as the first axis of the 2D-sub-arrays from which the diagonals + should be taken. Default is the first axis of `a` + axis2 : int, optional + Axis to be used as the second axis of the 2D-sub-arrays from which the diagonals + should be taken. Default is the second two axis of `a` + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator where the elements are + summed. If `dtype` has value None than the dtype is the same as that of `a` + out: ht.DNDarray, optional + Array into which the output is placed. Its type is preserved and it must be of the right shape + to hold the output + Only applicable if `a` has more than 2 dimensions, thus the result is not a scalar. + If distributed, its split axis might change eventually. + + Returns + ------- + sum_along_diagonals : number (of defined dtype) or ht.DNDarray + If `a` is 2D, the sum along the diagonal is returned as a scalar + If `a` has more than 2 dimensions, then a DNDarray of sums along diagonals is returned + + Examples + -------- + 2D-case + >>> x = ht.arange(24).reshape((4, 6)) + >>> x + DNDarray([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 21, 22, 23]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.trace(x) + 42 + >>> ht.trace(x, 1) + 46 + >>> ht.trace(x, -2) + 31 + + > 2D-case + >>> x = x.reshape((2, 3, 4)) + >>> x + DNDarray([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.trace(x) + DNDarray([16, 18, 20, 22], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.trace(x, 1) + DNDarray([24, 26, 28, 30], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.trace(x, axis1=0, axis2=2) + DNDarray([13, 21, 29], dtype=ht.int32, device=cpu:0, split=None) + + `transpose(self, axes=None)` + : Permute the dimensions of an array. + + Parameters + ---------- + a : DNDarray + Input array. + axes : None or List[int,...], optional + By default, reverse the dimensions, otherwise permute the axes according to the values given. + + `tril(self, k=0)` + : Returns the lower triangular part of the ``DNDarray``. + The lower triangular part of the array is defined as the elements on and below the diagonal, the other elements of + the result array are set to 0. + The argument ``k`` controls which diagonal to consider. If ``k=0``, all elements on and below the main diagonal are + retained. 
A positive value includes just as many diagonals above the main diagonal, and similarly a negative value excludes just as many diagonals below the main diagonal.
+
+ Parameters
+ ----------
+ m : DNDarray
+ Input array for which to compute the lower triangle.
+ k : int, optional
+ Diagonal above which to zero elements. ``k=0`` (default) is the main diagonal, ``k<0`` is below and ``k>0`` is above.
+
+ `triu(self, k=0)`
+ : Returns the upper triangular part of the ``DNDarray``.
+ The upper triangular part of the array is defined as the elements on and above the diagonal, the other elements of the result array are set to 0.
+ The argument ``k`` controls which diagonal to consider. If ``k=0``, all elements on and above the main diagonal are
+ retained. A negative value includes just as many diagonals below the main diagonal, and similarly a positive
+ value excludes just as many diagonals above the main diagonal.
+
+ Parameters
+ ----------
+ m : DNDarray
+ Input array for which to compute the upper triangle.
+ k : int, optional
+ Diagonal below which to zero elements. ``k=0`` (default) is the main diagonal, ``k<0`` is below and ``k>0`` is above.
+
+ `trunc(self, out=None)`
+ : Return the truncated value of the input, element-wise.
+ The truncated value of the scalar ``x`` is the nearest integer ``i`` which is closer to zero than ``x`` is. In short, the
+ fractional part of the signed number ``x`` is discarded.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the truncated values.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> import heat as ht
+ >>> ht.trunc(ht.arange(-2.0, 2.0, 0.4))
+ DNDarray([-2., -1., -1., -0., -0., 0., 0., 0., 1., 1.], dtype=ht.float32, device=cpu:0, split=None)
+
+ `unique(self, sorted=False, return_inverse=False, axis=None)`
+ : Finds and returns the unique elements of a `DNDarray`.
+ If return_inverse is `True`, the second tensor will hold the list of inverse indices.
+ If distributed, it is most efficient if `axis != a.split`.
+
+ Parameters
+ ----------
+ a : DNDarray
+ Input array.
+ sorted : bool, optional
+ Whether the found elements should be sorted before returning as output.
+ Warning: sorted is not working if `axis != None` and `axis != a.split`.
+ return_inverse : bool, optional
+ Whether to also return the indices for where elements in the original input ended up in the returned
+ unique list.
+ axis : int, optional
+ Axis along which unique elements should be found. Defaults to `None`, which will return a one-dimensional list of
+ unique values.
+
+ Examples
+ --------
+ >>> x = ht.array([[3, 2], [1, 3]])
+ >>> ht.unique(x, sorted=True)
+ array([1, 2, 3])
+ >>> ht.unique(x, sorted=True, axis=0)
+ array([[1, 3],
+ [2, 3]])
+ >>> ht.unique(x, sorted=True, axis=1)
+ array([[2, 3],
+ [3, 1]])
+
+ `var(self, axis=None, ddof=0, **kwargs)`
+ : Calculates and returns the variance of a ``DNDarray``. If an axis is given, the variance will be
+ taken in that direction.
+
+ Parameters
+ ----------
+ x : DNDarray
+ Array for which the variance is calculated.
+ The datatype of ``x`` must be a float.
+ axis : None or int or iterable
+ Axis which the variance is taken in. Default ``None`` calculates the variance of all data items.
+ ddof : int, optional
+ Delta Degrees of Freedom: the denominator implicitly used in the calculation is N - ddof, where N
+ represents the number of elements. If ``ddof=1``, the Bessel correction will be applied.
+ Setting ``ddof>1`` raises a ``NotImplementedError``.
+ **kwargs
+ Extra keyword arguments
+
+ Notes
+ -----
+ Split semantics when axis is an integer:
+
+ - if ``axis=x.split``, then ``var(x).split=None``
+
+ - if ``axis>split``, then ``var(x).split = x.split``
+
+ - if ``axis<split``, then ``var(x).split = x.split - 1``
+
+ Examples
+ --------
+ >>> a = ht.random.randn(1, 3)
+ >>> a
+ DNDarray([[-2.3589, -0.2073, 0.8806]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a)
+ DNDarray(1.8119, dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a, ddof=1)
+ DNDarray(2.7179, dtype=ht.float32, device=cpu:0, split=None)
+ >>> a = ht.random.randn(4, 4)
+ >>> a
+ DNDarray([[-0.8523, -1.4982, -0.5848, -0.2554],
+ [ 0.8458, -0.3125, -0.2430, 1.9016],
+ [-0.6778, -0.3584, -1.5112, 0.6545],
+ [-0.9161, 0.0168, 0.0462, 0.5964]], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a, 1)
+ DNDarray([0.2083, 0.8218, 0.6010, 0.2952], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a, 0)
+ DNDarray([0.5250, 0.3282, 0.3432, 0.5918], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a, 0, ddof=1)
+ DNDarray([0.7001, 0.4376, 0.4576, 0.7890], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.var(a, 0, ddof=0)
+ DNDarray([0.5250, 0.3282, 0.3432, 0.5918], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/exponential.md b/doc/api/heat/core/exponential.md
new file mode 100644
index 0000000000..b15aa55f16
--- /dev/null
+++ b/doc/api/heat/core/exponential.md
@@ -0,0 +1,217 @@
+Module heat.core.exponential
+============================
+Exponential and logarithmic operations module.
+
+Functions
+---------
+
+`exp(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate the exponential of all elements in the input array.
+ Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the exponential.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.exp(ht.arange(5))
+ DNDarray([ 1.0000, 2.7183, 7.3891, 20.0855, 54.5981], dtype=ht.float32, device=cpu:0, split=None)
+
+`exp2(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate the base-2 exponential of all elements in the input array (:math:`2^x`).
+ Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the base-2 exponential.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.exp2(ht.arange(5))
+ DNDarray([ 1., 2., 4., 8., 16.], dtype=ht.float32, device=cpu:0, split=None)
+
+`expm1(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate :math:`exp(x) - 1` for all elements in the array.
+ Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the exponential.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape.
If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.expm1(ht.arange(5)) + 1.0 + DNDarray([ 1.0000, 2.7183, 7.3891, 20.0855, 54.5981], dtype=ht.float64, device=cpu:0, split=None) + +`log(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Natural logarithm, element-wise. + The natural logarithm is the inverse of the exponential function, so that :math:`log(exp(x)) = x`. The natural + logarithm is logarithm in base e. Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log(ht.arange(5)) + DNDarray([ -inf, 0.0000, 0.6931, 1.0986, 1.3863], dtype=ht.float32, device=cpu:0, split=None) + +`log10(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Compute the logarithm to the base 10 (:math:`log_{10}(x)`), element-wise. + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log10(ht.arange(5)) + DNDarray([ -inf, 0.0000, 0.3010, 0.4771, 0.6021], dtype=ht.float32, device=cpu:0, split=None) + +`log1p(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Return the natural logarithm of one plus the input array, element-wise. + Result is a :class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> ht.log1p(ht.arange(5)) + DNDarray([0.0000, 0.6931, 1.0986, 1.3863, 1.6094], dtype=ht.float32, device=cpu:0, split=None) + +`log2(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Compute the logarithm to the base 2 (:math:`log_2(x)`), element-wise. + Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``. + Negative input elements are returned as :abbr:`NaN (Not a Number)`. + + Parameters + ---------- + x : DNDarray + The array for which to compute the logarithm. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to :keyword:`None`, a fresh array is allocated. 
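+
+ For an arbitrary base there is no dedicated routine in this module; a change-of-base sketch built from the functions above (``b`` is an illustrative base and the printed values are rounded):
+ >>> x = ht.arange(1, 5, dtype=ht.float32)
+ >>> b = 3.0
+ >>> ht.log(x) / ht.log(ht.array(b))  # logarithm to base b via natural logarithms
+ DNDarray([0.0000, 0.6309, 1.0000, 1.2619], dtype=ht.float32, device=cpu:0, split=None)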
+
+ Examples
+ --------
+ >>> ht.log2(ht.arange(5))
+ DNDarray([ -inf, 0.0000, 1.0000, 1.5850, 2.0000], dtype=ht.float32, device=cpu:0, split=None)
+
+`logaddexp(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculates the logarithm of the sum of exponentiations :math:`log(exp(x1) + exp(x2))` for each element :math:`{x1}_i` of
+ the input array x1 with the respective element :math:`{x2}_i` of the input array x2.
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ First input array. Should have a floating-point data type.
+ x2 : DNDarray
+ Second input array. Must be compatible with x1. Should have a floating-point data type.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ See Also
+ --------
+ :func:`logaddexp2`
+ Logarithm of the sum of exponentiations of inputs in base-2.
+
+ Examples
+ --------
+ >>> ht.logaddexp(ht.array([-1.0]), ht.array([-1.0, -2, -3]))
+ DNDarray([-0.3069, -0.6867, -0.8731], dtype=ht.float32, device=cpu:0, split=None)
+
+`logaddexp2(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculates the logarithm of the sum of exponentiations in base-2 :math:`log_2(2^{x1} + 2^{x2})` for each element :math:`{x1}_i` of
+ the input array x1 with the respective element :math:`{x2}_i` of the input array x2.
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ First input array. Should have a floating-point data type.
+ x2 : DNDarray
+ Second input array. Must be compatible with x1. Should have a floating-point data type.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ See Also
+ --------
+ :func:`logaddexp`
+ Logarithm of the sum of exponentiations of inputs.
+
+ Examples
+ --------
+ >>> ht.logaddexp2(ht.array([-1.0]), ht.array([-1.0, -2, -3]))
+ DNDarray([ 0.0000, -0.4150, -0.6781], dtype=ht.float32, device=cpu:0, split=None)
+
+`sqrt(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return the non-negative square-root of a tensor element-wise.
+ Result is a :py:class:`~heat.core.dndarray.DNDarray` of the same shape as ``x``.
+ Negative input elements are returned as :abbr:`NaN (Not a Number)`.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the square-roots.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to :keyword:`None`, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sqrt(ht.arange(5))
+ DNDarray([0.0000, 1.0000, 1.4142, 1.7321, 2.0000], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.sqrt(ht.arange(-5, 0))
+ DNDarray([nan, nan, nan, nan, nan], dtype=ht.float32, device=cpu:0, split=None)
+
+`square(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return a new tensor with the squares of the elements of input.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the squares.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape.
If not provided + or set to :keyword:`None`, a fresh array is allocated. + + Examples + -------- + >>> a = ht.random.rand(4) + >>> a + DNDarray([0.8654, 0.1432, 0.9164, 0.6179], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.square(a) + DNDarray([0.7488, 0.0205, 0.8397, 0.3818], dtype=ht.float32, device=cpu:0, split=None) diff --git a/doc/api/heat/core/factories.md b/doc/api/heat/core/factories.md new file mode 100644 index 0000000000..2e1d8b9436 --- /dev/null +++ b/doc/api/heat/core/factories.md @@ -0,0 +1,790 @@ +Module heat.core.factories +========================== +Provides high-level DNDarray initialization functions + +Functions +--------- + +`arange(*args: int | float, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> heat.core.dndarray.DNDarray` +: Return evenly spaced values within a given interval. + + Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` + but excluding `stop`). For integer arguments the function is equivalent to the Python built-in `range + `_ function, but returns a array rather than a list. + When using a non-integer step, such as 0.1, the results may be inconsistent due to being subject to numerical + rounding. In the cases the usage of :func:`linspace` is recommended. + For floating point arguments, the length of the result is :math:`\lceil(stop-start)/step\rceil`. + Again, due to floating point rounding, this rule may result in the last element of `out` being greater than `stop` + by machine epsilon. + + Parameters + ---------- + *args : int or float, optional + Positional arguments defining the interval. Can be: + - A single argument: interpreted as `stop`, with `start=0` and `step=1`. + - Two arguments: interpreted as `start` and `stop`, with `step=1`. + - Three arguments: interpreted as `start`, `stop`, and `step`. + The function raises a `TypeError` if more than three arguments are provided. + dtype : datatype, optional + The type of the output array. If `dtype` is not given, it is automatically inferred from the other input + arguments. + split: int or None, optional + The axis along which the array is split and distributed; ``None`` means no distribution. + device : str, optional + Specifies the device the array shall be allocated on, defaults to globally set default device. + comm : Communication, optional + Handle to the nodes holding distributed parts or copies of this array. + + See Also + -------- + :func:`linspace` : Evenly spaced numbers with careful handling of endpoints. + + Examples + -------- + >>> ht.arange(3) + DNDarray([0, 1, 2], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.arange(3.0) + DNDarray([0., 1., 2.], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.arange(3, 7) + DNDarray([3, 4, 5, 6], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.arange(3, 7, 2) + DNDarray([3, 5], dtype=ht.int32, device=cpu:0, split=None) + +`array(obj: Iterable, dtype: Type[heat.core.types.datatype] | None = None, copy: bool | None = None, ndmin: int = 0, order: str = 'C', split: int | None = None, is_split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> heat.core.dndarray.DNDarray` +: Create a :class:`~heat.core.dndarray.DNDarray`. 
+ + Parameters + ---------- + obj : array_like + A tensor or array, any object exposing the array interface, an object whose ``__array__`` method returns an + array, or any (nested) sequence. + dtype : datatype, optional + The desired data-type for the array. If not given, then the type will be determined as the minimum type required + to hold the objects in the sequence. This argument can only be used to ‘upcast’ the array. For downcasting, use + the :func:`~heat.core.dndarray.astype` method. + copy : bool, optional + If ``True``, the input object is copied. + If ``False``, input which supports the buffer protocol is never copied. + If ``None`` (default), the function reuses the existing memory buffer if possible, and copies otherwise. + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting array should have. Ones will, if needed, be + attached to the shape if ``ndim > 0`` and prefaced in case of ``ndim < 0`` to meet the requirement. + order: str, optional + Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``, + meaning the array will be stored in row-major order (C-like). If ``order=‘F’``, the array will be stored in + column-major order (Fortran-like). + split : int or None, optional + The axis along which the passed array content ``obj`` is split and distributed in memory. Mutually exclusive + with ``is_split``. + is_split : int or None, optional + Specifies the axis along which the local data portions, passed in obj, are split across all machines. Useful for + interfacing with other distributed-memory code. The shape of the global array is automatically inferred. + Mutually exclusive with ``split``. + device : str or Device, optional + Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on (i.e. globally set default + device). + comm : Communication, optional + Handle to the nodes holding distributed array chunks. + + Raises + ------ + NotImplementedError + If order is one of the NumPy options ``'K'`` or ``'A'``. + ValueError + If ``copy`` is False but a copy is necessary to satisfy other requirements (e.g. different dtype, device, etc.). + TypeError + If the input object cannot be converted to a torch.Tensor, hence it cannot be converted to a :class:`~heat.core.dndarray.DNDarray`. 
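+
+ A small sketch of the ``copy`` semantics described above (the behaviour follows the parameter description; the torch tensor is only an illustrative input):
+ >>> import torch
+ >>> t = torch.tensor([1, 2, 3])
+ >>> a = ht.array(t, copy=False)  # no copy: `a` shares the buffer of `t`
+ >>> t[0] = 99
+ >>> a
+ DNDarray([99, 2, 3], dtype=ht.int64, device=cpu:0, split=None)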
+
+ Examples
+ --------
+ >>> ht.array([1, 2, 3])
+ DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+ >>> ht.array([1, 2, 3.0])
+ DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.array([[1, 2], [3, 4]])
+ DNDarray([[1, 2],
+ [3, 4]], dtype=ht.int64, device=cpu:0, split=None)
+ >>> ht.array([1, 2, 3], ndmin=2)
+ DNDarray([[1],
+ [2],
+ [3]], dtype=ht.int64, device=cpu:0, split=None)
+ >>> ht.array([1, 2, 3], dtype=float)
+ DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.array([1, 2, 3, 4], split=0)
+ DNDarray([1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=0)
+ >>> if ht.MPI_WORLD.rank == 0:
+ ...     a = ht.array([1, 2], is_split=0)
+ ... else:
+ ...     a = ht.array([3, 4], is_split=0)
+ >>> a
+ DNDarray([1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=0)
+ >>> a = np.arange(2 * 3).reshape(2, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5]])
+ >>> a.strides
+ (24, 8)
+ >>> b = ht.array(a)
+ >>> b
+ DNDarray([[0, 1, 2],
+ [3, 4, 5]], dtype=ht.int64, device=cpu:0, split=None)
+ >>> b.strides
+ (24, 8)
+ >>> b.larray.untyped_storage()
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ [torch.LongStorage of size 6]
+ >>> c = ht.array(a, order="F")
+ >>> c
+ DNDarray([[0, 1, 2],
+ [3, 4, 5]], dtype=ht.int64, device=cpu:0, split=None)
+ >>> c.strides
+ (8, 16)
+ >>> c.larray.untyped_storage()
+ 0
+ 3
+ 1
+ 4
+ 2
+ 5
+ [torch.LongStorage of size 6]
+ >>> a = np.arange(4 * 3).reshape(4, 3)
+ >>> a.strides
+ (24, 8)
+ >>> b = ht.array(a, order="F", split=0)
+ >>> b
+ DNDarray([[ 0, 1, 2],
+ [ 3, 4, 5],
+ [ 6, 7, 8],
+ [ 9, 10, 11]], dtype=ht.int64, device=cpu:0, split=0)
+ >>> b.strides
+ [0/2] (8, 16)
+ [1/2] (8, 16)
+ >>> b.larray.untyped_storage()
+ [0/2] 0
+ 3
+ 1
+ 4
+ 2
+ 5
+ [torch.LongStorage of size 6]
+ [1/2] 6
+ 9
+ 7
+ 10
+ 8
+ 11
+ [torch.LongStorage of size 6]
+
+`asarray(obj: Iterable, dtype: Type[heat.core.types.datatype] | None = None, copy: bool | None = None, order: str = 'C', is_split: bool | None = None, device: str | heat.core.devices.Device | None = None) ‑> heat.core.dndarray.DNDarray`
+: Convert ``obj`` to a DNDarray. If ``obj`` is a `DNDarray` or `Tensor` with the same `dtype` and `device`, or if the
+ data is an `ndarray` of the corresponding ``dtype`` and the ``device`` is the CPU, no copy will be performed.
+
+ Parameters
+ ----------
+ obj : iterable
+ Input data, in any form that can be converted to an array. This includes e.g. lists, lists of tuples, tuples,
+ tuples of tuples, tuples of lists and ndarrays.
+ dtype : dtype, optional
+ By default, the data-type is inferred from the input data.
+ copy : bool, optional
+ If ``True``, then the object is copied. If ``False``, the object is not copied and a ``ValueError`` is
+ raised in the case a copy would be necessary. If ``None``, a copy will only be made if `obj` is a nested
+ sequence or if a copy is needed to satisfy any of the other requirements, e.g. ``dtype``.
+ order : str, optional
+ Whether to use row-major (C-style) or column-major (Fortran-style) memory representation. Defaults to 'C'.
+ is_split : None or int, optional
+ Specifies the axis along which the local data portions, passed in obj, are split across all MPI processes. Useful for
+ interfacing with other HPC code. The shape of the global tensor is automatically inferred.
+ device : str, ht.Device or None, optional
+ Specifies the device the tensor shall be allocated on. By default, it is inferred from the input data.
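+
+ A sketch of the ``copy=False`` contract described above (the exact error message is not specified here):
+ >>> a = ht.array([1, 2, 3], dtype=ht.int64)
+ >>> ht.asarray(a, dtype=ht.float32, copy=False)  # the dtype conversion would require a copy
+ Traceback (most recent call last):
+ ...
+ ValueError: ...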
+
+ Examples
+ --------
+ >>> a = [1, 2]
+ >>> ht.asarray(a)
+ DNDarray([1, 2], dtype=ht.int64, device=cpu:0, split=None)
+ >>> a = np.array([1, 2, 3])
+ >>> n = ht.asarray(a)
+ >>> n
+ DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+ >>> n[0] = 0
+ >>> a
+ array([0, 2, 3])
+ >>> a = torch.tensor([1, 2, 3])
+ >>> t = ht.asarray(a)
+ >>> t
+ DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None)
+ >>> t[0] = 0
+ >>> a
+ tensor([0, 2, 3])
+ >>> a = ht.array([1, 2, 3, 4], dtype=ht.float32)
+ >>> ht.asarray(a, dtype=ht.float32) is a
+ True
+ >>> ht.asarray(a, dtype=ht.float64) is a
+ False
+
+`empty(shape: int | Sequence[int], dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new uninitialized :class:`~heat.core.dndarray.DNDarray` of given shape and data type. May be allocated
+ split up across multiple nodes along the specified axis.
+
+ Parameters
+ ----------
+ shape : int or Sequence[int,...]
+ Desired shape of the output array, e.g. 1 or (1, 2, 3,).
+ dtype : datatype
+ The desired HeAT data type for the array.
+ split : int, optional
+ The axis along which the array is split and distributed; ``None`` means no distribution.
+ device : str or Device, optional
+ Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+ default device.
+ comm : Communication, optional
+ Handle to the nodes holding distributed parts or copies of this array.
+ order : str, optional
+ Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+ meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+ column-major order (Fortran-like).
+
+ Raises
+ ------
+ NotImplementedError
+ If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+ Examples
+ --------
+ >>> ht.empty(3)
+ DNDarray([0., 0., 0.], dtype=ht.float32, device=cpu:0, split=None)
+ >>> ht.empty(3, dtype=ht.int)
+ DNDarray([59140784, 0, 59136816], dtype=ht.int32, device=cpu:0, split=None)
+ >>> ht.empty((2, 3))
+ DNDarray([[-1.7206e-10, 4.5905e-41, -1.7206e-10],
+ [ 4.5905e-41, 4.4842e-44, 0.0000e+00]], dtype=ht.float32, device=cpu:0, split=None)
+
+`empty_like(a: heat.core.dndarray.DNDarray, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new uninitialized :class:`~heat.core.dndarray.DNDarray` with the same type, shape and data distribution
+ of given object. Data type, data distribution axis, and device can be explicitly overridden.
+
+ Parameters
+ ----------
+ a : DNDarray
+ The shape, data-type, split axis and device of ``a`` define these same attributes of the returned array,
+ unless overridden.
+ dtype : datatype, optional
+ Overrides the data type of the result.
+ split : int or None, optional
+ The axis along which the array is split and distributed; ``None`` means no distribution.
+ device : str or Device, optional + Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set + default device. + comm: Communication, optional + Handle to the nodes holding distributed parts or copies of this array. + order: str, optional + Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``, + meaning the array will be stored in row-major order (C-like). If ``order=‘F’``, the array will be stored in + column-major order (Fortran-like). + + Raises + ------ + NotImplementedError + If order is one of the NumPy options ``'K'`` or ``'A'``. + + Examples + -------- + >>> x = ht.ones( + ... ( + ... 2, + ... 3, + ... ) + ... ) + >>> x + DNDarray([[1., 1., 1.], + [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.empty_like(x) + DNDarray([[-1.7205e-10, 4.5905e-41, 7.9442e-37], + [ 0.0000e+00, 4.4842e-44, 0.0000e+00]], dtype=ht.float32, device=cpu:0, split=None) + +`eye(shape: int | Sequence[int], dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray` +: Returns a new 2-D :class:`~heat.core.dndarray.DNDarray` with ones on the diagonal and zeroes elsewhere, i.e. an + identity matrix. + + Parameters + ---------- + shape : int or Sequence[int,...] + The shape of the data-type. If only one number is provided, returning array will be square with that size. In + other cases, the first value represents the number rows, the second the number of columns. + dtype : datatype, optional + Overrides the data type of the result. + split : int or None, optional + The axis along which the array is split and distributed; ``None`` means no distribution. + device : str or Device, optional + Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set + default device. + comm : Communication, optional + Handle to the nodes holding distributed parts or copies of this array. + order: str, optional + Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``, + meaning the array will be stored in row-major order (C-like). If ``order=‘F’``, the array will be stored in + column-major order (Fortran-like). + + Raises + ------ + NotImplementedError + If order is one of the NumPy options ``'K'`` or ``'A'``. + + Examples + -------- + >>> ht.eye(2) + DNDarray([[1., 0.], + [0., 1.]], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.eye((2, 3), dtype=ht.int32) + DNDarray([[1, 0, 0], + [0, 1, 0]], dtype=ht.int32, device=cpu:0, split=None) + +`from_partition_dict(parted: dict, comm: heat.core.communication.Communication | None = None) ‑> heat.core.dndarray.DNDarray` +: Return a newly created DNDarray constructed from the '__partitioned__' attributed of the input object. + Memory of local partitions will be shared (zero-copy) as long as supported by data objects. + Currently supports numpy ndarrays and torch tensors as data objects. + Current limitations: + * Partitions must be ordered in the partition-grid by rank + * Only one split-axis + * Only one partition per rank + * Only SPMD-style __partitioned__ + + Parameters + ---------- + parted : dict + A partition dictionary used to create the new DNDarray + comm: Communication, optional + Handle to the nodes holding distributed parts or copies of this array. 
+
+    See Also
+    --------
+    :func:`ht.core.DNDarray.create_partition_interface`
+
+    Raises
+    ------
+    AttributeError
+        If not hasattr(x, "__partitioned__") or if underlying data has no dtype.
+    TypeError
+        If it finds an unsupported array type.
+    RuntimeError
+        If other unsupported content is found.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> a = ht.ones((44, 55), split=0)
+    >>> b = ht.from_partition_dict(a.__partitioned__)
+    >>> assert (a == b).all()
+    >>> a[40] = 4711
+    >>> assert (a == b).all()
+
+`from_partitioned(x, comm: heat.core.communication.Communication | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return a newly created DNDarray constructed from the '__partitioned__' attribute of the input object.
+    Memory of local partitions will be shared (zero-copy) as long as supported by data objects.
+    Currently supports numpy ndarrays and torch tensors as data objects.
+    Current limitations:
+    * Partitions must be ordered in the partition-grid by rank
+    * Only one split-axis
+    * Only one partition per rank
+    * Only SPMD-style __partitioned__
+
+    Parameters
+    ----------
+    x : object
+        Requires x.__partitioned__
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    See Also
+    --------
+    :func:`ht.core.DNDarray.create_partition_interface`
+
+    Raises
+    ------
+    AttributeError
+        If not hasattr(x, "__partitioned__") or if underlying data has no dtype.
+    TypeError
+        If it finds an unsupported array type.
+    RuntimeError
+        If other unsupported content is found.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> a = ht.ones((44, 55), split=0)
+    >>> b = ht.from_partitioned(a)
+    >>> assert (a == b).all()
+    >>> a[40] = 4711
+    >>> assert (a == b).all()
+
+`full(shape: int | Sequence[int], fill_value: int | float, dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Return a new :class:`~heat.core.dndarray.DNDarray` of given shape and type, filled with ``fill_value``.
+
+    Parameters
+    ----------
+    shape : int or Sequence[int,...]
+        Shape of the new array, e.g., (2, 3) or 2.
+    fill_value : scalar
+        Fill value.
+    dtype : datatype, optional
+        The desired data-type for the array.
+    split : int or None, optional
+        The axis along which the array is split and distributed; ``None`` means no distribution.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
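+
+    Notes
+    -----
+    Up to dtype promotion, ``full`` behaves like scaling an array of ones. A small
+    illustrative sketch of this equivalence (not the actual implementation):
+
+    >>> ht.allclose(ht.full((2, 2), 10), 10 * ht.ones((2, 2)))
+    True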
+
+    Examples
+    --------
+    >>> ht.full((2, 2), ht.inf)
+    DNDarray([[inf, inf],
+              [inf, inf]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.full((2, 2), 10)
+    DNDarray([[10., 10.],
+              [10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`full_like(a: heat.core.dndarray.DNDarray, fill_value: int | float, dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Return a full :class:`~heat.core.dndarray.DNDarray` with the same shape and type as a given array. Data type, data
+    distribution axis, and device can be explicitly overridden.
+
+    Parameters
+    ----------
+    a : DNDarray
+        The shape, data-type, split axis and device of ``a`` define these same attributes of the returned array.
+    fill_value : scalar
+        Fill value.
+    dtype : datatype, optional
+        The data type of the result, defaults to `a.dtype`.
+    split : int or None, optional
+        The axis along which the array is split and distributed; defaults to `a.split`.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to `a.device`.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+    Examples
+    --------
+    >>> x = ht.zeros((2, 3))
+    >>> x
+    DNDarray([[0., 0., 0.],
+              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.full_like(x, 1.0)
+    DNDarray([[1., 1., 1.],
+              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`linspace(start: int | float, stop: int | float, num: int = 50, endpoint: bool = True, retstep: bool = False, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> Tuple[heat.core.dndarray.DNDarray, float]`
+: Returns num evenly spaced samples, calculated over the interval ``[start, stop]``. The endpoint of the interval can
+    optionally be excluded. There are num equally spaced samples in the closed interval ``[start, stop]`` or the
+    half-open interval ``[start, stop)`` (depending on whether endpoint is ``True`` or ``False``).
+
+    Parameters
+    ----------
+    start : scalar or scalar-convertible
+        The starting value of the sample interval; may be a sequence if convertible to scalar.
+    stop : scalar or scalar-convertible
+        The end value of the sample interval, unless ``endpoint`` is set to ``False``. In that case, the sequence
+        consists of all but the last of ``num+1`` evenly spaced samples, so that stop is excluded. Note that the step
+        size changes when endpoint is ``False``.
+    num : int, optional
+        Number of samples to generate, defaults to 50. Must be non-negative.
+    endpoint : bool, optional
+        If ``True``, stop is the last sample, otherwise, it is not included.
+    retstep : bool, optional
+        If ``True``, return (samples, step), where step is the spacing between samples.
+    dtype : dtype, optional
+        The type of the output array.
+    split : int or None, optional
+        The axis along which the array is split and distributed; ``None`` means no distribution.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    Examples
+    --------
+    >>> ht.linspace(2.0, 3.0, num=5)
+    DNDarray([2.0000, 2.2500, 2.5000, 2.7500, 3.0000], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.linspace(2.0, 3.0, num=5, endpoint=False)
+    DNDarray([2.0000, 2.2000, 2.4000, 2.6000, 2.8000], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.linspace(2.0, 3.0, num=5, retstep=True)
+    (DNDarray([2.0000, 2.2500, 2.5000, 2.7500, 3.0000], dtype=ht.float32, device=cpu:0, split=None), 0.25)
+
+`logspace(start: int | float, stop: int | float, num: int = 50, endpoint: bool = True, base: float = 10.0, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return numbers spaced evenly on a log scale. In linear space, the sequence starts at ``base**start`` (``base`` to
+    the power of ``start``) and ends with ``base**stop`` (see ``endpoint`` below).
+
+    Parameters
+    ----------
+    start : scalar or scalar-convertible
+        ``base**start`` is the starting value of the sequence.
+    stop : scalar or scalar-convertible
+        ``base**stop`` is the final value of the sequence, unless `endpoint` is ``False``. In that case, ``num+1``
+        values are spaced over the interval in log-space, of which all but the last (a sequence of length ``num``) are
+        returned.
+    num : int, optional
+        Number of samples to generate.
+    endpoint : bool, optional
+        If ``True``, `stop` is the last sample. Otherwise, it is not included.
+    base : float, optional
+        The base of the log space. The step size between the elements in :math:`ln(samples) / ln(base)` (or
+        :math:`\log_{base}(samples)`) is uniform.
+    dtype : datatype, optional
+        The type of the output array. If ``dtype`` is not given, infer the data type from the other input arguments.
+    split : int or None, optional
+        The axis along which the array is split and distributed; ``None`` means no distribution.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    See Also
+    --------
+    :func:`arange` : Similar to :func:`linspace`, with the step size specified instead of the
+    number of samples. Note that, when used with a float endpoint, the endpoint may or may not be included.
+
+    :func:`linspace` : Similar to ``logspace``, but with the samples uniformly distributed in linear space, instead of
+    log space.
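+
+    Notes
+    -----
+    Up to dtype handling, ``logspace`` is equivalent to exponentiating a ``linspace`` grid. A sketch of this
+    relation (the comparison uses the ``allclose`` default tolerances and is purely illustrative):
+
+    >>> y = ht.linspace(2.0, 3.0, num=4)
+    >>> ht.allclose(ht.logspace(2.0, 3.0, num=4), ht.pow(10.0, y))
+    True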
+
+    Examples
+    --------
+    >>> ht.logspace(2.0, 3.0, num=4)
+    DNDarray([ 100.0000,  215.4434,  464.1590, 1000.0000], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.logspace(2.0, 3.0, num=4, endpoint=False)
+    DNDarray([100.0000, 177.8279, 316.2278, 562.3413], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.logspace(2.0, 3.0, num=4, base=2.0)
+    DNDarray([4.0000, 5.0397, 6.3496, 8.0000], dtype=ht.float32, device=cpu:0, split=None)
+
+`meshgrid(*arrays: Sequence[heat.core.dndarray.DNDarray], indexing: str = 'xy') ‑> List[heat.core.dndarray.DNDarray]`
+: Returns coordinate matrices from coordinate vectors.
+
+    Parameters
+    ----------
+    arrays : Sequence[DNDarray]
+        One-dimensional arrays representing grid coordinates. If exactly one vector is distributed, the returned
+        matrices will be distributed along the axis equal to the index of this vector in the input list.
+    indexing : str, optional
+        Cartesian 'xy' or matrix 'ij' indexing of output. It is ignored if zero or one one-dimensional arrays are
+        provided. Default: 'xy'.
+
+    Raises
+    ------
+    ValueError
+        If `indexing` is not 'xy' or 'ij'.
+    ValueError
+        If more than one input vector is distributed.
+
+    Examples
+    --------
+    >>> x = ht.arange(4)
+    >>> y = ht.arange(3)
+    >>> xx, yy = ht.meshgrid(x, y)
+    >>> xx
+    DNDarray([[0, 1, 2, 3],
+              [0, 1, 2, 3],
+              [0, 1, 2, 3]], dtype=ht.int32, device=cpu:0, split=None)
+    >>> yy
+    DNDarray([[0, 0, 0, 0],
+              [1, 1, 1, 1],
+              [2, 2, 2, 2]], dtype=ht.int32, device=cpu:0, split=None)
+
+`ones(shape: int | Sequence[int], dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new :class:`~heat.core.dndarray.DNDarray` of given shape and data type filled with ones. May be allocated
+    split up across multiple nodes along the specified axis.
+
+    Parameters
+    ----------
+    shape : int or Sequence[int,...]
+        Desired shape of the output array, e.g. 1 or (1, 2, 3,).
+    dtype : datatype, optional
+        The desired HeAT data type for the array.
+    split : int or None, optional
+        The axis along which the array is split and distributed; ``None`` means no distribution.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+    Examples
+    --------
+    >>> ht.ones(3)
+    DNDarray([1., 1., 1.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.ones(3, dtype=ht.int)
+    DNDarray([1, 1, 1], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.ones((2, 3))
+    DNDarray([[1., 1., 1.],
+              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`ones_like(a: heat.core.dndarray.DNDarray, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new :class:`~heat.core.dndarray.DNDarray` filled with ones with the same type, shape, data distribution
+    and device of the input object. Data type, data distribution axis, and device can be explicitly overridden.
+
+    Parameters
+    ----------
+    a : DNDarray
+        The shape, data-type, split axis and device of ``a`` define these same attributes of the returned array.
+    dtype : datatype, optional
+        Overrides the data type of the result.
+    split : int or None, optional
+        The axis along which the array is split and distributed; defaults to `a.split`.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to `a.device`.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+    Examples
+    --------
+    >>> x = ht.zeros((2, 3))
+    >>> x
+    DNDarray([[0., 0., 0.],
+              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.ones_like(x)
+    DNDarray([[1., 1., 1.],
+              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`zeros(shape: int | Sequence[int], dtype: Type[heat.core.types.datatype] = heat.core.types.float32, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new :class:`~heat.core.dndarray.DNDarray` of given shape and data type filled with zero values.
+    May be allocated split up across multiple nodes along the specified axis.
+
+    Parameters
+    ----------
+    shape : int or Sequence[int,...]
+        Desired shape of the output array, e.g. 1 or (1, 2, 3,).
+    dtype : datatype
+        The desired HeAT data type for the array.
+    split : int or None, optional
+        The axis along which the array is split and distributed; ``None`` means no distribution.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+    Examples
+    --------
+    >>> ht.zeros(3)
+    DNDarray([0., 0., 0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.zeros(3, dtype=ht.int)
+    DNDarray([0, 0, 0], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.zeros((2, 3))
+    DNDarray([[0., 0., 0.],
+              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`zeros_like(a: heat.core.dndarray.DNDarray, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, order: str = 'C') ‑> heat.core.dndarray.DNDarray`
+: Returns a new :class:`~heat.core.dndarray.DNDarray` filled with zeros with the same type, shape, data
+    distribution, and device of the input object. Data type, data distribution axis, and device can be explicitly
+    overridden.
+
+    Parameters
+    ----------
+    a : DNDarray
+        The shape, data-type, split axis, and device of ``a`` define these same attributes of the returned array.
+    dtype : datatype, optional
+        Overrides the data type of the result.
+    split : int or None, optional
+        The axis along which the array is split and distributed; defaults to `a.split`.
+    device : str or Device, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to `a.device`.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    order : str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created array. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+
+    Raises
+    ------
+    NotImplementedError
+        If order is one of the NumPy options ``'K'`` or ``'A'``.
+
+    Examples
+    --------
+    >>> x = ht.ones((2, 3))
+    >>> x
+    DNDarray([[1., 1., 1.],
+              [1., 1., 1.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.zeros_like(x)
+    DNDarray([[0., 0., 0.],
+              [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/index.md b/doc/api/heat/core/index.md
new file mode 100644
index 0000000000..e46781e51a
--- /dev/null
+++ b/doc/api/heat/core/index.md
@@ -0,0 +1,35 @@
+Module heat.core
+================
+Adds the core heat functions to the ht.core namespace
+
+Sub-modules
+-----------
+* heat.core.arithmetics
+* heat.core.base
+* heat.core.communication
+* heat.core.complex_math
+* heat.core.constants
+* heat.core.devices
+* heat.core.dndarray
+* heat.core.exponential
+* heat.core.factories
+* heat.core.indexing
+* heat.core.io
+* heat.core.linalg
+* heat.core.logical
+* heat.core.manipulations
+* heat.core.memory
+* heat.core.printing
+* heat.core.random
+* heat.core.relational
+* heat.core.rounding
+* heat.core.sanitation
+* heat.core.signal
+* heat.core.statistics
+* heat.core.stride_tricks
+* heat.core.tests
+* heat.core.tiling
+* heat.core.trigonometrics
+* heat.core.types
+* heat.core.version
+* heat.core.vmap
diff --git a/doc/api/heat/core/indexing.md b/doc/api/heat/core/indexing.md
new file mode 100644
index 0000000000..909809dc50
--- /dev/null
+++ b/doc/api/heat/core/indexing.md
@@ -0,0 +1,80 @@
+Module heat.core.indexing
+=========================
+Functions relating to indices of items within DNDarrays, i.e. `where()`
+
+Functions
+---------
+
+`nonzero(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Return a :class:`~heat.core.dndarray.DNDarray` containing the indices of the elements that are non-zero (using
+    ``torch.nonzero``). If ``x`` is split then the result is split in the 0th dimension.
+    However, this :class:`~heat.core.dndarray.DNDarray`
+    can be UNBALANCED as it contains the indices of the non-zero elements on each node.
+    Returns an array with one entry for each dimension of ``x``, containing the indices of the non-zero elements in
+    that dimension.
+    The values in ``x`` are always tested and returned in row-major, C-style order.
+    The corresponding non-zero values can be obtained with: ``x[nonzero(x)]``.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.array([[3, 0, 0], [0, 4, 1], [0, 6, 0]], split=0)
+    >>> ht.nonzero(x)
+    DNDarray([[0, 0],
+              [1, 1],
+              [1, 2],
+              [2, 1]], dtype=ht.int64, device=cpu:0, split=0)
+    >>> y = ht.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], split=0)
+    >>> y > 3
+    DNDarray([[False, False, False],
+              [ True,  True,  True],
+              [ True,  True,  True]], dtype=ht.bool, device=cpu:0, split=0)
+    >>> ht.nonzero(y > 3)
+    DNDarray([[1, 0],
+              [1, 1],
+              [1, 2],
+              [2, 0],
+              [2, 1],
+              [2, 2]], dtype=ht.int64, device=cpu:0, split=0)
+    >>> y[ht.nonzero(y > 3)]
+    DNDarray([4, 5, 6, 7, 8, 9], dtype=ht.int64, device=cpu:0, split=0)
+
+`where(cond: heat.core.dndarray.DNDarray, x: int | float | heat.core.dndarray.DNDarray | None = None, y: int | float | heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return a :class:`~heat.core.dndarray.DNDarray` containing elements chosen from ``x`` or ``y`` depending on condition.
+    Result is a :class:`~heat.core.dndarray.DNDarray` with elements from ``x`` where cond is ``True``,
+    and elements from ``y`` elsewhere (``False``).
+
+    Parameters
+    ----------
+    cond : DNDarray
+        Condition of interest; where ``True``, yield ``x``, otherwise yield ``y``.
+    x : DNDarray or int or float, optional
+        Values from which to choose. ``x``, ``y`` and condition need to be broadcastable to some shape.
+    y : DNDarray or int or float, optional
+        Values from which to choose. ``x``, ``y`` and condition need to be broadcastable to some shape.
+
+    Raises
+    ------
+    NotImplementedError
+        If the splits of the two input :class:`~heat.core.dndarray.DNDarray` differ.
+    TypeError
+        If only one of ``x`` or ``y`` is given, or if they are neither DNDarrays nor numerical scalars.
+
+    Notes
+    -----
+    When only condition is provided, this function is a shorthand for :func:`nonzero`.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.arange(10, split=0)
+    >>> ht.where(x < 5, x, 10 * x)
+    DNDarray([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90], dtype=ht.int64, device=cpu:0, split=0)
+    >>> y = ht.array([[0, 1, 2], [0, 2, 4], [0, 3, 6]])
+    >>> ht.where(y < 4, y, -1)
+    DNDarray([[ 0,  1,  2],
+              [ 0,  2, -1],
+              [ 0,  3, -1]], dtype=ht.int64, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/io.md b/doc/api/heat/core/io.md
new file mode 100644
index 0000000000..ec01f2560d
--- /dev/null
+++ b/doc/api/heat/core/io.md
@@ -0,0 +1,379 @@
+Module heat.core.io
+===================
+Enables parallel I/O with data on disk.
+
+Functions
+---------
+
+`load(path: str, *args: Optional[List[object]], **kwargs: Optional[Dict[str, object]]) ‑> heat.core.dndarray.DNDarray`
+: Attempts to load data from a file stored on disk. Attempts to auto-detect the file format by determining the
+    extension. Supports at least CSV files; HDF5 and netCDF4 are additionally possible if the corresponding libraries
+    are installed.
+
+    Parameters
+    ----------
+    path : str
+        Path to the file to be read.
+    args : list, optional
+        Additional options passed to the particular functions.
+    kwargs : dict, optional
+        Additional options passed to the particular functions.
+
+    Raises
+    ------
+    ValueError
+        If the file extension is not understood or known.
+    RuntimeError
+        If the optional dependency for a file extension is not available.
+
+    Examples
+    --------
+    >>> ht.load("data.h5", dataset="DATA")
+    DNDarray([ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.load("data.nc", variable="DATA")
+    DNDarray([ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.load("my_data.zarr", variable="RECEIVER_1/DATA")
+    DNDarray([ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981], dtype=ht.float32, device=cpu:0, split=0)
+    >>> ht.load("my_data.zarr", variable="RECEIVER_*/DATA")
+    DNDarray([[ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981],
+              [ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981],
+              [ 1.0000,  2.7183,  7.3891, 20.0855, 54.5981]], dtype=ht.float32, device=cpu:0, split=0)
+
+    See Also
+    --------
+    :func:`load_csv` : Loads data from a CSV file.
+    :func:`load_csv_from_folder` : Loads multiple .csv files into one DNDarray which will be returned.
+    :func:`load_hdf5` : Loads data from an HDF5 file.
+    :func:`load_netcdf` : Loads data from a NetCDF4 file.
+    :func:`load_npy_from_path` : Loads multiple .npy files into one DNDarray which will be returned.
+    :func:`load_zarr` : Loads zarr-format data into a DNDarray which will be returned.
+
+`load_csv(path: str, header_lines: int = 0, sep: str = ',', dtype: datatype = heat.core.types.float32, encoding: str = 'utf-8', split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Loads data from a CSV file. The data will be distributed along the axis 0.
+
+    Parameters
+    ----------
+    path : str
+        Path to the CSV file to be read.
+    header_lines : int, optional
+        The number of lines at the beginning of the file that should not be considered as data.
+    sep : str, optional
+        The single ``char`` or ``str`` that separates the values in each row.
+    dtype : datatype, optional
+        Data type of the resulting array.
+    encoding : str, optional
+        The type of encoding which will be used to interpret the lines of the csv file as strings.
+    split : int or None, optional
+        Along which axis the resulting array should be split.
+        Default is ``None`` which means each node will have the full array.
+    device : str, optional
+        The device id on which to place the data, defaults to globally set default device.
+    comm : Communication, optional
+        The communication to use for the data distribution, defaults to the global default.
+
+    Raises
+    ------
+    TypeError
+        If any of the input parameters are not of correct type.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> a = ht.load_csv("data.csv")
+    >>> a.shape
+    [0/3] (150, 4)
+    [1/3] (150, 4)
+    [2/3] (150, 4)
+    [3/3] (150, 4)
+    >>> a.lshape
+    [0/3] (38, 4)
+    [1/3] (38, 4)
+    [2/3] (37, 4)
+    [3/3] (37, 4)
+    >>> b = ht.load_csv("data.csv", header_lines=10)
+    >>> b.shape
+    [0/3] (140, 4)
+    [1/3] (140, 4)
+    [2/3] (140, 4)
+    [3/3] (140, 4)
+    >>> b.lshape
+    [0/3] (35, 4)
+    [1/3] (35, 4)
+    [2/3] (35, 4)
+    [3/3] (35, 4)
+
+`load_hdf5(path: str, dataset: str, dtype: datatype = heat.core.types.float32, slices: Optional[Tuple[Optional[slice], ...]] = None, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Loads data from an HDF5 file. The data may be distributed among multiple processing nodes via the split flag.
+
+    Parameters
+    ----------
+    path : str
+        Path to the HDF5 file to be read.
+    dataset : str
+        Name of the dataset to be read.
+    dtype : datatype, optional
+        Data type of the resulting array.
+    slices : tuple of slice objects, optional
+        Load only the specified slices of the dataset.
+    split : int or None, optional
+        The axis along which the data is distributed among the processing cores.
+    device : str, optional
+        The device id on which to place the data, defaults to globally set default device.
+    comm : Communication, optional
+        The communication to use for the data distribution.
+
+    Raises
+    ------
+    TypeError
+        If any of the input parameters are not of correct type.
+
+    Examples
+    --------
+    >>> a = ht.load_hdf5("data.h5", dataset="DATA")
+    >>> a.shape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> a.lshape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> b = ht.load_hdf5("data.h5", dataset="DATA", split=0)
+    >>> b.shape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> b.lshape
+    [0/2] (3,)
+    [1/2] (2,)
+
+    Using the slicing argument:
+    >>> not_sliced = ht.load_hdf5("other_data.h5", dataset="DATA", split=0)
+    >>> not_sliced.shape
+    [0/2] (10, 2)
+    [1/2] (10, 2)
+    >>> not_sliced.lshape
+    [0/2] (5, 2)
+    [1/2] (5, 2)
+    >>> not_sliced.larray
+    [0/2] [[ 0,  1],
+           [ 2,  3],
+           [ 4,  5],
+           [ 6,  7],
+           [ 8,  9]]
+    [1/2] [[10, 11],
+           [12, 13],
+           [14, 15],
+           [16, 17],
+           [18, 19]]
+
+    >>> sliced = ht.load_hdf5("other_data.h5", dataset="DATA", split=0, slices=slice(8))
+    >>> sliced.shape
+    [0/2] (8, 2)
+    [1/2] (8, 2)
+    >>> sliced.lshape
+    [0/2] (4, 2)
+    [1/2] (4, 2)
+    >>> sliced.larray
+    [0/2] [[ 0,  1],
+           [ 2,  3],
+           [ 4,  5],
+           [ 6,  7]]
+    [1/2] [[ 8,  9],
+           [10, 11],
+           [12, 13],
+           [14, 15]]
+
+    >>> sliced = ht.load_hdf5("other_data.h5", dataset="DATA", split=0, slices=(slice(2, 8), slice(0, 1)))
+    >>> sliced.shape
+    [0/2] (6, 1)
+    [1/2] (6, 1)
+    >>> sliced.lshape
+    [0/2] (3, 1)
+    [1/2] (3, 1)
+    >>> sliced.larray
+    [0/2] [[ 4],
+           [ 6],
+           [ 8]]
+    [1/2] [[10],
+           [12],
+           [14]]
+
+`load_netcdf(path: str, variable: str, dtype: datatype = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Loads data from a NetCDF4 file. The data may be distributed among multiple processing nodes via the split flag.
+
+    Parameters
+    ----------
+    path : str
+        Path to the NetCDF4 file to be read.
+    variable : str
+        Name of the variable to be read.
+    dtype : datatype, optional
+        Data type of the resulting array.
+    split : int or None, optional
+        The axis along which the data is distributed among the processing cores.
+    comm : Communication, optional
+        The communication to use for the data distribution. Defaults to MPI_COMM_WORLD.
+    device : str, optional
+        The device id on which to place the data, defaults to globally set default device.
+
+    Raises
+    ------
+    TypeError
+        If any of the input parameters are not of correct type.
+
+    Examples
+    --------
+    >>> a = ht.load_netcdf("data.nc", variable="DATA")
+    >>> a.shape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> a.lshape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> b = ht.load_netcdf("data.nc", variable="DATA", split=0)
+    >>> b.shape
+    [0/2] (5,)
+    [1/2] (5,)
+    >>> b.lshape
+    [0/2] (3,)
+    [1/2] (2,)
+
+`load_npy_from_path(path: str, dtype: datatype = heat.core.types.int32, split: int = 0, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Loads multiple .npy files into one DNDarray which will be returned. The data will be concatenated along the split
+    axis provided as input.
+ + Parameters + ---------- + path : str + Path to the directory in which .npy-files are located. + dtype : datatype, optional + Data type of the resulting array. + split : int + Along which axis the loaded arrays should be concatenated. + device : str, optional + The device id on which to place the data, defaults to globally set default device. + comm : Communication, optional + The communication to use for the data distribution, default is 'heat.MPI_WORLD' + +`save(data: DNDarray, path: str, *args: Optional[List[object]], **kwargs: Optional[Dict[str, object]])` +: Attempts to save data from a :class:`~heat.core.dndarray.DNDarray` to disk. An auto-detection based on the file + format extension is performed. + + Parameters + ---------- + data : DNDarray + The array holding the data to be stored + path : str + Path to the file to be stored. + args : list, optional + Additional options passed to the particular functions. + kwargs : dict, optional + Additional options passed to the particular functions. + + Raises + ------ + ValueError + If the file extension is not understood or known. + RuntimeError + If the optional dependency for a file extension is not available. + + Examples + -------- + >>> x = ht.arange(100, split=0) + >>> ht.save(x, "data.h5", "DATA", mode="a") + +`save_csv(data: DNDarray, path: str, header_lines: Iterable[str] = None, sep: str = ',', decimals: int = -1, encoding: str = 'utf-8', comm: Optional[Communication] = None, truncate: bool = True)` +: Saves data to CSV files. Only 2D data, all split axes. + + Parameters + ---------- + data : DNDarray + The DNDarray to be saved to CSV. + path : str + The path as a string. + header_lines : Iterable[str] + Optional iterable of str to prepend at the beginning of the file. No + pound sign or any other comment marker will be inserted. + sep : str + The separator character used in this CSV. + decimals: int + Number of digits after decimal point. + encoding : str + The encoding to be used in this CSV. + comm : Optional[Communication] + An optional object of type Communication to be used. + truncate : bool + Whether to truncate an existing file before writing, i.e. fully overwrite it. + The sane default is True. Setting it to False will not shorten files if + needed and thus may leave garbage at the end of existing files. + +`save_hdf5(data: DNDarray, path: str, dataset: str, mode: str = 'w', **kwargs: Dict[str, object])` +: Saves ``data`` to an HDF5 file. Attempts to utilize parallel I/O if possible. + + Parameters + ---------- + data : DNDarray + The data to be saved on disk. + path : str + Path to the HDF5 file to be written. + dataset : str + Name of the dataset the data is saved to. + mode : str, optional + File access mode, one of ``'w', 'a', 'r+'`` + kwargs : dict, optional + Additional arguments passed to the created dataset. + + Raises + ------ + TypeError + If any of the input parameters are not of correct type. + ValueError + If the access mode is not understood. + + Examples + -------- + >>> x = ht.arange(100, split=0) + >>> ht.save_hdf5(x, "data.h5", dataset="DATA") + +`save_netcdf(data: DNDarray, path: str, variable: str, mode: str = 'w', dimension_names: Union[list, tuple, str] = None, is_unlimited: bool = False, file_slices: Union[Iterable[int], slice, bool] = slice(None, None, None), **kwargs: Dict[str, object])` +: Saves data to a netCDF4 file. Attempts to utilize parallel I/O if possible. + + Parameters + ---------- + data : DNDarray + The data to be saved on disk. 
+    path : str
+        Path to the netCDF4 file to be written.
+    variable : str
+        Name of the variable the data is saved to.
+    mode : str, optional
+        File access mode, one of ``'w', 'a', 'r+'``.
+    dimension_names : list or tuple or string
+        Specifies the netCDF Dimensions used by the variable. Ignored if the variable already exists.
+    is_unlimited : bool, optional
+        If True, every dimension created for this variable (i.e., that does not already exist) is unlimited. Already
+        existing limited dimensions cannot be changed to unlimited and vice versa.
+    file_slices : integer iterable, slice, ellipsis or bool
+        Keys used to slice the netCDF Variable, as given in the nc.utils._StartCountStride method.
+    kwargs : dict, optional
+        Additional arguments passed to the created dataset.
+
+    Raises
+    ------
+    TypeError
+        If any of the input parameters are not of correct type.
+    ValueError
+        If the access mode is not understood or if the number of dimension names does not match the number of
+        dimensions.
+
+    Examples
+    --------
+    >>> x = ht.arange(100, split=0)
+    >>> ht.save_netcdf(x, "data.nc", variable="DATA")
+
+`supports_hdf5() ‑> bool`
+: Returns ``True`` if Heat supports reading from and writing to HDF5 files, ``False`` otherwise.
+
+`supports_netcdf() ‑> bool`
+: Returns ``True`` if Heat supports reading from and writing to netCDF4 files, ``False`` otherwise.
+
+`supports_zarr() ‑> bool`
+: Returns ``True`` if zarr is installed, ``False`` otherwise.
diff --git a/doc/api/heat/core/linalg/basics.md b/doc/api/heat/core/linalg/basics.md
new file mode 100644
index 0000000000..22f68de6d7
--- /dev/null
+++ b/doc/api/heat/core/linalg/basics.md
@@ -0,0 +1,704 @@
+Module heat.core.linalg.basics
+==============================
+Basic linear algebra operations on distributed ``DNDarray``
+
+Functions
+---------
+
+`condest(A: heat.core.dndarray.DNDarray, p: int | str = None, algorithm: str = 'randomized', params: list = None) ‑> heat.core.dndarray.DNDarray`
+: Computes a (possibly randomized) upper estimate of the l2-condition number of the input 2D DNDarray.
+
+    Parameters
+    ----------
+    A : DNDarray
+        The matrix, i.e., a 2D DNDarray, for which the condition number shall be estimated.
+    p : int or str, optional
+        The norm to use for the condition number computation. If None, the l2-norm (default, p=2) is used.
+        So far, only p=2 is implemented.
+    algorithm : str
+        The algorithm to use for the estimation. Currently, only "randomized" (default) is implemented.
+    params : dict, optional
+        A dictionary of parameters required for the chosen algorithm; if not provided, default values for the
+        respective algorithm are chosen. If `algorithm="randomized"`, the number of random samples to use can be
+        specified under the key "nsamples"; default is 10.
+
+    Notes
+    -----
+    The "randomized" algorithm follows the approach described in [1]; note that the paper actually estimates the
+    condition number w.r.t. the Frobenius norm. However, this yields an upper bound for the condition number w.r.t.
+    the l2-norm as well.
+
+    References
+    ----------
+    [1] T. Gudmundsson, C. S. Kenney, and A. J. Laub. Small-Sample Statistical Estimates for Matrix Norms. SIAM
+    Journal on Matrix Analysis and Applications 1995 16:3, 776-792.
+
+`cross(a: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray, axisa: int = -1, axisb: int = -1, axisc: int = -1, axis: int = -1) ‑> heat.core.dndarray.DNDarray`
+: Returns the cross product. 2D vectors will be converted to 3D.
+
+    Parameters
+    ----------
+    a : DNDarray
+        First input array.
+    b : DNDarray
+        Second input array. Must have the same shape as 'a'.
+    axisa : int
+        Axis of `a` that defines the vector(s). By default, the last axis.
+    axisb : int
+        Axis of `b` that defines the vector(s). By default, the last axis.
+    axisc : int
+        Axis of the output containing the cross product vector(s). By default, the last axis.
+    axis : int
+        Axis that defines the vectors for which to compute the cross product. Overrides `axisa`, `axisb` and `axisc`.
+        Default: -1
+
+    Raises
+    ------
+    ValueError
+        If the two input arrays don't match in shape, split, device, or comm. If the vectors are along the split axis.
+    TypeError
+        If 'axis' is not an integer.
+
+    Examples
+    --------
+    >>> a = ht.eye(3)
+    >>> b = ht.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
+    >>> ht.cross(a, b)
+    DNDarray([[0., 0., 1.],
+              [1., 0., 0.],
+              [0., 1., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`det(a: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Returns the determinant of a square matrix.
+
+    Parameters
+    ----------
+    a : DNDarray
+        A square matrix or a stack of matrices. Shape = (...,M,M)
+
+    Raises
+    ------
+    RuntimeError
+        If the dtype of 'a' is not floating-point.
+    RuntimeError
+        If `a.ndim < 2` or if the length of the last two dimensions is not the same.
+
+    Examples
+    --------
+    >>> a = ht.array([[-2, -1, 2], [2, 1, 4], [-3, 3, -1]])
+    >>> ht.linalg.det(a)
+    DNDarray(54., dtype=ht.float64, device=cpu:0, split=None)
+
+`dot(a: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray | float`
+: Returns the dot product of two ``DNDarrays``.
+    Specifically,
+
+    1. If both a and b are 1-D arrays, it is the inner product of the vectors.
+
+    2. If both a and b are 2-D arrays, it is matrix multiplication, but using matmul or ``a@b`` is preferred.
+
+    3. If either a or b is 0-D (scalar), it is equivalent to multiplication, and using ``multiply(a, b)`` or ``a*b``
+       is preferred.
+
+    Parameters
+    ----------
+    a : DNDarray
+        First input DNDarray
+    b : DNDarray
+        Second input DNDarray
+    out : DNDarray, optional
+        Output buffer.
+
+    See Also
+    --------
+    vecdot
+        Supports (vector) dot along an axis.
+
+`inv(a: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Computes the multiplicative inverse of a square matrix.
+
+    Parameters
+    ----------
+    a : DNDarray
+        Square matrix of floating-point data type or a stack of square matrices. Shape = (...,M,M)
+
+    Raises
+    ------
+    RuntimeError
+        If the inverse does not exist.
+    RuntimeError
+        If the dtype is not floating-point.
+    RuntimeError
+        If a is not at least two-dimensional or if the lengths of the last two dimensions are not the same.
+
+    Examples
+    --------
+    >>> a = ht.array([[1.0, 2], [2, 3]])
+    >>> ht.linalg.inv(a)
+    DNDarray([[-3.,  2.],
+              [ 2., -1.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`matmul(a: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray, allow_resplit: bool = False) ‑> heat.core.dndarray.DNDarray`
+: Matrix multiplication of two ``DNDarrays``: ``a@b=c`` or ``A@B=c``.
+    Returns a tensor with the result of ``a@b``. The split dimension of the returned array is
+    typically the split dimension of a. If both are ``None`` and if ``allow_resplit=False`` then ``c.split`` is also ``None``.
+
+    Batched inputs (with batch dimensions being leading dimensions) are allowed; see also the Notes below.
+
+    Parameters
+    ----------
+    a : DNDarray
+        matrix :math:`L \times P` or vector :math:`P` or batch of matrices: :math:`B_1 \times ... \times B_k \times L \times P`
+    b : DNDarray
+        matrix :math:`P \times Q` or vector :math:`P` or batch of matrices: :math:`B_1 \times ... \times B_k \times P \times Q`
+    allow_resplit : bool, optional
+        Whether to distribute ``a`` in the case that both ``a.split is None`` and ``b.split is None``.
+        Default is ``False``. If ``True``, if both are not split then ``a`` will be distributed in-place along axis 0.
+
+    Notes
+    -----
+    - For batched inputs, batch dimensions must coincide and if one matrix is split along a batch axis the other must
+      be split along the same axis.
+    - If ``a`` or ``b`` is a vector the result will also be a vector.
+    - We recommend avoiding the particular split combinations ``1``-``0``, ``None``-``0``, and ``1``-``None`` (for
+      ``a.split``-``b.split``) due to their comparably high memory consumption, if possible. Applying
+      ``DNDarray.resplit_`` or ``heat.resplit`` on one of the two factors before calling ``matmul`` in these
+      situations might improve the performance of your code and avoid memory bottlenecks.
+
+    References
+    ----------
+    [1] R. Gu, et al., "Improving Execution Concurrency of Large-scale Matrix Multiplication on
+    Distributed Data-parallel Platforms," IEEE Transactions on Parallel and Distributed Systems,
+    vol 28, no. 9. 2017.
+
+    [2] S. Ryu and D. Kim, "Parallel Huge Matrix Multiplication on a Cluster with GPGPU
+    Accelerators," 2018 IEEE International Parallel and Distributed Processing Symposium
+    Workshops (IPDPSW), Vancouver, BC, 2018, pp. 877-882.
+
+    Examples
+    --------
+    >>> a = ht.ones((n, m), split=1)
+    >>> a[0] = ht.arange(1, m + 1)
+    >>> a[:, -1] = ht.arange(1, n + 1).larray
+    [0/1] tensor([[1., 2.],
+                  [1., 1.],
+                  [1., 1.],
+                  [1., 1.],
+                  [1., 1.]])
+    [1/1] tensor([[3., 1.],
+                  [1., 2.],
+                  [1., 3.],
+                  [1., 4.],
+                  [1., 5.]])
+    >>> b = ht.ones((j, k), split=0)
+    >>> b[0] = ht.arange(1, k + 1)
+    >>> b[:, 0] = ht.arange(1, j + 1).larray
+    [0/1] tensor([[1., 2., 3., 4., 5., 6., 7.],
+                  [2., 1., 1., 1., 1., 1., 1.]])
+    [1/1] tensor([[3., 1., 1., 1., 1., 1., 1.],
+                  [4., 1., 1., 1., 1., 1., 1.]])
+    >>> linalg.matmul(a, b).larray
+    [0/1] tensor([[18.,  8.,  9., 10.],
+                  [14.,  6.,  7.,  8.],
+                  [18.,  7.,  8.,  9.],
+                  [22.,  8.,  9., 10.],
+                  [26.,  9., 10., 11.]])
+    [1/1] tensor([[11., 12., 13.],
+                  [ 9., 10., 11.],
+                  [10., 11., 12.],
+                  [11., 12., 13.],
+                  [12., 13., 14.]])
+
+`matrix_norm(x: heat.core.dndarray.DNDarray, axis: Tuple[int, int] | None = None, keepdims: bool = False, ord: int | str | None = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the matrix norm of an array.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+    axis : tuple, optional
+        Both axes of the matrix. If `None`, 'x' must be a matrix. Default: `None`
+    keepdims : bool, optional
+        Retains the reduced dimension when `True`. Default: `False`
+    ord : int, 'fro', 'nuc', optional
+        The matrix norm order to compute. If `None` the Frobenius norm (`'fro'`) is used. Default: `None`
+
+    See Also
+    --------
+    norm
+        Computes the vector or matrix norm of an array.
+    vector_norm
+        Computes the vector norm of an array.
+ + Notes + ----- + The following norms are supported: + + ===== ============================ + ord norm for matrices + ===== ============================ + None Frobenius norm + 'fro' Frobenius norm + 'nuc' nuclear norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + ===== ============================ + + The following matrix norms are currently **not** supported: + + ===== ============================ + ord norm for matrices + ===== ============================ + 2 largest singular value + -2 smallest singular value + ===== ============================ + + Raises + ------ + TypeError + If axis is not a 2-tuple + ValueError + If an invalid matrix norm is given or 'x' is a vector. + + Examples + -------- + >>> ht.matrix_norm(ht.array([[1, 2], [3, 4]])) + DNDarray([[5.4772]], dtype=ht.float64, device=cpu:0, split=None) + >>> ht.matrix_norm(ht.array([[1, 2], [3, 4]]), keepdims=True, ord=-1) + DNDarray([[4.]], dtype=ht.float64, device=cpu:0, split=None) + +`norm(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int, int] | None = None, keepdims: bool = False, ord: int | float | str | None = None) ‑> heat.core.dndarray.DNDarray` +: Return the vector or matrix norm of an array. + + Parameters + ---------- + x : DNDarray + Input vector + axis : int, tuple, optional + Axes along which to compute the norm. If an integer, vector norm is used. If a 2-tuple, matrix norm is used. + If `None`, it is inferred from the dimension of the array. Default: `None` + keepdims : bool, optional + Retains the reduced dimension when `True`. Default: `False` + ord : int, float, inf, -inf, 'fro', 'nuc' + The norm order to compute. See Notes + + See Also + -------- + vector_norm + Computes the vector norm of an array. + matrix_norm + Computes the matrix norm of an array. 
+
+    Notes
+    -----
+    The following norms are supported:
+
+    ===== ============================ ==========================
+    ord   norm for matrices            norm for vectors
+    ===== ============================ ==========================
+    None  Frobenius norm               L2-norm (Euclidean)
+    'fro' Frobenius norm               --
+    'nuc' nuclear norm                 --
+    inf   max(sum(abs(x), axis=1))     max(abs(x))
+    -inf  min(sum(abs(x), axis=1))     min(abs(x))
+    0     --                           sum(x != 0)
+    1     max(sum(abs(x), axis=0))     L1-norm (Manhattan)
+    -1    min(sum(abs(x), axis=0))     1./sum(1./abs(a))
+    2     --                           L2-norm (Euclidean)
+    -2    --                           1./sqrt(sum(1./abs(a)**2))
+    other --                           sum(abs(x)**ord)**(1./ord)
+    ===== ============================ ==========================
+
+    The following matrix norms are currently **not** supported:
+
+    ===== ============================
+    ord   norm for matrices
+    ===== ============================
+    2     largest singular value
+    -2    smallest singular value
+    ===== ============================
+
+    Raises
+    ------
+    ValueError
+        If 'axis' has more than 2 elements
+
+    Examples
+    --------
+    >>> from heat import linalg as LA
+    >>> a = ht.arange(9, dtype=ht.float) - 4
+    >>> a
+    DNDarray([-4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> b = a.reshape((3, 3))
+    >>> b
+    DNDarray([[-4., -3., -2.],
+              [-1.,  0.,  1.],
+              [ 2.,  3.,  4.]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, ord="fro")
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, float("inf"))
+    DNDarray([4.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, ht.inf)
+    DNDarray([9.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -ht.inf)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, -ht.inf)
+    DNDarray([2.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 1)
+    DNDarray([20.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, 1)
+    DNDarray([7.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -1)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(b, -1)
+    DNDarray([6.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 2)
+    DNDarray(7.7460, dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -2)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, 3)
+    DNDarray([5.8480], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(a, -3)
+    DNDarray([0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> c = ht.array([[1, 2, 3], [-1, 1, 4]])
+    >>> LA.norm(c, axis=0)
+    DNDarray([1.4142, 2.2361, 5.0000], dtype=ht.float64, device=cpu:0, split=None)
+    >>> LA.norm(c, axis=1)
+    DNDarray([3.7417, 4.2426], dtype=ht.float64, device=cpu:0, split=None)
+    >>> LA.norm(c, axis=1, ord=1)
+    DNDarray([6., 6.], dtype=ht.float64, device=cpu:0, split=None)
+    >>> m = ht.arange(8).reshape(2, 2, 2)
+    >>> LA.norm(m, axis=(1, 2))
+    DNDarray([ 3.7417, 11.2250], dtype=ht.float32, device=cpu:0, split=None)
+    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
+    (DNDarray(3.7417, dtype=ht.float32, device=cpu:0, split=None), DNDarray(11.2250, dtype=ht.float32, device=cpu:0, split=None))
+
+`outer(a: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None, split: int | None = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the outer product of two 1-D DNDarrays: :math:`out(i, j) = a(i) \times
b(j)`. + Given two vectors, :math:`a = (a_0, a_1, ..., a_N)` and :math:`b = (b_0, b_1, ..., b_M)`, the outer product is: + + .. math:: + :nowrap: + + \begin{pmatrix} + a_0 \cdot b_0 & a_0 \cdot b_1 & . & . & a_0 \cdot b_M \\ + a_1 \cdot b_0 & a_1 \cdot b_1 & . & . & a_1 \cdot b_M \\ + . & . & . & . & . \\ + a_N \cdot b_0 & a_N \cdot b_1 & . & . & a_N \cdot b_M + \end{pmatrix} + + Parameters + ---------- + a : DNDarray + 1-dimensional: :math:`N` + Will be flattened by default if more than 1-D. + b : DNDarray + 1-dimensional: :math:`M` + Will be flattened by default if more than 1-D. + out : DNDarray, optional + 2-dimensional: :math:`N \times M` + A location where the result is stored + split : int, optional + Split dimension of the resulting DNDarray. Can be 0, 1, or None. + This is only relevant if the calculations are memory-distributed. + Default is ``split=0`` (see Notes). + + Notes + ----- + Parallel implementation of outer product, assumes arrays are dense. + In the classical (dense) case, one of the two arrays needs to be communicated around the processes in + a ring. + + * Sending ``b`` around in a ring results in ``outer`` being split along the rows (``outer.split = 0``). + + + * Sending ``a`` around in a ring results in ``outer`` being split along the columns (``outer.split = 1``). + + + So, if specified, ``split`` defines which ``DNDarray`` stays put and which one is passed around. + If ``split`` is ``None`` or unspecified, the result will be distributed along axis ``0``, i.e. by default ``b`` is + passed around, ``a`` stays put. + + Examples + -------- + >>> a = ht.arange(4) + >>> b = ht.arange(3) + >>> ht.outer(a, b).larray + (3 processes) + [0/2] tensor([[0, 0, 0], + [0, 1, 2], + [0, 2, 4], + [0, 3, 6]], dtype=torch.int32) + [1/2] tensor([[0, 0, 0], + [0, 1, 2], + [0, 2, 4], + [0, 3, 6]], dtype=torch.int32) + [2/2] tensor([[0, 0, 0], + [0, 1, 2], + [0, 2, 4], + [0, 3, 6]], dtype=torch.int32) + >>> a = ht.arange(4, split=0) + >>> b = ht.arange(3, split=0) + >>> ht.outer(a, b).larray + [0/2] tensor([[0, 0, 0], + [0, 1, 2]], dtype=torch.int32) + [1/2] tensor([[0, 2, 4]], dtype=torch.int32) + [2/2] tensor([[0, 3, 6]], dtype=torch.int32) + >>> ht.outer(a, b, split=1).larray + [0/2] tensor([[0], + [0], + [0], + [0]], dtype=torch.int32) + [1/2] tensor([[0], + [1], + [2], + [3]], dtype=torch.int32) + [2/2] tensor([[0], + [2], + [4], + [6]], dtype=torch.int32) + >>> a = ht.arange(5, dtype=ht.float32, split=0) + >>> b = ht.arange(4, dtype=ht.float64, split=0) + >>> out = ht.empty((5,4), dtype=ht.float64, split=1) + >>> ht.outer(a, b, split=1, out=out) + >>> out.larray + [0/2] tensor([[0., 0.], + [0., 1.], + [0., 2.], + [0., 3.], + [0., 4.]], dtype=torch.float64) + [1/2] tensor([[0.], + [2.], + [4.], + [6.], + [8.]], dtype=torch.float64) + [2/2] tensor([[ 0.], + [ 3.], + [ 6.], + [ 9.], + [12.]], dtype=torch.float64) + +`projection(a: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Projection of vector ``a`` onto vector ``b`` + + Parameters + ---------- + a : DNDarray + The vector to be projected. Must be a 1D ``DNDarray`` + b : DNDarray + The vector to project onto. 
+        Must be a 1D ``DNDarray``
+
+`trace(a: heat.core.dndarray.DNDarray, offset: int | None = 0, axis1: int | None = 0, axis2: int | None = 1, dtype: heat.core.types.datatype | None = None, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray | float`
+: Return the sum along diagonals of the array.
+
+    If `a` is 2D, the sum along its diagonal with the given offset is returned, i.e. the sum of
+    elements a[i, i+offset] for all i.
+
+    If `a` has more than two dimensions, then the axes specified by `axis1` and `axis2` are used
+    to determine the 2D-sub-DNDarrays whose traces are returned.
+    The shape of the resulting array is the same as that of `a` with `axis1` and `axis2` removed.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, from which the diagonals are taken
+    offset : int, optional
+        Offset of the diagonal from the main diagonal. Can be both positive and negative. Defaults to 0.
+    axis1 : int, optional
+        Axis to be used as the first axis of the 2D-sub-arrays from which the diagonals
+        should be taken. Default is the first axis of `a`
+    axis2 : int, optional
+        Axis to be used as the second axis of the 2D-sub-arrays from which the diagonals
+        should be taken. Default is the second axis of `a`
+    dtype : dtype, optional
+        Determines the data-type of the returned array and of the accumulator where the elements are
+        summed. If `dtype` has value None then the dtype is the same as that of `a`
+    out : ht.DNDarray, optional
+        Array into which the output is placed. Its type is preserved and it must be of the right shape
+        to hold the output.
+        Only applicable if `a` has more than 2 dimensions, thus the result is not a scalar.
+        If distributed, its split axis might change eventually.
+
+    Returns
+    -------
+    sum_along_diagonals : number (of defined dtype) or ht.DNDarray
+        If `a` is 2D, the sum along the diagonal is returned as a scalar.
+        If `a` has more than 2 dimensions, then a DNDarray of sums along diagonals is returned.
+
+    Examples
+    --------
+    2D case:
+    >>> x = ht.arange(24).reshape((4, 6))
+    >>> x
+    DNDarray([[ 0,  1,  2,  3,  4,  5],
+              [ 6,  7,  8,  9, 10, 11],
+              [12, 13, 14, 15, 16, 17],
+              [18, 19, 20, 21, 22, 23]], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.trace(x)
+    42
+    >>> ht.trace(x, 1)
+    46
+    >>> ht.trace(x, -2)
+    31
+
+    Case with more than 2 dimensions:
+    >>> x = x.reshape((2, 3, 4))
+    >>> x
+    DNDarray([[[ 0,  1,  2,  3],
+               [ 4,  5,  6,  7],
+               [ 8,  9, 10, 11]],
+
+              [[12, 13, 14, 15],
+               [16, 17, 18, 19],
+               [20, 21, 22, 23]]], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.trace(x)
+    DNDarray([16, 18, 20, 22], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.trace(x, 1)
+    DNDarray([24, 26, 28, 30], dtype=ht.int32, device=cpu:0, split=None)
+    >>> ht.trace(x, axis1=0, axis2=2)
+    DNDarray([13, 21, 29], dtype=ht.int32, device=cpu:0, split=None)
+
+`transpose(a: heat.core.dndarray.DNDarray, axes: List[int] | None = None) ‑> heat.core.dndarray.DNDarray`
+: Permute the dimensions of an array.
+
+    Parameters
+    ----------
+    a : DNDarray
+        Input array.
+    axes : None or List[int,...], optional
+        By default, reverse the dimensions, otherwise permute the axes according to the values given.
+
+`tril(m: heat.core.dndarray.DNDarray, k: int = 0) ‑> heat.core.dndarray.DNDarray`
+: Returns the lower triangular part of the ``DNDarray``.
+    The lower triangular part of the array is defined as the elements on and below the diagonal, the other elements of
+    the result array are set to 0.
+    The argument ``k`` controls which diagonal to consider.
+    If ``k=0``, all elements on and below the main diagonal are
+    retained. A positive value includes just as many diagonals above the main diagonal, and similarly a negative
+    value excludes just as many diagonals below the main diagonal.
+
+    Parameters
+    ----------
+    m : DNDarray
+        Input array for which to compute the lower triangle.
+    k : int, optional
+        Diagonal above which to zero elements. ``k=0`` (default) is the main diagonal, ``k<0`` is below and ``k>0`` is above.
+
+`triu(m: heat.core.dndarray.DNDarray, k: int = 0) ‑> heat.core.dndarray.DNDarray`
+: Returns the upper triangular part of the ``DNDarray``.
+    The upper triangular part of the array is defined as the elements on and above the diagonal, the other elements of the result array are set to 0.
+    The argument ``k`` controls which diagonal to consider. If ``k=0``, all elements on and above the main diagonal are
+    retained. A positive value excludes just as many diagonals above the main diagonal, and similarly a negative
+    value includes just as many diagonals below the main diagonal.
+
+    Parameters
+    ----------
+    m : DNDarray
+        Input array for which to compute the upper triangle.
+    k : int, optional
+        Diagonal below which to zero elements. ``k=0`` (default) is the main diagonal, ``k<0`` is below and ``k>0`` is above.
+
+`vdot(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Computes the dot product of two vectors. Higher-dimensional arrays will be flattened.
+
+    Parameters
+    ----------
+    x1 : DNDarray
+        first input array. If it is complex, its complex conjugate will be used.
+    x2 : DNDarray
+        second input array.
+
+    Raises
+    ------
+    ValueError
+        If the number of elements is inconsistent.
+
+    See Also
+    --------
+    dot
+        Return the dot product without using the complex conjugate.
+
+    Examples
+    --------
+    >>> a = ht.array([1 + 1j, 2 + 2j])
+    >>> b = ht.array([1 + 2j, 3 + 4j])
+    >>> ht.vdot(a, b)
+    DNDarray([(17+3j)], dtype=ht.complex64, device=cpu:0, split=None)
+    >>> ht.vdot(b, a)
+    DNDarray([(17-3j)], dtype=ht.complex64, device=cpu:0, split=None)
+
+`vecdot(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, axis: int | None = None, keepdims: bool | None = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the (vector) dot product of two DNDarrays.
+
+    Parameters
+    ----------
+    x1 : DNDarray
+        first input array.
+    x2 : DNDarray
+        second input array. Must be compatible with x1.
+    axis : int, optional
+        axis over which to compute the dot product. The last dimension is used if ``None``.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+
+    See Also
+    --------
+    dot
+        NumPy-like dot function.
+
+    Examples
+    --------
+    >>> ht.vecdot(ht.full((3, 3, 3), 3), ht.ones((3, 3)), axis=0)
+    DNDarray([[9., 9., 9.],
+              [9., 9., 9.],
+              [9., 9., 9.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`vector_norm(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int] | None = None, keepdims=False, ord: int | float | None = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the vector norm of an array.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+    axis : int, tuple, optional
+        Axis along which to compute the vector norm. If `None`, 'x' must be a vector. Default: `None`
+    keepdims : bool, optional
+        Retains the reduced dimension when `True`. Default: `False`
+    ord : int, float, optional
+        The norm order to compute. If `None` the Euclidean norm (`2`) is used.
+
+`vdot(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Computes the dot product of two vectors. Higher-dimensional arrays will be flattened.
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ First input array. If it is complex, its complex conjugate will be used.
+ x2 : DNDarray
+ Second input array.
+
+ Raises
+ ------
+ ValueError
+ If the number of elements is inconsistent.
+
+ See Also
+ --------
+ dot
+ Return the dot product without using the complex conjugate.
+
+ Examples
+ --------
+ >>> a = ht.array([1 + 1j, 2 + 2j])
+ >>> b = ht.array([1 + 2j, 3 + 4j])
+ >>> ht.vdot(a, b)
+ DNDarray([(17+3j)], dtype=ht.complex64, device=cpu:0, split=None)
+ >>> ht.vdot(b, a)
+ DNDarray([(17-3j)], dtype=ht.complex64, device=cpu:0, split=None)
+
+`vecdot(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, axis: int | None = None, keepdims: bool | None = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the (vector) dot product of two DNDarrays.
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ First input array.
+ x2 : DNDarray
+ Second input array. Must be compatible with x1.
+ axis : int, optional
+ Axis over which to compute the dot product. The last dimension is used if ``None``.
+ keepdims : bool, optional
+ If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+
+ See Also
+ --------
+ dot
+ NumPy-like dot function.
+
+ Examples
+ --------
+ >>> ht.vecdot(ht.full((3, 3, 3), 3), ht.ones((3, 3)), axis=0)
+ DNDarray([[9., 9., 9.],
+ [9., 9., 9.],
+ [9., 9., 9.]], dtype=ht.float32, device=cpu:0, split=None)
+
+`vector_norm(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int] | None = None, keepdims=False, ord: int | float | None = None) ‑> heat.core.dndarray.DNDarray`
+: Computes the vector norm of an array.
+
+ Parameters
+ ----------
+ x : DNDarray
+ Input array
+ axis : int, tuple, optional
+ Axis along which to compute the vector norm. If `None`, `x` must be a vector. Default: `None`
+ keepdims : bool, optional
+ Retains the reduced dimension when `True`. Default: `False`
+ ord : int, float, optional
+ The norm order to compute. If `None`, the Euclidean norm (`2`) is used. Default: `None`
+
+ See Also
+ --------
+ norm
+ Computes the vector norm or matrix norm of an array.
+ matrix_norm
+ Computes the matrix norm of an array.
+
+ Notes
+ -----
+ The following norms are supported:
+
+ ===== ==========================
+ ord norm for vectors
+ ===== ==========================
+ None L2-norm (Euclidean)
+ inf max(abs(x))
+ -inf min(abs(x))
+ 0 sum(x != 0)
+ 1 L1-norm (Manhattan)
+ -1 1./sum(1./abs(x))
+ 2 L2-norm (Euclidean)
+ -2 1./sqrt(sum(1./abs(x)**2))
+ other sum(abs(x)**ord)**(1./ord)
+ ===== ==========================
+
+ Raises
+ ------
+ TypeError
+ If axis is not an integer or a 1-tuple.
+ ValueError
+ If an invalid vector norm is given.
+
+ Examples
+ --------
+ >>> ht.vector_norm(ht.array([1, 2, 3, 4]))
+ DNDarray([5.4772], dtype=ht.float64, device=cpu:0, split=None)
+ >>> ht.vector_norm(ht.array([[1, 2], [3, 4]]), axis=0, ord=1)
+ DNDarray([[4., 6.]], dtype=ht.float64, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/linalg/eigh.md b/doc/api/heat/core/linalg/eigh.md
new file mode 100644
index 0000000000..eef2baf57f
--- /dev/null
+++ b/doc/api/heat/core/linalg/eigh.md
@@ -0,0 +1,40 @@
+Module heat.core.linalg.eigh
+============================
+Implements Symmetric Eigenvalue Decomposition
+
+Functions
+---------
+
+`eigh(A: heat.core.dndarray.DNDarray, r_max_zolopd: int = 8, silent: bool = True) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Computes the symmetric eigenvalue decomposition of a symmetric n x n matrix A, provided as a DNDarray.
+
+ The function returns DNDarrays Lambda (shape (n,) with split = 0) and V (shape (n, n)) such that
+ A = V @ diag(Lambda) @ V^T, where Lambda contains the eigenvalues of A and V is an orthonormal matrix
+ containing the corresponding eigenvectors as columns.
+
+ Parameters
+ ----------
+ A : DNDarray
+ The input matrix. Must be symmetric.
+ r_max_zolopd : int, optional
+ This is a hyperparameter for the computation of the polar decomposition via :func:`heat.linalg.polar`, which is
+ applied multiple times in this function. See the documentation of :func:`heat.linalg.polar` for more details on its
+ meaning and the respective default value.
+ silent : bool, optional
+ If True (default), suppresses output messages; otherwise, some information on the recursion is printed to the console.
+
+ Notes
+ -----
+ Unlike the :func:`torch.linalg.eigh` function, the eigenvalues are returned in descending order.
+ Note that no check of symmetry is performed on the input matrix A; thus, applying this function to a non-symmetric matrix may
+ result in unpredictable behaviour without a specific error message pointing to this issue.
+
+ The algorithm used for the computation of the symmetric eigenvalue decomposition is based on the Zolotarev polar decomposition;
+ see Algorithm 5.2 in:
+
+ Nakatsukasa, Y., & Freund, R. W. (2016). Computing fundamental matrix decompositions accurately via the
+ matrix sign function in two iterations: The power of Zolotarev's functions. SIAM Review, 58(3).
+
+ See Also
+ --------
+ :func:`heat.linalg.polar`
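+
+ A minimal usage sketch for ``eigh`` (illustrative only; the input is made symmetric by construction, which, per the Notes above, is the caller's responsibility):
+
+ >>> import heat as ht
+ >>> x = ht.random.randn(100, 100, split=0)
+ >>> a = x @ x.T  # symmetric by construction
+ >>> lam, v = ht.linalg.eigh(a)  # eigenvalues in descending order; a = v @ ht.diag(lam) @ v.T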
diff --git a/doc/api/heat/core/linalg/index.md b/doc/api/heat/core/linalg/index.md
new file mode 100644
index 0000000000..ee4f5ecba0
--- /dev/null
+++ b/doc/api/heat/core/linalg/index.md
@@ -0,0 +1,14 @@
+Module heat.core.linalg
+=======================
+Import all linear algebra functions into the ht.linalg namespace
+
+Sub-modules
+-----------
+* heat.core.linalg.basics
+* heat.core.linalg.eigh
+* heat.core.linalg.polar
+* heat.core.linalg.qr
+* heat.core.linalg.solver
+* heat.core.linalg.svd
+* heat.core.linalg.svdtools
+* heat.core.linalg.tests
diff --git a/doc/api/heat/core/linalg/polar.md b/doc/api/heat/core/linalg/polar.md
new file mode 100644
index 0000000000..bb5d9a6446
--- /dev/null
+++ b/doc/api/heat/core/linalg/polar.md
@@ -0,0 +1,41 @@
+Module heat.core.linalg.polar
+=============================
+Implements polar decomposition (PD)
+
+Functions
+---------
+
+`polar(A: heat.core.dndarray.DNDarray, r: int = None, calcH: bool = True, condition_estimate: float = 1e+16, silent: bool = True, r_max: int = 8) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Computes the so-called polar decomposition of the input 2D DNDarray ``A``, i.e., it returns the orthogonal matrix ``U`` and the symmetric, positive definite
+ matrix ``H`` such that ``A = U @ H``.
+
+ Parameters
+ ----------
+ A : ht.DNDarray
+ The input matrix for which the polar decomposition is computed;
+ must be two-dimensional, of data type float32 or float64, and must have at least as many rows as columns.
+ r : int, optional, default: None
+ The parameter r used in the Zolotarev-PD algorithm; if provided, must be an integer between 1 and 8 that divides the number of MPI processes.
+ Higher values of r lead to faster convergence, but memory consumption is proportional to r.
+ If not provided, the largest 1 <= r <= r_max that divides the number of MPI processes is chosen.
+ calcH : bool, optional, default: True
+ If True, the function returns the symmetric, positive definite matrix H. If False, only the orthogonal matrix U is returned.
+ condition_estimate : float, optional, default: 1.e16
+ This argument allows providing an estimate for the condition number of the input matrix ``A``, if such an estimate is already known.
+ If a positive number greater than 1., this value is used as an estimate for the condition number of A.
+ If smaller than or equal to 1., the condition number is estimated internally.
+ The default value of 1.e16 is the worst-case scenario considered in [1].
+ silent : bool, optional, default: True
+ If True, the function does not print any output. If False, some information is printed during the computation.
+ r_max : int, optional, default: 8
+ See the description of r for the meaning; r_max is only taken into account if r is not provided.
+
+ Notes
+ -----
+ The implementation follows Algorithm 5.1 in Reference [1]; however, instead of switching from QR to Cholesky decomposition depending on the condition number,
+ we stick to QR decomposition in all iterations.
+
+ References
+ ----------
+ [1] Nakatsukasa, Y., & Freund, R. W. (2016). Computing Fundamental Matrix Decompositions Accurately via the Matrix Sign Function in Two Iterations: The Power of Zolotarev's Functions. SIAM Review, 58(3), DOI: https://doi.org/10.1137/140990334.
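+
+ A minimal usage sketch under default parameters (illustrative tall input; float32 as produced by ``ht.random.randn``):
+
+ >>> import heat as ht
+ >>> a = ht.random.randn(1000, 100, split=0)  # at least as many rows as columns
+ >>> u, h = ht.linalg.polar(a)  # a = u @ h
+ >>> u_only = ht.linalg.polar(a, calcH=False)  # skip computing the factor H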
diff --git a/doc/api/heat/core/linalg/qr.md b/doc/api/heat/core/linalg/qr.md
new file mode 100644
index 0000000000..5f53a2fcdb
--- /dev/null
+++ b/doc/api/heat/core/linalg/qr.md
@@ -0,0 +1,56 @@
+Module heat.core.linalg.qr
+==========================
+QR decomposition of ``DNDarray``s.
+
+Functions
+---------
+
+`qr(A: heat.core.dndarray.DNDarray, mode: str = 'reduced', procs_to_merge: int = 2) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Calculates the QR decomposition of a 2D ``DNDarray``.
+ Factor the matrix ``A`` as *QR*, where ``Q`` is orthonormal and ``R`` is upper-triangular.
+ If ``mode="reduced"``, the function returns ``QR(Q=Q, R=R)``; if ``mode="r"``, the function returns ``QR(Q=None, R=R)``.
+
+ This function also works for batches of matrices; in this case, the last two dimensions of the input array are considered as the matrix dimensions.
+ The output arrays have the same leading batch dimensions as the input array.
+
+ Parameters
+ ----------
+ A : DNDarray of shape (M, N), of shape (..., M, N) in the batched case
+ Array which will be decomposed. So far, only arrays with datatype float32 or float64 are supported.
+ mode : str, optional
+ The default "reduced" returns Q and R with dimensions (M, min(M, N)) and (min(M, N), N). Potential batch dimensions are not modified.
+ "r" returns only R, with dimensions (min(M, N), N).
+ procs_to_merge : int, optional
+ This parameter is only relevant for split=0 (-2, in the batched case) and determines the number of processes to be merged at one step during the so-called TS-QR algorithm.
+ The default is 2. Higher choices might be faster, but will probably result in higher memory consumption. 0 corresponds to merging all processes at once.
+ We recommend modifying this parameter only if you are familiar with the TS-QR algorithm (see the references below).
+
+ Notes
+ -----
+ The distribution schemes of ``Q`` and ``R`` depend on that of the input ``A``.
+
+ - If ``A`` is distributed along the columns (A.split = 1), so will be ``Q`` and ``R``.
+
+ - If ``A`` is distributed along the rows (A.split = 0), ``Q`` too will have `split=0`. ``R`` won't be distributed, i.e. `R.split = None`, if ``A`` is tall-skinny, i.e., if
+ the largest local chunk of data of ``A`` has at least as many rows as columns. Otherwise, ``R`` will be distributed along the rows as well, i.e., `R.split = 0`.
+
+ Note that the argument `calc_q` allowed in earlier Heat versions is no longer supported; `calc_q = False` is equivalent to `mode = "r"`.
+ Unlike ``numpy.linalg.qr()``, `ht.linalg.qr` only supports ``mode="reduced"`` or ``mode="r"`` for the moment, since "complete" may result in heavy memory usage.
+
+ Heat's QR function is built on top of PyTorch's QR function, ``torch.linalg.qr()``, which uses LAPACK (CPU) and MAGMA (CUDA) as backend.
+ Both the split=0 and the split=1 case build on a column-block-wise version of stabilized Gram-Schmidt orthogonalization.
+ For split=1 (-1, in the batched case), this is directly applied to the local arrays of the input array.
+ For split=0, a tall-skinny QR (TS-QR) is implemented for the case of tall-skinny matrices (i.e., the largest local chunk of data has at least as many rows as columns),
+ and extended to non-tall-skinny matrices by applying a block-wise version of stabilized Gram-Schmidt orthogonalization.
+
+ References
+ ----------
+ Basic information about QR factorization/decomposition can be found at, e.g.:
+
+ - https://en.wikipedia.org/wiki/QR_factorization,
+
+ - Gene H. Golub and Charles F. Van Loan. 1996. Matrix Computations (3rd Ed.).
+
+ For an extensive overview on TS-QR and its variants we refer to, e.g.,
+
+ - Demmel, James, et al. “Communication-Optimal Parallel and Sequential QR and LU Factorizations.” SIAM Journal on Scientific Computing, vol. 34, no. 1, 2 Feb. 2012, pp. A206–A239., doi:10.1137/080731992.
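+
+ A short sketch of both modes (illustrative tall-skinny, row-distributed input):
+
+ >>> import heat as ht
+ >>> a = ht.random.randn(1000, 50, split=0)
+ >>> q, r = ht.linalg.qr(a)  # mode="reduced": Q of shape (1000, 50), R of shape (50, 50)
+ >>> r_only = ht.linalg.qr(a, mode="r").R  # triangular factor only, Q is None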
diff --git a/doc/api/heat/core/linalg/solver.md b/doc/api/heat/core/linalg/solver.md
new file mode 100644
index 0000000000..4d91928c65
--- /dev/null
+++ b/doc/api/heat/core/linalg/solver.md
@@ -0,0 +1,61 @@
+Module heat.core.linalg.solver
+==============================
+Collection of solvers for systems of linear equations.
+
+Functions
+---------
+
+`cg(A: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray, x0: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Conjugate gradients method for solving a system of linear equations :math:`Ax = b`.
+
+ Parameters
+ ----------
+ A : DNDarray
+ 2D symmetric, positive definite matrix
+ b : DNDarray
+ 1D vector
+ x0 : DNDarray
+ Arbitrary 1D starting vector
+ out : DNDarray, optional
+ Output vector
+
+`lanczos(A: heat.core.dndarray.DNDarray, m: int, v0: heat.core.dndarray.DNDarray | None = None, V_out: heat.core.dndarray.DNDarray | None = None, T_out: heat.core.dndarray.DNDarray | None = None) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: The Lanczos algorithm is an iterative approximation of the solution to the eigenvalue problem, as an adaptation of
+ power methods to find the m "most useful" (tending towards extreme highest/lowest) eigenvalues and eigenvectors of
+ an :math:`n \times n` Hermitian matrix, where often :math:`m \ll n`.
+
+`solve_triangular(A: heat.core.dndarray.DNDarray, b: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Solver for (possibly batched) upper triangular systems of linear equations: it returns `x` in `Ax = b`, where `A` is a (possibly batched) upper triangular matrix and
+ `b` a (possibly batched) vector or matrix of suitable shape, both provided as input to the function.
+ The implementation builds on the corresponding solver in PyTorch and implements a memory-distributed, MPI-parallel, block-wise version thereof.
+
+ Parameters
+ ----------
+ A : DNDarray
+ An upper triangular invertible square (n x n) matrix or a batch thereof, i.e. a ``DNDarray`` of shape `(..., n, n)`.
+ b : DNDarray
+ A (possibly batched) n x k matrix, i.e. a DNDarray of shape (..., n, k), where the batch dimensions denoted by ... need to coincide with those of A.
+ (Batched) vectors have to be provided as ... x n x 1 matrices, and the split dimension of b must be the second-last dimension if not None.
+
+ Notes
+ -----
+ Since such a check might be computationally expensive, we do not check whether A is indeed upper triangular.
+ If you require such a check, please open an issue on our GitHub page and request this feature.
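+
+ A minimal sketch for this module's ``cg`` (the SPD test matrix is built purely for illustration; ``resplit_`` only aligns the distribution of the two summands):
+
+ >>> import heat as ht
+ >>> n = 100
+ >>> x = ht.random.randn(n, n, split=0)
+ >>> a = (x @ x.T).resplit_(0) + n * ht.eye(n, split=0)  # symmetric positive definite by construction
+ >>> b = ht.random.randn(n, split=0)
+ >>> sol = ht.linalg.cg(a, b, ht.zeros(n, split=0))  # zero starting vector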
diff --git a/doc/api/heat/core/linalg/svd.md b/doc/api/heat/core/linalg/svd.md
new file mode 100644
index 0000000000..a862d4d5bc
--- /dev/null
+++ b/doc/api/heat/core/linalg/svd.md
@@ -0,0 +1,52 @@
+Module heat.core.linalg.svd
+===========================
+file for future "full" SVD implementation
+
+Functions
+---------
+
+`svd(A: heat.core.dndarray.DNDarray, full_matrices: bool = False, compute_uv: bool = True, qr_procs_to_merge: int = 2, r_max_zolopd: int = 8) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Computes the singular value decomposition of a matrix (the input array ``A``).
+
+ For an input DNDarray ``A`` of shape ``(M, N)``, the function returns DNDarrays ``U``, ``S``, and ``V`` such that ``A = U @ ht.diag(S) @ V.T``,
+ with shapes ``(M, min(M, N))``, ``(min(M, N),)``, and ``(min(M, N), N)``, respectively, in the case that ``compute_uv=True``, or
+ only the vector containing the singular values ``S`` of shape ``(min(M, N),)`` in the case that ``compute_uv=False``. By definition of the singular value decomposition,
+ the matrices ``U`` and ``V`` have orthonormal columns, and the entries of the vector ``S`` are non-negative real numbers.
+
+ We refer to, e.g., Wikipedia (https://en.wikipedia.org/wiki/Singular_value_decomposition) or to Gene H. Golub and Charles F. Van Loan, Matrix Computations (3rd Ed., 1996),
+ for more detailed information on the singular value decomposition.
+
+ Parameters
+ ----------
+ A : ht.DNDarray
+ The input array (2D, float32 or float64) for which the singular value decomposition is computed.
+ Tall-skinny (``M >> N``) and short-fat (``M << N``) inputs are handled by a specialized, QR-based approach; all other shapes fall back to the Zolotarev-based procedure described in the Notes.
+ full_matrices : bool, optional
+ Currently, only the default value ``False`` is supported. This argument is included for compatibility with NumPy.
+ compute_uv : bool, optional
+ If ``True``, the matrices ``U`` and ``V`` are computed and returned together with the singular values ``S``.
+ If ``False``, only the vector ``S`` containing the singular values is returned.
+ qr_procs_to_merge : int, optional
+ The number of processes to merge in the tall-skinny QR decomposition that is applied if the input array is tall skinny (``M > N``) or short fat (``M < N``).
+ See the corresponding remarks for :func:`heat.linalg.qr` for more details.
+ r_max_zolopd : int, optional
+ An internal parameter only relevant for the case that the input matrix is neither tall-skinny nor short-fat.
+ This parameter is passed to the Zolotarev polar decomposition and the symmetric eigenvalue decomposition that are applied in this case.
+ See the documentation of :func:`heat.linalg.polar` as well as of :func:`heat.linalg.eigh` for more details.
+
+ Notes
+ -----
+ Unlike in NumPy, we currently do not support the option ``full_matrices=True``, since this can result in heavy memory consumption (in particular for tall-skinny
+ and short-fat matrices) that should be avoided in the context Heat is designed for. If you nevertheless require this feature, please open an issue on GitHub.
+
+ The algorithm used for the computation of the singular value decomposition depends on the shape of the input array ``A``.
+ For tall and skinny matrices (``M > N``), the algorithm is based on the tall-skinny QR decomposition. For the remaining cases we use an approach built on the
+ Zolotarev polar decomposition and a symmetric eigenvalue decomposition based on it; see Algorithm 5.3 in:
+
+ Nakatsukasa, Y., & Freund, R. W. (2016). Computing fundamental matrix decompositions accurately via the
+ matrix sign function in two iterations: The power of Zolotarev's functions. SIAM Review, 58(3).
+
+ See Also
+ --------
+ :func:`heat.linalg.qr`
+ :func:`heat.linalg.polar`
+ :func:`heat.linalg.eigh`
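+
+ A minimal sketch for the tall-skinny case (shapes illustrative):
+
+ >>> import heat as ht
+ >>> a = ht.random.randn(10000, 50, split=0)  # tall skinny, rows distributed
+ >>> u, s, v = ht.linalg.svd(a)
+ >>> s_only = ht.linalg.svd(a, compute_uv=False)  # singular values only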
diff --git a/doc/api/heat/core/linalg/svdtools.md b/doc/api/heat/core/linalg/svdtools.md
new file mode 100644
index 0000000000..5e3dd25377
--- /dev/null
+++ b/doc/api/heat/core/linalg/svdtools.md
@@ -0,0 +1,218 @@
+Module heat.core.linalg.svdtools
+================================
+distributed hierarchical SVD
+
+Functions
+---------
+
+`hsvd(A: heat.core.dndarray.DNDarray, maxrank: int | None = None, maxmergedim: int | None = None, rtol: float | None = None, safetyshift: int = 0, no_of_merges: int | None = 2, compute_sv: bool = False, silent: bool = True, warnings_off: bool = False) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, float] | Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray] | heat.core.dndarray.DNDarray`
+: Computes an approximate truncated SVD of A utilizing a distributed hierarchical algorithm; see the references.
+ The present function `hsvd` is a low-level routine that provides many options/parameters but no default values; it is not recommended for use by non-experts, since conflicts
+ arising from inappropriate parameter choices will not be caught. We strongly recommend using the corresponding high-level functions `hsvd_rank` and `hsvd_rtol` instead.
+
+ Parameters
+ ----------
+ A : DNDarray
+ 2D-array (float32/64) of which the hSVD has to be computed
+ maxrank : int, optional
+ truncation rank of the SVD
+ maxmergedim : int, optional
+ maximal size of the concatenation matrices when "merging" the local SVDs
+ rtol : float, optional
+ upper bound on the relative reconstruction error ||A - U Sigma V^T ||_F / ||A||_F (may deteriorate due to other parameters)
+ safetyshift : int, optional
+ shift that increases the actual truncation rank of the local SVDs during the computations in order to increase accuracy
+ no_of_merges : int, optional
+ maximum number of local SVDs to be "merged" at one step
+ compute_sv : bool, optional
+ determines whether to compute U, Sigma, V (compute_sv=True) or U only (compute_sv=False)
+ silent : bool, optional
+ determines whether to print information on the computations performed (silent=False)
+ warnings_off : bool, optional
+ switches on and off warnings that are not intended for the high-level routines based on this function
+
+ Returns
+ -------
+ (Union[Tuple[DNDarray, DNDarray, DNDarray, float], Tuple[DNDarray, DNDarray, DNDarray], DNDarray])
+ if compute_sv=True: U, Sigma, V, and an a-posteriori error estimate for the reconstruction error ||A - U Sigma V^T ||_F / ||A||_F (computed according to [2] along the "true" merging tree used in the computations).
+ if compute_sv=False: U and the a-posteriori error estimate
+
+ References
+ ----------
+ [1] Iwen, Ong. A distributed and incremental SVD algorithm for agglomerative data analysis on large networks. SIAM J. Matrix Anal. Appl., 37(4), 2016.
+ [2] Himpe, Leibner, Rave. Hierarchical approximate proper orthogonal decomposition. SIAM J. Sci. Comput., 40(5), 2018.
+
+ See Also
+ --------
+ :func:`hsvd_rank`
+ :func:`hsvd_rtol`
+
+`hsvd_rank(A: heat.core.dndarray.DNDarray, maxrank: int, compute_sv: bool = False, maxmergedim: int | None = None, safetyshift: int = 5, silent: bool = True) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, float] | Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray] | heat.core.dndarray.DNDarray`
+: Hierarchical SVD (hSVD) with prescribed truncation rank `maxrank`.
+ If A = U diag(sigma) V^T is the true SVD of A, this routine computes an approximation for U[:,:maxrank] (and sigma[:maxrank], V[:,:maxrank]).
+
+ The accuracy of this approximation depends on the structure of A ("low-rank" is best) and an appropriate choice of parameters.
+
+ One can expect a similar outcome from this routine as from scikit-learn's TruncatedSVD (with `algorithm='randomized'`), although a different, deterministic algorithm is applied here. The parameters `n_components`
+ and `n_oversamples` (scikit-learn) roughly correspond to `maxrank` and `safetyshift` (see below).
+
+ Parameters
+ ----------
+ A : DNDarray
+ 2D-array (float32/64) of which the hSVD has to be computed.
+ maxrank : int
+ truncation rank. (This parameter corresponds to `n_components` in scikit-learn's TruncatedSVD.)
+ compute_sv : bool, optional
+ compute_sv=True implies that also Sigma and V are computed and returned. The default is False.
+ maxmergedim : int, optional
+ maximal size of the concatenation matrices during the merging procedure. The default is None and results in an appropriate choice depending on the size of the local slices of A and maxrank.
+ Too small choices for this parameter will result in failure if the maximal size of the concatenation matrices does not allow merging at least two matrices. Too large choices for this parameter can cause memory errors if the resulting merging problem becomes too large.
+ safetyshift : int, optional
+ Increases the actual truncation rank within the computations by a safety shift. The default is 5. (There is some similarity to `n_oversamples` in scikit-learn's TruncatedSVD.)
+ silent : bool, optional
+ silent=False implies that some information on the computations is printed. The default is True.
+
+ Returns
+ -------
+ (Union[Tuple[DNDarray, DNDarray, DNDarray, float], Tuple[DNDarray, DNDarray, DNDarray], DNDarray])
+ if compute_sv=True: U, Sigma, V, and an a-posteriori error estimate for the reconstruction error ||A - U Sigma V^T ||_F / ||A||_F (computed according to [2] along the "true" merging tree).
+ if compute_sv=False: U and the a-posteriori error estimate
+
+ Notes
+ -----
+ The size of the process-local SVDs to be computed during merging is proportional to the non-split size of the input A and (maxrank + safetyshift). Therefore, a conservative choice of maxrank and safetyshift is advised to avoid memory issues.
+ Note that, like scikit-learn's randomized SVD, this routine differs from `numpy.linalg.svd` in that not all singular values and vectors are computed,
+ and even those computed may be inaccurate if the input matrix exhibits an unfavorable structure.
+
+ See Also
+ --------
+ :func:`hsvd`
+ :func:`hsvd_rtol`
+
+ References
+ ----------
+ [1] Iwen, Ong. A distributed and incremental SVD algorithm for agglomerative data analysis on large networks. SIAM J. Matrix Anal. Appl., 37(4), 2016.
+ [2] Himpe, Leibner, Rave. Hierarchical approximate proper orthogonal decomposition. SIAM J. Sci. Comput., 40(5), 2018.
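+
+ A short sketch mirroring the TruncatedSVD analogy above (illustrative; assumes a column-distributed input, i.e. split=1):
+
+ >>> import heat as ht
+ >>> a = ht.random.randn(1000, 250, split=1)
+ >>> u, err = ht.linalg.hsvd_rank(a, maxrank=10)  # U and the a-posteriori error estimate
+ >>> u, s, v, err = ht.linalg.hsvd_rank(a, maxrank=10, compute_sv=True)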
+
+`hsvd_rtol(A: heat.core.dndarray.DNDarray, rtol: float, compute_sv: bool = False, maxrank: int | None = None, maxmergedim: int | None = None, safetyshift: int = 5, no_of_merges: int | None = None, silent: bool = True) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, float] | Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray] | heat.core.dndarray.DNDarray`
+: Hierarchical SVD (hSVD) with prescribed upper bound on the relative reconstruction error.
+ If A = U diag(sigma) V^T is the true SVD of A, this routine computes an approximation for U[:,:r] (and sigma[:r], V[:,:r])
+ such that the rel. reconstruction error ||A - U[:,:r] diag(sigma[:r]) V[:,:r]^T ||_F / ||A||_F does not exceed rtol.
+
+ The accuracy of this approximation depends on the structure of A ("low-rank" is best) and an appropriate choice of parameters. This routine is similar to `hsvd_rank`, with the difference that
+ truncation is not performed after a fixed number (namely `maxrank`) of singular values, but after as many singular values as are needed to capture a prescribed fraction of the amount of information
+ contained in the input data (`rtol`).
+
+ Parameters
+ ----------
+ A : DNDarray
+ 2D-array (float32/64) of which the hSVD has to be computed.
+ rtol : float
+ desired upper bound on the relative reconstruction error ||A - U Sigma V^T ||_F / ||A||_F. This upper bound is processed into "local"
+ tolerances during the actual computations, assuming the worst-case scenario of a binary "merging tree"; therefore, the a-posteriori
+ error for the relative error using the true "merging tree" (see output) may be significantly smaller than rtol.
+ Prescription of maxrank or maxmergedim (disabled by default) can result in loss of the desired precision, but can help to avoid memory issues.
+ compute_sv : bool, optional
+ compute_sv=True implies that also Sigma and V are computed and returned. The default is False.
+ no_of_merges : int, optional
+ Maximum number of processes to be merged at each step. If no further arguments are provided (see below),
+ this completely determines the "merging tree" and may cause memory issues. The default is None and results in a binary merging tree.
+ Note that no_of_merges dominates maxrank and maxmergedim in the sense that at most no_of_merges processes are merged,
+ even if maxrank and maxmergedim would allow merging more processes.
+ maxrank : int, optional
+ maximal truncation rank. The default is None.
+ Setting at least one of maxrank and maxmergedim is recommended to avoid memory issues, but can result in loss of the desired precision.
+ Setting only maxrank (and not maxmergedim) results in an appropriate default choice for maxmergedim, depending on the size of the local slices of A and the value of maxrank.
+ maxmergedim : int, optional
+ maximal size of the concatenation matrices during the merging procedure. The default is None and results in an appropriate choice depending on the size of the local slices of A and maxrank.
+ Too small choices for this parameter will result in failure if the maximal size of the concatenation matrices does not allow merging at least two matrices. Too large choices for this parameter can cause memory errors if the resulting merging problem becomes too large.
+ Setting at least one of maxrank and maxmergedim is recommended to avoid memory issues, but can result in loss of the desired precision.
+ Setting only maxmergedim (and not maxrank) results in an appropriate default choice for maxrank.
+ safetyshift : int, optional
+ Increases the actual truncation rank within the computations by a safety shift. The default is 5.
+ silent : bool, optional
+ silent=False implies that some information on the computations is printed. The default is True.
+
+ Returns
+ -------
+ (Union[Tuple[DNDarray, DNDarray, DNDarray, float], Tuple[DNDarray, DNDarray, DNDarray], DNDarray])
+ if compute_sv=True: U, Sigma, V, and an a-posteriori error estimate for the reconstruction error ||A - U Sigma V^T ||_F / ||A||_F (computed according to [2] along the "true" merging tree used in the computations).
+ if compute_sv=False: U and the a-posteriori error estimate
+
+ Notes
+ -----
+ The maximum size of the process-local SVDs to be computed during merging is proportional to the non-split size of the input A and (maxrank + safetyshift). Therefore, a conservative choice of maxrank and safetyshift is advised to avoid memory issues.
+ For similar reasons, prescribing only rtol and the number of processes to be merged in each step (without specifying maxrank or maxmergedim) may result in memory issues.
+ Prescribing maxrank is therefore strongly recommended to avoid memory issues, although it may result in loss of the desired precision (rtol); if this occurs, a separate warning is raised.
+
+ Note that this routine differs from `numpy.linalg.svd` in that not all singular values and vectors are computed, and even those computed may be inaccurate if the input matrix exhibits an unfavorable structure.
+
+ To avoid confusion, note that `rtol` in this routine does not have any similarity to `tol` in scikit-learn's TruncatedSVD.
+
+ See Also
+ --------
+ :func:`hsvd`
+ :func:`hsvd_rank`
+
+ References
+ ----------
+ [1] Iwen, Ong. A distributed and incremental SVD algorithm for agglomerative data analysis on large networks. SIAM J. Matrix Anal. Appl., 37(4), 2016.
+ [2] Himpe, Leibner, Rave. Hierarchical approximate proper orthogonal decomposition. SIAM J. Sci. Comput., 40(5), 2018.
+
+`isvd(new_data: heat.core.dndarray.DNDarray, U_old: heat.core.dndarray.DNDarray, S_old: heat.core.dndarray.DNDarray, V_old: heat.core.dndarray.DNDarray, maxrank: int | None = None) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Incremental SVD (iSVD) for the addition of new data to an existing SVD.
+ Given the SVD of an "old" matrix, :math:`X_\textnormal{old} = U_\textnormal{old} \cdot S_\textnormal{old} \cdot V_\textnormal{old}^T`, and additional columns :math:`N` ("`new_data`"), this routine computes
+ (a possibly approximate) SVD of the extended matrix :math:`X_\textnormal{new} = [X_\textnormal{old} | N]`.
+
+ Parameters
+ ----------
+ new_data : DNDarray
+ 2D-array (float32/64) of columns that are added to the "old" SVD. It must hold `new_data.split != 1` if `U_old.split = 0`.
+ U_old : DNDarray
+ U-factor of the SVD of the "old" matrix, 2D-array (float32/64). It must hold `U_old.split != 0` if `new_data.split = 1`.
+ S_old : DNDarray
+ Sigma-factor of the SVD of the "old" matrix, 1D-array (float32/64)
+ V_old : DNDarray
+ V-factor of the SVD of the "old" matrix, 2D-array (float32/64)
+ maxrank : int, optional
+ truncation rank of the SVD of the extended matrix. The default is None, i.e., no bound on the maximal rank is imposed.
+
+ Notes
+ -----
+ Inexactness may arise due to truncation to the maximal rank `maxrank` if the rank of the data to be processed exceeds this rank.
+ If you set `maxrank` to a high number (or None) in order to avoid inexactness, you may encounter memory issues.
+ The implementation follows the approach described in Ref. [1], Sect. 2.
+
+ References
+ ----------
+ [1] Brand, M. (2006). Fast low-rank modifications of the thin singular value decomposition. Linear Algebra and its Applications, 415(1), 20-30.
+
+`rsvd(A: heat.core.dndarray.DNDarray, rank: int, n_oversamples: int = 10, power_iter: int = 0, qr_procs_to_merge: int = 2) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray] | Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Randomized SVD (rSVD) with prescribed truncation rank `rank`.
+ If :math:`A = U \operatorname{diag}(S) V^T` is the true SVD of A, this routine computes an approximation for U[:,:rank] (and S[:rank], V[:,:rank]).
+
+ The accuracy of this approximation depends on the structure of A ("low-rank" is best) and an appropriate choice of parameters.
+
+ Parameters
+ ----------
+ A : DNDarray
+ 2D-array (float32/64) of which the rSVD has to be computed.
+ rank : int
+ truncation rank. (This parameter corresponds to `n_components` in scikit-learn's TruncatedSVD.)
+ n_oversamples : int, optional
+ number of oversamples. The default is 10.
+ power_iter : int, optional
+ number of power iterations. The default is 0.
+ Choosing `power_iter > 0` can improve the accuracy of the SVD approximation in the case of slowly decaying singular values, but increases the computational cost.
+ qr_procs_to_merge : int, optional
+ number of processes to merge at each step of the QR decomposition in the power iteration (if power_iter > 0). The default is 2. See the corresponding remarks for :func:`heat.linalg.qr` for more details.
+
+ Notes
+ -----
+ Memory requirements: the SVD computation of a matrix of size (rank + n_oversamples) x (rank + n_oversamples) must fit into the memory of a single process.
+ The implementation follows Algorithm 4.4 (randomized range finder) and Algorithm 5.1 (direct SVD) in [1].
+
+ References
+ ----------
+ [1] Halko, N., Martinsson, P. G., & Tropp, J. A. (2011). Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions. SIAM Review, 53(2), 217-288.
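+
+ A minimal sketch for ``rsvd`` (rank, oversampling, and power-iteration values illustrative; the three-tuple unpacking follows the signature above):
+
+ >>> import heat as ht
+ >>> a = ht.random.randn(2000, 500, split=0)
+ >>> u, s, v = ht.linalg.rsvd(a, rank=10, n_oversamples=10, power_iter=2)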
diff --git a/doc/api/heat/core/linalg/tests/index.md b/doc/api/heat/core/linalg/tests/index.md
new file mode 100644
index 0000000000..abe48647fc
--- /dev/null
+++ b/doc/api/heat/core/linalg/tests/index.md
@@ -0,0 +1,12 @@
+Module heat.core.linalg.tests
+=============================
+
+Sub-modules
+-----------
+* heat.core.linalg.tests.test_basics
+* heat.core.linalg.tests.test_eigh
+* heat.core.linalg.tests.test_polar
+* heat.core.linalg.tests.test_qr
+* heat.core.linalg.tests.test_solver
+* heat.core.linalg.tests.test_svd
+* heat.core.linalg.tests.test_svdtools
diff --git a/doc/api/heat/core/linalg/tests/test_basics.md b/doc/api/heat/core/linalg/tests/test_basics.md
new file mode 100644
index 0000000000..4a5298f3d8
--- /dev/null
+++ b/doc/api/heat/core/linalg/tests/test_basics.md
@@ -0,0 +1,102 @@
+Module heat.core.linalg.tests.test_basics
+=========================================
+
+Classes
+-------
+
+`TestLinalgBasics(methodName='runTest')`
+: A class whose instances are single test cases.
+
+ By default, the test code itself should be placed in a method named
+ 'runTest'.
+
+ If the fixture may be used for many test cases, create as
+ many test methods as are needed. When instantiating such a TestCase
+ subclass, specify in the constructor arguments the name of the test method
+ that the instance is to execute.
+
+ Test authors should subclass TestCase for their own tests. Construction
+ and deconstruction of the test's environment ('fixture') can be
+ implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+ If it is necessary to override the __init__ method, the base class
+ __init__ method must always be called. It is important that subclasses
+ should not change the signature of their __init__ method, since instances
+ of the classes are instantiated automatically by parts of the framework
+ in order to be run.
+
+ When subclassing TestCase, you can set these attributes:
+ * failureException: determines which exception will be raised when
+ the instance's assertion methods fail; test methods raising this
+ exception will be deemed to have 'failed' rather than 'errored'.
+ * longMessage: determines whether long messages (including repr of
+ objects used in assert methods) will be printed on failure in *addition*
+ to any explicit message passed.
+ * maxDiff: sets the maximum length of a diff in failure messages
+ by assert methods using difflib. It is looked up as an instance
+ attribute so can be configured by individual tests if required.
+
+ Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+
+ ### Ancestors (in MRO)
+
+ * heat.core.tests.test_suites.basic_test.TestCase
+ * unittest.case.TestCase
+
+ ### Methods
+
+ `test_condest(self)`
+ :
+
+ `test_cross(self)`
+ :
+
+ `test_det(self)`
+ :
+
+ `test_dot(self)`
+ :
+
+ `test_estimate_largest_singularvalue(self)`
+ :
+
+ `test_inv(self)`
+ :
+
+ `test_matmul(self)`
+ :
+
+ `test_matrix_norm(self)`
+ :
+
+ `test_norm(self)`
+ :
+
+ `test_outer(self)`
+ :
+
+ `test_projection(self)`
+ :
+
+ `test_trace(self)`
+ :
+
+ `test_transpose(self)`
+ :
+
+ `test_tril(self)`
+ :
+
+ `test_triu(self)`
+ :
+
+ `test_vdot(self)`
+ :
+
+ `test_vecdot(self)`
+ :
+
+ `test_vector_norm(self)`
+ :
diff --git a/doc/api/heat/core/linalg/tests/test_eigh.md b/doc/api/heat/core/linalg/tests/test_eigh.md
new file mode 100644
index 0000000000..506e31db73
--- /dev/null
+++ b/doc/api/heat/core/linalg/tests/test_eigh.md
@@ -0,0 +1,57 @@
+Module heat.core.linalg.tests.test_eigh
+=======================================
+
+Classes
+-------
+
+`TestEigh(methodName='runTest')`
+: A class whose instances are single test cases.
+
+ By default, the test code itself should be placed in a method named
+ 'runTest'.
+
+ If the fixture may be used for many test cases, create as
+ many test methods as are needed. When instantiating such a TestCase
+ subclass, specify in the constructor arguments the name of the test method
+ that the instance is to execute.
+
+ Test authors should subclass TestCase for their own tests. Construction
+ and deconstruction of the test's environment ('fixture') can be
+ implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+ If it is necessary to override the __init__ method, the base class
+ __init__ method must always be called. It is important that subclasses
+ should not change the signature of their __init__ method, since instances
+ of the classes are instantiated automatically by parts of the framework
+ in order to be run.
+ + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_eigh(self)` + : + + `test_eigh_catch_wrong_inputs(self)` + : + + `test_eigh_options(self)` + : diff --git a/doc/api/heat/core/linalg/tests/test_polar.md b/doc/api/heat/core/linalg/tests/test_polar.md new file mode 100644 index 0000000000..7913b80e18 --- /dev/null +++ b/doc/api/heat/core/linalg/tests/test_polar.md @@ -0,0 +1,57 @@ +Module heat.core.linalg.tests.test_polar +======================================== + +Classes +------- + +`TestZolopolar(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_catch_wrong_inputs(self)` + : + + `test_polar_split0(self)` + : + + `test_polar_split1(self)` + : diff --git a/doc/api/heat/core/linalg/tests/test_qr.md b/doc/api/heat/core/linalg/tests/test_qr.md new file mode 100644 index 0000000000..4ba49b1355 --- /dev/null +++ b/doc/api/heat/core/linalg/tests/test_qr.md @@ -0,0 +1,66 @@ +Module heat.core.linalg.tests.test_qr +===================================== + +Classes +------- + +`TestQR(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_batched_qr_split0(self)` + : + + `test_batched_qr_split1(self)` + : + + `test_batched_qr_splitNone(self)` + : + + `test_qr_split0(self)` + : + + `test_qr_split1orNone(self)` + : + + `test_wronginputs(self)` + : diff --git a/doc/api/heat/core/linalg/tests/test_solver.md b/doc/api/heat/core/linalg/tests/test_solver.md new file mode 100644 index 0000000000..3d99d58abe --- /dev/null +++ b/doc/api/heat/core/linalg/tests/test_solver.md @@ -0,0 +1,57 @@ +Module heat.core.linalg.tests.test_solver +========================================= + +Classes +------- + +`TestSolver(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. 
Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_cg(self)` + : + + `test_lanczos(self)` + : + + `test_solve_triangular(self)` + : diff --git a/doc/api/heat/core/linalg/tests/test_svd.md b/doc/api/heat/core/linalg/tests/test_svd.md new file mode 100644 index 0000000000..ac2b31ac60 --- /dev/null +++ b/doc/api/heat/core/linalg/tests/test_svd.md @@ -0,0 +1,109 @@ +Module heat.core.linalg.tests.test_svd +====================================== + +Classes +------- + +`TestTallSkinnySVD(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. 
Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_shortfat_split1(self)` + : + + `test_singvals_only(self)` + : + + `test_tallskinny_split0(self)` + : + + `test_wrong_inputs(self)` + : + +`TestZoloSVD(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_full_svd(self)` + : + + `test_options_full_svd(self)` + : diff --git a/doc/api/heat/core/linalg/tests/test_svdtools.md b/doc/api/heat/core/linalg/tests/test_svdtools.md new file mode 100644 index 0000000000..0c626e135d --- /dev/null +++ b/doc/api/heat/core/linalg/tests/test_svdtools.md @@ -0,0 +1,152 @@ +Module heat.core.linalg.tests.test_svdtools +=========================================== + +Classes +------- + +`TestHSVD(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. 
It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_hsvd_rank_part1(self)` + : + + `test_hsvd_rank_part2(self)` + : + +`TestISVD(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_isvd(self)` + : + + `test_isvd_catch_wrong_inputs(self)` + : + +`TestRSVD(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_rsvd(self)` + : + + `test_rsvd_catch_wrong_inputs(self)` + : diff --git a/doc/api/heat/core/logical.md b/doc/api/heat/core/logical.md new file mode 100644 index 0000000000..a9225d9ef1 --- /dev/null +++ b/doc/api/heat/core/logical.md @@ -0,0 +1,285 @@ +Module heat.core.logical +======================== +Logical functions for the DNDarrays + +Functions +--------- + +`all(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int] | None = None, out: heat.core.dndarray.DNDarray | None = None, keepdims: bool = False) ‑> heat.core.dndarray.DNDarray | bool` +: Test whether all array elements along a given axis evaluate to ``True``. + A new boolean or :class:`~heat.core.dndarray.DNDarray` is returned unless out is specified, in which case a + reference to ``out`` is returned. + + Parameters + ---------- + x : DNDarray + Input array or object that can be converted to an array. + axis : None or int or Tuple[int,...], optional + Axis or axes along which a logical AND reduction is performed. The default (``axis=None``) is to perform a + logical AND over all the dimensions of the input array. ``axis`` may be negative, in which case it counts + from the last to the first axis. + out : DNDarray, optional + Alternate output array in which to place the result. It must have the same shape as the expected output + and its type is preserved. + keepdims : bool, optional + If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one. + With this option, the result will broadcast correctly against the original array. 
+ + Examples + -------- + >>> x = ht.random.randn(4, 5) + >>> x + DNDarray([[ 0.7199, 1.3718, 1.5008, 0.3435, 1.2884], + [ 0.1532, -0.0968, 0.3739, 1.7843, 0.5614], + [ 1.1522, 1.9076, 1.7638, 0.4110, -0.2803], + [-0.5475, -0.0271, 0.8564, -1.5870, 1.3108]], dtype=ht.float32, device=cpu:0, split=None) + >>> y = x < 0.5 + >>> y + DNDarray([[False, False, False, True, False], + [ True, True, True, False, False], + [False, False, False, True, True], + [ True, True, False, True, False]], dtype=ht.bool, device=cpu:0, split=None) + >>> ht.all(y) + DNDarray([False], dtype=ht.bool, device=cpu:0, split=None) + >>> ht.all(y, axis=0) + DNDarray([False, False, False, False, False], dtype=ht.bool, device=cpu:0, split=None) + >>> ht.all(x, axis=1) + DNDarray([True, True, True, True], dtype=ht.bool, device=cpu:0, split=None) + >>> out = ht.zeros(5) + >>> ht.all(y, axis=0, out=out) + DNDarray([False, False, False, False, False], dtype=ht.float32, device=cpu:0, split=None) + >>> out + DNDarray([False, False, False, False, False], dtype=ht.float32, device=cpu:0, split=None) + +`allclose(x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray, rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False) ‑> bool` +: Test whether two tensors are element-wise equal within a tolerance. Returns ``True`` if ``|x-y|<=atol+rtol*|y|`` + for all elements of ``x`` and ``y``, ``False`` otherwise. + + Parameters + ---------- + x : DNDarray + First array to compare + y : DNDarray + Second array to compare + atol: float, optional + Absolute tolerance. + rtol: float, optional + Relative tolerance (with respect to ``y``). + equal_nan: bool, optional + Whether to compare NaN’s as equal. If ``True``, NaN’s in ``x`` will be considered equal to NaN’s in ``y`` in + the output array. + + Examples + -------- + >>> x = ht.float32([[2, 2], [2, 2]]) + >>> ht.allclose(x, x) + True + >>> y = ht.float32([[2.00005, 2.00005], [2.00005, 2.00005]]) + >>> ht.allclose(x, y) + False + >>> ht.allclose(x, y, atol=1e-04) + True + +`any(x, axis: int | None = None, out: heat.core.dndarray.DNDarray | None = None, keepdims: bool = False) ‑> heat.core.dndarray.DNDarray` +: Returns a :class:`~heat.core.dndarray.DNDarray` containing the result of the test whether any array elements along a + given axis evaluate to ``True``. + The returned array is one-dimensional unless ``axis`` is not ``None``. + + Parameters + ---------- + x : DNDarray + Input tensor + axis : int, optional + Axis along which a logical OR reduction is performed. With ``axis=None``, the logical OR is performed over all + dimensions of the array. + out : DNDarray, optional + Alternative output tensor in which to place the result. It must have the same shape as the expected output. + The output is an array with ``datatype=bool``. + keepdims : bool, optional + If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one. + With this option, the result will broadcast correctly against the original array.
+ + Examples + -------- + >>> x = ht.float32([[0.3, 0, 0.5]]) + >>> x.any() + DNDarray([True], dtype=ht.bool, device=cpu:0, split=None) + >>> x.any(axis=0) + DNDarray([ True, False, True], dtype=ht.bool, device=cpu:0, split=None) + >>> x.any(axis=1) + DNDarray([True], dtype=ht.bool, device=cpu:0, split=None) + >>> y = ht.int32([[0, 0, 1], [0, 0, 0]]) + >>> res = ht.zeros(3, dtype=ht.bool) + >>> y.any(axis=0, out=res) + DNDarray([False, False, True], dtype=ht.bool, device=cpu:0, split=None) + >>> res + DNDarray([False, False, True], dtype=ht.bool, device=cpu:0, split=None) + +`isclose(x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray, rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False) ‑> heat.core.dndarray.DNDarray` +: Returns a boolean :class:`~heat.core.dndarray.DNDarray`, with elements ``True`` where ``x`` and ``y`` are equal + within the given tolerance. If both ``x`` and ``y`` are scalars, returns a single boolean value. + + Parameters + ---------- + x : DNDarray + Input array to compare. + y : DNDarray + Input array to compare. + rtol : float + The relative tolerance parameter. + atol : float + The absolute tolerance parameter. + equal_nan : bool + Whether to compare NaN’s as equal. If ``True``, NaN’s in x will be considered equal to NaN’s in y in the output + array. + +`isfinite(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Test element-wise for finiteness (i.e. neither infinity nor NaN) and return the result as a boolean + :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input tensor + + Examples + -------- + >>> ht.isfinite(ht.array([1, ht.inf, -ht.inf, ht.nan])) + DNDarray([ True, False, False, False], dtype=ht.bool, device=cpu:0, split=None) + +`isinf(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Test element-wise for positive or negative infinity and return the result as a boolean + :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input tensor + + Examples + -------- + >>> ht.isinf(ht.array([1, ht.inf, -ht.inf, ht.nan])) + DNDarray([False, True, True, False], dtype=ht.bool, device=cpu:0, split=None) + +`isnan(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Test element-wise for NaN and return the result as a boolean :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input tensor + + Examples + -------- + >>> ht.isnan(ht.array([1, ht.inf, -ht.inf, ht.nan])) + DNDarray([False, False, False, True], dtype=ht.bool, device=cpu:0, split=None) + +`isneginf(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Test if each element of `x` is negative infinity, and return the result as a boolean :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input tensor + out : DNDarray, optional + Alternate output array in which to place the result. It must have the same shape as the expected output + and its type is preserved. + + Examples + -------- + >>> ht.isneginf(ht.array([1, ht.inf, -ht.inf, ht.nan])) + DNDarray([False, False, True, False], dtype=ht.bool, device=cpu:0, split=None) + +`isposinf(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None)` +: Test if each element of `x` is positive infinity, and return the result as a boolean :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input tensor + out : DNDarray, optional + Alternate output array in which to place the result.
It must have the same shape as the expected output + and its type is preserved. + + Examples + -------- + >>> ht.isposinf(ht.array([1, ht.inf, -ht.inf, ht.nan])) + DNDarray([False, True, False, False], dtype=ht.bool, device=cpu:0, split=None) + +`logical_and(x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Compute the truth value of ``x`` AND ``y`` element-wise and return it as a boolean :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input array of same shape + y : DNDarray + Input array of same shape + + Examples + -------- + >>> ht.logical_and(ht.array([True, False]), ht.array([False, False])) + DNDarray([False, False], dtype=ht.bool, device=cpu:0, split=None) + +`logical_not(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Computes the element-wise logical NOT of the given input :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input array + out : DNDarray, optional + Alternative output array in which to place the result. It must have the same shape as the expected output. + The output is a :class:`~heat.core.dndarray.DNDarray` with ``datatype=bool``. + + Examples + -------- + >>> ht.logical_not(ht.array([True, False])) + DNDarray([False, True], dtype=ht.bool, device=cpu:0, split=None) + +`logical_or(x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Returns a boolean :class:`~heat.core.dndarray.DNDarray` containing the element-wise logical OR of the given + input :class:`~heat.core.dndarray.DNDarray`s. + + Parameters + ---------- + x : DNDarray + Input array of same shape + y : DNDarray + Input array of same shape + + Examples + -------- + >>> ht.logical_or(ht.array([True, False]), ht.array([False, False])) + DNDarray([ True, False], dtype=ht.bool, device=cpu:0, split=None) + +`logical_xor(x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Computes the element-wise logical XOR of the given input :class:`~heat.core.dndarray.DNDarray`. + + Parameters + ---------- + x : DNDarray + Input array of same shape + y : DNDarray + Input array of same shape + + Examples + -------- + >>> ht.logical_xor(ht.array([True, False, True]), ht.array([True, False, False])) + DNDarray([False, False, True], dtype=ht.bool, device=cpu:0, split=None) + +`signbit(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray` +: Checks element-wise if the sign bit is set (i.e. the value is less than zero). + + Parameters + ---------- + x : DNDarray + The input array. + out : DNDarray, optional + The output array. + + Examples + -------- + >>> a = ht.array([2, -1.3, 0]) + >>> ht.signbit(a) + DNDarray([False, True, False], dtype=ht.bool, device=cpu:0, split=None) diff --git a/doc/api/heat/core/manipulations.md b/doc/api/heat/core/manipulations.md new file mode 100644 index 0000000000..9a9e91b89c --- /dev/null +++ b/doc/api/heat/core/manipulations.md @@ -0,0 +1,1690 @@ +Module heat.core.manipulations +============================== +Manipulation operations for (potentially distributed) `DNDarray`s. + +Functions +--------- + +`balance(array: DNDarray, copy=False) ‑> heat.core.dndarray.DNDarray` +: Out-of-place balance function. More information on the meaning of balance can be found in + :func:`DNDarray.balance_() `.
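+ A minimal usage sketch (illustrative, not part of the original docstring; assumes a two-process run and that ``DNDarray.is_balanced()`` is available as the balance check): + + >>> a = ht.arange(10, split=0)[4:] # slicing can leave the distribution lopsided + >>> b = ht.balance(a, copy=True) + >>> b.is_balanced() + True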
+ + Parameters + ---------- + array : DNDarray + The DNDarray to be balanced. + copy : bool, optional + Whether the DNDarray should be copied before being balanced. If ``False`` (default), the original array + is balanced and returned. Otherwise (``True``), a balanced copy of the array + is returned. + Default: False + +`broadcast_arrays(*arrays: DNDarray) ‑> List[heat.core.dndarray.DNDarray]` +: Broadcasts one or more arrays against one another. Returns the broadcasted arrays, distributed along the split dimension of the first array in the list. If the first array is not distributed, the output will not be distributed. + + Parameters + ---------- + arrays : DNDarray + An arbitrary number of to-be broadcasted ``DNDarray``s. + + Notes + ----- + Broadcasted arrays are a view of the original arrays if possible, otherwise a copy is made. + + Examples + -------- + >>> import heat as ht + >>> a = ht.ones((100, 10), split=0) + >>> b = ht.ones((10,), split=None) + >>> c = ht.ones((1, 10), split=1) + >>> d, e, f = ht.broadcast_arrays(a, b, c) + >>> d.shape + (100, 10) + >>> e.shape + (100, 10) + >>> f.shape + (100, 10) + >>> d.split + 0 + >>> e.split + 0 + >>> f.split + 0 + +`broadcast_to(x: DNDarray, shape: Tuple[int, ...]) ‑> heat.core.dndarray.DNDarray` +: Broadcasts an array to a specified shape. Returns a view of ``x`` if ``x`` is not distributed, otherwise it returns a broadcasted, distributed, load-balanced copy of ``x``. + + Parameters + ---------- + x : DNDarray + `DNDarray` to broadcast. + shape : Tuple[int, ...] + Array shape. Must be compatible with ``x``. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to PyTorch's broadcasting rules. + + Examples + -------- + >>> import heat as ht + >>> a = ht.arange(100, split=0) + >>> b = ht.broadcast_to(a, (10, 100)) + >>> b.shape + (10, 100) + >>> b.split + 1 + >>> c = ht.broadcast_to(a, (100, 10)) + ValueError: Shape mismatch: object cannot be broadcast to the given shape. Original shape: (100,), target shape: (100, 10) + +`collect(arr: DNDarray, target_rank: Optional[int] = 0) ‑> heat.core.dndarray.DNDarray` +: Collects a distributed DNDarray onto one rank, chosen by the `target_rank` argument. + It is a specific case of the ``redistribute_`` method. + + Parameters + ---------- + arr : DNDarray + The DNDarray to be collected. + target_rank : int, optional + The rank to which the DNDarray will be collected. Default: 0. + + Raises + ------ + TypeError + If the target rank is not an integer. + ValueError + If the target rank is out of bounds. + + Examples + -------- + >>> st = ht.ones((50, 81, 67), split=2) + >>> print(st.lshape) + [0/2] (50, 81, 23) + [1/2] (50, 81, 22) + [2/2] (50, 81, 22) + >>> collected_st = ht.collect(st) + >>> print(collected_st.lshape) + [0/2] (50, 81, 67) + [1/2] (50, 81, 0) + [2/2] (50, 81, 0) + >>> collected_st = ht.collect(collected_st, 1) + >>> print(collected_st.lshape) + [0/2] (50, 81, 0) + [1/2] (50, 81, 67) + [2/2] (50, 81, 0) + +`column_stack(arrays: Sequence[DNDarray, ...]) ‑> DNDarray` +: Stack 1-D or 2-D `DNDarray`s as columns into a 2-D `DNDarray`. + If the input arrays are 1-D, they will be stacked as columns. If they are 2-D, + they will be concatenated along the second axis. + + Parameters + ---------- + arrays : Sequence[DNDarray, ...] + Sequence of `DNDarray`s. + + Raises + ------ + ValueError + If arrays have more than 2 dimensions + + Notes + ----- + All `DNDarray`s in the sequence must have the same number of rows.
+ All `DNDarray`s must be split along the same axis! Note that distributed + 1-D arrays (`split = 0`) by default will be transposed into distributed + column arrays with `split == 1`. + + See Also + -------- + :func:`concatenate` + :func:`hstack` + :func:`row_stack` + :func:`stack` + :func:`vstack` + + Examples + -------- + >>> # 1-D tensors + >>> a = ht.array([1, 2, 3]) + >>> b = ht.array([2, 3, 4]) + >>> ht.column_stack((a, b)).larray + tensor([[1, 2], + [2, 3], + [3, 4]]) + >>> # 1-D and 2-D tensors + >>> a = ht.array([1, 2, 3]) + >>> b = ht.array([[2, 5], [3, 6], [4, 7]]) + >>> c = ht.array([[7, 10], [8, 11], [9, 12]]) + >>> ht.column_stack((a, b, c)).larray + tensor([[ 1, 2, 5, 7, 10], + [ 2, 3, 6, 8, 11], + [ 3, 4, 7, 9, 12]]) + >>> # distributed DNDarrays, 3 processes + >>> a = ht.arange(10, split=0).reshape((5, 2)) + >>> b = ht.arange(5, 20, split=0).reshape((5, 3)) + >>> c = ht.arange(20, 40, split=0).reshape((5, 4)) + >>> ht.column_stack((a, b, c)).larray + [0/2] tensor([[ 0, 1, 5, 6, 7, 20, 21, 22, 23], + [0/2] [ 2, 3, 8, 9, 10, 24, 25, 26, 27]], dtype=torch.int32) + [1/2] tensor([[ 4, 5, 11, 12, 13, 28, 29, 30, 31], + [1/2] [ 6, 7, 14, 15, 16, 32, 33, 34, 35]], dtype=torch.int32) + [2/2] tensor([[ 8, 9, 17, 18, 19, 36, 37, 38, 39]], dtype=torch.int32) + >>> # distributed 1-D and 2-D DNDarrays, 3 processes + >>> a = ht.arange(5, split=0) + >>> b = ht.arange(5, 20, split=0).reshape((5, 3), new_split=1) + >>> ht.column_stack((a, b)).larray + [0/2] tensor([[ 0, 5], + [0/2] [ 1, 8], + [0/2] [ 2, 11], + [0/2] [ 3, 14], + [0/2] [ 4, 17]], dtype=torch.int32) + [1/2] tensor([[ 6], + [1/2] [ 9], + [1/2] [12], + [1/2] [15], + [1/2] [18]], dtype=torch.int32) + [2/2] tensor([[ 7], + [2/2] [10], + [2/2] [13], + [2/2] [16], + [2/2] [19]], dtype=torch.int32) + +`concatenate(arrays: Sequence[DNDarray, ...], axis: int = 0) ‑> DNDarray` +: Join two or more `DNDarray`s along an existing axis. + + Parameters + ---------- + arrays: Sequence[DNDarray, ...] + The arrays must have the same shape, except in the dimension corresponding to axis. + axis: int, optional + The axis along which the arrays will be joined (default is 0). + + Raises + ------ + RuntimeError + If the concatenated :class:`~heat.core.dndarray.DNDarray` meta information, e.g. `split` or `comm`, does not match. + TypeError + If the passed parameters are not of correct type. + ValueError + If the number of passed arrays is less than two or their shapes do not match.
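+ + For instance (an illustrative note, not part of the original docstring): concatenation along the split axis is also supported; two ``split=0`` arrays of shapes ``(3, 5)`` and ``(4, 5)`` concatenate along ``axis=0`` into a ``split=0`` array of shape ``(7, 5)``.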
+ + Examples + -------- + >>> x = ht.zeros((3, 5), split=None) + [0/1] tensor([[0., 0., 0., 0., 0.], + [0/1] [0., 0., 0., 0., 0.], + [0/1] [0., 0., 0., 0., 0.]]) + [1/1] tensor([[0., 0., 0., 0., 0.], + [1/1] [0., 0., 0., 0., 0.], + [1/1] [0., 0., 0., 0., 0.]]) + >>> y = ht.ones((3, 6), split=0) + [0/1] tensor([[1., 1., 1., 1., 1., 1.], + [0/1] [1., 1., 1., 1., 1., 1.]]) + [1/1] tensor([[1., 1., 1., 1., 1., 1.]]) + >>> ht.concatenate((x, y), axis=1) + [0/1] tensor([[0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.], + [0/1] [0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.]]) + [1/1] tensor([[0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.]]) + >>> x = ht.zeros((4, 5), split=1) + [0/1] tensor([[0., 0., 0.], + [0/1] [0., 0., 0.], + [0/1] [0., 0., 0.], + [0/1] [0., 0., 0.]]) + [1/1] tensor([[0., 0.], + [1/1] [0., 0.], + [1/1] [0., 0.], + [1/1] [0., 0.]]) + >>> y = ht.ones((3, 5), split=1) + [0/1] tensor([[1., 1., 1.], + [0/1] [1., 1., 1.], + [0/1] [1., 1., 1.]]) + [1/1] tensor([[1., 1.], + [1/1] [1., 1.], + [1/1] [1., 1.]]) + >>> ht.concatenate((x, y), axis=0) + [0/1] tensor([[0., 0., 0.], + [0/1] [0., 0., 0.], + [0/1] [0., 0., 0.], + [0/1] [0., 0., 0.], + [0/1] [1., 1., 1.], + [0/1] [1., 1., 1.], + [0/1] [1., 1., 1.]]) + [1/1] tensor([[0., 0.], + [1/1] [0., 0.], + [1/1] [0., 0.], + [1/1] [0., 0.], + [1/1] [1., 1.], + [1/1] [1., 1.], + [1/1] [1., 1.]]) + +`diag(a: DNDarray, offset: int = 0) ‑> heat.core.dndarray.DNDarray` +: Extract a diagonal or construct a diagonal array. + See the documentation for :func:`diagonal` for more information about extracting the diagonal. + + Parameters + ---------- + a: DNDarray + The array holding data for creating a diagonal array or extracting a diagonal. + If `a` is a 1-dimensional array, a diagonal 2d-array will be returned. + If `a` is a n-dimensional array with n > 1 the diagonal entries will be returned in an n-1 dimensional array. + offset: int, optional + The offset from the main diagonal. + Offset greater than zero means above the main diagonal, smaller than zero is below the main diagonal. + + See Also + -------- + :func:`diagonal` + + Examples + -------- + >>> import heat as ht + >>> a = ht.array([1, 2]) + >>> ht.diag(a) + DNDarray([[1, 0], + [0, 2]], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.diag(a, offset=1) + DNDarray([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.equal(ht.diag(ht.diag(a)), a) + True + >>> a = ht.array([[1, 2], [3, 4]]) + >>> ht.diag(a) + DNDarray([1, 4], dtype=ht.int64, device=cpu:0, split=None) + +`diagonal(a: DNDarray, offset: int = 0, dim1: int = 0, dim2: int = 1) ‑> heat.core.dndarray.DNDarray` +: Extract a diagonal of an n-dimensional array with n > 1. + The returned array will be of dimension n-1. + + Parameters + ---------- + a: DNDarray + The array of which the diagonal should be extracted. + offset: int, optional + The offset from the main diagonal. + Offset greater than zero means above the main diagonal, smaller than zero is below the main diagonal. + Default is 0 which means the main diagonal will be selected. + dim1: int, optional + First dimension with respect to which to take the diagonal. + dim2: int, optional + Second dimension with respect to which to take the diagonal. 
+ + Examples + -------- + >>> import heat as ht + >>> a = ht.array([[1, 2], [3, 4]]) + >>> ht.diagonal(a) + DNDarray([1, 4], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.diagonal(a, offset=1) + DNDarray([2], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.diagonal(a, offset=-1) + DNDarray([3], dtype=ht.int64, device=cpu:0, split=None) + >>> a = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) + >>> ht.diagonal(a) + DNDarray([[0, 6], + [1, 7]], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.diagonal(a, dim2=2) + DNDarray([[0, 5], + [2, 7]], dtype=ht.int64, device=cpu:0, split=None) + +`dsplit(x: Sequence[DNDarray, ...], indices_or_sections: Iterable) ‑> List[DNDarray, ...]` +: Split array into multiple sub-DNDarrays along the 3rd axis (depth). + Returns a list of sub-DNDarrays as copies of parts of `x`. + + Parameters + ---------- + x : DNDarray + DNDArray to be divided into sub-DNDarrays. + indices_or_sections : int or 1-dimensional array_like (i.e. undistributed DNDarray, list or tuple) + If `indices_or_sections` is an integer, N, the DNDarray will be divided into N equal DNDarrays along the 3rd axis. + If such a split is not possible, an error is raised. + If `indices_or_sections` is a 1-D DNDarray of sorted integers, the entries indicate where along the 3rd axis + the array is split. + If an index exceeds the dimension of the array along the 3rd axis, an empty sub-DNDarray is returned correspondingly. + + Raises + ------ + ValueError + If `indices_or_sections` is given as integer, but a split does not result in equal division. + + Notes + ----- + Please refer to the split documentation. dsplit is equivalent to split with axis=2, + the array is always split along the third axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + :func:`split` + :func:`hsplit` + :func:`vsplit` + + Examples + -------- + >>> x = ht.arange(24).reshape((2, 3, 4)) + >>> ht.dsplit(x, 2) + [DNDarray([[[ 0, 1], + [ 4, 5], + [ 8, 9]], + [[12, 13], + [16, 17], + [20, 21]]]), + DNDarray([[[ 2, 3], + [ 6, 7], + [10, 11]], + [[14, 15], + [18, 19], + [22, 23]]])] + >>> ht.dsplit(x, [1, 4]) + [DNDarray([[[ 0], + [ 4], + [ 8]], + [[12], + [16], + [20]]]), + DNDarray([[[ 1, 2, 3], + [ 5, 6, 7], + [ 9, 10, 11]], + [[13, 14, 15], + [17, 18, 19], + [21, 22, 23]]]), + DNDarray([])] + +`expand_dims(a: DNDarray, axis: int) ‑> heat.core.dndarray.DNDarray` +: Expand the shape of an array. + Insert a new axis that will appear at the axis position in the expanded array shape. + + Parameters + ---------- + a : DNDarray + Input array to be expanded. + axis : int + Position in the expanded axes where the new axis is placed. + + Raises + ------ + ValueError + If `axis` is not consistent with the available dimensions. + + Examples + -------- + >>> x = ht.array([1, 2]) + >>> x.shape + (2,) + >>> y = ht.expand_dims(x, axis=0) + >>> y + DNDarray([[1, 2]], dtype=ht.int64, device=cpu:0, split=None) + >>> y.shape + (1, 2) + >>> y = ht.expand_dims(x, axis=1) + >>> y + DNDarray([[1], + [2]], dtype=ht.int64, device=cpu:0, split=None) + >>> y.shape + (2, 1) + +`flatten(a: DNDarray) ‑> heat.core.dndarray.DNDarray` +: Flattens an array into one dimension. + + Parameters + ---------- + a : DNDarray + Array to collapse + + Warning + ---------- + If `a.split>0`, the array must be redistributed along the first axis (see :func:`resplit`).
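+ + A small sketch of the resulting distribution (illustrative, not part of the original docstring; assumes the flattened result of a distributed array is distributed along axis 0): + + >>> a = ht.ones((4, 3), split=1) + >>> ht.flatten(a).split + 0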
+ + + See Also + -------- + :func:`ravel` + + Examples + -------- + >>> a = ht.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + >>> ht.flatten(a) + DNDarray([1, 2, 3, 4, 5, 6, 7, 8], dtype=ht.int64, device=cpu:0, split=None) + +`flip(a: DNDarray, axis: Union[int, Tuple[int, ...]] = None) ‑> heat.core.dndarray.DNDarray` +: Reverse the order of elements in an array along the given axis. + The shape of the array is preserved, but the elements are reordered. + + Parameters + ---------- + a: DNDarray + Input array to be flipped + axis: int or Tuple[int,...] + A list of axes to be flipped + + See Also + -------- + :func:`fliplr` + :func:`flipud` + + Examples + -------- + >>> a = ht.array([[0, 1], [2, 3]]) + >>> ht.flip(a, [0]) + DNDarray([[2, 3], + [0, 1]], dtype=ht.int64, device=cpu:0, split=None) + >>> b = ht.array([[0, 1, 2], [3, 4, 5]], split=1) + >>> ht.flip(b, [0, 1]) + (1/2) tensor([5,4,3]) + (2/2) tensor([2,1,0]) + +`fliplr(a: DNDarray) ‑> heat.core.dndarray.DNDarray` +: Flip array in the left/right direction. If `a.ndim>2`, flip along dimension 1. + + Parameters + ---------- + a: DNDarray + Input array to be flipped, must be at least 2-D + + See Also + -------- + :func:`flip` + :func:`flipud` + + Examples + -------- + >>> a = ht.array([[0, 1], [2, 3]]) + >>> ht.fliplr(a) + DNDarray([[1, 0], + [3, 2]], dtype=ht.int64, device=cpu:0, split=None) + >>> b = ht.array([[0, 1, 2], [3, 4, 5]], split=0) + >>> ht.fliplr(b) + (1/2) tensor([[2, 1, 0]]) + (2/2) tensor([[5, 4, 3]]) + +`flipud(a: DNDarray) ‑> heat.core.dndarray.DNDarray` +: Flip array in the up/down direction. + + Parameters + ---------- + a: DNDarray + Input array to be flipped + + See Also + -------- + :func:`flip` + :func:`fliplr` + + Examples + -------- + >>> a = ht.array([[0, 1], [2, 3]]) + >>> ht.flipud(a) + DNDarray([[2, 3], + [0, 1]], dtype=ht.int64, device=cpu:0, split=None) + >>> b = ht.array([[0, 1, 2], [3, 4, 5]], split=0) + >>> ht.flipud(b) + (1/2) tensor([3,4,5]) + (2/2) tensor([0,1,2]) + +`hsplit(x: DNDarray, indices_or_sections: Iterable) ‑> List[DNDarray, ...]` +: Split array into multiple sub-DNDarrays along the 2nd axis (horizontally/column-wise). + Returns a list of sub-DNDarrays as copies of parts of `x`. + + Parameters + ---------- + x : DNDarray + DNDArray to be divided into sub-DNDarrays. + indices_or_sections : int or 1-dimensional array_like (i.e. undistributed DNDarray, list or tuple) + If `indices_or_sections` is an integer, N, the DNDarray will be divided into N equal DNDarrays along the 2nd axis. + If such a split is not possible, an error is raised. + If `indices_or_sections` is a 1-D DNDarray of sorted integers, the entries indicate where along the 2nd axis + the array is split. + If an index exceeds the dimension of the array along the 2nd axis, an empty sub-DNDarray is returned correspondingly. + + Raises + ------ + ValueError + If `indices_or_sections` is given as integer, but a split does not result in equal division. + + Notes + ----- + Please refer to the split documentation. hsplit is nearly equivalent to split with axis=1; + the array is always split along the second axis though, contrary to split, regardless of the array dimension.
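+ + For instance (an illustrative note, not part of the original docstring): for the ``(2, 4, 3)`` array used in the examples below, ``ht.hsplit(x, 2)`` returns the same two ``(2, 2, 3)`` pieces as ``ht.split(x, 2, axis=1)``.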
+ + See Also + -------- + :func:`split` + :func:`dsplit` + :func:`vsplit` + + Examples + -------- + >>> x = ht.arange(24).reshape((2, 4, 3)) + >>> ht.hsplit(x, 2) + [DNDarray([[[ 0, 1, 2], + [ 3, 4, 5]], + [[12, 13, 14], + [15, 16, 17]]]), + DNDarray([[[ 6, 7, 8], + [ 9, 10, 11]], + [[18, 19, 20], + [21, 22, 23]]])] + >>> ht.hsplit(x, [1, 3]) + [DNDarray([[[ 0, 1, 2]], + [[12, 13, 14]]]), + DNDarray([[[ 3, 4, 5], + [ 6, 7, 8]], + [[15, 16, 17], + [18, 19, 20]]]), + DNDarray([[[ 9, 10, 11]], + [[21, 22, 23]]])] + +`hstack(arrays: Sequence[DNDarray, ...]) ‑> DNDarray` +: Stack arrays in sequence horizontally (column-wise). + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. + + Parameters + ---------- + arrays : Sequence[DNDarray, ...] + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. + + See Also + -------- + :func:`concatenate` + :func:`stack` + :func:`vstack` + :func:`column_stack` + :func:`row_stack` + + Examples + -------- + >>> a = ht.array((1, 2, 3)) + >>> b = ht.array((2, 3, 4)) + >>> ht.hstack((a, b)).larray + [0/1] tensor([1, 2, 3, 2, 3, 4]) + [1/1] tensor([1, 2, 3, 2, 3, 4]) + >>> a = ht.array((1, 2, 3), split=0) + >>> b = ht.array((2, 3, 4), split=0) + >>> ht.hstack((a, b)).larray + [0/1] tensor([1, 2, 3]) + [1/1] tensor([2, 3, 4]) + >>> a = ht.array([[1], [2], [3]], split=0) + >>> b = ht.array([[2], [3], [4]], split=0) + >>> ht.hstack((a, b)).larray + [0/1] tensor([[1, 2], + [0/1] [2, 3]]) + [1/1] tensor([[3, 4]]) + +`moveaxis(x: DNDarray, source: Union[int, Sequence[int]], destination: Union[int, Sequence[int]]) ‑> heat.core.dndarray.DNDarray` +: Moves axes at the positions in `source` to new positions. + + Parameters + ---------- + x : DNDarray + The input array. + source : int or Sequence[int, ...] + Original positions of the axes to move. These must be unique. + destination : int or Sequence[int, ...] + Destination positions for each of the original axes. These must also be unique. + + See Also + -------- + ~heat.core.linalg.basics.transpose + Permute the dimensions of an array. + + Raises + ------ + TypeError + If `source` or `destination` are not ints, lists or tuples. + ValueError + If `source` and `destination` do not have the same number of elements. + + + Examples + -------- + >>> x = ht.zeros((3, 4, 5)) + >>> ht.moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> ht.moveaxis(x, -1, 0).shape + (5, 3, 4) + +`pad(array: DNDarray, pad_width: Union[int, Sequence[Sequence[int, int], ...]], mode: str = 'constant', constant_values: int = 0) ‑> DNDarray` +: Pads tensor with a specific value (default=0). + (Not all dimensions supported) + + Parameters + ---------- + array : DNDarray + Array to be padded + pad_width: Union[int, Sequence[Sequence[int, int], ...]] + Number of values padded to the edges of each axis. ((before_1, after_1),...(before_N, after_N)) unique pad widths for each axis. + Determines how many elements are padded along which dimension. + + Shortcuts: + + - ((before, after),) or (before, after): before and after pad width for each axis. + - (pad_width,) or int: before = after = pad width for all axes. + + Therefore: + + - pad last dimension: (padding_left, padding_right) + - pad last 2 dimensions: ((padding_top, padding_bottom),(padding_left, padding_right)) + - pad last 3 dimensions: ((padding_front, padding_back),(padding_top, padding_bottom),(padding_left, padding_right) ) + - ...
(same pattern) + mode : str, optional + - 'constant' (default): Pads the input tensor boundaries with a constant value. This is available for arbitrary dimensions + constant_values: Union[int, float, Sequence[Sequence[int,int], ...], Sequence[Sequence[float,float], ...]] + Number or tuple of 2-element-sequences (containing numbers), optional (default=0) + The fill values for each axis (1 tuple per axis). + ((before_1, after_1), ... (before_N, after_N)) unique pad values for each axis. + + Shortcuts: + + - ((before, after),) or (before, after): before and after padding values for each axis. + - (value,) or int: before = after = padding value for all axes. + + + Notes + ----- + This function follows the principle of datatype integrity. + Therefore, an array can only be padded with values of the same datatype. + All values that violate this rule are implicitly cast to the datatype of the `DNDarray`. + + Examples + -------- + >>> a = torch.arange(2 * 3 * 4).reshape(2, 3, 4) + >>> b = ht.array(a, split=0) + Pad last dimension + >>> c = ht.pad(b, (2, 1), constant_values=1) + tensor([[[ 1, 1, 0, 1, 2, 3, 1], + [ 1, 1, 4, 5, 6, 7, 1], + [ 1, 1, 8, 9, 10, 11, 1]], + [[ 1, 1, 12, 13, 14, 15, 1], + [ 1, 1, 16, 17, 18, 19, 1], + [ 1, 1, 20, 21, 22, 23, 1]]]) + Pad last 2 dimensions + >>> d = ht.pad(b, [(1, 0), (2, 1)]) + DNDarray([[[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 1, 2, 3, 0], + [ 0, 0, 4, 5, 6, 7, 0], + [ 0, 0, 8, 9, 10, 11, 0]], + + [[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 12, 13, 14, 15, 0], + [ 0, 0, 16, 17, 18, 19, 0], + [ 0, 0, 20, 21, 22, 23, 0]]], dtype=ht.int64, device=cpu:0, split=0) + Pad last 3 dimensions + >>> e = ht.pad(b, ((2, 1), [1, 0], (2, 1))) + DNDarray([[[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0]], + + [[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0]], + + [[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 1, 2, 3, 0], + [ 0, 0, 4, 5, 6, 7, 0], + [ 0, 0, 8, 9, 10, 11, 0]], + + [[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 12, 13, 14, 15, 0], + [ 0, 0, 16, 17, 18, 19, 0], + [ 0, 0, 20, 21, 22, 23, 0]], + + [[ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0]]], dtype=ht.int64, device=cpu:0, split=0) + +`ravel(a: DNDarray) ‑> heat.core.dndarray.DNDarray` +: Return a flattened view of `a` if possible. A copy is returned otherwise. + + Parameters + ---------- + a : DNDarray + array to collapse + + Notes + ----- + Returning a view of distributed data is only possible when `split != 0`. The returned DNDarray may be unbalanced. + Otherwise, data must be communicated among processes, and `ravel` falls back to `flatten`. + + See Also + -------- + :func:`flatten` + + Examples + -------- + >>> a = ht.ones((2, 3), split=0) + >>> b = ht.ravel(a) + >>> a[0, 0] = 4 + >>> b + DNDarray([4., 1., 1., 1., 1., 1.], dtype=ht.float32, device=cpu:0, split=0) + +`redistribute(arr: DNDarray, lshape_map: torch.Tensor = None, target_map: torch.Tensor = None) ‑> heat.core.dndarray.DNDarray` +: Redistributes the data of the :class:`DNDarray` *along the split axis* to match the given target map. + This function does not modify the non-split dimensions of the ``DNDarray``. + This is an abstraction and extension of the balance function. + + Parameters + ---------- + arr: DNDarray + DNDarray to redistribute + lshape_map : torch.Tensor, optional + The current lshape of processes. + Units are ``[rank, lshape]``. 
+ target_map : torch.Tensor, optional + The desired distribution across the processes. + Units are ``[rank, target lshape]``. + Note: the only important parts of the target map are the values along the split axis; + values which are not along this axis are there to mimic the shape of the ``lshape_map``. + + Examples + -------- + >>> st = ht.ones((50, 81, 67), split=2) + >>> target_map = torch.zeros((st.comm.size, 3), dtype=torch.int64) + >>> target_map[0, 2] = 67 + >>> print(target_map) + [0/2] tensor([[ 0, 0, 67], + [0/2] [ 0, 0, 0], + [0/2] [ 0, 0, 0]], dtype=torch.int32) + [1/2] tensor([[ 0, 0, 67], + [1/2] [ 0, 0, 0], + [1/2] [ 0, 0, 0]], dtype=torch.int32) + [2/2] tensor([[ 0, 0, 67], + [2/2] [ 0, 0, 0], + [2/2] [ 0, 0, 0]], dtype=torch.int32) + >>> print(st.lshape) + [0/2] (50, 81, 23) + [1/2] (50, 81, 22) + [2/2] (50, 81, 22) + >>> st = ht.redistribute(st, target_map=target_map) + >>> print(st.lshape) + [0/2] (50, 81, 67) + [1/2] (50, 81, 0) + [2/2] (50, 81, 0) + +`repeat(a: Iterable, repeats: Iterable, axis: Optional[int] = None) ‑> heat.core.dndarray.DNDarray` +: Creates a new `DNDarray` by repeating elements of array `a`. The output has + the same shape as `a`, except along the given axis. If axis is None, this + function returns a flattened `DNDarray`. + + Parameters + ---------- + a : array_like (i.e. int, float, or tuple/ list/ np.ndarray/ ht.DNDarray of ints/floats) + Array containing the elements to be repeated. + repeats : int, or 1-dimensional/ DNDarray/ np.ndarray/ list/ tuple of ints + The number of repetitions for each element; an int or array_like of 1 element indicates broadcasting. + In this case, the given value is broadcast to fit the shape of the given axis. + Otherwise, its length must be the same as the length of `a` along the specified axis. To put it differently, the + number of repetitions has to be determined for each element in the corresponding dimension + (or in all dimensions if axis is None). + axis: int, optional + The axis along which to repeat values. By default, use the flattened input array and return a flat output + array. + + Examples + -------- + >>> ht.repeat(3, 4) + DNDarray([3, 3, 3, 3]) + + >>> x = ht.array([[1, 2], [3, 4]]) + >>> ht.repeat(x, 2) + DNDarray([1, 1, 2, 2, 3, 3, 4, 4]) + + >>> x = ht.array([[1, 2], [3, 4]]) + >>> ht.repeat(x, [0, 1, 2, 0]) + DNDarray([2, 3, 3]) + + >>> ht.repeat(x, [1, 2], axis=0) + DNDarray([[1, 2], + [3, 4], + [3, 4]]) + +`reshape(a: DNDarray, *shape: Union[int, Tuple[int, ...]], **kwargs) ‑> heat.core.dndarray.DNDarray` +: Returns an array with the same data and number of elements as `a`, but with the specified shape. + + Parameters + ---------- + a : DNDarray + The input array + shape : Union[int, Tuple[int,...]] + Shape of the new array. Must be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. + new_split : int, optional + The distribution axis of the reshaped array. If `new_split` is not provided, the reshaped array will have: + - the same split axis as the input array, if the original dimensionality is unchanged; + - split axis 0, if the number of dimensions is modified by reshaping. + **kwargs + Extra keyword arguments. + + Raises + ------ + ValueError + If the number of elements in the new shape is inconsistent with the input data. + + Notes + ----- + `reshape()` might require significant communication among processes.
Communication is minimized if the input array is distributed along axis 0, i.e. `a.split == 0`. + + See Also + -------- + :func:`ravel` + + Examples + -------- + >>> a = ht.zeros((3, 4)) + >>> ht.reshape(a, (4, 3)) + DNDarray([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None) + >>> a = ht.linspace(0, 14, 8, split=0) + >>> ht.reshape(a, (2, 4)) + (1/2) tensor([[0., 2., 4., 6.]]) + (2/2) tensor([[ 8., 10., 12., 14.]]) + # 3-dim array, distributed along axis 1 + >>> a = ht.random.rand(2, 3, 4, split=1) + >>> a + DNDarray([[[0.5525, 0.5434, 0.9477, 0.9503], + [0.4165, 0.3924, 0.3310, 0.3935], + [0.1008, 0.1750, 0.9030, 0.8579]], + + [[0.0680, 0.4944, 0.4114, 0.6669], + [0.6423, 0.2625, 0.5413, 0.2225], + [0.0197, 0.5079, 0.4739, 0.4387]]], dtype=ht.float32, device=cpu:0, split=1) + >>> a.reshape(-1, 3) # reshape to 2-dim array: split axis will be set to 0 + DNDarray([[0.5525, 0.5434, 0.9477], + [0.9503, 0.4165, 0.3924], + [0.3310, 0.3935, 0.1008], + [0.1750, 0.9030, 0.8579], + [0.0680, 0.4944, 0.4114], + [0.6669, 0.6423, 0.2625], + [0.5413, 0.2225, 0.0197], + [0.5079, 0.4739, 0.4387]], dtype=ht.float32, device=cpu:0, split=0) + >>> a.reshape(2, 3, 2, 2, new_split=1) # reshape to 4-dim array, specify distribution axis + DNDarray([[[[0.5525, 0.5434], + [0.9477, 0.9503]], + + [[0.4165, 0.3924], + [0.3310, 0.3935]], + + [[0.1008, 0.1750], + [0.9030, 0.8579]]], + + + [[[0.0680, 0.4944], + [0.4114, 0.6669]], + + [[0.6423, 0.2625], + [0.5413, 0.2225]], + + [[0.0197, 0.5079], + [0.4739, 0.4387]]]], dtype=ht.float32, device=cpu:0, split=1) + +`resplit(arr: DNDarray, axis: Optional[int] = None) ‑> heat.core.dndarray.DNDarray` +: Out-of-place redistribution of the content of the `DNDarray`. Allows one to "unsplit" (i.e. gather) all values from all + nodes, as well as to define a new axis along which the array is split without changes to the values. + + Parameters + ---------- + arr : DNDarray + The array from which to resplit + axis : int or None + The new split axis, `None` denotes gathering, an int will set the new split axis + + Warning + ---------- + This operation might involve a significant communication overhead. Use it sparingly and preferably for + small arrays. + + Examples + -------- + >>> a = ht.zeros((4, 5), split=0) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> b = ht.resplit(a, None) + >>> b.split + None + >>> b.lshape + (0/2) (4, 5) + (1/2) (4, 5) + >>> a = ht.zeros((4, 5), split=0) + >>> a.lshape + (0/2) (2, 5) + (1/2) (2, 5) + >>> b = ht.resplit(a, 1) + >>> b.split + 1 + >>> b.lshape + (0/2) (4, 3) + (1/2) (4, 2) + +`roll(x: DNDarray, shift: Union[int, Tuple[int]], axis: Optional[Union[int, Tuple[int]]] = None) ‑> heat.core.dndarray.DNDarray` +: Rolls array elements along a specified axis. Array elements that roll beyond the last position are re-introduced at the first position. + Array elements that roll beyond the first position are re-introduced at the last position. + + Parameters + ---------- + x : DNDarray + input array + shift : Union[int, Tuple[int, ...]] + number of places by which the elements are shifted. If 'shift' is a tuple, then 'axis' must be a tuple of the same size, and each of + the given axes is shifted by the corresponding element in 'shift'. If 'shift' is an `int` and 'axis' a `tuple`, then the same shift + is used for all specified axes.
+ axis : Optional[Union[int, Tuple[int, ...]]] + axis (or axes) along which elements are shifted. If 'axis' is `None`, the array is flattened, shifted, and then restored to its original shape. + Default: `None`. + + Raises + ------ + TypeError + If 'shift' or 'axis' is not of type `int`, `list` or `tuple`. + ValueError + If 'shift' and 'axis' are tuples with different sizes. + + Examples + -------- + >>> a = ht.arange(20).reshape((4, 5)) + >>> a + DNDarray([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.roll(a, 1) + DNDarray([[19, 0, 1, 2, 3], + [ 4, 5, 6, 7, 8], + [ 9, 10, 11, 12, 13], + [14, 15, 16, 17, 18]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.roll(a, -1, 0) + DNDarray([[ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19], + [ 0, 1, 2, 3, 4]], dtype=ht.int32, device=cpu:0, split=None) + +`rot90(m: DNDarray, k: int = 1, axes: Sequence[int, int] = (0, 1)) ‑> DNDarray` +: Rotate an array by 90 degrees in the plane specified by `axes`. + Rotation direction is from the first towards the second axis. + + Parameters + ---------- + m : DNDarray + Array of two or more dimensions. + k : int + Number of times the array is rotated by 90 degrees. + axes: (2,) Sequence[int, int] + The array is rotated in the plane defined by the axes. + Axes must be different. + + Raises + ------ + ValueError + If `len(axes)!=2`. + ValueError + If the axes are the same. + ValueError + If axes are out of range. + + Notes + ----- + - ``rot90(m, k=1, axes=(1,0))`` is the reverse of ``rot90(m, k=1, axes=(0,1))``. + + - ``rot90(m, k=1, axes=(1,0))`` is equivalent to ``rot90(m, k=-1, axes=(0,1))``. + + May change the split axis on distributed tensors. + + Examples + -------- + >>> m = ht.array([[1, 2], [3, 4]], dtype=ht.int) + >>> m + DNDarray([[1, 2], + [3, 4]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.rot90(m) + DNDarray([[2, 4], + [1, 3]], dtype=ht.int32, device=cpu:0, split=None) + >>> ht.rot90(m, 2) + DNDarray([[4, 3], + [2, 1]], dtype=ht.int32, device=cpu:0, split=None) + >>> m = ht.arange(8).reshape((2, 2, 2)) + >>> ht.rot90(m, 1, (1, 2)) + DNDarray([[[1, 3], + [0, 2]], + + [[5, 7], + [4, 6]]], dtype=ht.int32, device=cpu:0, split=None) + +`row_stack(arrays: Sequence[DNDarray, ...]) ‑> DNDarray` +: Stack 1-D or 2-D `DNDarray`s as rows into a 2-D `DNDarray`. + If the input arrays are 1-D, they will be stacked as rows. If they are 2-D, + they will be concatenated along the first axis. + + Parameters + ---------- + arrays : Sequence[DNDarrays, ...] + Sequence of `DNDarray`s. + + Raises + ------ + ValueError + If arrays have more than 2 dimensions + + Notes + ----- + All ``DNDarray``s in the sequence must have the same number of columns. + All ``DNDarray``s must be split along the same axis!
+ + See Also + -------- + :func:`column_stack` + :func:`concatenate` + :func:`hstack` + :func:`stack` + :func:`vstack` + + Examples + -------- + >>> # 1-D tensors + >>> a = ht.array([1, 2, 3]) + >>> b = ht.array([2, 3, 4]) + >>> ht.row_stack((a, b)).larray + tensor([[1, 2, 3], + [2, 3, 4]]) + >>> # 1-D and 2-D tensors + >>> a = ht.array([1, 2, 3]) + >>> b = ht.array([[2, 3, 4], [5, 6, 7]]) + >>> c = ht.array([[7, 8, 9], [10, 11, 12]]) + >>> ht.row_stack((a, b, c)).larray + tensor([[ 1, 2, 3], + [ 2, 3, 4], + [ 5, 6, 7], + [ 7, 8, 9], + [10, 11, 12]]) + >>> # distributed DNDarrays, 3 processes + >>> a = ht.arange(10, split=0).reshape((2, 5)) + >>> b = ht.arange(5, 20, split=0).reshape((3, 5)) + >>> c = ht.arange(20, 40, split=0).reshape((4, 5)) + >>> ht.row_stack((a, b, c)).larray + [0/2] tensor([[0, 1, 2, 3, 4], + [0/2] [5, 6, 7, 8, 9], + [0/2] [5, 6, 7, 8, 9]], dtype=torch.int32) + [1/2] tensor([[10, 11, 12, 13, 14], + [1/2] [15, 16, 17, 18, 19], + [1/2] [20, 21, 22, 23, 24]], dtype=torch.int32) + [2/2] tensor([[25, 26, 27, 28, 29], + [2/2] [30, 31, 32, 33, 34], + [2/2] [35, 36, 37, 38, 39]], dtype=torch.int32) + >>> # distributed 1-D and 2-D DNDarrays, 3 processes + >>> a = ht.arange(5, split=0) + >>> b = ht.arange(5, 20, split=0).reshape((3, 5)) + >>> ht.row_stack((a, b)).larray + [0/2] tensor([[0, 1, 2, 3, 4], + [0/2] [5, 6, 7, 8, 9]]) + [1/2] tensor([[10, 11, 12, 13, 14]]) + [2/2] tensor([[15, 16, 17, 18, 19]]) + +`shape(a: DNDarray) ‑> Tuple[int, ...]` +: Returns the global shape of a (potentially distributed) `DNDarray` as a tuple. + + Parameters + ---------- + a : DNDarray + The input `DNDarray`. + +`sort(a: DNDarray, axis: int = -1, descending: bool = False, out: Optional[DNDarray] = None)` +: Sorts the elements of `a` along the given dimension (by default in ascending order) by their value. + The sorting is not stable, which means that equal elements in the result may be ordered differently than in the + original array. + Sorting along `axis==a.split` requires significant MPI communication between the processes. + Returns a tuple `(values, indices)` with the sorted values and the indices of the elements in the original data. + + Parameters + ---------- + a : DNDarray + Input array to be sorted. + axis : int, optional + The dimension to sort along. + Default is the last axis. + descending : bool, optional + If set to `True`, values are sorted in descending order. + out : DNDarray, optional + A location in which to store the results. If provided, it must have a broadcastable shape. If not provided + or set to `None`, a fresh array is allocated. + + Raises + ------ + ValueError + If `axis` is not consistent with the available dimensions. + + Examples + -------- + >>> x = ht.array([[4, 1], [2, 3]], split=0) + >>> x.lshape + (1, 2) + (1, 2) + >>> y = ht.sort(x, axis=0) + >>> y + (array([[2, 1]]), array([[1, 0]])) + (array([[4, 3]]), array([[0, 1]])) + >>> ht.sort(x, descending=True) + (array([[4, 1]]), array([[0, 1]])) + (array([[3, 2]]), array([[1, 0]])) + +`split(x: DNDarray, indices_or_sections: Iterable, axis: int = 0) ‑> List[DNDarray, ...]` +: Split a DNDarray into multiple sub-DNDarrays. + Returns a list of sub-DNDarrays as copies of parts of `x`. + + Parameters + ---------- + x : DNDarray + DNDArray to be divided into sub-DNDarrays. + indices_or_sections : int or 1-dimensional array_like (i.e. undistributed DNDarray, list or tuple) + If `indices_or_sections` is an integer, N, the DNDarray will be divided into N equal DNDarrays along axis.
+ If such a split is not possible, an error is raised. + If `indices_or_sections` is a 1-D DNDarray of sorted integers, the entries indicate where along axis + the array is split. + For example, `indices_or_sections = [2, 3]` would, for `axis = 0`, result in + + - `x[:2]` + - `x[2:3]` + - `x[3:]` + + If an index exceeds the dimension of the array along axis, an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. + `axis` is not allowed to equal `x.split` if `x` is distributed. + + Raises + ------ + ValueError + If `indices_or_sections` is given as integer, but a split does not result in equal division. + + Warnings + -------- + Though it is possible to distribute `x`, this function has nothing to do with the split + parameter of a DNDarray. + + See Also + -------- + :func:`dsplit` + :func:`hsplit` + :func:`vsplit` + + Examples + -------- + >>> x = ht.arange(12).reshape((4, 3)) + >>> ht.split(x, 2) + [ DNDarray([[0, 1, 2], + [3, 4, 5]]), + DNDarray([[ 6, 7, 8], + [ 9, 10, 11]])] + >>> ht.split(x, [2, 3, 5]) + [ DNDarray([[0, 1, 2], + [3, 4, 5]]), + DNDarray([[6, 7, 8]]), + DNDarray([[ 9, 10, 11]]), + DNDarray([])] + >>> ht.split(x, [1, 2], 1) + [DNDarray([[0], + [3], + [6], + [9]]), + DNDarray([[ 1], + [ 4], + [ 7], + [10]]), + DNDarray([[ 2], + [ 5], + [ 8], + [11]])] + +`squeeze(x: DNDarray, axis: Union[int, Tuple[int, ...]] = None) ‑> heat.core.dndarray.DNDarray` +: Remove single-element entries from the shape of a `DNDarray`. + Returns the input array, but with all or a subset (indicated by `axis`) of the dimensions of length 1 removed. + Split semantics: see Notes below. + + Parameters + ---------- + x : DNDarray + Input data. + axis : None or int or Tuple[int,...], optional + Selects a subset of the single-element entries in the shape. + If axis is `None`, all single-element entries will be removed from the shape. + + Raises + ------ + ValueError + If an axis is selected whose shape entry is greater than one. + + Notes + ----- + Split semantics: a distributed DNDarray will keep its original split dimension after "squeezing", + which, depending on the squeeze axis, may result in a lower numerical `split` value (see Examples). + + Examples + -------- + >>> import heat as ht + >>> a = ht.random.randn(1, 3, 1, 5) + >>> a + DNDarray([[[[-0.2604, 1.3512, 0.1175, 0.4197, 1.3590]], + [[-0.2777, -1.1029, 0.0697, -1.3074, -1.1931]], + [[-0.4512, -1.2348, -1.1479, -0.0242, 0.4050]]]], dtype=ht.float32, device=cpu:0, split=None) + >>> a.shape + (1, 3, 1, 5) + >>> ht.squeeze(a).shape + (3, 5) + >>> ht.squeeze(a) + DNDarray([[-0.2604, 1.3512, 0.1175, 0.4197, 1.3590], + [-0.2777, -1.1029, 0.0697, -1.3074, -1.1931], + [-0.4512, -1.2348, -1.1479, -0.0242, 0.4050]], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.squeeze(a, axis=0).shape + (3, 1, 5) + >>> ht.squeeze(a, axis=-2).shape + (1, 3, 5) + >>> ht.squeeze(a, axis=1).shape + Traceback (most recent call last): + ... + ValueError: Dimension along axis 1 is not 1 for shape (1, 3, 1, 5) + >>> x = ht.zeros((10, 1, 12, 13), split=2) + >>> x.shape + (10, 1, 12, 13) + >>> x.split + 2 + >>> x.squeeze().shape + (10, 12, 13) + >>> x.squeeze().split + 1 + +`stack(arrays: Sequence[DNDarray, ...], axis: int = 0, out: Optional[DNDarray] = None) ‑> DNDarray` +: Join a sequence of `DNDarray`s along a new axis. + The `axis` parameter specifies the index of the new axis in the dimensions of the result. + For example, if `axis=0`, the arrays will be stacked along the first dimension; if `axis=-1`, + they will be stacked along the last dimension.
See Notes below for split semantics. + + Parameters + ---------- + arrays : Sequence[DNDarrays, ...] + Each DNDarray must have the same shape, must be split along the same axis, and must be balanced. + axis : int, optional + The axis in the result array along which the input arrays are stacked. + out : DNDarray, optional + If provided, the destination to place the result. The shape and split axis must be correct, matching + that of what stack would have returned if no out argument were specified (see Notes below). + + Raises + ------ + TypeError + If arrays in sequence are not `DNDarray`s, or if their `dtype` attribute does not match. + ValueError + If `arrays` contains less than 2 `DNDarray`s. + ValueError + If the `DNDarray`s are of different shapes, or if they are split along different axes (`split` attribute). + RuntimeError + If the `DNDarrays` reside on different devices. + + Notes + ----- + Split semantics: :func:`stack` requires that all arrays in the sequence be split along the same dimension. + After stacking, the data are still distributed along the original dimension, however a new dimension has been added at `axis`, + therefore: + + - if :math:`axis <= split`, output will be distributed along :math:`split+1` + + - if :math:`axis > split`, output will be distributed along `split` + + See Also + -------- + :func:`column_stack` + :func:`concatenate` + :func:`hstack` + :func:`row_stack` + :func:`vstack` + + Examples + -------- + >>> a = ht.arange(20).reshape((4, 5)) + >>> b = ht.arange(20, 40).reshape((4, 5)) + >>> ht.stack((a, b), axis=0).larray + tensor([[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [25, 26, 27, 28, 29], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]]]) + >>> # distributed DNDarrays, 3 processes, stack along last dimension + >>> a = ht.arange(20, split=0).reshape(4, 5) + >>> b = ht.arange(20, 40, split=0).reshape(4, 5) + >>> ht.stack((a, b), axis=-1).larray + [0/2] tensor([[[ 0, 20], + [0/2] [ 1, 21], + [0/2] [ 2, 22], + [0/2] [ 3, 23], + [0/2] [ 4, 24]], + [0/2] [[ 5, 25], + [0/2] [ 6, 26], + [0/2] [ 7, 27], + [0/2] [ 8, 28], + [0/2] [ 9, 29]]]) + [1/2] tensor([[[10, 30], + [1/2] [11, 31], + [1/2] [12, 32], + [1/2] [13, 33], + [1/2] [14, 34]]]) + [2/2] tensor([[[15, 35], + [2/2] [16, 36], + [2/2] [17, 37], + [2/2] [18, 38], + [2/2] [19, 39]]]) + +`swapaxes(x: DNDarray, axis1: int, axis2: int) ‑> heat.core.dndarray.DNDarray` +: Interchanges two axes of an array. + + Parameters + ---------- + x : DNDarray + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + See Also + -------- + :func:`~heat.core.linalg.basics.transpose` + Permute the dimensions of an array. + + Examples + -------- + >>> x = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) + >>> ht.swapaxes(x, 0, 1) + DNDarray([[[0, 1], + [4, 5]], + [[2, 3], + [6, 7]]], dtype=ht.int64, device=cpu:0, split=None) + >>> ht.swapaxes(x, 0, 2) + DNDarray([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]], dtype=ht.int64, device=cpu:0, split=None) + +`tile(x: DNDarray, reps: Sequence[int, ...]) ‑> DNDarray` +: Construct a new DNDarray by repeating 'x' the number of times given by 'reps'. + + If 'reps' has length 'd', the result will have 'max(d, x.ndim)' dimensions: + + - if 'x.ndim < d', 'x' is promoted to be d-dimensional by prepending new axes. 
+ So a shape (3,) array is promoted to (1, 3) for 2-D replication, or shape (1, 1, 3) + for 3-D replication (if this is not the desired behavior, promote 'x' to d-dimensions + manually before calling this function); + + - if 'x.ndim > d', 'reps' will replicate the last 'd' dimensions of 'x', i.e., if + 'x.shape' is (2, 3, 4, 5), a 'reps' of (2, 2) will be expanded to (1, 1, 2, 2). + + Parameters + ---------- + x : DNDarray + Input + + reps : Sequence[ints,...] + Repetitions + + Returns + ------- + tiled : DNDarray + Split semantics: if `x` is distributed, the tiled data will be distributed along the + same dimension. Note that nominally `tiled.split != x.split` in the case where + `len(reps) > x.ndim`. See example below. + + Examples + -------- + >>> x = ht.arange(12).reshape((4, 3)).resplit_(0) + >>> x + DNDarray([[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8], + [ 9, 10, 11]], dtype=ht.int32, device=cpu:0, split=0) + >>> reps = (1, 2, 2) + >>> tiled = ht.tile(x, reps) + >>> tiled + DNDarray([[[ 0, 1, 2, 0, 1, 2], + [ 3, 4, 5, 3, 4, 5], + [ 6, 7, 8, 6, 7, 8], + [ 9, 10, 11, 9, 10, 11], + [ 0, 1, 2, 0, 1, 2], + [ 3, 4, 5, 3, 4, 5], + [ 6, 7, 8, 6, 7, 8], + [ 9, 10, 11, 9, 10, 11]]], dtype=ht.int32, device=cpu:0, split=1) + +`topk(a: DNDarray, k: int, dim: int = -1, largest: bool = True, sorted: bool = True, out: Optional[Tuple[DNDarray, DNDarray]] = None) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]` +: Returns the :math:`k` highest entries in the array. + (Not Stable for split arrays) + + Parameters + ---------- + a: DNDarray + Input data + k: int + Desired number of output items + dim: int, optional + Dimension along which to sort, per default the last dimension + largest: bool, optional + If `True`, return the :math:`k` largest items, otherwise return the :math:`k` smallest items + sorted: bool, optional + Whether to sort the output (descending if `largest` is `True`, else ascending) + out: Tuple[DNDarray, ...], optional + output buffer + + Examples + -------- + >>> a = ht.array([1, 2, 3]) + >>> ht.topk(a, 2) + (DNDarray([3, 2], dtype=ht.int64, device=cpu:0, split=None), DNDarray([2, 1], dtype=ht.int64, device=cpu:0, split=None)) + >>> a = ht.array([[1, 2, 3], [1, 2, 3]]) + >>> ht.topk(a, 2, dim=1) + (DNDarray([[3, 2], + [3, 2]], dtype=ht.int64, device=cpu:0, split=None), + DNDarray([[2, 1], + [2, 1]], dtype=ht.int64, device=cpu:0, split=None)) + >>> a = ht.array([[1, 2, 3], [1, 2, 3]], split=1) + >>> ht.topk(a, 2, dim=1) + (DNDarray([[3, 2], + [3, 2]], dtype=ht.int64, device=cpu:0, split=1), + DNDarray([[2, 1], + [2, 1]], dtype=ht.int64, device=cpu:0, split=1)) + +`unfold(a: DNDarray, axis: int, size: int, step: int = 1)` +: Returns a DNDarray which contains all slices of size `size` in the axis `axis`. + Behaves like torch.Tensor.unfold for DNDarrays. 
[torch.Tensor.unfold](https://pytorch.org/docs/stable/generated/torch.Tensor.unfold.html) + + Parameters + ---------- + a : DNDarray + array to unfold + axis : int + axis in which unfolding happens + size : int + the size of each slice that is unfolded, must be greater than 1 + step : int + the step between each slice, must be at least 1 + + Examples + -------- + >>> x = ht.arange(1., 8) + >>> x + DNDarray([1., 2., 3., 4., 5., 6., 7.], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.unfold(x, 0, 2, 1) + DNDarray([[1., 2.], + [2., 3.], + [3., 4.], + [4., 5.], + [5., 6.], + [6., 7.]], dtype=ht.float32, device=cpu:0, split=None) + >>> ht.unfold(x, 0, 2, 2) + DNDarray([[1., 2.], + [3., 4.], + [5., 6.]], dtype=ht.float32, device=cpu:0, split=None) + + Notes + ----- + If the split axis of the array is the unfold axis, every process must hold a local chunk of at least `size - 1` elements along it. + +`unique(a: DNDarray, sorted: bool = False, return_inverse: bool = False, axis: int = None) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]` +: Finds and returns the unique elements of a `DNDarray`. + If return_inverse is `True`, the second tensor will hold the list of inverse indices. + If distributed, it is most efficient if `axis!=a.split`. + + Parameters + ---------- + a : DNDarray + Input array. + sorted : bool, optional + Whether the found elements should be sorted before returning as output. + Warning: `sorted` is not supported if `axis!=None` and `axis!=a.split` + return_inverse : bool, optional + Whether to also return the indices for where elements in the original input ended up in the returned + unique list. + axis : int, optional + Axis along which unique elements should be found. Defaults to `None`, which will return a one-dimensional list of + unique values. + + Examples + -------- + >>> x = ht.array([[3, 2], [1, 3]]) + >>> ht.unique(x, sorted=True) + array([1, 2, 3]) + >>> ht.unique(x, sorted=True, axis=0) + array([[1, 3], + [2, 3]]) + >>> ht.unique(x, sorted=True, axis=1) + array([[2, 3], + [3, 1]]) + +`vsplit(x: DNDarray, indices_or_sections: Iterable) ‑> List[DNDarray, ...]` +: Split array into multiple sub-DNDarrays along the 1st axis (vertically/row-wise). + Returns a list of sub-DNDarrays as copies of parts of ``x``. + + Parameters + ---------- + x : DNDarray + DNDArray to be divided into sub-DNDarrays. + indices_or_sections : Iterable + If `indices_or_sections` is an integer, N, the DNDarray will be divided into N equal DNDarrays along the 1st axis. + + If such a split is not possible, an error is raised. + + If `indices_or_sections` is a 1-D DNDarray of sorted integers, the entries indicate where along the 1st axis the array is split. + + If an index exceeds the dimension of the array along the 1st axis, an empty sub-DNDarray is returned correspondingly. + + + Raises + ------ + ValueError + If `indices_or_sections` is given as integer, but a split does not result in equal division. + + Notes + ----- + Please refer to the split documentation. :func:`vsplit` is equivalent to split with `axis=0`; + the array is always split along the first axis regardless of the array dimension.
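+ + For instance (an illustrative note, not part of the original docstring): for the ``(4, 3, 2)`` array used in the examples below, ``ht.vsplit(x, 2)`` returns the same two ``(2, 3, 2)`` pieces as ``ht.split(x, 2, axis=0)``.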
+ + See Also + -------- + :func:`split` + :func:`dsplit` + :func:`hsplit` + + Examples + -------- + >>> x = ht.arange(24).reshape((4, 3, 2)) + >>> ht.vsplit(x, 2) + [DNDarray([[[ 0, 1], + [ 2, 3], + [ 4, 5]], + [[ 6, 7], + [ 8, 9], + [10, 11]]]), + DNDarray([[[12, 13], + [14, 15], + [16, 17]], + [[18, 19], + [20, 21], + [22, 23]]])] + >>> ht.vsplit(x, [1, 3]) + [DNDarray([[[0, 1], + [2, 3], + [4, 5]]]), + DNDarray([[[ 6, 7], + [ 8, 9], + [10, 11]], + [[12, 13], + [14, 15], + [16, 17]]]), + DNDarray([[[18, 19], + [20, 21], + [22, 23]]])] + +`vstack(arrays: Sequence[DNDarray, ...]) ‑> DNDarray` +: Stack arrays in sequence vertically (row wise). + This is equivalent to concatenation along the first axis. + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The :func:`concatenate` function provides more general + stacking operations. + + Parameters + ---------- + arrays : Sequence[DNDarray,...] + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. + + Notes + ----- + The split axis will be switched to 1 in the case that both elements are 1D and split=0 + + See Also + -------- + :func:`concatenate` + :func:`stack` + :func:`hstack` + :func:`column_stack` + :func:`row_stack` + + + Examples + -------- + >>> a = ht.array([1, 2, 3]) + >>> b = ht.array([2, 3, 4]) + >>> ht.vstack((a, b)).larray + [0/1] tensor([[1, 2, 3], + [0/1] [2, 3, 4]]) + [1/1] tensor([[1, 2, 3], + [1/1] [2, 3, 4]]) + >>> a = ht.array([1, 2, 3], split=0) + >>> b = ht.array([2, 3, 4], split=0) + >>> ht.vstack((a, b)).larray + [0/1] tensor([[1, 2], + [0/1] [2, 3]]) + [1/1] tensor([[3], + [1/1] [4]]) + >>> a = ht.array([[1], [2], [3]], split=0) + >>> b = ht.array([[2], [3], [4]], split=0) + >>> ht.vstack((a, b)).larray + [0] tensor([[1], + [0] [2], + [0] [3]]) + [1] tensor([[2], + [1] [3], + [1] [4]]) diff --git a/doc/api/heat/core/memory.md b/doc/api/heat/core/memory.md new file mode 100644 index 0000000000..7a25648680 --- /dev/null +++ b/doc/api/heat/core/memory.md @@ -0,0 +1,37 @@ +Module heat.core.memory +======================= +Utilities to manage the internal memory of an array. + +Functions +--------- + +`copy(x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray` +: Return a deep copy of the given object. + + Parameters + ---------- + x : DNDarray + Input array to be copied. + + Examples + -------- + >>> a = ht.array([1, 2, 3]) + >>> b = ht.copy(a) + >>> b + DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None) + >>> a[0] = 4 + >>> a + DNDarray([4, 2, 3], dtype=ht.int64, device=cpu:0, split=None) + >>> b + DNDarray([1, 2, 3], dtype=ht.int64, device=cpu:0, split=None) + +`sanitize_memory_layout(x: torch.Tensor, order: str = 'C') ‑> torch.Tensor` +: Return the given object with memory layout as defined below. The default memory distribution is assumed. + + Parameters + ---------- + x: torch.Tensor + Input data + order: str, optional. + Default is ``'C'`` as in C-like (row-major) memory layout. The array is stored first dimension first (rows first if ``ndim=2``). + Alternative is ``'F'``, as in Fortran-like (column-major) memory layout. The array is stored last dimension first (columns first if ``ndim=2``). 
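+
+A minimal usage sketch for `sanitize_memory_layout`, which has no doctest above (the import path is assumed from this module's name, `heat.core.memory`):
+
+```python
+# Re-stride a process-local torch tensor to Fortran (column-major) order.
+import torch
+
+from heat.core.memory import sanitize_memory_layout
+
+x = torch.arange(12).reshape(3, 4)  # C-contiguous by default
+f = sanitize_memory_layout(x, order="F")  # same values, column-major strides
+print(torch.equal(x, f))  # True: only the memory layout differs
+print(x.is_contiguous(), f.is_contiguous())  # True False (row-major check)
+```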
diff --git a/doc/api/heat/core/printing.md b/doc/api/heat/core/printing.md
new file mode 100644
index 0000000000..88f94a6399
--- /dev/null
+++ b/doc/api/heat/core/printing.md
@@ -0,0 +1,119 @@
+Module heat.core.printing
+=========================
+Allows outputting DNDarrays to stdout.
+
+Functions
+---------
+
+`get_printoptions() ‑> dict`
+: Returns the currently configured printing options as key-value pairs.
+
+`global_printing() ‑> None`
+: For `DNDarray`s, the builtin `print` function will gather all of the data, format it,
+    and then print it on rank 0 ONLY.
+
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    >>> x = ht.arange(15 * 5, dtype=ht.float).reshape((15, 5)).resplit(0)
+    >>> print(x)
+    [0] DNDarray([[ 0.,  1.,  2.,  3.,  4.],
+                  [ 5.,  6.,  7.,  8.,  9.],
+                  [10., 11., 12., 13., 14.],
+                  [15., 16., 17., 18., 19.],
+                  [20., 21., 22., 23., 24.],
+                  [25., 26., 27., 28., 29.],
+                  [30., 31., 32., 33., 34.],
+                  [35., 36., 37., 38., 39.],
+                  [40., 41., 42., 43., 44.],
+                  [45., 46., 47., 48., 49.],
+                  [50., 51., 52., 53., 54.],
+                  [55., 56., 57., 58., 59.],
+                  [60., 61., 62., 63., 64.],
+                  [65., 66., 67., 68., 69.],
+                  [70., 71., 72., 73., 74.]], dtype=ht.float32, device=cpu:0, split=0)
+
+`local_printing() ‑> None`
+: The builtin `print` function will now print the local PyTorch Tensor values for
+    `DNDarrays` given as arguments.
+
+    Examples
+    --------
+    >>> x = ht.arange(15 * 5, dtype=ht.float).reshape((15, 5)).resplit(0)
+    >>> ht.local_printing()
+    [0/2] Printing options set to LOCAL. DNDarrays will print the local PyTorch Tensors
+    >>> print(x)
+    [0/2] [[ 0.,  1.,  2.,  3.,  4.],
+    [0/2]  [ 5.,  6.,  7.,  8.,  9.],
+    [0/2]  [10., 11., 12., 13., 14.],
+    [0/2]  [15., 16., 17., 18., 19.],
+    [0/2]  [20., 21., 22., 23., 24.]]
+    [1/2] [[25., 26., 27., 28., 29.],
+    [1/2]  [30., 31., 32., 33., 34.],
+    [1/2]  [35., 36., 37., 38., 39.],
+    [1/2]  [40., 41., 42., 43., 44.],
+    [1/2]  [45., 46., 47., 48., 49.]]
+    [2/2] [[50., 51., 52., 53., 54.],
+    [2/2]  [55., 56., 57., 58., 59.],
+    [2/2]  [60., 61., 62., 63., 64.],
+    [2/2]  [65., 66., 67., 68., 69.],
+    [2/2]  [70., 71., 72., 73., 74.]]
+
+`print0(*args, **kwargs) ‑> None`
+: Wraps the builtin `print` function in such a way that it will only run the command on
+    rank 0. If this is called with DNDarrays and local printing, only the data local to
+    process 0 is printed. For more information see the examples.
+
+    This function is also available as a builtin when importing heat.
+
+    Examples
+    --------
+    >>> x = ht.arange(15 * 5, dtype=ht.float).reshape((15, 5)).resplit(0)
+    >>> # GLOBAL PRINTING
+    >>> ht.print0(x)
+    [0] DNDarray([[ 0.,  1.,  2.,  3.,  4.],
+                  [ 5.,  6.,  7.,  8.,  9.],
+                  [10., 11., 12., 13., 14.],
+                  [15., 16., 17., 18., 19.],
+                  [20., 21., 22., 23., 24.],
+                  [25., 26., 27., 28., 29.],
+                  [30., 31., 32., 33., 34.],
+                  [35., 36., 37., 38., 39.],
+                  [40., 41., 42., 43., 44.],
+                  [45., 46., 47., 48., 49.],
+                  [50., 51., 52., 53., 54.],
+                  [55., 56., 57., 58., 59.],
+                  [60., 61., 62., 63., 64.],
+                  [65., 66., 67., 68., 69.],
+                  [70., 71., 72., 73., 74.]], dtype=ht.float32, device=cpu:0, split=0)
+    >>> ht.local_printing()
+    [0/2] Printing options set to LOCAL. DNDarrays will print the local PyTorch Tensors
+    >>> print0(x)
+    [0/2] [[ 0.,  1.,  2.,  3.,  4.],
+    [0/2]  [ 5.,  6.,  7.,  8.,  9.],
+    [0/2]  [10., 11., 12., 13., 14.],
+    [0/2]  [15., 16., 17., 18., 19.],
+    [0/2]  [20., 21., 22., 23., 24.]], device: cpu:0, split: 0
+
+`set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None, sci_mode=None)`
+: Configures the printing options.
+    List of items shamelessly taken from NumPy and PyTorch (thanks guys!).
+
+    Parameters
+    ----------
+    precision : int, optional
+        Number of digits of precision for floating point output (default=4).
+    threshold : int, optional
+        Total number of array elements which trigger summarization rather than full `repr` string (default=1000).
+    edgeitems : int, optional
+        Number of array items in summary at beginning and end of each dimension (default=3).
+    linewidth : int, optional
+        The number of characters per line for the purpose of inserting line breaks (default=80).
+    profile : str, optional
+        Sane defaults for pretty printing. Can override with any of the above options. Can be any one of `default`,
+        `short`, `full`.
+    sci_mode : bool, optional
+        Enable (True) or disable (False) scientific notation. If None (default) is specified, the value is
+        automatically inferred by HeAT.
diff --git a/doc/api/heat/core/random.md b/doc/api/heat/core/random.md
new file mode 100644
index 0000000000..5b6f9b22cf
--- /dev/null
+++ b/doc/api/heat/core/random.md
@@ -0,0 +1,351 @@
+Module heat.core.random
+=======================
+Provides parallel random number generators (pRNG).
+Two options are available:
+
+1. Batchparallel RNG (default):
+    This is a simple, fast, and (weakly) reproducible random number generator (RNG) that is based on the idea of a global seed
+    that results in process-local seeds for each MPI-process; then, on each MPI-process torch's RNG is used with these process-local seeds.
+    To reproduce results, the global seed needs to be set to the same value and the number of MPI-processes needs to be the same (= weak reproducibility).
+
+2. Threefry RNG:
+    This is a fully reproducible parallel RNG that is based on the Threefry encryption algorithm.
+    It is slower than the batchparallel RNG and limited to generating random DNDarrays with fewer than ``maxint32`` entries.
+    However, unlike the batchparallel RNG it ensures full reproducibility even for different numbers of MPI-processes.
+
+Functions
+---------
+
+`get_state() ‑> Tuple[str, int, int, int, float]`
+: Return a tuple representing the internal state of the generator. The returned tuple has the following items:
+
+    1. The string 'Batchparallel' or 'Threefry', describing the type of random number generator,
+
+    2. The seed. For batchparallel RNG this refers to the global seed. For Threefry RNG the seed is the key value,
+
+    3. The local seed (for batchparallel RNG), or the internal counter value (for Threefry RNG), respectively,
+
+    4. An integer ``has_gauss``, always set to 0 (present for compatibility with numpy), and
+
+    5. A float ``cached_gaussian``, always set to 0.0 (present for compatibility with numpy).
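+
+A short sketch of inspecting that state tuple (the exact local seed is process-dependent):
+
+```python
+import heat as ht
+
+ht.random.seed(42)  # set the global seed
+kind, global_seed, local_seed, has_gauss, cached = ht.random.get_state()
+# kind == 'Batchparallel' by default; global_seed == 42; local_seed differs
+# per MPI process; has_gauss and cached are the 0 / 0.0 placeholders kept
+# for NumPy compatibility.
+```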
+
+`normal(mean: Union[float, DNDarray] = 0.0, std: Union[float, DNDarray] = 1.0, shape: Optional[Tuple[int, ...]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Returns an array filled with random numbers from a normal distribution whose mean and standard deviation are given.
+    If `std` and `mean` are DNDarrays, they have to match `shape`.
+
+    Parameters
+    ----------
+    mean : float or DNDarray
+        The mean of the distribution.
+    std : float or DNDarray
+        The standard deviation of the distribution. Must be non-negative.
+    shape : tuple[int]
+        The shape of the returned array, should all be positive. If no argument is given a single random sample is
+        generated.
+    dtype : Type[datatype], optional
+        The datatype of the returned values. Has to be one of :class:`~heat.core.types.float32` or
+        :class:`~heat.core.types.float64`.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally
+        set default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    See Also
+    --------
+    randn
+        Uses the standard normal distribution
+    standard_normal
+        Uses the standard normal distribution
+
+    Examples
+    --------
+    >>> ht.random.normal(ht.array([-1, 2]), ht.array([0.5, 2]), (2,))
+    DNDarray([-1.4669,  1.6596], dtype=ht.float64, device=cpu:0, split=None)
+
+`permutation(x: Union[int, DNDarray], **kwargs) ‑> heat.core.dndarray.DNDarray`
+: Randomly permute a sequence, or return a permuted range. If ``x`` is a multi-dimensional array, it is only shuffled
+    along its first index.
+
+    Parameters
+    ----------
+    x : int or DNDarray
+        If ``x`` is an integer, call :func:`heat.random.randperm`. If ``x`` is an array,
+        make a copy and shuffle the elements randomly.
+    kwargs : dict, optional
+        Additional keyword arguments passed to :func:`heat.random.randperm` if ``x`` is an integer.
+
+    See Also
+    --------
+    :func:`heat.random.randperm` for randomly permuted ranges.
+
+    Examples
+    --------
+    >>> ht.random.permutation(10)
+    DNDarray([9, 1, 5, 4, 8, 2, 7, 6, 3, 0], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.random.permutation(ht.array([1, 4, 9, 12, 15]))
+    DNDarray([ 9,  1, 12,  4, 15], dtype=ht.int64, device=cpu:0, split=None)
+    >>> arr = ht.arange(9).reshape((3, 3))
+    >>> ht.random.permutation(arr)
+    DNDarray([[3, 4, 5],
+              [6, 7, 8],
+              [0, 1, 2]], dtype=ht.int32, device=cpu:0, split=None)
+
+    Notes
+    -----
+    This routine makes usage of torch's RNG to generate an array of the permuted indices of axis 0.
+    Thus, the array containing these indices needs to fit into the memory of a single MPI-process.
+
+`rand(*d: int, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[Device] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Random values in a given shape. Create a :class:`~heat.core.dndarray.DNDarray` of the given shape and populate it
+    with random samples from a uniform distribution over :math:`[0, 1)`.
+
+    Parameters
+    ----------
+    *d : int, optional
+        The dimensions of the returned array, should all be positive. If no argument is given a single random sample
+        is generated.
+    dtype : Type[datatype], optional
+        The datatype of the returned values. Has to be one of :class:`~heat.core.types.float32` or
+        :class:`~heat.core.types.float64`.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    Raises
+    ------
+    ValueError
+        If any dimension is negative or not convertible to an integer, or if the passed ``dtype`` is not a
+        floating-point type.
+
+    Examples
+    --------
+    >>> ht.rand(3)
+    DNDarray([0.1921, 0.9635, 0.5047], dtype=ht.float32, device=cpu:0, split=None)
+
+`randint(low: int, high: Optional[int] = None, size: Optional[Union[int, Tuple[int]]] = None, dtype: Optional[Type[datatype]] = heat.core.types.int32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Random values in a given shape. Create a tensor of the given shape and populate it with random integer samples from
+    a uniform distribution over :math:`[low, high)` or :math:`[0, low)` if ``high`` is not provided.
+
+    Parameters
+    ----------
+    low : int
+        Lowest (signed) integer to be drawn from the distribution (unless `high=None`, in which case this parameter
+        is one above the highest such integer).
+    high : int, optional
+        If provided, one above the largest (signed) integer to be drawn from the distribution (see above for behavior
+        if `high=None`).
+    size : int or Tuple[int,...], optional
+        Output shape. If the given shape is, e.g., :math:`(m, n, k)`, then :math:`m \times n \times k` samples are drawn.
+        Default is None, in which case a single value is returned.
+    dtype : datatype, optional
+        Desired datatype of the result. Must be an integer type, defaults to int32.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    Raises
+    ------
+    TypeError
+        If one of low or high is not an int.
+    ValueError
+        If low >= high, dimensions are negative or the passed datatype is not an integer.
+
+    Examples
+    --------
+    >>> ht.randint(128, size=(3,))
+    DNDarray([4, 101, 16], dtype=ht.int32, device=cpu:0, split=None)
+
+`randn(*d: int, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Returns a tensor filled with random numbers from a standard normal distribution with zero mean and variance of one.
+
+    Parameters
+    ----------
+    *d : int, optional
+        The dimensions of the returned array, should all be positive.
+    dtype : Type[datatype], optional
+        The datatype of the returned values. Has to be one of :class:`~heat.core.types.float32` or
+        :class:`~heat.core.types.float64`.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally set
+        default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    See Also
+    --------
+    normal
+        Equivalent function with arguments for the mean and standard deviation.
+    standard_normal
+        Similar, but takes a tuple as its argument.
+
+    Raises
+    ------
+    TypeError
+        If one of ``d1`` to ``dn`` is not an integer.
+    ValueError
+        If one of ``d1`` to ``dn`` is less than or equal to 0.
+
+    Examples
+    --------
+    >>> ht.randn(3)
+    DNDarray([ 0.1921, -0.9635,  0.5047], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.randn(4, 4)
+    DNDarray([[-1.1261,  0.5971,  0.2851,  0.9998],
+              [-1.8548, -1.2574,  0.2391, -0.3302],
+              [ 1.3365, -1.5212,  1.4159, -0.1671],
+              [ 0.1260,  1.2126, -0.0804,  0.0907]], dtype=ht.float32, device=cpu:0, split=None)
+
+`random(shape: Optional[Tuple[int]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None)`
+: Populates a :class:`~heat.core.dndarray.DNDarray` of the given shape with random samples from a continuous uniform
+    distribution over :math:`[0.0, 1.0)`.
+
+    Parameters
+    ----------
+    shape : tuple[int]
+        The shape of the returned array, should all be positive. If no argument is given a single random sample is
+        generated.
+    dtype : Type[datatype], optional
+        The datatype of the returned values. Has to be one of :class:`~heat.core.types.float32` or
+        :class:`~heat.core.types.float64`.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally
+        set default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    Examples
+    --------
+    >>> ht.random.random_sample()
+    0.47108547995356098
+    >>> ht.random.random_sample((3,))
+    DNDarray([0.30220482, 0.86820401, 0.1654503], dtype=ht.float32, device=cpu:0, split=None)
+
+`random_integer(low: int, high: Optional[int] = None, size: Optional[Union[int, Tuple[int]]] = None, dtype: Optional[Type[datatype]] = heat.core.types.int32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Alias for :func:`heat.random.randint`.
+
+`random_sample(shape: Optional[Tuple[int]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None)`
+: Alias for :func:`heat.random.random`.
+
+`randperm(n: int, dtype: Type[datatype] = heat.core.types.int64, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Returns a random permutation of integers from :math:`0` to :math:`n - 1`.
+
+    Parameters
+    ----------
+    n : int
+        Upper, exclusive bound for the integer range.
+    dtype : datatype, optional
+        The datatype of the returned values.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally
+        set default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    Raises
+    ------
+    TypeError
+        If ``n`` is not an integer.
+
+    Examples
+    --------
+    >>> ht.random.randperm(4)
+    DNDarray([2, 3, 1, 0], dtype=ht.int64, device=cpu:0, split=None)
+
+    Notes
+    -----
+    This routine makes usage of torch's RNG. Thus, the resulting array needs to fit into the memory of a single
+    MPI-process.
+
+`ranf(shape: Optional[Tuple[int]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None)`
+: Alias for :func:`heat.random.random`.
+
+`sample(shape: Optional[Tuple[int]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None)`
+: Alias for :func:`heat.random.random`.
+
+`seed(seed: Optional[int] = None)`
+: Seed the random number generator.
+
+    Parameters
+    ----------
+    seed : int, optional
+        Value to seed the algorithm with, if not set a time-based seed is generated.
+
+`set_state(state: Tuple[str, int, int, int, int, float])`
+: Set the internal state of the generator from a tuple. The tuple has the following items:
+
+    1. The string 'Batchparallel' or 'Threefry', describing the type of random number generator,
+
+    2. The seed. For batchparallel RNG this refers to the global seed. For Threefry RNG the seed is the key value,
+
+    3. The local seed (for batchparallel RNG), or the internal counter value (for Threefry RNG), respectively.
+       (For batchparallel RNG, this value is ignored if a global seed is provided. If you want to prescribe a process-local
+       seed manually, you need to set the global seed to None.)
+
+    4. An integer ``has_gauss``, ignored (present for compatibility with numpy), optional, and
+
+    5. A float ``cached_gaussian``, ignored (present for compatibility with numpy), optional.
+
+    Parameters
+    ----------
+    state : Tuple[str, int, int, int, float]
+        Sets the state of the random generator to the passed values. Allows selecting seed and counter values manually.
+
+    Raises
+    ------
+    TypeError
+        If an improper state is passed.
+    ValueError
+        If one of the items in the state tuple is of wrong type or value.
+
+`standard_normal(shape: Optional[Tuple[int, ...]] = None, dtype: Type[datatype] = heat.core.types.float32, split: Optional[int] = None, device: Optional[str] = None, comm: Optional[Communication] = None) ‑> heat.core.dndarray.DNDarray`
+: Returns an array filled with random numbers from a standard normal distribution with zero mean and variance of one.
+
+    Parameters
+    ----------
+    shape : tuple[int]
+        The shape of the returned array, should all be positive. If no argument is given a single random sample is
+        generated.
+    dtype : Type[datatype], optional
+        The datatype of the returned values. Has to be one of :class:`~heat.core.types.float32` or
+        :class:`~heat.core.types.float64`.
+    split : int, optional
+        The axis along which the array is split and distributed, defaults to no distribution.
+    device : str, optional
+        Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on, defaults to globally
+        set default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+
+    See Also
+    --------
+    randn
+        Similar, but accepts separate arguments for the shape dimensions.
+    normal
+        Equivalent function with arguments for the mean and standard deviation.
+
+    Examples
+    --------
+    >>> ht.random.standard_normal((3,))
+    DNDarray([ 0.1921, -0.9635,  0.5047], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.random.standard_normal((4, 4))
+    DNDarray([[-1.1261,  0.5971,  0.2851,  0.9998],
+              [-1.8548, -1.2574,  0.2391, -0.3302],
+              [ 1.3365, -1.5212,  1.4159, -0.1671],
+              [ 0.1260,  1.2126, -0.0804,  0.0907]], dtype=ht.float32, device=cpu:0, split=None)
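+
+A sketch of the weak reproducibility offered by the default batch-parallel RNG (same global seed and same number of MPI processes assumed):
+
+```python
+import heat as ht
+
+ht.random.seed(1234)
+a = ht.random.randn(5, split=0)
+ht.random.seed(1234)  # re-seeding restarts the stream
+b = ht.random.randn(5, split=0)
+print(ht.equal(a, b))  # True when run with the same process count
+```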
diff --git a/doc/api/heat/core/relational.md b/doc/api/heat/core/relational.md
new file mode 100644
index 0000000000..5e098c5193
--- /dev/null
+++ b/doc/api/heat/core/relational.md
@@ -0,0 +1,302 @@
+Module heat.core.relational
+===========================
+Functions for relational operations, i.e. equal/not equal etc.
+
+Functions
+---------
+
+`eq(x, y) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise comparison.
+    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
+    compared as argument.
+    Returns False if the operands are not scalars or :class:`~heat.core.dndarray.DNDarray`.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand involved in the comparison
+    y: DNDarray or scalar
+        The second operand involved in the comparison
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.eq(x, 3.0)
+    DNDarray([[False, False],
+              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.eq(x, y)
+    DNDarray([[False,  True],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> ht.eq(x, slice(None))
+    False
+
+`equal(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> bool`
+: Overall comparison of equality between two :class:`~heat.core.dndarray.DNDarray`. Returns ``True`` if two arrays
+    have the same size and elements, and ``False`` otherwise.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand involved in the comparison
+    y: DNDarray or scalar
+        The second operand involved in the comparison
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.equal(x, ht.float32([[1, 2], [3, 4]]))
+    True
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.equal(x, y)
+    False
+    >>> ht.equal(x, 3.0)
+    False
+
+`ge(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich greater than or equal comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x>=y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared greater than or equal to second operand
+    y: DNDarray or scalar
+        The second operand to be compared less than or equal to first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.ge(x, 3.0)
+    DNDarray([[False, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.ge(x, y)
+    DNDarray([[False,  True],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+
+`greater(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich greater than comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x>y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared greater than second operand
+    y: DNDarray or scalar
+        The second operand to be compared less than first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.gt(x, 3.0)
+    DNDarray([[False, False],
+              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.gt(x, y)
+    DNDarray([[False, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+
+`greater_equal(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich greater than or equal comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x>=y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared greater than or equal to second operand
+    y: DNDarray or scalar
+        The second operand to be compared less than or equal to first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.ge(x, 3.0)
+    DNDarray([[False, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.ge(x, y)
+    DNDarray([[False,  True],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+
+`gt(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich greater than comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x>y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared greater than second operand
+    y: DNDarray or scalar
+        The second operand to be compared less than first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.gt(x, 3.0)
+    DNDarray([[False, False],
+              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.gt(x, y)
+    DNDarray([[False, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+
+`le(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Return a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich less than or equal comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x<=y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared less than or equal to second operand
+    y: DNDarray or scalar
+        The second operand to be compared greater than or equal to first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.le(x, 3.0)
+    DNDarray([[ True,  True],
+              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.le(x, y)
+    DNDarray([[ True,  True],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+
+`less(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich less than comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x<y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared less than second operand
+    y: DNDarray or scalar
+        The second operand to be compared greater than first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.lt(x, 3.0)
+    DNDarray([[ True,  True],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.lt(x, y)
+    DNDarray([[ True, False],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+
+`less_equal(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Return a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich less than or equal comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x<=y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared less than or equal to second operand
+    y: DNDarray or scalar
+        The second operand to be compared greater than or equal to first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.le(x, 3.0)
+    DNDarray([[ True,  True],
+              [ True, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.le(x, y)
+    DNDarray([[ True,  True],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+
+`lt(x: Union[DNDarray, float, int], y: Union[DNDarray, float, int]) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich less than comparison between values from operand ``x`` with respect to values of
+    operand ``y`` (i.e. ``x<y``), not commutative. Takes the first and second operand (scalar or
+    :class:`~heat.core.dndarray.DNDarray`) whose elements are to be compared as argument.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand to be compared less than second operand
+    y: DNDarray or scalar
+        The second operand to be compared greater than first operand
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.lt(x, 3.0)
+    DNDarray([[ True,  True],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.lt(x, y)
+    DNDarray([[ True, False],
+              [False, False]], dtype=ht.bool, device=cpu:0, split=None)
+
+`ne(x, y) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich comparison of non-equality between values from two operands, commutative.
+    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
+    compared as argument.
+    Returns True if the operands are not scalars or :class:`~heat.core.dndarray.DNDarray`.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand involved in the comparison
+    y: DNDarray or scalar
+        The second operand involved in the comparison
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.ne(x, 3.0)
+    DNDarray([[ True,  True],
+              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.ne(x, y)
+    DNDarray([[ True, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> ht.ne(x, slice(None))
+    True
+
+`not_equal(x, y) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the results of element-wise rich comparison of non-equality between values from two operands, commutative.
+    Takes the first and second operand (scalar or :class:`~heat.core.dndarray.DNDarray`) whose elements are to be
+    compared as argument.
+    Returns True if the operands are not scalars or :class:`~heat.core.dndarray.DNDarray`.
+
+    Parameters
+    ----------
+    x: DNDarray or scalar
+        The first operand involved in the comparison
+    y: DNDarray or scalar
+        The second operand involved in the comparison
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> x = ht.float32([[1, 2], [3, 4]])
+    >>> ht.ne(x, 3.0)
+    DNDarray([[ True,  True],
+              [False,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> y = ht.float32([[2, 2], [2, 2]])
+    >>> ht.ne(x, y)
+    DNDarray([[ True, False],
+              [ True,  True]], dtype=ht.bool, device=cpu:0, split=None)
+    >>> ht.ne(x, slice(None))
+    True
diff --git a/doc/api/heat/core/rounding.md b/doc/api/heat/core/rounding.md
new file mode 100644
index 0000000000..b7729428db
--- /dev/null
+++ b/doc/api/heat/core/rounding.md
@@ -0,0 +1,234 @@
+Module heat.core.rounding
+=========================
+Rounding functions for DNDarrays.
+
+Functions
+---------
+
+`abs(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None, dtype: Type[heat.core.types.datatype] | None = None) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` containing the element-wise absolute values of the input array ``x``.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the absolute value.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+    dtype : datatype, optional
+        Determines the data type of the output array. The values are cast to this type with potential loss of
+        precision.
+
+    Raises
+    ------
+    TypeError
+        If dtype is not a heat type.
+
+`absolute(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None, dtype: Type[heat.core.types.datatype] | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate the absolute value element-wise.
+    :func:`abs` is a shorthand for this function.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the absolute value.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+    dtype : datatype, optional
+        Determines the data type of the output array. The values are cast to this type with potential loss of
+        precision.
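+
+Neither :func:`abs` nor :func:`absolute` carries a doctest above; a minimal, illustrative sketch:
+
+```python
+import heat as ht
+
+x = ht.array([-3, -1, 2])
+y = ht.abs(x, dtype=ht.float32)  # element-wise |x|, cast to float32
+# y: DNDarray([3., 1., 2.], dtype=ht.float32, device=cpu:0, split=None)
+```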
+
+`ceil(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return the ceil of the input, element-wise. Result is a :class:`~heat.core.dndarray.DNDarray` of the same shape as
+    ``x``. The ceil of the scalar ``x`` is the smallest integer i, such that ``i>=x``. It is often denoted as
+    :math:`\lceil x \rceil`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the ceiled values.
+    out : DNDarray, optional
+        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+        or set to ``None``, a fresh array is allocated.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.ceil(ht.arange(-2.0, 2.0, 0.4))
+    DNDarray([-2., -1., -1., -0., -0.,  0.,  1.,  1.,  2.,  2.], dtype=ht.float32, device=cpu:0, split=None)
+
+`clip(x: heat.core.dndarray.DNDarray, min, max, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Returns a :class:`~heat.core.dndarray.DNDarray` with the elements of this array, but where values
+    ``< min`` are replaced with ``min``, and those ``> max`` with ``max``.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Array containing elements to clip.
+    min : scalar or None
+        Minimum value. If ``None``, clipping is not performed on the lower interval edge. Not more than one of
+        ``min`` and ``max`` may be ``None``.
+    max : scalar or None
+        Maximum value. If ``None``, clipping is not performed on the upper interval edge. Not more than one of
+        ``min`` and ``max`` may be ``None``.
+    out : DNDarray, optional
+        The results will be placed in this array. It may be the input array for in-place clipping. ``out`` must be of
+        the right shape to hold the output. Its type is preserved.
+
+    Raises
+    ------
+    ValueError
+        If both ``min`` and ``max`` are ``None``.
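+
+The :func:`clip` entry above likewise has no example; a minimal sketch:
+
+```python
+import heat as ht
+
+a = ht.arange(10)
+b = ht.clip(a, 2, 7)  # values below 2 become 2, values above 7 become 7
+# b: DNDarray([2, 2, 2, 3, 4, 5, 6, 7, 7, 7], dtype=ht.int32, device=cpu:0, split=None)
+```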
+
+`fabs(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate the absolute value element-wise and return a floating-point :class:`~heat.core.dndarray.DNDarray`.
+    This function exists besides ``abs == absolute`` since it will be needed in case complex numbers are introduced
+    in the future.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the absolute value.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+
+`floor(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return the floor of the input, element-wise.
+    The floor of the scalar ``x`` is the largest integer i, such that ``i<=x``.
+    It is often denoted as :math:`\lfloor x \rfloor`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the floored values.
+    out : DNDarray, optional
+        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+        or set to ``None``, a fresh :class:`~heat.core.dndarray.DNDarray` is allocated.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.floor(ht.arange(-2.0, 2.0, 0.4))
+    DNDarray([-2., -2., -2., -1., -1.,  0.,  0.,  0.,  1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
+
+`modf(x: heat.core.dndarray.DNDarray, out: Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray] | None = None) ‑> Tuple[heat.core.dndarray.DNDarray, heat.core.dndarray.DNDarray]`
+: Return the fractional and integral parts of a :class:`~heat.core.dndarray.DNDarray`, element-wise.
+    The fractional and integral parts are negative if the given number is negative.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+    out : Tuple[DNDarray, DNDarray], optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+
+    Raises
+    ------
+    TypeError
+        if ``x`` is not a :class:`~heat.core.dndarray.DNDarray`
+    TypeError
+        if ``out`` is neither ``None`` nor a tuple of :class:`~heat.core.dndarray.DNDarray`
+    ValueError
+        if ``out`` is a tuple of length unequal to 2
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.modf(ht.arange(-2.0, 2.0, 0.4))
+    (DNDarray([ 0.0000, -0.6000, -0.2000, -0.8000, -0.4000,  0.0000,  0.4000,  0.8000,  0.2000,  0.6000], dtype=ht.float32, device=cpu:0, split=None), DNDarray([-2., -1., -1., -0., -0.,  0.,  0.,  0.,  1.,  1.], dtype=ht.float32, device=cpu:0, split=None))
+
+`round(x: heat.core.dndarray.DNDarray, decimals: int = 0, out: heat.core.dndarray.DNDarray | None = None, dtype: Type[heat.core.types.datatype] | None = None) ‑> heat.core.dndarray.DNDarray`
+: Calculate the rounded value element-wise.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the rounded value.
+    decimals : int, optional
+        Number of decimal places to round to.
+        If decimals is negative, it specifies the number of positions to the left of the decimal point.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+    dtype : datatype, optional
+        Determines the data type of the output array. The values are cast to this type with potential loss of
+        precision.
+
+    Raises
+    ------
+    TypeError
+        if dtype is not a heat data type
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.round(ht.arange(-2.0, 2.0, 0.4))
+    DNDarray([-2., -2., -1., -1., -0.,  0.,  0.,  1.,  1.,  2.], dtype=ht.float32, device=cpu:0, split=None)
+
+`sgn(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Returns an indication of the sign of a number, element-wise. The definition for complex values is equivalent to :math:`x / |x|`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+    out : DNDarray, optional
+        A location in which to store the results.
+
+    See Also
+    --------
+    :func:`sign`
+        Equivalent function on non-complex arrays. The definition for complex values is equivalent to :math:`x / \sqrt{x \cdot x}`
+
+    Examples
+    --------
+    >>> a = ht.array([-1, -0.5, 0, 0.5, 1])
+    >>> ht.sign(a)
+    DNDarray([-1., -1.,  0.,  1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.sgn(ht.array([5 - 2j, 3 + 4j]))
+    DNDarray([(0.9284766912460327-0.3713906705379486j), (0.6000000238418579+0.800000011920929j)], dtype=ht.complex64, device=cpu:0, split=None)
+
+`sign(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Returns an indication of the sign of a number, element-wise. The definition for complex values is equivalent to :math:`x / \sqrt{x \cdot x}`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array
+    out : DNDarray, optional
+        A location in which to store the results.
+
+    See Also
+    --------
+    :func:`sgn`
+        Equivalent function on non-complex arrays. The definition for complex values is equivalent to :math:`x / |x|`.
+
+    Examples
+    --------
+    >>> a = ht.array([-1, -0.5, 0, 0.5, 1])
+    >>> ht.sign(a)
+    DNDarray([-1., -1.,  0.,  1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.sign(ht.array([5 - 2j, 3 + 4j]))
+    DNDarray([(1+0j), (1+0j)], dtype=ht.complex64, device=cpu:0, split=None)
+
+`trunc(x: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return the trunc of the input, element-wise.
+    The truncated value of the scalar ``x`` is the nearest integer ``i`` which is closer to zero than ``x`` is. In short,
+    the fractional part of the signed number ``x`` is discarded.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The array for which to compute the truncated values.
+    out : DNDarray, optional
+        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+        or set to ``None``, a fresh array is allocated.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.trunc(ht.arange(-2.0, 2.0, 0.4))
+    DNDarray([-2., -1., -1., -0., -0.,  0.,  0.,  0.,  1.,  1.], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/sanitation.md b/doc/api/heat/core/sanitation.md
new file mode 100644
index 0000000000..ba9345d11b
--- /dev/null
+++ b/doc/api/heat/core/sanitation.md
@@ -0,0 +1,133 @@
+Module heat.core.sanitation
+===========================
+Collection of validation/sanitation routines.
+
+Functions
+---------
+
+`sanitize_distribution(*args: DNDarray, target: DNDarray, diff_map: torch.Tensor = None) ‑> Union[DNDarray, Tuple(DNDarray)]`
+: Distribute every `arg` according to `target.lshape_map` or, if provided, `diff_map`.
+    After this sanitation, the lshapes are compatible along the split dimension.
+    ``args`` may contain non-distributed DNDarrays; they will be split afterwards if `target` is split.
+
+    Parameters
+    ----------
+    args : DNDarray
+        DNDarrays to be distributed
+    target : DNDarray
+        DNDarray used to sanitize the metadata and, if ``diff_map`` is not given, to determine the resulting
+        distribution.
+    diff_map : torch.Tensor, optional
+        An alternative lshape_map. Overrides the distribution of the target array.
+        Used in cases when the target array does not correspond to the actually wanted distribution,
+        e.g. because it only contains a single element along the split axis and gets broadcast.
+
+    Raises
+    ------
+    TypeError
+        When an argument is not a ``DNDarray`` or ``None``.
+    ValueError
+        When the split-axes or sizes along the split-axis do not match.
+
+    See Also
+    --------
+    :func:`~heat.core.dndarray.create_lshape_map`
+        Function to create the lshape_map.
+
+`sanitize_in(x: Any)`
+: Verify that the input object is a ``DNDarray``.
+
+    Parameters
+    ----------
+    x : Any
+        Input object
+
+    Raises
+    ------
+    TypeError
+        When ``x`` is not a ``DNDarray``.
+
+`sanitize_in_tensor(x: Any)`
+: Verify that the input object is a ``torch.Tensor``.
+
+    Parameters
+    ----------
+    x : Any
+        Input object.
+
+    Raises
+    ------
+    TypeError
+        When ``x`` is not a ``torch.Tensor``.
+
+`sanitize_infinity(x: Union[DNDarray, torch.Tensor]) ‑> int | float`
+: Returns the largest possible value for the ``dtype`` of the input array.
+
+    Parameters
+    ----------
+    x : Union[DNDarray, torch.Tensor]
+        Input object.
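+
+These validators are meant to be called at the top of array routines. A hedged sketch of typical usage (`my_clamp` is a hypothetical helper, not part of heat):
+
+```python
+import heat as ht
+from heat.core import sanitation
+
+def my_clamp(x):
+    sanitation.sanitize_in(x)  # raises TypeError unless x is a DNDarray
+    cap = sanitation.sanitize_infinity(x)  # largest value for x's dtype
+    return ht.clip(x, -cap, cap)  # clip from heat.core.rounding, see above
+
+y = my_clamp(ht.array([1.0, -2.0, 3.0]))
+```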
+
+`sanitize_lshape(array: DNDarray, tensor: torch.Tensor)`
+: Verify shape consistency when manipulating process-local arrays.
+
+    Parameters
+    ----------
+    array : DNDarray
+        the original, potentially distributed ``DNDarray``
+    tensor : torch.Tensor
+        process-local data meant to replace ``array.larray``
+
+    Raises
+    ------
+    ValueError
+        if the shape of the local ``torch.Tensor`` is inconsistent with the global ``DNDarray``.
+
+`sanitize_out(out: DNDarray, output_shape: Tuple, output_split: int, output_device: str, output_comm: Communication = None)`
+: Validate output buffer ``out``.
+
+    Parameters
+    ----------
+    out : DNDarray
+        the `out` buffer where the result of some operation will be stored
+    output_shape : Tuple
+        the calculated shape returned by the operation
+    output_split : int
+        the calculated split axis returned by the operation
+    output_device : str
+        ``"cpu"`` or ``"gpu"``, matching the location of the data
+    output_comm : Communication
+        Communication object of the result of the operation
+
+    Raises
+    ------
+    TypeError
+        if ``out`` is not a ``DNDarray``.
+    ValueError
+        if the shape, split direction, or device of the output buffer ``out`` do not match the operation result.
+
+`sanitize_sequence(seq: Union[Sequence[int, ...], Sequence[float, ...], DNDarray, torch.Tensor]) ‑> List`
+: Check if a sequence is valid, return a list.
+
+    Parameters
+    ----------
+    seq : Union[Sequence[int, ...], Sequence[float, ...], DNDarray, torch.Tensor]
+        Input sequence.
+
+    Raises
+    ------
+    TypeError
+        if ``seq`` is neither a list nor a tuple
+
+`scalar_to_1d(x: DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Turn a scalar ``DNDarray`` into a 1-D ``DNDarray`` with 1 element.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The scalar array to expand; must have ``x.ndim == 0``.
diff --git a/doc/api/heat/core/signal.md b/doc/api/heat/core/signal.md
new file mode 100644
index 0000000000..5937afcdbd
--- /dev/null
+++ b/doc/api/heat/core/signal.md
@@ -0,0 +1,102 @@
+Module heat.core.signal
+=======================
+Provides a collection of signal-processing operations.
+
+Functions
+---------
+
+`convolve(a: heat.core.dndarray.DNDarray, v: heat.core.dndarray.DNDarray, mode: str = 'full', stride: int = 1) ‑> heat.core.dndarray.DNDarray`
+: Returns the discrete, linear convolution of two one-dimensional `DNDarray`s or scalars.
+    Unlike `numpy.convolve`, if ``a`` and/or ``v`` have more than one dimension, batch-convolution along the last
+    dimension will be attempted. See `Examples` below.
+
+    Parameters
+    ----------
+    a : DNDarray or scalar
+        One- or N-dimensional signal ``DNDarray`` of shape (..., N), or scalar. If ``a`` has more than one dimension, it will be treated as a batch of 1D signals.
+        Distribution along the batch dimension is required for distributed batch processing. See the examples for details.
+    v : DNDarray or scalar
+        One- or N-dimensional filter weight `DNDarray` of shape (..., M), or scalar. If ``v`` has more than one dimension, it will be treated as a batch of 1D filter weights.
+        The batch dimension(s) of ``v`` must match the batch dimension(s) of ``a``.
+    mode : str
+        Can be 'full', 'valid', or 'same'. Default is 'full'.
+        'full':
+            Returns the convolution at
+            each point of overlap, with a length of '(N+M-2)//stride+1'. At
+            the end-points of the convolution, the signals do not overlap
+            completely, and boundary effects may be seen.
+        'same':
+            Mode 'same' returns output of length 'N'. Boundary
+            effects are still visible. This mode is not supported for
+            even-sized filter weights.
+        'valid':
+            Mode 'valid' returns output of length '(N-M)//stride+1'. The
+            convolution product is only given for points where the signals
+            overlap completely.
Values outside the signal boundary have no + effect. + stride : int + Stride of the convolution. Must be a positive integer. Default is 1. + Stride must be 1 for mode 'same'. + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> a = ht.ones(5) + >>> v = ht.arange(3).astype(ht.float) + >>> ht.convolve(a, v, mode="full") + DNDarray([0., 1., 3., 3., 3., 3., 2.]) + >>> ht.convolve(a, v, mode="same") + DNDarray([1., 3., 3., 3., 3.]) + >>> ht.convolve(a, v, mode="valid") + DNDarray([3., 3., 3.]) + >>> ht.convolve(a, v, stride=2) + DNDarray([0., 3., 3., 2.]) + >>> ht.convolve(a, v, mode="valid", stride=2) + DNDarray([3., 3.]) + + >>> a = ht.ones(10, split=0) + >>> v = ht.arange(3, split=0).astype(ht.float) + >>> ht.convolve(a, v, mode="valid") + DNDarray([3., 3., 3., 3., 3., 3., 3., 3.]) + + [0/3] DNDarray([3., 3., 3.]) + [1/3] DNDarray([3., 3., 3.]) + [2/3] DNDarray([3., 3.]) + + >>> a = ht.ones(10, split=0) + >>> v = ht.arange(3, split=0) + >>> ht.convolve(a, v) + DNDarray([0., 1., 3., 3., 3., 3., 3., 3., 3., 3., 3., 2.], dtype=ht.float32, device=cpu:0, split=0) + + [0/3] DNDarray([0., 1., 3., 3.]) + [1/3] DNDarray([3., 3., 3., 3.]) + [2/3] DNDarray([3., 3., 3., 2.]) + + >>> a = ht.arange(50, dtype=ht.float64, split=0) + >>> a = a.reshape(10, 5) # 10 signals of length 5 + >>> v = ht.arange(3) + >>> ht.convolve(a, v) # batch processing: 10 signals convolved with filter v + DNDarray([[ 0., 0., 1., 4., 7., 10., 8.], + [ 0., 5., 16., 19., 22., 25., 18.], + [ 0., 10., 31., 34., 37., 40., 28.], + [ 0., 15., 46., 49., 52., 55., 38.], + [ 0., 20., 61., 64., 67., 70., 48.], + [ 0., 25., 76., 79., 82., 85., 58.], + [ 0., 30., 91., 94., 97., 100., 68.], + [ 0., 35., 106., 109., 112., 115., 78.], + [ 0., 40., 121., 124., 127., 130., 88.], + [ 0., 45., 136., 139., 142., 145., 98.]], dtype=ht.float64, device=cpu:0, split=0) + + >>> v = ht.random.randint(0, 3, (10, 3), split=0) # 10 filters of length 3 + >>> ht.convolve(a, v) # batch processing: 10 signals convolved with 10 filters + DNDarray([[ 0., 0., 2., 4., 6., 8., 0.], + [ 5., 6., 7., 8., 9., 0., 0.], + [ 20., 42., 56., 61., 66., 41., 14.], + [ 0., 15., 16., 17., 18., 19., 0.], + [ 20., 61., 64., 67., 70., 48., 0.], + [ 50., 52., 104., 108., 112., 56., 58.], + [ 0., 30., 61., 63., 65., 67., 34.], + [ 35., 106., 109., 112., 115., 78., 0.], + [ 0., 40., 81., 83., 85., 87., 44.], + [ 0., 0., 45., 46., 47., 48., 49.]], dtype=ht.float64, device=cpu:0, split=0) diff --git a/doc/api/heat/core/statistics.md b/doc/api/heat/core/statistics.md new file mode 100644 index 0000000000..ad2d6c3708 --- /dev/null +++ b/doc/api/heat/core/statistics.md @@ -0,0 +1,730 @@ +Module heat.core.statistics +=========================== +Distributed statistical operations. + +Functions +--------- + +`argmax(x: heat.core.dndarray.DNDarray, axis: int | None = None, out: heat.core.dndarray.DNDarray | None = None, **kwargs: object) ‑> heat.core.dndarray.DNDarray` +: Returns an array of the indices of the maximum values along an axis. It has the same shape as ``x.shape`` with the + dimension along axis removed. + + Parameters + ---------- + x : DNDarray + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise along the specified axis. + out : DNDarray, optional. + If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. 
+    **kwargs
+        Extra keyword arguments.
+
+    Examples
+    --------
+    >>> a = ht.random.randn(3, 3)
+    >>> a
+    DNDarray([[ 1.0661,  0.7036, -2.0908],
+              [-0.7534, -0.4986, -0.7751],
+              [-0.4815,  1.9436,  0.6400]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.argmax(a)
+    DNDarray([7], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.argmax(a, axis=0)
+    DNDarray([0, 2, 2], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.argmax(a, axis=1)
+    DNDarray([0, 1, 1], dtype=ht.int64, device=cpu:0, split=None)
+
+`argmin(x: heat.core.dndarray.DNDarray, axis: int | None = None, out: heat.core.dndarray.DNDarray | None = None, **kwargs: object) ‑> heat.core.dndarray.DNDarray`
+: Returns an array of the indices of the minimum values along an axis. It has the same shape as ``x.shape`` with the
+    dimension along axis removed.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise along the specified axis.
+    out : DNDarray, optional
+        If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype
+        (see issue #100).
+    **kwargs
+        Extra keyword arguments.
+
+    Examples
+    --------
+    >>> a = ht.random.randn(3, 3)
+    >>> a
+    DNDarray([[ 1.0661,  0.7036, -2.0908],
+              [-0.7534, -0.4986, -0.7751],
+              [-0.4815,  1.9436,  0.6400]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.argmin(a)
+    DNDarray([2], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.argmin(a, axis=0)
+    DNDarray([1, 1, 0], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.argmin(a, axis=1)
+    DNDarray([2, 2, 0], dtype=ht.int64, device=cpu:0, split=None)
+
+`average(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int, ...] | None = None, weights: heat.core.dndarray.DNDarray | None = None, returned: bool = False) ‑> heat.core.dndarray.DNDarray | Tuple[heat.core.dndarray.DNDarray, ...]`
+: Compute the weighted average along the specified axis.
+
+    If ``returned=True``, return a tuple with the average as the first element and the sum
+    of the weights as the second element. ``sum_of_weights`` is of the same type as ``average``.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Array containing data to be averaged.
+    axis : None or int or Tuple[int,...], optional
+        Axis or axes along which to average ``x``. The default,
+        ``axis=None``, will average over all of the elements of the input array.
+        If axis is negative it counts from the last to the first axis.
+        (TODO, issue #351: if ``axis`` is a tuple of ints, averaging will be performed over all of the axes
+        specified in the tuple instead of a single axis or all the axes as before.)
+    weights : DNDarray, optional
+        An array of weights associated with the values in ``x``. Each value in
+        ``x`` contributes to the average according to its associated weight.
+        The weights array can either be 1D (in which case its length must be
+        the size of ``x`` along the given axis) or of the same shape as ``x``.
+        If ``weights=None``, then all data in ``x`` are assumed to have a
+        weight equal to one, the result is equivalent to :func:`mean`.
+    returned : bool, optional
+        If ``True``, the tuple ``(average, sum_of_weights)``
+        is returned, otherwise only the average is returned.
+        If ``weights=None``, ``sum_of_weights`` is equivalent to the number of
+        elements over which the average is taken.
+
+    Raises
+    ------
+    ZeroDivisionError
+        When all weights along axis are zero.
+    TypeError
+        When the length of 1D weights is not the same as the shape of ``x``
+        along axis.
+    Examples
+    --------
+    >>> data = ht.arange(1, 5, dtype=float)
+    >>> data
+    DNDarray([1., 2., 3., 4.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.average(data)
+    DNDarray(2.5000, dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.average(ht.arange(1, 11, dtype=float), weights=ht.arange(10, 0, -1))
+    DNDarray([4.], dtype=ht.float64, device=cpu:0, split=None)
+    >>> data = ht.array([[0, 1],
+    ...                  [2, 3],
+    ...                  [4, 5]], dtype=float, split=1)
+    >>> weights = ht.array([1.0 / 4, 3.0 / 4])
+    >>> ht.average(data, axis=1, weights=weights)
+    DNDarray([0.7500, 2.7500, 4.7500], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.average(data, weights=weights)
+    Traceback (most recent call last):
+        ...
+    TypeError: Axis must be specified when shapes of x and weights differ.
+
+`bincount(x: heat.core.dndarray.DNDarray, weights: heat.core.dndarray.DNDarray | None = None, minlength: int = 0) ‑> heat.core.dndarray.DNDarray`
+:   Count the number of occurrences of each value in an array of non-negative ints. Returns a
+    non-distributed ``DNDarray`` of length ``max(x) + 1`` if the input is non-empty, else 0.
+
+    The number of bins (each of size 1) is one larger than the largest value in ``x``,
+    unless ``x`` is empty, in which case the result is a tensor of size 0.
+    If `minlength` is specified, the number of bins is at least `minlength`, and
+    if `x` is empty, then the result is a tensor of size `minlength` filled with zeros.
+    If `n` is the value at position `i`, `out[n] += weights[i]` if weights is specified, else `out[n] += 1`.
+
+    Parameters
+    ----------
+    x : DNDarray
+        1-dimensional, non-negative ints
+    weights : DNDarray, optional
+        Weight for each value in the input tensor. Array of the same shape as `x`. Same split as `x`.
+    minlength : int, non-negative, optional
+        Minimum number of bins
+
+    Raises
+    ------
+    ValueError
+        If `x` and `weights` don't have the same distribution.
+
+    Examples
+    --------
+    >>> ht.bincount(ht.arange(5))
+    DNDarray([1, 1, 1, 1, 1], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.bincount(ht.array([0, 1, 3, 2, 1]), weights=ht.array([0, 0.5, 1, 1.5, 2]))
+    DNDarray([0.0000, 2.5000, 1.5000, 1.0000], dtype=ht.float32, device=cpu:0, split=None)
+
+`bucketize(input: heat.core.dndarray.DNDarray, boundaries: heat.core.dndarray.DNDarray | torch.Tensor, out_int32: bool = False, right: bool = False, out: heat.core.dndarray.DNDarray = None) ‑> heat.core.dndarray.DNDarray`
+:   Returns the indices of the buckets to which each value in the input belongs, where the boundaries of the buckets are set by ``boundaries``.
+
+    Parameters
+    ----------
+    input : DNDarray
+        The input array.
+    boundaries : DNDarray or torch.Tensor
+        Monotonically increasing sequence defining the bucket boundaries, 1-dimensional, not distributed.
+    out_int32 : bool, optional
+        Set the dtype of the output to ``ht.int64`` (``False``) or ``ht.int32`` (``True``).
+    right : bool, optional
+        Indicate whether the buckets include the right (``False``) or left (``True``) boundaries, see Notes.
+    out : DNDarray, optional
+        The output array, must be the same shape and split as the input array.
+
+    Notes
+    -----
+    This function uses PyTorch's setting for ``right``:
+
+    ===== ====================================
+    right returned index `i` satisfies
+    ===== ====================================
+    False boundaries[i-1] < x <= boundaries[i]
+    True  boundaries[i-1] <= x < boundaries[i]
+    ===== ====================================
+
+    Raises
+    ------
+    RuntimeError
+        If `boundaries` is distributed.
+
+    See Also
+    --------
+    digitize
+        NumPy-like version of this function.
+
+    Examples
+    --------
+    >>> boundaries = ht.array([1, 3, 5, 7, 9])
+    >>> v = ht.array([[3, 6, 9], [3, 6, 9]])
+    >>> ht.bucketize(v, boundaries)
+    DNDarray([[1, 3, 4],
+              [1, 3, 4]], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.bucketize(v, boundaries, right=True)
+    DNDarray([[2, 3, 5],
+              [2, 3, 5]], dtype=ht.int64, device=cpu:0, split=None)
+
+`cov(m: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray | None = None, rowvar: bool = True, bias: bool = False, ddof: int | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Estimate the covariance matrix of some data, ``m``. For more information on the algorithm, please see the NumPy function of the same name.
+
+    Parameters
+    ----------
+    m : DNDarray
+        A 1-D or 2-D array containing multiple variables and observations. Each row of ``m`` represents a variable, and each column a single
+        observation of all those variables.
+    y : DNDarray, optional
+        An additional set of variables and observations. ``y`` has the same form as that of ``m``.
+    rowvar : bool, optional
+        If ``True`` (default), then each row represents a variable, with observations in the columns. Otherwise, the relationship
+        is transposed: each column represents a variable, while the rows contain observations.
+    bias : bool, optional
+        Default normalization (``False``) is by (N - 1), where N is the number of observations given (unbiased estimate).
+        If ``True``, then normalization is by N. These values can be overridden by using the keyword ``ddof``.
+    ddof : int, optional
+        If not ``None``, the default value implied by ``bias`` is overridden. Note that ``ddof=1`` will return the unbiased estimate and
+        ``ddof=0`` will return the simple average.
+
+`digitize(x: heat.core.dndarray.DNDarray, bins: heat.core.dndarray.DNDarray | torch.Tensor, right: bool = False) ‑> heat.core.dndarray.DNDarray`
+:   Return the indices of the bins to which each value in the input array `x` belongs.
+    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate.
+
+    Parameters
+    ----------
+    x : DNDarray
+        The input array.
+    bins : DNDarray or torch.Tensor
+        A 1-dimensional array containing a monotonic sequence describing the bin boundaries, not distributed.
+    right : bool, optional
+        Indicates whether the intervals include the right or the left bin edge, see Notes.
+
+    Notes
+    -----
+    This function uses NumPy's setting for ``right``:
+
+    ===== ============= ============================
+    right order of bins returned index `i` satisfies
+    ===== ============= ============================
+    False increasing    bins[i-1] <= x < bins[i]
+    True  increasing    bins[i-1] < x <= bins[i]
+    False decreasing    bins[i-1] > x >= bins[i]
+    True  decreasing    bins[i-1] >= x > bins[i]
+    ===== ============= ============================
+
+    Raises
+    ------
+    RuntimeError
+        If `bins` is distributed.
+
+    See Also
+    --------
+    bucketize
+        PyTorch-like version of this function.
+
+    Examples
+    --------
+    >>> x = ht.array([1.2, 10.0, 12.4, 15.5, 20.0])
+    >>> bins = ht.array([0, 5, 10, 15, 20])
+    >>> ht.digitize(x, bins, right=True)
+    DNDarray([1, 2, 3, 4, 4], dtype=ht.int64, device=cpu:0, split=None)
+    >>> ht.digitize(x, bins, right=False)
+    DNDarray([1, 3, 3, 4, 5], dtype=ht.int64, device=cpu:0, split=None)
+
+`histc(input: heat.core.dndarray.DNDarray, bins: int = 100, min: int = 0, max: int = 0, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Return the histogram of a DNDarray.
+
+    The elements are sorted into equal-width bins between ``min`` and ``max``.
+    If ``min`` and ``max`` are equal, the minimum and maximum values of the data are used.
+    Elements lower than ``min`` and higher than ``max`` are ignored.
+
+    Parameters
+    ----------
+    input : DNDarray
+        The input array, must be of float type.
+    bins : int, optional
+        Number of histogram bins.
+    min : int, optional
+        Lower end of the range (inclusive).
+    max : int, optional
+        Upper end of the range (inclusive).
+    out : DNDarray, optional
+        The output tensor, same dtype as ``input``.
+
+    Examples
+    --------
+    >>> ht.histc(ht.array([1.0, 2, 1]), bins=4, min=0, max=3)
+    DNDarray([0., 2., 1., 0.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.histc(ht.arange(10, dtype=ht.float64, split=0), bins=10)
+    DNDarray([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], dtype=ht.float64, device=cpu:0, split=None)
+
+`histogram(a: heat.core.dndarray.DNDarray, bins: int = 10, range: Tuple[int, int] = (0, 0), normed: bool | None = None, weights: heat.core.dndarray.DNDarray | None = None, density: bool | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Compute the histogram of a DNDarray.
+
+    Parameters
+    ----------
+    a : DNDarray
+        The input array, must be of float type.
+    bins : int, optional
+        Number of histogram bins.
+    range : Tuple[int,int], optional
+        Lower and upper end of the bins. If not provided, range is simply ``(a.min(), a.max())``.
+    normed : bool, optional
+        Deprecated since NumPy version 1.6; accepted for compatibility only.
+    weights : DNDarray, optional
+        Array of weights. Not implemented yet.
+    density : bool, optional
+        Not implemented yet.
+
+    Notes
+    -----
+    This is a wrapper function of :func:`histc` for some basic compatibility with the NumPy API.
+
+    See Also
+    --------
+    :func:`histc`
+
+`kurtosis(x: heat.core.dndarray.DNDarray, axis: int | None = None, unbiased: bool = True, Fischer: bool = True) ‑> heat.core.dndarray.DNDarray`
+:   Compute the kurtosis (Fisher or Pearson) of a dataset.
+    Kurtosis is the fourth central moment divided by the square of the variance.
+    If Fisher's definition is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution.
+
+    If ``unbiased`` is ``True`` (default), the kurtosis is calculated using k-statistics to
+    eliminate bias coming from biased moment estimators.
+
+    Parameters
+    ----------
+    x : ht.DNDarray
+        Input array.
+    axis : None or int
+        Axis along which the kurtosis is calculated. Default is to compute over the whole array ``x``.
+    unbiased : bool
+        If ``True`` (default), the calculations are corrected for bias.
+    Fischer : bool
+        Whether to use Fisher's definition or not. If ``True``, 3.0 is subtracted from the result.
+
+    Warnings
+    --------
+    Depending on the given axis and the split configuration, a ``UserWarning`` may be raised during this function as data is transferred between processes.
+
+`max(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int, ...] | None = None, out: heat.core.dndarray.DNDarray | None = None, keepdims: bool | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Return the maximum along a given axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...], optional
+        Axis or axes along which to operate. By default, flattened input is used.
+        If this is a tuple of ints, the maximum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : DNDarray, optional
+        Tuple of two output arrays ``(max, max_indices)``. Must be of the same shape and buffer length as the expected
+        output.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result will broadcast correctly against the original array.
+
+    Examples
+    --------
+    >>> a = ht.float32([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+    >>> ht.max(a)
+    DNDarray([12.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.max(a, axis=0)
+    DNDarray([10., 11., 12.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.max(a, axis=1)
+    DNDarray([ 3.,  6.,  9., 12.], dtype=ht.float32, device=cpu:0, split=None)
+
+`maximum(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Compares two ``DNDarrays`` and returns a new :class:`~heat.core.dndarray.DNDarray` containing the element-wise maxima.
+    The ``DNDarrays`` must have the same shape, or shapes that can be broadcast to a single shape.
+    For broadcasting semantics, see: https://pytorch.org/docs/stable/notes/broadcasting.html
+    If one of the elements being compared is ``NaN``, then that element is returned;
+    if both elements are ``NaN``, then the first is returned.
+    The latter distinction is important for complex NaNs, which are defined as at least one of the real or
+    imaginary parts being ``NaN``. The net effect is that NaNs are propagated.
+
+    Parameters
+    ----------
+    x1 : DNDarray
+        The first array containing the elements to be compared.
+    x2 : DNDarray
+        The second array containing the elements to be compared.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> a = ht.random.randn(3, 4)
+    >>> a
+    DNDarray([[ 0.2701, -0.6993,  1.2197,  0.0579],
+              [ 0.6815,  0.4722, -0.3947, -0.3030],
+              [ 1.0101, -1.2460, -1.3953, -0.6879]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> b = ht.random.randn(3, 4)
+    >>> b
+    DNDarray([[ 0.9664,  0.6159, -0.8555,  0.8204],
+              [-1.2200, -0.0759,  0.0437,  0.4700],
+              [ 1.2271,  1.0530,  0.1095,  0.8386]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.maximum(a, b)
+    DNDarray([[0.9664, 0.6159, 1.2197, 0.8204],
+              [0.6815, 0.4722, 0.0437, 0.4700],
+              [1.2271, 1.0530, 0.1095, 0.8386]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> c = ht.random.randn(1, 4)
+    >>> c
+    DNDarray([[-0.5363, -0.9765,  0.4099,  0.3520]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.maximum(a, c)
+    DNDarray([[ 0.2701, -0.6993,  1.2197,  0.3520],
+              [ 0.6815,  0.4722,  0.4099,  0.3520],
+              [ 1.0101, -0.9765,  0.4099,  0.3520]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> d = ht.random.randn(3, 4, 5)
+    >>> ht.maximum(a, d)
+    Traceback (most recent call last):
+        ...
+    ValueError: operands could not be broadcast, input shapes (3, 4) (3, 4, 5)
+
+`mean(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int, ...] | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Calculates and returns the mean of a ``DNDarray``.
+    If an axis is given, the mean is taken along that axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Values for which the mean is calculated.
+        The dtype of ``x`` must be a float type.
+    axis : None or int or iterable
+        Axis along which the mean is taken. Default ``None`` calculates the mean of all data items.
+
+    Notes
+    -----
+    Split semantics when ``axis`` is an integer:
+
+    - if ``axis==x.split``, then ``mean(x).split=None``
+
+    - if ``axis>x.split``, then ``mean(x).split=x.split``
+
+    - if ``axis<x.split``, then ``mean(x).split=x.split-1``
+
+    Examples
+    --------
+    >>> a = ht.random.randn(1, 3)
+    >>> a
+    DNDarray([[-0.1164,  1.0446, -0.4093]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a)
+    DNDarray(0.1730, dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[-1.0585,  0.7541, -1.1011,  0.5009],
+              [-1.3575,  0.3344,  0.4506,  0.7379],
+              [-0.4337, -0.6516, -1.3690, -0.8772],
+              [ 0.6929, -1.0989, -0.9961,  0.3547]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, 1)
+    DNDarray([-0.2262,  0.0413, -0.8328, -0.2619], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, 0)
+    DNDarray([-0.5392, -0.1655, -0.7539,  0.1791], dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[-0.1441,  0.5016,  0.8907,  0.6318],
+              [-1.1690, -1.2657,  1.4840, -0.1014],
+              [ 0.4133,  1.4168,  1.3499,  1.0340],
+              [-0.9236, -0.7535, -0.2466, -0.9703]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.mean(a, (0, 1))
+    DNDarray(0.1342, dtype=ht.float32, device=cpu:0, split=None)
+
+`median(x: heat.core.dndarray.DNDarray, axis: int | None = None, keepdims: bool = False, sketched: bool = False, sketch_size: float | None = 1.0) ‑> heat.core.dndarray.DNDarray`
+:   Compute the median of the data along the specified axis.
+    Returns the median of the ``DNDarray`` elements.
+    By default, the "true" median of the entire data set is computed; however, the argument
+    ``sketched`` allows switching to a faster but less accurate version that computes
+    the median only on a random subset of the data set (a "sketch").
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input tensor.
+    axis : int or None, optional
+        Axis along which the median is computed. Default is ``None``, i.e.,
+        the median is computed along a flattened version of the ``DNDarray``.
+    keepdims : bool, optional
+        If ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result can broadcast correctly against the original array ``a``.
+    sketched : bool, optional
+        If ``True``, the median is computed on a random subset of the data set (a "sketch").
+        This is faster but less accurate. Default is ``False``. The size of the sketch is controlled by the argument ``sketch_size``.
+    sketch_size : float, optional
+        The size of the sketch as a fraction of the data set size. Default is ``1./n_proc``, where ``n_proc`` is the number of MPI processes, e.g. ``n_proc = MPI.COMM_WORLD.size``. Must be in the range (0, 1).
+        Ignored for ``sketched=False``.
+
+`min(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int, ...] | None = None, out: heat.core.dndarray.DNDarray | None = None, keepdims: bool | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Return the minimum along a given axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array.
+    axis : None or int or Tuple[int,...]
+        Axis or axes along which to operate. By default, flattened input is used.
+        If this is a tuple of ints, the minimum is selected over multiple axes,
+        instead of a single axis or all the axes as before.
+    out : Tuple[DNDarray,DNDarray], optional
+        Tuple of two output arrays ``(min, min_indices)``. Must be of the same shape and buffer length as the expected
+        output.
+    keepdims : bool, optional
+        If this is set to ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result will broadcast correctly against the original array.
+
+    Examples
+    --------
+    >>> a = ht.float32([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
+    >>> ht.min(a)
+    DNDarray([1.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.min(a, axis=0)
+    DNDarray([1., 2., 3.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.min(a, axis=1)
+    DNDarray([ 1.,  4.,  7., 10.], dtype=ht.float32, device=cpu:0, split=None)
+
+`minimum(x1: heat.core.dndarray.DNDarray, x2: heat.core.dndarray.DNDarray, out: heat.core.dndarray.DNDarray | None = None) ‑> heat.core.dndarray.DNDarray`
+:   Compares two ``DNDarrays`` and returns a new :class:`~heat.core.dndarray.DNDarray` containing the element-wise minima.
+    If one of the elements being compared is ``NaN``, then that element is returned;
+    if both elements are ``NaN``, then the first is returned. The ``DNDarrays`` must have the same shape,
+    or shapes that can be broadcast to a single shape. For broadcasting semantics,
+    see: https://pytorch.org/docs/stable/notes/broadcasting.html
+    The NaN distinction is important for complex NaNs, which are defined as at least one of the real or
+    imaginary parts being ``NaN``. The net effect is that NaNs are propagated.
+
+    Parameters
+    ----------
+    x1 : DNDarray
+        The first array containing the elements to be compared.
+    x2 : DNDarray
+        The second array containing the elements to be compared.
+    out : DNDarray, optional
+        A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to.
+        If not provided or ``None``, a freshly-allocated array is returned.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> a = ht.random.randn(3, 4)
+    >>> a
+    DNDarray([[-0.5462,  0.0079,  1.2828,  1.4980],
+              [ 0.6503, -1.1069,  1.2131,  1.4003],
+              [-0.3203, -0.2318,  1.0388,  0.4439]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> b = ht.random.randn(3, 4)
+    >>> b
+    DNDarray([[ 1.8505,  2.3055, -0.2825, -1.4718],
+              [-0.3684,  1.6866, -0.8570, -0.4779],
+              [ 1.0532,  0.3775, -0.8669, -1.7275]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.minimum(a, b)
+    DNDarray([[-0.5462,  0.0079, -0.2825, -1.4718],
+              [-0.3684, -1.1069, -0.8570, -0.4779],
+              [-0.3203, -0.2318, -0.8669, -1.7275]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> c = ht.random.randn(1, 4)
+    >>> c
+    DNDarray([[-1.4358,  1.2914, -0.6042, -1.4009]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.minimum(a, c)
+    DNDarray([[-1.4358,  0.0079, -0.6042, -1.4009],
+              [-1.4358, -1.1069, -0.6042, -1.4009],
+              [-1.4358, -0.2318, -0.6042, -1.4009]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> d = ht.random.randn(3, 4, 5)
+    >>> ht.minimum(a, d)
+    Traceback (most recent call last):
+        ...
+    ValueError: operands could not be broadcast, input shapes (3, 4) (3, 4, 5)
+
+`percentile(x: heat.core.dndarray.DNDarray, q: heat.core.dndarray.DNDarray | int | float | Tuple | List, axis: int | Tuple[int, ...] | None = None, out: heat.core.dndarray.DNDarray | None = None, interpolation: str = 'linear', keepdims: bool = False, sketched: bool = False, sketch_size: float | None = 1.0) ‑> heat.core.dndarray.DNDarray`
+:   Compute the q-th percentile of the data along the specified axis/axes.
+    Returns the q-th percentile(s) of the ``DNDarray`` elements.
+    By default, the "true" percentile(s) of the entire data set are computed; however, the argument
+    ``sketched`` allows switching to a faster but less accurate version that computes
+    the percentile only on a random subset of the data set (a "sketch").
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input tensor.
+    q : DNDarray, scalar, or list of scalars
+        Percentile or sequence of percentiles to compute. Must belong to the interval [0, 100].
+    axis : int, tuple of ints, or None, optional
+        Axis (if int) or axes (if tuple) along which the percentiles are computed. Default is ``None``, which corresponds to calculating the percentile over the flattened array.
+    out : DNDarray, optional
+        Output buffer.
+    interpolation : str, optional
+        Interpolation method to use when the desired percentile lies between two data points :math:`i < j`.
+        Can be one of:
+
+        - ‘linear’: :math:`i + (j - i) \cdot fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`.
+
+        - ‘lower’: `i`.
+
+        - ‘higher’: `j`.
+
+        - ‘nearest’: `i` or `j`, whichever is nearest.
+
+        - ‘midpoint’: :math:`(i + j) / 2`.
+
+    keepdims : bool, optional
+        If ``True``, the axes which are reduced are left in the result as dimensions with size one.
+        With this option, the result can broadcast correctly against the original array ``x``.
+    sketched : bool, optional
+        If ``False`` (default), the entire data set is used and no sketching is performed.
+        If ``True``, only a fraction of the data is used for estimating the percentile; the fraction is determined by ``sketch_size``.
+    sketch_size : float, optional
+        The fraction of the data to use for estimating the percentile; needs to be strictly between 0 and 1.
+        The default is ``1/nprocs``, where ``nprocs`` is the number of MPI processes involved in the calculation, i.e., roughly the portion of the data that is anyway processed on a single process.
+        Ignored for ``sketched=False``.
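+
+    Examples
+    --------
+    A minimal usage sketch. The numerical results follow from the interpolation
+    rules above; the printed ``DNDarray`` metadata is modeled on the other
+    examples in this module and may differ slightly in practice:
+
+    >>> x = ht.arange(11, dtype=ht.float32)
+    >>> ht.percentile(x, 50)
+    DNDarray([5.], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.percentile(x, q=[25, 75], interpolation="lower")
+    DNDarray([2., 7.], dtype=ht.float32, device=cpu:0, split=None)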
+
+`skew(x: heat.core.dndarray.DNDarray, axis: int = None, unbiased: bool = True) ‑> heat.core.dndarray.DNDarray`
+:   Compute the sample skewness of a data set.
+
+    Parameters
+    ----------
+    x : ht.DNDarray
+        Input array.
+    axis : None or int
+        Axis along which the skewness is calculated. Default is to compute over the whole array ``x``.
+    unbiased : bool
+        If ``True`` (default), the calculations are corrected for bias.
+
+    Warnings
+    --------
+    Depending on the given axis and the split configuration, a ``UserWarning`` may be raised during this function as data is transferred between processes.
+
+`std(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int] | List[int] = None, ddof: int = 0, **kwargs: object) ‑> heat.core.dndarray.DNDarray`
+:   Calculates the standard deviation of a ``DNDarray``, optionally with Bessel's correction (see ``ddof``).
+    If an axis is given, the standard deviation is taken along that axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Array for which the standard deviation is calculated.
+        The dtype of ``x`` must be a float type.
+    axis : None or int or iterable
+        Axis along which the standard deviation is taken. Default ``None`` calculates the standard deviation of all data items.
+    ddof : int, optional
+        Delta Degrees of Freedom: the denominator implicitly used in the calculation is N - ddof, where N
+        represents the number of elements. If ``ddof=1``, the Bessel correction will be applied.
+        Setting ``ddof>1`` raises a ``NotImplementedError``.
+    **kwargs
+        Extra keyword arguments
+
+    Examples
+    --------
+    >>> a = ht.random.randn(1, 3)
+    >>> a
+    DNDarray([[ 0.5714,  0.0048, -0.2942]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.std(a)
+    DNDarray(0.3590, dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[ 0.8488,  1.2225,  1.2498, -1.4592],
+              [-0.5820, -0.3928,  0.1509, -0.0174],
+              [ 0.6426, -1.8149,  0.1369,  0.0042],
+              [-0.6043, -0.0523, -1.6653,  0.6631]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.std(a, 1, ddof=1)
+    DNDarray([1.2961, 0.3362, 1.0739, 0.9820], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.std(a, 1)
+    DNDarray([1.1224, 0.2912, 0.9300, 0.8504], dtype=ht.float32, device=cpu:0, split=None)
+
+`var(x: heat.core.dndarray.DNDarray, axis: int | Tuple[int] | List[int] = None, ddof: int = 0, **kwargs: object) ‑> heat.core.dndarray.DNDarray`
+:   Calculates and returns the variance of a ``DNDarray``. If an axis is given, the variance is
+    taken along that axis.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Array for which the variance is calculated.
+        The dtype of ``x`` must be a float type.
+    axis : None or int or iterable
+        Axis along which the variance is taken. Default ``None`` calculates the variance of all data items.
+    ddof : int, optional
+        Delta Degrees of Freedom: the denominator implicitly used in the calculation is N - ddof, where N
+        represents the number of elements. If ``ddof=1``, the Bessel correction will be applied.
+        Setting ``ddof>1`` raises a ``NotImplementedError``.
+    **kwargs
+        Extra keyword arguments
+
+    Notes
+    -----
+    Split semantics when ``axis`` is an integer:
+
+    - if ``axis==x.split``, then ``var(x).split=None``
+
+    - if ``axis>x.split``, then ``var(x).split=x.split``
+
+    - if ``axis<x.split``, then ``var(x).split=x.split-1``
+
+    Examples
+    --------
+    >>> a = ht.random.randn(1, 3)
+    >>> a
+    DNDarray([[-2.3589, -0.2073,  0.8806]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a)
+    DNDarray(1.8119, dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a, ddof=1)
+    DNDarray(2.7179, dtype=ht.float32, device=cpu:0, split=None)
+    >>> a = ht.random.randn(4, 4)
+    >>> a
+    DNDarray([[-0.8523, -1.4982, -0.5848, -0.2554],
+              [ 0.8458, -0.3125, -0.2430,  1.9016],
+              [-0.6778, -0.3584, -1.5112,  0.6545],
+              [-0.9161,  0.0168,  0.0462,  0.5964]], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a, 1)
+    DNDarray([0.2083, 0.8218, 0.6010, 0.2952], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a, 0)
+    DNDarray([0.5250, 0.3282, 0.3432, 0.5918], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a, 0, ddof=1)
+    DNDarray([0.7001, 0.4376, 0.4576, 0.7890], dtype=ht.float32, device=cpu:0, split=None)
+    >>> ht.var(a, 0, ddof=0)
+    DNDarray([0.5250, 0.3282, 0.3432, 0.5918], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/stride_tricks.md b/doc/api/heat/core/stride_tricks.md
new file mode 100644
index 0000000000..b00281de53
--- /dev/null
+++ b/doc/api/heat/core/stride_tricks.md
@@ -0,0 +1,166 @@
+Module heat.core.stride_tricks
+==============================
+Helper functions for inferring and validating shapes, axes, and slices before major computations.
+
+Functions
+---------
+
+`broadcast_shape(shape_a: Tuple[int, ...], shape_b: Tuple[int, ...]) ‑> Tuple[int, ...]`
+:   Infers, if possible, the broadcast output shape of two operands ``a`` and ``b``. Inspired by the Stack Overflow post
+    https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape
+
+    Parameters
+    ----------
+    shape_a : Tuple[int,...]
+        Shape of the first operand.
+    shape_b : Tuple[int,...]
+        Shape of the second operand.
+
+    Raises
+    ------
+    ValueError
+        If the two shapes cannot be broadcast.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.core.stride_tricks.broadcast_shape((5, 4), (4,))
+    (5, 4)
+    >>> ht.core.stride_tricks.broadcast_shape((1, 100, 1), (10, 1, 5))
+    (10, 100, 5)
+    >>> ht.core.stride_tricks.broadcast_shape((8, 1, 6, 1), (7, 1, 5))
+    (8, 7, 6, 5)
+    >>> ht.core.stride_tricks.broadcast_shape((2, 1), (8, 4, 3))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "heat/core/stride_tricks.py", line 42, in broadcast_shape
+        "operands could not be broadcast, input shapes {} {}".format(shape_a, shape_b)
+    ValueError: operands could not be broadcast, input shapes (2, 1) (8, 4, 3)
+
+`broadcast_shapes(*shapes: Tuple[int, ...]) ‑> Tuple[int, ...]`
+:   Infers, if possible, the broadcast output shape of multiple operands.
+
+    Parameters
+    ----------
+    *shapes : Tuple[int,...]
+        Shapes of the operands.
+
+    Returns
+    -------
+    Tuple[int, ...]
+        The broadcast output shape.
+
+    Raises
+    ------
+    ValueError
+        If the shapes cannot be broadcast.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.broadcast_shapes((5, 4), (4,))
+    (5, 4)
+    >>> ht.broadcast_shapes((1, 100, 1), (10, 1, 5))
+    (10, 100, 5)
+    >>> ht.broadcast_shapes((8, 1, 6, 1), (7, 1, 5))
+    (8, 7, 6, 5)
+    >>> ht.broadcast_shapes((2, 1), (8, 4, 3))
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "heat/core/stride_tricks.py", line 100, in broadcast_shapes
+        "operands could not be broadcast, input shapes {}".format(shapes)
+    ValueError: operands could not be broadcast, input shapes ((2, 1), (8, 4, 3))
+
+`sanitize_axis(shape: Tuple[int, ...], axis: int | Tuple[int, ...] | None) ‑> int | Tuple[int, ...] | None`
+:   Checks the conformity of an axis with respect to a given shape. The axis is converted to its positive equivalent
+    and checked to be within bounds.
+
+    Parameters
+    ----------
+    shape : Tuple[int, ...]
+        Shape of an array.
+    axis : int or Tuple[int, ...] or None
+        The axis to be sanitized.
+
+    Raises
+    ------
+    ValueError
+        If the axis cannot be sanitized, i.e. it is out of bounds.
+    TypeError
+        If the axis is not integral.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.core.stride_tricks.sanitize_axis((5, 4, 4), 1)
+    1
+    >>> ht.core.stride_tricks.sanitize_axis((5, 4, 4), -1)
+    2
+    >>> ht.core.stride_tricks.sanitize_axis((5, 4), (1,))
+    (1,)
+    >>> ht.core.stride_tricks.sanitize_axis((5, 4), 1.0)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "heat/heat/core/stride_tricks.py", line 99, in sanitize_axis
+        raise TypeError("axis must be None or int or tuple, but was {}".format(type(axis)))
+    TypeError: axis must be None or int or tuple, but was <class 'float'>
+
+`sanitize_shape(shape: int | Tuple[int, ...], lval: int = 0) ‑> Tuple[int, ...]`
+:   Verifies and normalizes the given shape.
+
+    Parameters
+    ----------
+    shape : int or Tuple[int,...]
+        Shape of an array.
+    lval : int
+        Lowest legal value.
+
+    Raises
+    ------
+    ValueError
+        If the shape contains illegal values, e.g. negative numbers.
+    TypeError
+        If the given shape is neither an int nor a sequence of ints.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> ht.core.stride_tricks.sanitize_shape(3)
+    (3,)
+    >>> ht.core.stride_tricks.sanitize_shape([1, 2, 3])
+    (1, 2, 3)
+    >>> ht.core.stride_tricks.sanitize_shape(1.0)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+      File "heat/heat/core/stride_tricks.py", line 159, in sanitize_shape
+        raise TypeError("expected sequence object with length >= 0 or a single integer")
+    TypeError: expected sequence object with length >= 0 or a single integer
+
+`sanitize_slice(sl: slice, max_dim: int) ‑> slice`
+:   Remove ``None``-types from a slice.
+
+    Parameters
+    ----------
+    sl : slice
+        The slice to adjust.
+    max_dim : int
+        Maximum index for the given slice.
+
+    Raises
+    ------
+    TypeError
+        If ``sl`` is not a slice.
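+
+    Examples
+    --------
+    A minimal sketch of the intended behavior. The concrete values filled in
+    for the ``None`` entries (start ``0``, stop ``max_dim``, step ``1``) are an
+    assumption based on the description above:
+
+    >>> import heat as ht
+    >>> ht.core.stride_tricks.sanitize_slice(slice(None, None, None), 10)
+    slice(0, 10, 1)
+    >>> ht.core.stride_tricks.sanitize_slice(slice(2, None, 2), 10)
+    slice(2, 10, 2)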
diff --git a/doc/api/heat/core/tests/index.md b/doc/api/heat/core/tests/index.md new file mode 100644 index 0000000000..687fa5a2b1 --- /dev/null +++ b/doc/api/heat/core/tests/index.md @@ -0,0 +1,32 @@ +Module heat.core.tests +====================== + +Sub-modules +----------- +* heat.core.tests.test_arithmetics +* heat.core.tests.test_communication +* heat.core.tests.test_complex_math +* heat.core.tests.test_constants +* heat.core.tests.test_devices +* heat.core.tests.test_dndarray +* heat.core.tests.test_exponential +* heat.core.tests.test_factories +* heat.core.tests.test_indexing +* heat.core.tests.test_io +* heat.core.tests.test_logical +* heat.core.tests.test_manipulations +* heat.core.tests.test_memory +* heat.core.tests.test_operations +* heat.core.tests.test_printing +* heat.core.tests.test_random +* heat.core.tests.test_relational +* heat.core.tests.test_rounding +* heat.core.tests.test_sanitation +* heat.core.tests.test_signal +* heat.core.tests.test_statistics +* heat.core.tests.test_stride_tricks +* heat.core.tests.test_suites +* heat.core.tests.test_tiling +* heat.core.tests.test_trigonometrics +* heat.core.tests.test_types +* heat.core.tests.test_vmap diff --git a/doc/api/heat/core/tests/test_arithmetics.md b/doc/api/heat/core/tests/test_arithmetics.md new file mode 100644 index 0000000000..6c1e846fcf --- /dev/null +++ b/doc/api/heat/core/tests/test_arithmetics.md @@ -0,0 +1,211 @@ +Module heat.core.tests.test_arithmetics +======================================= + +Classes +------- + +`TestArithmetics(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_add(self)` + : + + `test_add_(self)` + : + + `test_bitwise_and(self)` + : + + `test_bitwise_and_(self)` + : + + `test_bitwise_or(self)` + : + + `test_bitwise_or_(self)` + : + + `test_bitwise_xor(self)` + : + + `test_bitwise_xor_(self)` + : + + `test_copysign(self)` + : + + `test_copysign_(self)` + : + + `test_cumprod(self)` + : + + `test_cumprod_(self)` + : + + `test_cumsum(self)` + : + + `test_cumsum_(self)` + : + + `test_diff(self)` + : + + `test_div(self)` + : + + `test_div_(self)` + : + + `test_divmod(self)` + : + + `test_floordiv(self)` + : + + `test_floordiv_(self)` + : + + `test_fmod(self)` + : + + `test_fmod_(self)` + : + + `test_gcd(self)` + : + + `test_gcd_(self)` + : + + `test_hypot(self)` + : + + `test_hypot_(self)` + : + + `test_invert(self)` + : + + `test_invert_(self)` + : + + `test_lcm(self)` + : + + `test_lcm_(self)` + : + + `test_left_shift(self)` + : + + `test_left_shift_(self)` + : + + `test_mul(self)` + : + + `test_mul_(self)` + : + + `test_nan_to_num(self)` + : + + `test_nan_to_num_(self)` + : + + `test_nanprod(self)` + : + + `test_nansum(self)` + : + + `test_neg(self)` + : + + `test_neg_(self)` + : + + `test_pos(self)` + : + + `test_pow(self)` + : + + `test_pow_(self)` + : + + `test_prod(self)` + : + + `test_remainder(self)` + : + + `test_remainder_(self)` + : + + `test_right_hand_side_operations(self)` + : This test ensures that for each arithmetic operation (e.g. +, -, *, ...) that is implemented + in the tensor class, it works both ways. + + Examples + -------- + >>> import heat as ht + >>> T = ht.float32([[1., 2.], [3., 4.]]) + >>> assert T * 3 == 3 * T + + `test_right_shift(self)` + : + + `test_right_shift_(self)` + : + + `test_sub(self)` + : + + `test_sub_(self)` + : + + `test_sum(self)` + : diff --git a/doc/api/heat/core/tests/test_communication.md b/doc/api/heat/core/tests/test_communication.md new file mode 100644 index 0000000000..39a1417684 --- /dev/null +++ b/doc/api/heat/core/tests/test_communication.md @@ -0,0 +1,165 @@ +Module heat.core.tests.test_communication +========================================= + +Classes +------- + +`TestCommunication(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. 
+ * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_allgather(self)` + : + + `test_allgatherv(self)` + : + + `test_allgathervSorting(self)` + : + + `test_allreduce(self)` + : + + `test_alltoall(self)` + : + + `test_alltoallSorting(self)` + : + + `test_alltoallv(self)` + : + + `test_bcast(self)` + : + + `test_contiguous_memory_buffer(self)` + : + + `test_cuda_aware_mpi(self)` + : + + `test_default_comm(self)` + : + + `test_exscan(self)` + : + + `test_gather(self)` + : + + `test_gatherv(self)` + : + + `test_iallgather(self)` + : + + `test_iallgatherv(self)` + : + + `test_iallreduce(self)` + : + + `test_ialltoall(self)` + : + + `test_ialltoallv(self)` + : + + `test_ibcast(self)` + : + + `test_iexscan(self)` + : + + `test_igather(self)` + : + + `test_igatherv(self)` + : + + `test_ireduce(self)` + : + + `test_iscan(self)` + : + + `test_iscatter(self)` + : + + `test_iscatterv(self)` + : + + `test_largecount_workaround_Allreduce(self)` + : + + `test_largecount_workaround_IsendRecv(self)` + : + + `test_mpi_communicator(self)` + : + + `test_mpi_in_place(self)` + : + + `test_non_contiguous_memory_buffer(self)` + : + + `test_reduce(self)` + : + + `test_scan(self)` + : + + `test_scatter(self)` + : + + `test_scatter_like_axes(self)` + : + + `test_scatterv(self)` + : + + `test_self_communicator(self)` + : + + `test_split(self)` + : diff --git a/doc/api/heat/core/tests/test_complex_math.md b/doc/api/heat/core/tests/test_complex_math.md new file mode 100644 index 0000000000..5d0d77d0c8 --- /dev/null +++ b/doc/api/heat/core/tests/test_complex_math.md @@ -0,0 +1,63 @@ +Module heat.core.tests.test_complex_math +======================================== + +Classes +------- + +`TestComplex(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. 
+ * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_abs(self)` + : + + `test_angle(self)` + : + + `test_conjugate(self)` + : + + `test_imag(self)` + : + + `test_real(self)` + : diff --git a/doc/api/heat/core/tests/test_constants.md b/doc/api/heat/core/tests/test_constants.md new file mode 100644 index 0000000000..9a365ae6ce --- /dev/null +++ b/doc/api/heat/core/tests/test_constants.md @@ -0,0 +1,51 @@ +Module heat.core.tests.test_constants +===================================== + +Classes +------- + +`TestConstants(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_constants(self)` + : diff --git a/doc/api/heat/core/tests/test_devices.md b/doc/api/heat/core/tests/test_devices.md new file mode 100644 index 0000000000..16b051780d --- /dev/null +++ b/doc/api/heat/core/tests/test_devices.md @@ -0,0 +1,66 @@ +Module heat.core.tests.test_devices +=================================== + +Classes +------- + +`TestDevices(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. 
+ + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_get_default_device_cpu(self)` + : + + `test_get_default_device_gpu(self)` + : + + `test_sanitize_device_cpu(self)` + : + + `test_sanitize_device_gpu(self)` + : + + `test_set_default_device_cpu(self)` + : + + `test_set_default_device_gpu(self)` + : diff --git a/doc/api/heat/core/tests/test_dndarray.md b/doc/api/heat/core/tests/test_dndarray.md new file mode 100644 index 0000000000..c783c6fb6b --- /dev/null +++ b/doc/api/heat/core/tests/test_dndarray.md @@ -0,0 +1,159 @@ +Module heat.core.tests.test_dndarray +==================================== + +Classes +------- + +`TestDNDarray(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. 
+ * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_and(self)` + : + + `test_array(self)` + : + + `test_array_function(self)` + : + + `test_array_ufunc(self)` + : + + `test_astype(self)` + : + + `test_balance_and_lshape_map(self)` + : + + `test_bool_cast(self)` + : + + `test_collect(self)` + : + + `test_complex_cast(self)` + : + + `test_counts_displs(self)` + : + + `test_fill_diagonal(self)` + : + + `test_flatten(self)` + : + + `test_float_cast(self)` + : + + `test_gethalo(self)` + : + + `test_int_cast(self)` + : + + `test_invert(self)` + : + + `test_is_balanced(self)` + : + + `test_is_distributed(self)` + : + + `test_item(self)` + : + + `test_larray(self)` + : + + `test_len(self)` + : + + `test_lloc(self)` + : + + `test_lnbytes(self)` + : + + `test_nbytes(self)` + : + + `test_ndim(self)` + : + + `test_numpy(self)` + : + + `test_or(self)` + : + + `test_partitioned(self)` + : + + `test_redistribute(self)` + : + + `test_resplit(self)` + : + + `test_setitem_getitem(self)` + : + + `test_size_gnumel(self)` + : + + `test_stride_and_strides(self)` + : + + `test_tolist(self)` + : + + `test_torch_function(self)` + : + + `test_torch_proxy(self)` + : + + `test_xor(self)` + : diff --git a/doc/api/heat/core/tests/test_exponential.md b/doc/api/heat/core/tests/test_exponential.md new file mode 100644 index 0000000000..c0f3a2c442 --- /dev/null +++ b/doc/api/heat/core/tests/test_exponential.md @@ -0,0 +1,90 @@ +Module heat.core.tests.test_exponential +======================================= + +Classes +------- + +`TestExponential(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. 
+ * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `set_torch_dtype(self)` + : + + `test_exp(self)` + : + + `test_exp2(self)` + : + + `test_expm1(self)` + : + + `test_log(self)` + : + + `test_log10(self)` + : + + `test_log1p(self)` + : + + `test_log2(self)` + : + + `test_logaddexp(self)` + : + + `test_logaddexp2(self)` + : + + `test_sqrt(self)` + : + + `test_sqrt_method(self)` + : + + `test_sqrt_out_of_place(self)` + : + + `test_square(self)` + : diff --git a/doc/api/heat/core/tests/test_factories.md b/doc/api/heat/core/tests/test_factories.md new file mode 100644 index 0000000000..60c3d28dc7 --- /dev/null +++ b/doc/api/heat/core/tests/test_factories.md @@ -0,0 +1,99 @@ +Module heat.core.tests.test_factories +===================================== + +Classes +------- + +`TestFactories(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_arange(self)` + : + + `test_array(self)` + : + + `test_asarray(self)` + : + + `test_empty(self)` + : + + `test_empty_like(self)` + : + + `test_eye(self)` + : + + `test_from_partition_dict(self)` + : + + `test_from_partitioned(self)` + : + + `test_full(self)` + : + + `test_full_like(self)` + : + + `test_linspace(self)` + : + + `test_logspace(self)` + : + + `test_meshgrid(self)` + : + + `test_ones(self)` + : + + `test_ones_like(self)` + : + + `test_zeros(self)` + : + + `test_zeros_like(self)` + : diff --git a/doc/api/heat/core/tests/test_indexing.md b/doc/api/heat/core/tests/test_indexing.md new file mode 100644 index 0000000000..dee0e66287 --- /dev/null +++ b/doc/api/heat/core/tests/test_indexing.md @@ -0,0 +1,54 @@ +Module heat.core.tests.test_indexing +==================================== + +Classes +------- + +`TestIndexing(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_nonzero(self)` + : + + `test_where(self)` + : diff --git a/doc/api/heat/core/tests/test_io.md b/doc/api/heat/core/tests/test_io.md new file mode 100644 index 0000000000..24e3f10c43 --- /dev/null +++ b/doc/api/heat/core/tests/test_io.md @@ -0,0 +1,141 @@ +Module heat.core.tests.test_io +============================== + +Classes +------- + +`TestIO(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `tearDown(self)` + : Hook method for deconstructing the test fixture after testing it. + + `test_load(self)` + : + + `test_load_csv(self)` + : + + `test_load_exception(self)` + : + + `test_load_hdf5(self)` + : + + `test_load_hdf5_exception(self)` + : + + `test_load_multiple_csv(self)` + : + + `test_load_multiple_csv_exception(self)` + : + + `test_load_netcdf(self)` + : + + `test_load_netcdf_exception(self)` + : + + `test_load_npy_exception(self)` + : + + `test_load_npy_float(self)` + : + + `test_load_npy_int(self)` + : + + `test_load_partial_hdf5(self)` + : + + `test_load_zarr(self)` + : + + `test_load_zarr_arguments(self)` + : + + `test_load_zarr_group(self)` + : + + `test_load_zarr_slice(self)` + : + + `test_save(self)` + : + + `test_save_csv(self)` + : + + `test_save_exception(self)` + : + + `test_save_hdf5(self)` + : + + `test_save_hdf5_exception(self)` + : + + `test_save_netcdf(self)` + : + + `test_save_netcdf_exception(self)` + : + + `test_save_zarr_1d_split_0(self)` + : + + `test_save_zarr_2d_split0(self)` + : + + `test_save_zarr_2d_split1(self)` + : + + `test_save_zarr_arguments(self)` + : + + `test_save_zarr_split_none(self)` + : + + `test_size_from_slice(self)` + : diff --git a/doc/api/heat/core/tests/test_logical.md b/doc/api/heat/core/tests/test_logical.md new file mode 100644 index 0000000000..9126759cf9 --- /dev/null +++ b/doc/api/heat/core/tests/test_logical.md @@ -0,0 +1,90 @@ +Module heat.core.tests.test_logical +=================================== + +Classes +------- + +`TestLogical(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. 
+ + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_all(self)` + : + + `test_allclose(self)` + : + + `test_any(self)` + : + + `test_isclose(self)` + : + + `test_isfinite(self)` + : + + `test_isinf(self)` + : + + `test_isnan(self)` + : + + `test_isneginf(self)` + : + + `test_isposinf(self)` + : + + `test_logical_and(self)` + : + + `test_logical_not(self)` + : + + `test_logical_or(self)` + : + + `test_logical_xor(self)` + : + + `test_signbit(self)` + : diff --git a/doc/api/heat/core/tests/test_manipulations.md b/doc/api/heat/core/tests/test_manipulations.md new file mode 100644 index 0000000000..bb27bf841a --- /dev/null +++ b/doc/api/heat/core/tests/test_manipulations.md @@ -0,0 +1,156 @@ +Module heat.core.tests.test_manipulations +========================================= + +Classes +------- + +`TestManipulations(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. 
+ * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_broadcast_arrays(self)` + : + + `test_collect(self)` + : + + `test_column_stack(self)` + : + + `test_concatenate(self)` + : + + `test_diag(self)` + : + + `test_diagonal(self)` + : + + `test_dsplit(self)` + : + + `test_expand_dims(self)` + : + + `test_flatten(self)` + : + + `test_flip(self)` + : + + `test_fliplr(self)` + : + + `test_flipud(self)` + : + + `test_hsplit(self)` + : + + `test_hstack(self)` + : + + `test_moveaxis(self)` + : + + `test_pad(self)` + : + + `test_ravel(self)` + : + + `test_repeat(self)` + : + + `test_reshape(self)` + : + + `test_resplit(self)` + : + + `test_roll(self)` + : + + `test_rot90(self)` + : + + `test_row_stack(self)` + : + + `test_shape(self)` + : + + `test_sort(self)` + : + + `test_split(self)` + : + + `test_squeeze(self)` + : + + `test_stack(self)` + : + + `test_swapaxes(self)` + : + + `test_tile(self)` + : + + `test_topk(self)` + : + + `test_unfold(self)` + : + + `test_unique(self)` + : + + `test_vsplit(self)` + : + + `test_vstack(self)` + : + + `tests_broadcast_to(self)` + : diff --git a/doc/api/heat/core/tests/test_memory.md b/doc/api/heat/core/tests/test_memory.md new file mode 100644 index 0000000000..d8249a25a9 --- /dev/null +++ b/doc/api/heat/core/tests/test_memory.md @@ -0,0 +1,54 @@ +Module heat.core.tests.test_memory +================================== + +Classes +------- + +`TestMemory(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. 
It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_copy(self)` + : + + `test_sanitize_memory_layout(self)` + : diff --git a/doc/api/heat/core/tests/test_operations.md b/doc/api/heat/core/tests/test_operations.md new file mode 100644 index 0000000000..faeb67def2 --- /dev/null +++ b/doc/api/heat/core/tests/test_operations.md @@ -0,0 +1,51 @@ +Module heat.core.tests.test_operations +====================================== + +Classes +------- + +`TestOperations(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test___binary_bit_op_broadcast(self)` + : diff --git a/doc/api/heat/core/tests/test_printing.md b/doc/api/heat/core/tests/test_printing.md new file mode 100644 index 0000000000..eb222f2f67 --- /dev/null +++ b/doc/api/heat/core/tests/test_printing.md @@ -0,0 +1,163 @@ +Module heat.core.tests.test_printing +==================================== + +Classes +------- + +`TestPrinting(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. 
Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `setUp(self)` + : Hook method for setting up the test fixture before exercising it. + + `tearDown(self)` + : Hook method for deconstructing the test fixture after testing it. + + `test___repr__(self)` + : + + `test_empty(self)` + : + + `test_get_default_options(self)` + : + + `test_local_printing(self)` + : + + `test_scalar(self)` + : + + `test_set_get_edgeitems(self)` + : + + `test_set_get_full_options(self)` + : + + `test_set_get_linewidth(self)` + : + + `test_set_get_precision(self)` + : + + `test_set_get_sci_mode(self)` + : + + `test_set_get_short_options(self)` + : + + `test_set_get_threshold(self)` + : + + `test_split_0_above_threshold(self)` + : + + `test_split_0_below_threshold(self)` + : + + `test_split_1_above_threshold(self)` + : + + `test_split_1_below_threshold(self)` + : + + `test_split_2_above_threshold(self)` + : + + `test_split_2_below_threshold(self)` + : + + `test_unbalanced(self)` + : + + `test_unsplit_above_threshold(self)` + : + + `test_unsplit_below_threshold(self)` + : + +`TestPrintingGPU(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. 
+ + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_print_GPU(self)` + : diff --git a/doc/api/heat/core/tests/test_random.md b/doc/api/heat/core/tests/test_random.md new file mode 100644 index 0000000000..f879544e06 --- /dev/null +++ b/doc/api/heat/core/tests/test_random.md @@ -0,0 +1,145 @@ +Module heat.core.tests.test_random +================================== + +Classes +------- + +`TestRandom_Batchparallel(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
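+
+    For orientation, a minimal sketch of the batch-parallel RNG calls these tests
+    cover, using the public `heat.random` API (illustrative only, not code from
+    the test module):
+
+        >>> import heat as ht
+        >>> ht.random.seed(42)  # reseed the default batch-parallel generator
+        >>> x = ht.random.rand(1000, split=0)  # uniform samples, distributed along axis 0
+        >>> y = ht.random.randn(10, 10)  # standard-normal samples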
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_default(self)` + : + + `test_normal(self)` + : + + `test_permutation(self)` + : + + `test_rand(self)` + : + + `test_randint(self)` + : + + `test_randn(self)` + : + + `test_random_sample(self)` + : + + `test_randperm(self)` + : + + `test_set_state(self)` + : + + `test_standard_normal(self)` + : + +`TestRandom_Threefry(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_normal(self)` + : + + `test_permutation(self)` + : + + `test_rand(self)` + : + + `test_randint(self)` + : + + `test_randn(self)` + : + + `test_randperm(self)` + : + + `test_setting_threefry(self)` + : + + `test_standard_normal(self)` + : diff --git a/doc/api/heat/core/tests/test_relational.md b/doc/api/heat/core/tests/test_relational.md new file mode 100644 index 0000000000..89cc031e01 --- /dev/null +++ b/doc/api/heat/core/tests/test_relational.md @@ -0,0 +1,69 @@ +Module heat.core.tests.test_relational +====================================== + +Classes +------- + +`TestRelational(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. 
+ + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_eq(self)` + : + + `test_equal(self)` + : + + `test_ge(self)` + : + + `test_gt(self)` + : + + `test_le(self)` + : + + `test_lt(self)` + : + + `test_ne(self)` + : diff --git a/doc/api/heat/core/tests/test_rounding.md b/doc/api/heat/core/tests/test_rounding.md new file mode 100644 index 0000000000..f0ff56cb13 --- /dev/null +++ b/doc/api/heat/core/tests/test_rounding.md @@ -0,0 +1,75 @@ +Module heat.core.tests.test_rounding +==================================== + +Classes +------- + +`TestRounding(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
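+
+    A minimal sketch of the rounding operations under test, against the public
+    `heat` API (illustrative only):
+
+        >>> import heat as ht
+        >>> x = ht.array([-1.7, -0.2, 0.2, 1.7])
+        >>> ht.floor(x)  # rounds towards negative infinity: [-2., -1., 0., 1.]
+        >>> ht.trunc(x)  # rounds towards zero: [-1., -0., 0., 1.]
+        >>> ht.clip(x, -1.0, 1.0)  # limits values to the interval [-1, 1]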
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_abs(self)` + : + + `test_ceil(self)` + : + + `test_clip(self)` + : + + `test_floor(self)` + : + + `test_modf(self)` + : + + `test_round(self)` + : + + `test_sgn(self)` + : + + `test_sign(self)` + : + + `test_trunc(self)` + : diff --git a/doc/api/heat/core/tests/test_sanitation.md b/doc/api/heat/core/tests/test_sanitation.md new file mode 100644 index 0000000000..299919f41a --- /dev/null +++ b/doc/api/heat/core/tests/test_sanitation.md @@ -0,0 +1,63 @@ +Module heat.core.tests.test_sanitation +====================================== + +Classes +------- + +`TestSanitation(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `sanitize_in_nd_realfloating(self)` + : + + `test_sanitize_in(self)` + : + + `test_sanitize_out(self)` + : + + `test_sanitize_sequence(self)` + : + + `test_scalar_to_1d(self)` + : diff --git a/doc/api/heat/core/tests/test_signal.md b/doc/api/heat/core/tests/test_signal.md new file mode 100644 index 0000000000..d3cfd1f280 --- /dev/null +++ b/doc/api/heat/core/tests/test_signal.md @@ -0,0 +1,81 @@ +Module heat.core.tests.test_signal +================================== + +Classes +------- + +`TestSignal(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. 
+ + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `assert_convolution_stride(self, signal, kernel, mode, stride, solution)` + : + + `test_convolution_stride_kernel_size_1(self)` + : + + `test_convolution_stride_large_signal_and_kernel_modes(self)` + : + + `test_convolve(self)` + : + + `test_convolve_stride_batch_convolutions(self)` + : + + `test_convolve_stride_errors(self)` + : + + `test_convolve_stride_kernel_even_mode_full(self)` + : + + `test_convolve_stride_kernel_even_mode_valid(self)` + : + + `test_convolve_stride_kernel_odd_mode_full(self)` + : + + `test_convolve_stride_kernel_odd_mode_valid(self)` + : + + `test_only_balanced_kernel(self)` + : diff --git a/doc/api/heat/core/tests/test_statistics.md b/doc/api/heat/core/tests/test_statistics.md new file mode 100644 index 0000000000..8f82d9a7f9 --- /dev/null +++ b/doc/api/heat/core/tests/test_statistics.md @@ -0,0 +1,105 @@ +Module heat.core.tests.test_statistics +====================================== + +Classes +------- + +`TestStatistics(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. 
+ + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_argmax(self)` + : + + `test_argmin(self)` + : + + `test_average(self)` + : + + `test_bincount(self)` + : + + `test_bucketize(self)` + : + + `test_cov(self)` + : + + `test_digitize(self)` + : + + `test_histc(self)` + : + + `test_kurtosis(self)` + : + + `test_max(self)` + : + + `test_maximum(self)` + : + + `test_mean(self)` + : + + `test_min(self)` + : + + `test_minimum(self)` + : + + `test_percentile(self)` + : + + `test_percentile_sketched(self)` + : + + `test_skew(self)` + : + + `test_std(self)` + : + + `test_var(self)` + : diff --git a/doc/api/heat/core/tests/test_stride_tricks.md b/doc/api/heat/core/tests/test_stride_tricks.md new file mode 100644 index 0000000000..7e4880d648 --- /dev/null +++ b/doc/api/heat/core/tests/test_stride_tricks.md @@ -0,0 +1,63 @@ +Module heat.core.tests.test_stride_tricks +========================================= + +Classes +------- + +`TestStrideTricks(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. 
Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_broadcast_shape(self)` + : + + `test_broadcast_shapes(self)` + : + + `test_sanitize_axis(self)` + : + + `test_sanitize_shape(self)` + : + + `test_sanitize_slice(self)` + : diff --git a/doc/api/heat/core/tests/test_suites/basic_test.md b/doc/api/heat/core/tests/test_suites/basic_test.md new file mode 100644 index 0000000000..99a09e57c8 --- /dev/null +++ b/doc/api/heat/core/tests/test_suites/basic_test.md @@ -0,0 +1,293 @@ +Module heat.core.tests.test_suites.basic_test +============================================= + +Classes +------- + +`TestCase(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
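+
+    A hypothetical sketch of how a heat test module builds on this class (the
+    real subclasses are listed under Descendants below); `TestMyOp` and its test
+    method are illustrative names, not part of the actual test suite:
+
+        >>> import numpy as np
+        >>> import heat as ht
+        >>> from heat.core.tests.test_suites.basic_test import TestCase
+        >>> class TestMyOp(TestCase):
+        ...     def test_exp(self):
+        ...         # create random tensors of shape (4, 4) in several dtypes and
+        ...         # check ht.exp against np.exp on every MPI rank
+        ...         self.assert_func_equal((4, 4), ht.exp, np.exp)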
+ + ### Ancestors (in MRO) + + * unittest.case.TestCase + + ### Descendants + + * heat.classification.tests.test_knn.TestKNN + * heat.cluster.tests.test_batchparallelclustering.TestAuxiliaryFunctions + * heat.cluster.tests.test_batchparallelclustering.TestBatchParallelKCluster + * heat.cluster.tests.test_kmeans.TestKMeans + * heat.cluster.tests.test_kmedians.TestKMedians + * heat.cluster.tests.test_kmedoids.TestKMeans + * heat.cluster.tests.test_spectral.TestSpectral + * heat.core.linalg.tests.test_basics.TestLinalgBasics + * heat.core.linalg.tests.test_eigh.TestEigh + * heat.core.linalg.tests.test_polar.TestZolopolar + * heat.core.linalg.tests.test_qr.TestQR + * heat.core.linalg.tests.test_solver.TestSolver + * heat.core.linalg.tests.test_svd.TestTallSkinnySVD + * heat.core.linalg.tests.test_svd.TestZoloSVD + * heat.core.linalg.tests.test_svdtools.TestHSVD + * heat.core.linalg.tests.test_svdtools.TestISVD + * heat.core.linalg.tests.test_svdtools.TestRSVD + * heat.core.tests.test_arithmetics.TestArithmetics + * heat.core.tests.test_communication.TestCommunication + * heat.core.tests.test_complex_math.TestComplex + * heat.core.tests.test_constants.TestConstants + * heat.core.tests.test_devices.TestDevices + * heat.core.tests.test_dndarray.TestDNDarray + * heat.core.tests.test_exponential.TestExponential + * heat.core.tests.test_factories.TestFactories + * heat.core.tests.test_indexing.TestIndexing + * heat.core.tests.test_io.TestIO + * heat.core.tests.test_logical.TestLogical + * heat.core.tests.test_manipulations.TestManipulations + * heat.core.tests.test_memory.TestMemory + * heat.core.tests.test_operations.TestOperations + * heat.core.tests.test_printing.TestPrinting + * heat.core.tests.test_printing.TestPrintingGPU + * heat.core.tests.test_random.TestRandom_Batchparallel + * heat.core.tests.test_random.TestRandom_Threefry + * heat.core.tests.test_relational.TestRelational + * heat.core.tests.test_rounding.TestRounding + * heat.core.tests.test_sanitation.TestSanitation + * heat.core.tests.test_signal.TestSignal + * heat.core.tests.test_statistics.TestStatistics + * heat.core.tests.test_stride_tricks.TestStrideTricks + * heat.core.tests.test_suites.test_basic_test.TestBasicTest + * heat.core.tests.test_tiling.TestSplitTiles + * heat.core.tests.test_tiling.TestSquareDiagTiles + * heat.core.tests.test_trigonometrics.TestTrigonometrics + * heat.core.tests.test_types.TestTypeConversion + * heat.core.tests.test_types.TestTypes + * heat.core.tests.test_vmap.TestVmap + * heat.decomposition.tests.test_dmd.TestDMD + * heat.decomposition.tests.test_dmd.TestDMDc + * heat.decomposition.tests.test_pca.TestIncrementalPCA + * heat.decomposition.tests.test_pca.TestPCA + * heat.fft.tests.test_fft.TestFFT + * heat.graph.tests.test_laplacian.TestLaplacian + * heat.naive_bayes.tests.test_gaussiannb.TestGaussianNB + * heat.optim.tests.test_dp_optimizer.TestDASO + * heat.optim.tests.test_optim.TestLRScheduler + * heat.optim.tests.test_optim.TestOptim + * heat.optim.tests.test_utils.TestUtils + * heat.preprocessing.tests.test_preprocessing.TestMaxAbsScaler + * heat.preprocessing.tests.test_preprocessing.TestMinMaxScaler + * heat.preprocessing.tests.test_preprocessing.TestNormalizer + * heat.preprocessing.tests.test_preprocessing.TestRobustScaler + * heat.preprocessing.tests.test_preprocessing.TestStandardScaler + * heat.regression.tests.test_lasso.TestLasso + * heat.sparse.tests.test_arithmetics_csr.TestArithmeticsCSR + * heat.sparse.tests.test_dcscmatrix.TestDCSC_matrix + * 
heat.sparse.tests.test_dcsrmatrix.TestDCSR_matrix
+    * heat.sparse.tests.test_factories.TestFactories
+    * heat.sparse.tests.test_manipulations.TestManipulations
+    * heat.spatial.tests.test_distances.TestDistances
+    * heat.utils.data.tests.test_matrixgallery.TestMatrixgallery
+    * heat.utils.data.tests.test_spherical.TestCreateClusters
+
+    ### Class variables
+
+    `device: heat.core.devices.Device`
+    :
+
+    `envar: str | None`
+    :
+
+    `other_device: heat.core.devices.Device | None`
+    :
+
+    ### Static methods
+
+    `get_hostnames() ‑> list[str]`
+    :
+
+    `setUpClass() ‑> None`
+    :   Read the environment variable 'HEAT_TEST_USE_DEVICE' and set up the requested devices.
+        Supported values:
+        - cpu: Use CPU only (default)
+        - gpu: Use GPU only
+
+        Raises
+        ------
+        RuntimeError if the value of 'HEAT_TEST_USE_DEVICE' is not recognized.
+
+    ### Instance variables
+
+    `comm: heat.core.communication.MPICommunication`
+    :
+
+    ### Methods
+
+    `assertTrue_memory_layout(self, tensor: heat.core.dndarray.DNDarray, order: str) ‑> None`
+    :   Checks that the memory layout of a given heat tensor matches the specified order.
+
+        Parameters
+        ----------
+        order: str, 'C' for C-like (row-major), 'F' for Fortran-like (column-major) memory layout.
+
+    `assert_array_equal(self, heat_array: heat.core.dndarray.DNDarray, expected_array: numpy.ndarray | torch.Tensor, rtol: float = 1e-05, atol: float = 1e-08) ‑> None`
+    :   Check whether heat_array is equivalent to expected_array: first, the split heat_array is compared locally to
+        the corresponding slice of expected_array; second, the heat_array is combined and compared in full with
+        expected_array.
+        Note that if the heat array is split, it also needs to be balanced.
+
+        Parameters
+        ----------
+        heat_array: heat.DNDarray
+            The heat array which should be checked.
+        expected_array: numpy.ndarray or torch.Tensor
+            The array against which the heat_array should be checked.
+
+        Raises
+        ------
+        AssertionError if the arrays are not equal.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import heat as ht
+        >>> a = ht.ones((5, 5), split=1, dtype=ht.int32)
+        >>> b = np.ones((5, 5), dtype=np.int32)
+        >>> self.assert_array_equal(a, b)
+
+        >>> c = np.ones((5, 5), dtype=np.int64)
+        >>> self.assert_array_equal(a, c)
+        AssertionError: [...]
+        >>> c = np.zeros((5, 5), dtype=np.int32)
+        >>> self.assert_array_equal(a, c)
+        AssertionError: [...]
+
+    `assert_func_equal(self, shape: tuple[typing.Any, ...] | list[typing.Any], heat_func: Callable[..., Any], numpy_func: Callable[..., Any], distributed_result: bool = True, heat_args: dict[str, typing.Any] | None = None, numpy_args: dict[str, typing.Any] | None = None, data_types: tuple[type, ...] = (numpy.int32, numpy.int64, numpy.float32, numpy.float64), low: int = -10000, high: int = 10000) ‑> None`
+    :   This function creates random tensors of the given shape with different data types.
+        Each of these tensors is tested with `assert_func_equal_for_tensor`.
+
+        Parameters
+        ----------
+        shape: tuple or list
+            The shape of the random tensors that will be created and tested.
+        heat_func: function
+            The function that is to be tested.
+        numpy_func: function
+            The numpy implementation of an equivalent function to test against.
+        heat_args: dictionary, optional
+            The keyword arguments that will be passed to the heat function. The array and split arguments don't need
+            to be specified. Default is {}.
+        numpy_args: dictionary, optional
+            The keyword arguments that will be passed to the numpy function. The array doesn't need to be specified.
+            Default is {}.
+        distributed_result: bool, optional
+            Specifies whether the result of the heat function is distributed across all nodes or whether all nodes
+            hold the full result. Default is True.
+        data_types: list of numpy dtypes, optional
+            Tensors with all of these dtypes will be created and tested. Each type must be a numpy dtype.
+            Default is [numpy.int32, numpy.int64, numpy.float32, numpy.float64].
+        low: int, optional
+            If one of the data_types is an integer type, this is the lower bound for the random values.
+            Default is -10000.
+        high: int, optional
+            If one of the data_types is an integer type, this is the upper bound for the random values.
+            Default is 10000.
+
+        Raises
+        ------
+        AssertionError if the functions do not perform equally.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import heat as ht
+        >>> self.assert_func_equal((2, 2), ht.exp, np.exp)
+
+        >>> self.assert_func_equal((2, 2), ht.exp, np.log)
+        AssertionError: [...]
+        >>> self.assert_func_equal((1, 3, 5), ht.any, np.any, distributed_result=False)
+
+        >>> heat_args = {"sorted": True, "axis": 0}
+        >>> numpy_args = {"axis": 0}
+        >>> self.assert_func_equal(
+        ...     [5, 5, 5, 5], ht.unique, np.unique, heat_args=heat_args, numpy_args=numpy_args
+        ... )
+
+    `assert_func_equal_for_tensor(self, tensor: numpy.ndarray | torch.Tensor, heat_func: Callable[..., Any], numpy_func: Callable[..., Any], heat_args: dict[str, typing.Any] | None = None, numpy_args: dict[str, typing.Any] | None = None, distributed_result: bool = True) ‑> None`
+    :   This function tests whether the heat function and the numpy function produce equal results on the given
+        tensor.
+
+        Parameters
+        ----------
+        tensor: torch.Tensor or numpy.ndarray
+            The tensor on which the heat function will be executed.
+        heat_func: function
+            The function that is to be tested.
+        numpy_func: function
+            The numpy implementation of an equivalent function to test against.
+        heat_args: dictionary, optional
+            The keyword arguments that will be passed to the heat function. The array and split arguments don't need
+            to be specified. Default is {}.
+        numpy_args: dictionary, optional
+            The keyword arguments that will be passed to the numpy function. The array doesn't need to be specified.
+            Default is {}.
+        distributed_result: bool, optional
+            Specifies whether the result of the heat function is distributed across all nodes or whether all nodes
+            hold the full result. Default is True.
+
+        Raises
+        ------
+        AssertionError if the functions do not perform equally.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import torch
+        >>> import heat as ht
+        >>> a = np.arange(10)
+        >>> self.assert_func_equal_for_tensor(a, ht.exp, np.exp)
+
+        >>> self.assert_func_equal_for_tensor(a, ht.exp, np.log)
+        AssertionError: [...]
+        >>> self.assert_func_equal_for_tensor(a, ht.any, np.any, distributed_result=False)
+
+        >>> a = torch.ones([5, 5, 5, 5])
+        >>> heat_args = {"sorted": True, "axis": 0}
+        >>> numpy_args = {"axis": 0}
+        >>> self.assert_func_equal_for_tensor(
+        ...     a, ht.unique, np.unique, heat_args=heat_args, numpy_args=numpy_args
+        ...
) + + `get_rank(self) ‑> int | None` + : + + `get_size(self) ‑> int | None` + : diff --git a/doc/api/heat/core/tests/test_suites/index.md b/doc/api/heat/core/tests/test_suites/index.md new file mode 100644 index 0000000000..e27bb799f2 --- /dev/null +++ b/doc/api/heat/core/tests/test_suites/index.md @@ -0,0 +1,7 @@ +Module heat.core.tests.test_suites +================================== + +Sub-modules +----------- +* heat.core.tests.test_suites.basic_test +* heat.core.tests.test_suites.test_basic_test diff --git a/doc/api/heat/core/tests/test_suites/test_basic_test.md b/doc/api/heat/core/tests/test_suites/test_basic_test.md new file mode 100644 index 0000000000..5843a829ca --- /dev/null +++ b/doc/api/heat/core/tests/test_suites/test_basic_test.md @@ -0,0 +1,60 @@ +Module heat.core.tests.test_suites.test_basic_test +================================================== + +Classes +------- + +`TestBasicTest(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_assertTrue_memory_layout(self)` + : + + `test_assert_array_equal(self)` + : + + `test_assert_func_equal(self)` + : + + `test_assert_func_equal_for_tensor(self)` + : diff --git a/doc/api/heat/core/tests/test_tiling.md b/doc/api/heat/core/tests/test_tiling.md new file mode 100644 index 0000000000..d4be234e8d --- /dev/null +++ b/doc/api/heat/core/tests/test_tiling.md @@ -0,0 +1,95 @@ +Module heat.core.tests.test_tiling +================================== + +Classes +------- + +`TestSplitTiles(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_misc_coverage(self)` + : + + `test_raises(self)` + : + +`TestSquareDiagTiles(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
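+
+    For context, the classes under test here come from `heat.core.tiling`. A
+    hedged sketch of typical usage, assuming the `SquareDiagTiles(arr,
+    tiles_per_proc)` constructor documented in that module (illustrative only,
+    not code from this test module):
+
+        >>> import heat as ht
+        >>> from heat.core.tiling import SquareDiagTiles
+        >>> a = ht.random.randn(16, 16, split=0)  # row-distributed square matrix
+        >>> tiles = SquareDiagTiles(a, tiles_per_proc=2)  # square tiles along the diagonal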
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase diff --git a/doc/api/heat/core/tests/test_trigonometrics.md b/doc/api/heat/core/tests/test_trigonometrics.md new file mode 100644 index 0000000000..0040a4600d --- /dev/null +++ b/doc/api/heat/core/tests/test_trigonometrics.md @@ -0,0 +1,99 @@ +Module heat.core.tests.test_trigonometrics +========================================== + +Classes +------- + +`TestTrigonometrics(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_acosh(self)` + : + + `test_arccos(self)` + : + + `test_arcsin(self)` + : + + `test_arctan(self)` + : + + `test_arctan2(self)` + : + + `test_asinh(self)` + : + + `test_atanh(self)` + : + + `test_cos(self)` + : + + `test_cosh(self)` + : + + `test_deg2rad(self)` + : + + `test_degrees(self)` + : + + `test_rad2deg(self)` + : + + `test_radians(self)` + : + + `test_sin(self)` + : + + `test_sinh(self)` + : + + `test_tan(self)` + : + + `test_tanh(self)` + : diff --git a/doc/api/heat/core/tests/test_types.md b/doc/api/heat/core/tests/test_types.md new file mode 100644 index 0000000000..33117e1662 --- /dev/null +++ b/doc/api/heat/core/tests/test_types.md @@ -0,0 +1,181 @@ +Module heat.core.tests.test_types +================================= + +Classes +------- + +`TestTypeConversion(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. 
+ + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_can_cast(self)` + : + + `test_canonical_heat_type(self)` + : + + `test_finfo(self)` + : + + `test_heat_type_of(self)` + : + + `test_iinfo(self)` + : + + `test_issubdtype(self)` + : + + `test_result_type(self)` + : + + `test_type_promotions(self)` + : + +`TestTypes(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `assert_is_heat_type(self, heat_type)` + : + + `assert_is_instantiable_heat_type(self, heat_type, torch_type)` + : + + `assert_non_instantiable_heat_type(self, heat_type)` + : + + `test_bool(self)` + : + + `test_complex128(self)` + : + + `test_complex64(self)` + : + + `test_flexible(self)` + : + + `test_float32(self)` + : + + `test_float64(self)` + : + + `test_floating(self)` + : + + `test_generic(self)` + : + + `test_int16(self)` + : + + `test_int32(self)` + : + + `test_int64(self)` + : + + `test_int8(self)` + : + + `test_integer(self)` + : + + `test_iscomplex(self)` + : + + `test_isreal(self)` + : + + `test_number(self)` + : + + `test_signedinteger(self)` + : + + `test_uint8(self)` + : + + `test_unsignedinteger(self)` + : diff --git a/doc/api/heat/core/tests/test_vmap.md b/doc/api/heat/core/tests/test_vmap.md new file mode 100644 index 0000000000..24a643b327 --- /dev/null +++ b/doc/api/heat/core/tests/test_vmap.md @@ -0,0 +1,57 @@ +Module heat.core.tests.test_vmap +================================ + +Classes +------- + +`TestVmap(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_vmap(self)` + : + + `test_vmap_catch_errors(self)` + : + + `test_vmap_with_chunks(self)` + : diff --git a/doc/api/heat/core/tiling.md b/doc/api/heat/core/tiling.md new file mode 100644 index 0000000000..dad7e083b3 --- /dev/null +++ b/doc/api/heat/core/tiling.md @@ -0,0 +1,383 @@ +Module heat.core.tiling +======================= +Tiling functions/classes. 
With these classes, you can address blocks of data in a DNDarray
+
+Classes
+-------
+
+`SplitTiles(arr: DNDarray)`
+: Initialize tiles with the tile divisions equal to the theoretical split dimensions in
+ every dimension
+
+ Parameters
+ ----------
+ arr : DNDarray
+ Base array for which to create the tiles
+
+ Attributes
+ ----------
+ __DNDarray : DNDarray
+ the ``DNDarray`` associated with the tiles
+ __lshape_map : torch.Tensor
+ map of the shapes of the local torch tensors of arr
+ __tile_locations : torch.Tensor
+ locations of the tiles of ``arr``
+ __tile_ends_g : torch.Tensor
+ the global indices of the ends of the tiles
+ __tile_dims : torch.Tensor
+ the dimensions of all of the tiles
+
+ Examples
+ --------
+ >>> a = ht.zeros(
+ ...     (
+ ...         10,
+ ...         11,
+ ...     ),
+ ...     split=None,
+ ... )
+ >>> a.create_split_tiles()
+ >>> print(a.tiles.tile_ends_g)
+ [0/2] tensor([[ 4, 7, 10],
+ [0/2]         [ 4, 8, 11]], dtype=torch.int32)
+ [1/2] tensor([[ 4, 7, 10],
+ [1/2]         [ 4, 8, 11]], dtype=torch.int32)
+ [2/2] tensor([[ 4, 7, 10],
+ [2/2]         [ 4, 8, 11]], dtype=torch.int32)
+ >>> print(a.tiles.tile_locations)
+ [0/2] tensor([[0, 0, 0],
+ [0/2]         [0, 0, 0],
+ [0/2]         [0, 0, 0]], dtype=torch.int32)
+ [1/2] tensor([[1, 1, 1],
+ [1/2]         [1, 1, 1],
+ [1/2]         [1, 1, 1]], dtype=torch.int32)
+ [2/2] tensor([[2, 2, 2],
+ [2/2]         [2, 2, 2],
+ [2/2]         [2, 2, 2]], dtype=torch.int32)
+ >>> a = ht.zeros((10, 11), split=1)
+ >>> a.create_split_tiles()
+ >>> print(a.tiles.tile_ends_g)
+ [0/2] tensor([[ 4, 7, 10],
+ [0/2]         [ 4, 8, 11]], dtype=torch.int32)
+ [1/2] tensor([[ 4, 7, 10],
+ [1/2]         [ 4, 8, 11]], dtype=torch.int32)
+ [2/2] tensor([[ 4, 7, 10],
+ [2/2]         [ 4, 8, 11]], dtype=torch.int32)
+ >>> print(a.tiles.tile_locations)
+ [0/2] tensor([[0, 1, 2],
+ [0/2]         [0, 1, 2],
+ [0/2]         [0, 1, 2]], dtype=torch.int32)
+ [1/2] tensor([[0, 1, 2],
+ [1/2]         [0, 1, 2],
+ [1/2]         [0, 1, 2]], dtype=torch.int32)
+ [2/2] tensor([[0, 1, 2],
+ [2/2]         [0, 1, 2],
+ [2/2]         [0, 1, 2]], dtype=torch.int32)
+
+ ### Static methods
+
+ `set_tile_locations(split: int, tile_dims: torch.Tensor, arr: DNDarray) ‑> torch.Tensor`
+ : Create a `torch.Tensor` which contains the locations of the tiles of ``arr`` for the given split
+
+ Parameters
+ ----------
+ split : int
+ Target split dimension. Does not need to be equal to ``arr.split``
+ tile_dims : torch.Tensor
+ Tensor containing the sizes of each tile
+ arr : DNDarray
+ Array for which the tiles are being created
+
+ ### Instance variables
+
+ `arr: DNDarray`
+ : Get the DNDarray associated with the tiling object
+
+ `lshape_map: torch.Tensor`
+ : Return the shape of all of the local torch.Tensors
+
+ `tile_dimensions: torch.Tensor`
+ : Returns a ``torch.Tensor`` with the sizes of the tiles
+
+ `tile_ends_g: torch.Tensor`
+ : Returns a ``torch.Tensor`` with the global indices of the end points of the tiles in every dimension
+
+ Examples
+ --------
+ see :func:`SplitTiles`
+
+ `tile_locations: torch.Tensor`
+ : Get the ``torch.Tensor`` with the locations of the tiles for SplitTiles
+
+ Examples
+ --------
+ see :class:`~SplitTiles`
+
+ ### Methods
+
+ `get_subarray_params(self, from_axis: int, to_axis: int) ‑> List[Tuple[List[int], List[int], List[int]]]`
+ : Create subarray types of the local array along a new split axis. For use with Alltoallw.
+
+ Based on the work by Dalcin et al. (https://arxiv.org/abs/1804.09536)
+ Return type is a list of tuples, each tuple containing the shape of the local array, the shape of the subarray, and the start index of the subarray.
+
+ Parameters
+ ----------
+ from_axis : int
+ Current split axis of the global array.
+ to_axis : int
+ New split axis of the subarrays.
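+
+ Examples
+ --------
+ A hedged sketch of how the returned tuples could feed mpi4py's ``Create_subarray``
+ for an Alltoallw redistribution; the ``tiles`` object and the MPI datatype choice
+ are illustrative assumptions, not taken from the Heat source:
+ >>> from mpi4py import MPI
+ >>> params = tiles.get_subarray_params(from_axis=0, to_axis=1)
+ >>> subarray_types = [
+ ...     MPI.FLOAT.Create_subarray(shape, subshape, start).Commit()
+ ...     for shape, subshape, start in params
+ ... ]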
+
+ `get_tile_size(self, key: Union[int, slice, Tuple[Union[int, slice], ...]]) ‑> Tuple[int, ...]`
+ : Get the size of a tile or tiles indicated by the given key
+
+ Parameters
+ ----------
+ key : int or slice or tuple
+ Which tiles to get
+
+`SquareDiagTiles(arr: DNDarray, tiles_per_proc: int = 2)`
+: Generate the tile map and the other objects which may be useful.
+ The tiles generated here are based on square tiles along the diagonal. The size of these
+ tiles along the diagonal dictates the divisions across all processes. If
+ ``gshape[0]>>gshape[1]`` then there will be extra tiles generated below the diagonal.
+ If ``gshape[0]`` is close to ``gshape[1]``, then the last tile (as well as the other tiles which
+ correspond with said tile) will be extended to cover the whole array. However, extra tiles
+ are not generated above the diagonal in the case that ``gshape[0]<<gshape[1]``.
+
+ Parameters
+ ----------
+ arr : DNDarray
+ The array to be tiled
+ tiles_per_proc : int, optional
+ The number of divisions per process, default: 2
+
+ Attributes
+ ----------
+ __col_per_proc_list : List
+ List whose length equals the number of processes; each element holds the number of tile
+ columns on the process whose rank equals the index
+ __DNDarray : DNDarray
+ The ``DNDarray`` associated with the tiles
+ __lshape_map : torch.Tensor
+ ``unit -> [rank, row size, column size]``
+ Tensor filled with the shapes of the local tensors
+ __tile_map : torch.Tensor
+ ``units -> row, column, start index in each direction, process``
+ Tensor filled with the global indices of the generated tiles
+ __row_per_proc_list : List
+ List whose length equals the number of processes; each element holds the number of tile
+ rows on the process whose rank equals the index
+
+ Warnings
+ --------
+ The generation of these tiles may unbalance the original ``DNDarray``!
+
+ Notes
+ -----
+ This tiling scheme is intended for use with the :func:`~heat.core.linalg.qr.qr` function.
+
+ ### Instance variables
+
+ `arr: DNDarray`
+ : Returns the ``DNDarray`` on which the tiles are defined
+
+ `col_indices: List[int, ...]`
+ : Returns a list containing the indices of the tile columns
+
+ `last_diagonal_process: int`
+ : Returns the rank of the last process with diagonal elements
+
+ `lshape_map: torch.Tensor`
+ : Returns the map of the lshape tuples for the given ``DNDarray``.
+ Units are ``(rank, lshape)``, where ``lshape`` is the tuple of the local shape
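+
+ Examples
+ --------
+ A hedged sketch; the printed values depend on the number of processes and are
+ illustrative only (here: 2 processes, 6 rows of 10 columns per process):
+ >>> a = ht.zeros((12, 10), split=0)
+ >>> a_tiles = ht.tiling.SquareDiagTiles(a, tiles_per_proc=2)
+ >>> print(a_tiles.lshape_map)
+ [0/1] tensor([[ 0, 6, 10],
+ [0/1]         [ 1, 6, 10]])
+ [1/1] tensor([[ 0, 6, 10],
+ [1/1]         [ 1, 6, 10]])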
+
+ `row_indices: List[int, ...]`
+ : Returns a list containing the indices of the tile rows
+
+ `tile_columns: int`
+ : Returns the number of tile columns
+
+ `tile_columns_per_process: List[int, ...]`
+ : Returns a list containing the number of columns on all processes
+
+ `tile_map: torch.Tensor`
+ : Returns ``tile_map``, which describes the tiles;
+ units are ``(row, column, start index in each direction, process)``
+
+ Examples
+ --------
+ >>> a = ht.zeros((12, 10), split=0)
+ >>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2)
+ >>> print(a_tiles.tile_map)
+ [(0 & 1)/1] tensor([[[0, 0, 0],
+ [(0 & 1)/1]          [0, 3, 0],
+ [(0 & 1)/1]          [0, 6, 0],
+ [(0 & 1)/1]          [0, 8, 0]],
+ [(0 & 1)/1]
+ [(0 & 1)/1]         [[3, 0, 0],
+ [(0 & 1)/1]          [3, 3, 0],
+ [(0 & 1)/1]          [3, 6, 0],
+ [(0 & 1)/1]          [3, 8, 0]],
+ [(0 & 1)/1]
+ [(0 & 1)/1]         [[6, 0, 1],
+ [(0 & 1)/1]          [6, 3, 1],
+ [(0 & 1)/1]          [6, 6, 1],
+ [(0 & 1)/1]          [6, 8, 1]],
+ [(0 & 1)/1]
+ [(0 & 1)/1]         [[8, 0, 1],
+ [(0 & 1)/1]          [8, 3, 1],
+ [(0 & 1)/1]          [8, 6, 1],
+ [(0 & 1)/1]          [8, 8, 1]]], dtype=torch.int32)
+ >>> print(a_tiles.tile_map.shape)
+ [0/1] torch.Size([4, 4, 3])
+ [1/1] torch.Size([4, 4, 3])
+
+ `tile_rows: int`
+ : Returns the number of tile rows
+
+ `tile_rows_per_process: List[int, ...]`
+ : Returns a list containing the number of rows on all processes
+
+ ### Methods
+
+ `get_start_stop(self, key: Union[int, slice, Tuple[int, slice, ...]]) ‑> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]`
+ : Returns the start and stop indices, in the form ``(dim0 start, dim0 stop, dim1 start, dim1 stop)``,
+ of the tile/s corresponding to the given key. The key MUST use global indices.
+
+ Parameters
+ ----------
+ key : int or Tuple or List or slice
+ Indices to select the tile
+ STRIDES ARE NOT ALLOWED, MUST BE GLOBAL INDICES
+
+ Examples
+ --------
+ >>> a = ht.zeros((12, 10), split=0)
+ >>> a_tiles = ht.tiling.SquareDiagTiles(a, tiles_per_proc=2)  # type: tiling.SquareDiagTiles
+ >>> print(a_tiles.get_start_stop(key=(slice(0, 2), 2)))
+ [0/1] (tensor(0), tensor(6), tensor(6), tensor(8))
+ [1/1] (tensor(0), tensor(6), tensor(6), tensor(8))
+ >>> print(a_tiles.get_start_stop(key=(0, 2)))
+ [0/1] (tensor(0), tensor(3), tensor(6), tensor(8))
+ [1/1] (tensor(0), tensor(3), tensor(6), tensor(8))
+ >>> print(a_tiles.get_start_stop(key=2))
+ [0/1] (tensor(0), tensor(2), tensor(0), tensor(10))
+ [1/1] (tensor(0), tensor(2), tensor(0), tensor(10))
+ >>> print(a_tiles.get_start_stop(key=(3, 3)))
+ [0/1] (tensor(2), tensor(6), tensor(8), tensor(10))
+ [1/1] (tensor(2), tensor(6), tensor(8), tensor(10))
+
+ `local_get(self, key: Union[int, slice, Tuple[int, slice, ...]]) ‑> torch.Tensor`
+ : Returns the local tile/s corresponding to the given key.
+ Getitem routing using local indices: converts to global indices, then uses getitem.
+
+ Parameters
+ ----------
+ key : int, slice, tuple, list
+ Indices of the tile/s desired.
+ If the stop index of a slice is larger than the end, it will be adjusted to the maximum
+ allowed
+
+ Examples
+ --------
+ See local_set function.
+
+ `local_set(self, key: Union[int, slice, Tuple[int, slice, ...]], value: Union[int, float, torch.Tensor])`
+ : Setitem routing to set data to a local tile (using local indices)
+
+ Parameters
+ ----------
+ key : int or slice or Tuple[int,...]
+ Indices of the tile/s desired.
+ If the stop index of a slice is larger than the end, it will be adjusted to the maximum
+ allowed
+ value : torch.Tensor or int or float
+ Data to be written to the tile
+
+ Examples
+ --------
+ >>> a = ht.zeros((11, 10), split=0)
+ >>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2)  # type: tiling.SquareDiagTiles
+ >>> local = a_tiles.local_get(key=slice(None))
+ >>> a_tiles.local_set(
+ ...     key=slice(None), value=torch.arange(local.numel()).reshape(local.shape)
+ ... )
+ >>> print(a.larray)
+ [0/1] tensor([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
+ [0/1]         [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
+ [0/1]         [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
+ [0/1]         [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
+ [0/1]         [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.],
+ [0/1]         [50., 51., 52., 53., 54., 55., 56., 57., 58., 59.]])
+ [1/1] tensor([[ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
+ [1/1]         [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.],
+ [1/1]         [20., 21., 22., 23., 24., 25., 26., 27., 28., 29.],
+ [1/1]         [30., 31., 32., 33., 34., 35., 36., 37., 38., 39.],
+ [1/1]         [40., 41., 42., 43., 44., 45., 46., 47., 48., 49.]])
+ >>> a.lloc[:] = 0
+ >>> a_tiles.local_set(key=(0, 2), value=10)
+ [0/1] tensor([[ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
+ [0/1]         [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
+ [0/1]         [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
+ [0/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+ [1/1] tensor([[ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
+ [1/1]         [ 0., 0., 0., 0., 0., 0., 10., 10., 0., 0.],
+ [1/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
+ >>> a_tiles.local_set(key=(slice(None), 1), value=10)
+ [0/1] tensor([[ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [0/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.]])
+ [1/1] tensor([[ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.],
+ [1/1]         [ 0., 0., 0., 10., 10., 10., 0., 0., 0., 0.]])
+
+ `local_to_global(self, key: Union[int, slice, Tuple[int, slice, ...]], rank: int) ‑> Tuple[int, slice, ...]`
+ : Convert local indices to global indices
+
+ Parameters
+ ----------
+ key : int or slice or Tuple or List
+ Indices of the tile/s desired.
+ If the stop index of a slice is larger than the end, it will be adjusted to the maximum
+ allowed
+ rank : int
+ Process rank
+
+ Examples
+ --------
+ >>> a = ht.zeros((11, 10), split=0)
+ >>> a_tiles = tiling.SquareDiagTiles(a, tiles_per_proc=2)  # type: tiling.SquareDiagTiles
+ >>> rank = a.comm.rank
+ >>> print(a_tiles.local_to_global(key=(slice(None), 1), rank=rank))
+ [0/1] (slice(0, 2, None), 1)
+ [1/1] (slice(2, 4, None), 1)
+ >>> print(a_tiles.local_to_global(key=(0, 2), rank=0))
+ [0/1] (0, 2)
+ [1/1] (0, 2)
+ >>> print(a_tiles.local_to_global(key=(0, 2), rank=1))
+ [0/1] (2, 2)
+ [1/1] (2, 2)
+
+ `match_tiles(self, tiles_to_match: SquareDiagTiles) ‑> None`
+ : Match the tile sizes to those of another tile map
+
+ Parameters
+ ----------
+ tiles_to_match : SquareDiagTiles
+ The tiles which should be matched by the current tiling scheme
+
+ Notes
+ -----
+ This function overwrites most, if not all, of the elements of this class. Intended for use with the Q matrix,
+ to match the tiling of a/R. For this to work properly it is required that the 0th dim of both matrices is equal.
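+
+ Examples
+ --------
+ A hedged sketch of the intended QR-style use; names and shapes are illustrative
+ assumptions, not taken from the Heat source:
+ >>> a = ht.random.randn(16, 16, split=0)
+ >>> q = ht.zeros_like(a)
+ >>> a_tiles = ht.tiling.SquareDiagTiles(a, tiles_per_proc=2)
+ >>> q_tiles = ht.tiling.SquareDiagTiles(q, tiles_per_proc=2)
+ >>> q_tiles.match_tiles(a_tiles)  # q's tiling now mirrors a's; both 0th dims are equal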
diff --git a/doc/api/heat/core/trigonometrics.md b/doc/api/heat/core/trigonometrics.md new file mode 100644 index 0000000000..fa1abe2f5b --- /dev/null +++ b/doc/api/heat/core/trigonometrics.md @@ -0,0 +1,429 @@
+Module heat.core.trigonometrics
+===============================
+Trigonometric functions
+
+Functions
+---------
+
+`acos(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arccos, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arccos`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse cosine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arccos(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([3.1416, 1.5708, 0.5917], dtype=ht.float32, device=cpu:0, split=None)
+
+`acosh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic cosine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [1., +infinity] are returned as ``NaN``. If ``out`` was provided, ``acosh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic cosine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.acosh(ht.array([1.0, 10.0, 20.0]))
+ DNDarray([0.0000, 2.9932, 3.6883], dtype=ht.float32, device=cpu:0, split=None)
+
+`arccos(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arccos, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arccos`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse cosine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arccos(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([3.1416, 1.5708, 0.5917], dtype=ht.float32, device=cpu:0, split=None)
+
+`arccosh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic cosine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [1., +infinity] are returned as ``NaN``. If ``out`` was provided, ``acosh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic cosine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.acosh(ht.array([1.0, 10.0, 20.0]))
+ DNDarray([0.0000, 2.9932, 3.6883], dtype=ht.float32, device=cpu:0, split=None)
+
+`arcsin(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arcsin, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arcsin`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arcsin(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([-1.5708, -0.0000, 0.9791], dtype=ht.float32, device=cpu:0, split=None)
+
+`arcsinh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``asinh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.asinh(ht.array([-10.0, 0.0, 10.0]))
+ DNDarray([-2.9982, 0.0000, 2.9982], dtype=ht.float32, device=cpu:0, split=None)
+
+`arctan(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arctan, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``arctan`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse tangent.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arctan(ht.arange(-6, 7, 2))
+ DNDarray([-1.4056, -1.3258, -1.1071, 0.0000, 1.1071, 1.3258, 1.4056], dtype=ht.float32, device=cpu:0, split=None)
+
+`arctan2(x1: DNDarray, x2: DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
+ Returns a new ``DNDarray`` with the signed angles in radians between vector (``x2``,``x1``) and vector (1,0)
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ y-coordinates
+ x2 : DNDarray
+ x-coordinates. If ``x1.shape!=x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output).
+
+ Examples
+ --------
+ >>> x = ht.array([-1, +1, +1, -1])
+ >>> y = ht.array([-1, -1, +1, +1])
+ >>> ht.arctan2(y, x) * 180 / ht.pi
+ DNDarray([-135.0000, -45.0000, 45.0000, 135.0000], dtype=ht.float64, device=cpu:0, split=None)
+
+`arctanh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic tangent, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``atanh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic tangent.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.atanh(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([ -inf, -0.0000, 1.1881], dtype=ht.float32, device=cpu:0, split=None)
+
+`asin(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arcsin, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``arcsin`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arcsin(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([-1.5708, -0.0000, 0.9791], dtype=ht.float32, device=cpu:0, split=None)
+
+`asinh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``asinh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.asinh(ht.array([-10.0, 0.0, 10.0]))
+ DNDarray([-2.9982, 0.0000, 2.9982], dtype=ht.float32, device=cpu:0, split=None)
+
+`atan(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric arctan, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``arctan`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse tangent.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.arctan(ht.arange(-6, 7, 2))
+ DNDarray([-1.4056, -1.3258, -1.1071, 0.0000, 1.1071, 1.3258, 1.4056], dtype=ht.float32, device=cpu:0, split=None)
+
+`atan2(x1: DNDarray, x2: DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
+ Returns a new ``DNDarray`` with the signed angles in radians between vector (``x2``,``x1``) and vector (1,0)
+
+ Parameters
+ ----------
+ x1 : DNDarray
+ y-coordinates
+ x2 : DNDarray
+ x-coordinates. If ``x1.shape!=x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output).
+
+ Examples
+ --------
+ >>> x = ht.array([-1, +1, +1, -1])
+ >>> y = ht.array([-1, -1, +1, +1])
+ >>> ht.arctan2(y, x) * 180 / ht.pi
+ DNDarray([-135.0000, -45.0000, 45.0000, 135.0000], dtype=ht.float64, device=cpu:0, split=None)
+
+`atanh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse hyperbolic tangent, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Input elements outside [-1., 1.] are returned as ``NaN``. If ``out`` was provided, ``atanh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The array for which to compute the inverse hyperbolic tangent.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.atanh(ht.array([-1.0, -0.0, 0.83]))
+ DNDarray([ -inf, -0.0000, 1.1881], dtype=ht.float32, device=cpu:0, split=None)
+
+`cos(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Return the trigonometric cosine, element-wise.
+
+ Parameters
+ ----------
+ x : ht.DNDarray
+ The value for which to compute the trigonometric cosine.
+ out : ht.DNDarray or None, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to None, a fresh tensor is allocated.
+
+ Examples
+ --------
+ >>> ht.cos(ht.arange(-6, 7, 2))
+ DNDarray([ 0.9602, -0.6536, -0.4161, 1.0000, -0.4161, -0.6536, 0.9602], dtype=ht.float32, device=cpu:0, split=None)
+
+`cosh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the hyperbolic cosine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``cosh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the hyperbolic cosine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.cosh(ht.arange(-6, 7, 2))
+ DNDarray([201.7156, 27.3082, 3.7622, 1.0000, 3.7622, 27.3082, 201.7156], dtype=ht.float32, device=cpu:0, split=None)
+
+`deg2rad(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Convert angles from degrees to radians.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the angles in radians.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.deg2rad(ht.array([0.0, 20.0, 45.0, 78.0, 94.0, 120.0, 180.0, 270.0, 311.0]))
+ DNDarray([0.0000, 0.3491, 0.7854, 1.3614, 1.6406, 2.0944, 3.1416, 4.7124, 5.4280], dtype=ht.float32, device=cpu:0, split=None)
+
+`degrees(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Convert angles from radians to degrees.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the angles in degrees.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.degrees(ht.array([0.0, 0.2, 0.6, 0.9, 1.2, 2.7, 3.14]))
+ DNDarray([ 0.0000, 11.4592, 34.3775, 51.5662, 68.7549, 154.6986, 179.9088], dtype=ht.float32, device=cpu:0, split=None)
+
+`rad2deg(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Convert angles from radians to degrees.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the angles in degrees.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.rad2deg(ht.array([0.0, 0.2, 0.6, 0.9, 1.2, 2.7, 3.14]))
+ DNDarray([ 0.0000, 11.4592, 34.3775, 51.5662, 68.7549, 154.6986, 179.9088], dtype=ht.float32, device=cpu:0, split=None)
+
+`radians(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Convert angles from degrees to radians.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the angles in radians.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.radians(ht.array([0.0, 20.0, 45.0, 78.0, 94.0, 120.0, 180.0, 270.0, 311.0]))
+ DNDarray([0.0000, 0.3491, 0.7854, 1.3614, 1.6406, 2.0944, 3.1416, 4.7124, 5.4280], dtype=ht.float32, device=cpu:0, split=None)
+
+`sin(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the trigonometric sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``sin`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the trigonometric sine.
+ out : DNDarray, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sin(ht.arange(-6, 7, 2))
+ DNDarray([ 0.2794, 0.7568, -0.9093, 0.0000, 0.9093, -0.7568, -0.2794], dtype=ht.float32, device=cpu:0, split=None)
+
+`sinh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the hyperbolic sine, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``sinh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the hyperbolic sine.
+ out : DNDarray or None, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.sinh(ht.arange(-6, 7, 2))
+ DNDarray([-201.7132, -27.2899, -3.6269, 0.0000, 3.6269, 27.2899, 201.7132], dtype=ht.float32, device=cpu:0, split=None)
+
+`tan(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute tangent element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ Equivalent to :func:`sin`/:func:`cos` element-wise. If ``out`` was provided, ``tan`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the trigonometric tangent.
+ out : DNDarray or None, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.tan(ht.arange(-6, 7, 2))
+ DNDarray([ 0.2910, -1.1578, 2.1850, 0.0000, -2.1850, 1.1578, -0.2910], dtype=ht.float32, device=cpu:0, split=None)
+
+`tanh(x: DNDarray, out: Optional[DNDarray] = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the hyperbolic tangent, element-wise.
+ Result is a ``DNDarray`` of the same shape as ``x``.
+ If ``out`` was provided, ``tanh`` is a reference to it.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The value for which to compute the hyperbolic tangent.
+ out : DNDarray or None, optional
+ A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
+ or set to ``None``, a fresh array is allocated.
+
+ Examples
+ --------
+ >>> ht.tanh(ht.arange(-6, 7, 2))
+ DNDarray([-1.0000, -0.9993, -0.9640, 0.0000, 0.9640, 0.9993, 1.0000], dtype=ht.float32, device=cpu:0, split=None)
diff --git a/doc/api/heat/core/types.md b/doc/api/heat/core/types.md new file mode 100644 index 0000000000..9fe08e3c0e --- /dev/null +++ b/doc/api/heat/core/types.md @@ -0,0 +1,644 @@
+Module heat.core.types
+======================
+Implementations of the different dtypes supported in Heat and the functions to inspect,
+promote and convert between them.
+
+Functions
+---------
+
+`can_cast(from_: Union[str, Type[datatype], Any], to: Union[str, Type[datatype], Any], casting: str = 'intuitive') ‑> heat.core.types.bool`
+: Returns True if a cast between data types can occur according to the casting rule. If ``from_`` is a scalar or array
+ scalar, also returns True if the scalar value can be cast without overflow or truncation to an integer.
+
+ Parameters
+ ----------
+ from_ : Union[str, Type[datatype], Any]
+ Scalar, data type or type specifier to cast from.
+ to : Union[str, Type[datatype], Any]
+ Target type to cast to.
+ casting : str, optional
+ Options: {"no", "safe", "same_kind", "unsafe", "intuitive"}
+ Controls the way the cast is evaluated
+ * "no": the types may not be cast, i.e. they need to be identical
+ * "safe": allows only casts that can preserve values with complete precision
+ * "same_kind": allows safe casts and down-casts within the same type family, e.g. int32 -> int8
+ * "unsafe": any conversion can be performed, i.e. this casting is always possible
+ * "intuitive": allows all safe casts plus casting from int32 to float32
+
+ Raises
+ ------
+ TypeError
+ If the types are not understood or casting is not a string
+ ValueError
+ If the casting rule is not understood
+
+ Examples
+ --------
+ >>> ht.can_cast(ht.int32, ht.int64)
+ True
+ >>> ht.can_cast(ht.int64, ht.float64)
+ True
+ >>> ht.can_cast(ht.int16, ht.int8)
+ False
+ >>> ht.can_cast(1, ht.float64)
+ True
+ >>> ht.can_cast(2.0e200, "u1")
+ False
+ >>> ht.can_cast("i8", "i4", "no")
+ False
+ >>> ht.can_cast("i8", "i4", "safe")
+ False
+ >>> ht.can_cast("i8", "i4", "same_kind")
+ True
+ >>> ht.can_cast("i8", "i4", "unsafe")
+ True
+
+`canonical_heat_type(a_type: Union[str, Type[datatype], Any]) ‑> Type[heat.core.types.datatype]`
+: Canonicalize the builtin Python type, type string or HeAT type into a canonical HeAT type.
+
+ Parameters
+ ----------
+ a_type : type, str, datatype
+ A description for the type. It may be a Python builtin type, a string, or a HeAT type already.
+ In the first two cases the corresponding mapped type is looked up; in the latter case the type is simply returned.
+
+ Raises
+ ------
+ TypeError
+ If the type cannot be converted.
+
+`heat_type_is_complexfloating(ht_dtype: Type[datatype]) ‑> heat.core.types.bool`
+: Check if Heat type is a complex floating point number, i.e. complex64 or complex128
+
+ Parameters
+ ----------
+ ht_dtype: ht.dtype
+ HeAT type to check
+
+ Returns
+ -------
+ out: bool
+ True if ht_dtype is a complex float, False otherwise
+
+`heat_type_is_exact(ht_dtype: Type[datatype]) ‑> heat.core.types.bool`
+: Check if HeAT type is an exact type, i.e. an integer type. True if ht_dtype is an integer, False otherwise
+
+ Parameters
+ ----------
+ ht_dtype: Type[datatype]
+ HeAT type to check
+
+`heat_type_is_inexact(ht_dtype: Type[datatype]) ‑> heat.core.types.bool`
+: Check if HeAT type is an inexact type, i.e. a floating point type. True if ht_dtype is a float, False otherwise
+
+ Parameters
+ ----------
+ ht_dtype: Type[datatype]
+ HeAT type to check
+
+`heat_type_is_realfloating(ht_dtype: Type[datatype]) ‑> heat.core.types.bool`
+: Check if Heat type is a real floating point number, i.e. float32 or float64
+
+ Parameters
+ ----------
+ ht_dtype: Type[datatype]
+ Heat type to check
+
+ Returns
+ -------
+ out: bool
+ True if ht_dtype is a real float, False otherwise
+
+`heat_type_of(obj: Union[str, Type[datatype], Any, Iterable[str, Type[datatype], Any]]) ‑> Type[datatype]`
+: Returns the corresponding HeAT data type of a given object, i.e. scalar, array or iterable. Attempts to determine the
+ canonical data type based on the following priority list:
+ 1. dtype property
+ 2. type(obj)
+ 3. type(obj[0])
+
+ Parameters
+ ----------
+ obj : scalar or DNDarray or iterable
+ The object for which to infer the type.
+
+ Raises
+ ------
+ TypeError
+ If the object's type cannot be inferred.
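+
+ Examples
+ --------
+ A hedged sketch of the priority order; the resolved types in the comments are
+ assumptions based on Heat's default type mapping, not taken from the source:
+ >>> t1 = ht.heat_type_of(ht.zeros((2, 2)))  # 1. dtype property -> ht.float32 (assumed)
+ >>> t2 = ht.heat_type_of(1.0)               # 2. type(obj) -> ht.float32 (assumed)
+ >>> t3 = ht.heat_type_of([1, 2, 3])         # 3. type(obj[0]) -> ht.int32 (assumed)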
+
+`iscomplex(x: dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Test element-wise if input is complex.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The input DNDarray
+
+ Examples
+ --------
+ >>> ht.iscomplex(ht.array([1 + 1j, 1]))
+ DNDarray([ True, False], dtype=ht.bool, device=cpu:0, split=None)
+
+`isreal(x: dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+: Test element-wise if input is real-valued.
+
+ Parameters
+ ----------
+ x : DNDarray
+ The input DNDarray
+
+ Examples
+ --------
+ >>> ht.isreal(ht.array([1 + 1j, 1]))
+ DNDarray([False,  True], dtype=ht.bool, device=cpu:0, split=None)
+
+`issubdtype(arg1: Union[str, Type[datatype], Any], arg2: Union[str, Type[datatype], Any]) ‑> bool`
+: Returns True if the first argument is a typecode lower/equal in the type hierarchy.
+
+ Parameters
+ ----------
+ arg1 : type, str, ht.dtype
+ A description representing the type. It may be a Python builtin type, a string or a HeAT type already.
+ arg2 : type, str, ht.dtype
+ A description representing the type. It may be a Python builtin type, a string or a HeAT type already.
+
+ Examples
+ --------
+ >>> ints = ht.array([1, 2, 3], dtype=ht.int32)
+ >>> ht.issubdtype(ints.dtype, ht.integer)
+ True
+ >>> ht.issubdtype(ints.dtype, ht.floating)
+ False
+ >>> ht.issubdtype(ht.float64, ht.float32)
+ False
+ >>> ht.issubdtype("i", ht.integer)
+ True
+
+`promote_types(type1: Union[str, Type[datatype], Any], type2: Union[str, Type[datatype], Any]) ‑> Type[heat.core.types.datatype]`
+: Returns the data type with the smallest size and smallest scalar kind to which both ``type1`` and ``type2`` may be
+ intuitively cast, where intuitive casting refers to maintaining the same bit length if possible. This function
+ is symmetric.
+
+ Parameters
+ ----------
+ type1 : type or str or datatype
+ type of first operand
+ type2 : type or str or datatype
+ type of second operand
+
+ Examples
+ --------
+ >>> ht.promote_types(ht.uint8, ht.uint8)
+ <class 'heat.core.types.uint8'>
+ >>> ht.promote_types(ht.int32, ht.float32)
+ <class 'heat.core.types.float32'>
+ >>> ht.promote_types(ht.int8, ht.uint8)
+ <class 'heat.core.types.int16'>
+ >>> ht.promote_types("i8", "f4")
+ <class 'heat.core.types.float64'>
+
+`result_type(*arrays_and_types: Tuple[Union[dndarray.DNDarray, Type[datatype], Any]]) ‑> Type[heat.core.types.datatype]`
+: Returns the data type that results from the type promotion rules performed in an arithmetic operation.
+
+ Parameters
+ ----------
+ arrays_and_types: List of arrays and types
+ Input arrays, types or numbers of the operation.
+
+ Examples
+ --------
+ >>> ht.result_type(ht.array([1], dtype=ht.int32), 1)
+ ht.int32
+ >>> ht.result_type(ht.float32, ht.array(1, dtype=ht.int8))
+ ht.float32
+ >>> ht.result_type("i8", "f4")
+ ht.float64
+
+Classes
+-------
+
+`bool(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The boolean datatype in Heat
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `char() ‑> str`
+ : Datatype short-hand name
+
+ `torch_type() ‑> torch.dtype`
+ : Torch Datatype
+
+`bool_(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The boolean datatype in Heat
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.datatype
+
+`complex128(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The complex 128 bit datatype. Both real and imaginary are 64 bit floating point
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.complex
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `char()`
+ : Datatype short-hand name
+
+ `torch_type()`
+ : Torch Datatype
+
+`cdouble(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The complex 128 bit datatype.
Both real and imaginary are 64 bit floating point + + ### Ancestors (in MRO) + + * heat.core.types.complex + * heat.core.types.number + * heat.core.types.datatype + +`complex64(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: The complex 64 bit datatype. Both real and imaginary are 32 bit floating point + + ### Ancestors (in MRO) + + * heat.core.types.complex + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char()` + : Datatype short-hand name + + `torch_type()` + : Torch Datatype + +`cfloat(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: The complex 64 bit datatype. Both real and imaginary are 32 bit floating point + + ### Ancestors (in MRO) + + * heat.core.types.complex + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char()` + : Datatype short-hand name + + `torch_type()` + : Torch Datatype + +`csingle(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: The complex 64 bit datatype. Both real and imaginary are 32 bit floating point + + ### Ancestors (in MRO) + + * heat.core.types.complex + * heat.core.types.number + * heat.core.types.datatype + +`datatype(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: Defines the basic heat data types in the hierarchy as shown below. Design inspired by the Python package numpy. + As part of the type-hierarchy: xx -- is bit-width + + - generic + + - bool, bool_ (kind=?) + + - number + + - integer + + - signedinteger (intxx)(kind=b, i) + + - int8, byte + + - int16, short + + - int32, int + + - int64, long + - unsignedinteger (uintxx)(kind=B, u) + + - uint8, ubyte + - floating (floatxx) (kind=f) + + - float32, float, float_ + + - float64, double (double) + - flexible (currently unused, placeholder for characters) + + ### Descendants + + * heat.core.types.bool + * heat.core.types.flexible + * heat.core.types.number + + ### Static methods + + `char() ‑> NotImplemented` + : Datatype short-hand name + + `torch_type() ‑> NotImplemented` + : Torch Datatype + +`flexible(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: The general flexible datatype. 
Currently unused, placeholder for characters
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.datatype
+
+`float32(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The 32 bit floating point datatype
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.floating
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `char() ‑> str`
+ : Datatype short-hand name
+
+ `torch_type() ‑> torch.dtype`
+ : Torch Datatype
+
+`float(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The 32 bit floating point datatype
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.floating
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `char() ‑> str`
+ : Datatype short-hand name
+
+ `torch_type() ‑> torch.dtype`
+ : Torch Datatype
+
+`float_(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The 32 bit floating point datatype
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.floating
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+`float64(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The 64 bit floating point datatype
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.floating
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `char() ‑> str`
+ : Datatype short-hand name
+
+ `torch_type() ‑> torch.dtype`
+ : Torch Datatype
+
+`double(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The 64 bit floating point datatype
+
+ ### Ancestors (in MRO)
+
+ * heat.core.types.floating
+ * heat.core.types.number
+ * heat.core.types.datatype
+
+ ### Static methods
+
+ `torch_type() ‑> torch.dtype`
+ : Torch Datatype
+
+`floating(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The general floating point datatype class.
+ + ### Ancestors (in MRO) + + * heat.core.types.number + * heat.core.types.datatype + + ### Descendants + + * heat.core.types.float32 + * heat.core.types.float64 + +`int16(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 16 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char() ‑> str` + : Datatype short-hand name + + `torch_type() ‑> torch.dtype` + : Torch Datatype + +`short(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 16 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + +`int32(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 32 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char() ‑> str` + : Datatype short-hand name + + `torch_type() ‑> torch.dtype` + : Torch Datatype + +`int(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 32 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + +`int64(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 64 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char() ‑> str` + : Datatype short-hand name + + `torch_type() ‑> torch.dtype` + : Torch Datatype + +`long(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 64 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + +`int8(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 8 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + + ### Static methods + + `char() ‑> str` + : Datatype short-hand name + + `torch_type() ‑> torch.dtype` + : Torch Datatype + +`byte(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: 8 bit signed integer datatype + + ### Ancestors (in MRO) + + * heat.core.types.signedinteger + * heat.core.types.integer + * heat.core.types.number + * heat.core.types.datatype + +`integer(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)` +: The general integer datatype. Specific integer classes inherit from this. 
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.number
+    * heat.core.types.datatype
+
+    ### Descendants
+
+    * heat.core.types.signedinteger
+    * heat.core.types.unsignedinteger
+
+`number(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The general number datatype. Integer and Float classes will inherit from this.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.datatype
+
+    ### Descendants
+
+    * heat.core.types.complex
+    * heat.core.types.floating
+    * heat.core.types.integer
+
+`signedinteger(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The general signed integer datatype.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.integer
+    * heat.core.types.number
+    * heat.core.types.datatype
+
+    ### Descendants
+
+    * heat.core.types.int16
+    * heat.core.types.int32
+    * heat.core.types.int64
+    * heat.core.types.int8
+
+`uint8(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: 8 bit unsigned integer datatype
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.unsignedinteger
+    * heat.core.types.integer
+    * heat.core.types.number
+    * heat.core.types.datatype
+
+    ### Static methods
+
+    `char() ‑> str`
+    : Datatype short-hand name
+
+    `torch_type() ‑> torch.dtype`
+    : Torch Datatype
+
+`ubyte(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: 8 bit unsigned integer datatype
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.unsignedinteger
+    * heat.core.types.integer
+    * heat.core.types.number
+    * heat.core.types.datatype
+
+`unsignedinteger(*value, device: Optional[Union[str, devices.Device]] = None, comm: Optional[communication.Communication] = None)`
+: The general unsigned integer datatype
+
+    ### Ancestors (in MRO)
+
+    * heat.core.types.integer
+    * heat.core.types.number
+    * heat.core.types.datatype
+
+    ### Descendants
+
+    * heat.core.types.uint8
diff --git a/doc/api/heat/core/version.md b/doc/api/heat/core/version.md
new file mode 100644
index 0000000000..f13bc4bfeb
--- /dev/null
+++ b/doc/api/heat/core/version.md
@@ -0,0 +1,18 @@
+Module heat.core.version
+========================
+Heat's version information.
+
+Variables
+---------
+
+`extension: str`
+: Indicates special builds, e.g. for specific hardware.
+
+`major: int`
+: Indicates Heat's main version.
+
+`micro: int`
+: Indicates revisions for bugfixes.
+
+`minor: int`
+: Indicates feature extensions.
diff --git a/doc/api/heat/core/vmap.md b/doc/api/heat/core/vmap.md
new file mode 100644
index 0000000000..c514936d74
--- /dev/null
+++ b/doc/api/heat/core/vmap.md
@@ -0,0 +1,42 @@
+Module heat.core.vmap
+=====================
+Vmap module.
+This implements functionality similar to PyTorch's vmap function.
+Requires PyTorch 2.0.0 or higher.
+
+Functions
+---------
+
+`vmap(func: Callable[[Tuple[torch.Tensor]], Tuple[torch.Tensor]], out_dims: Tuple[int] | int = 0, randomness: str = 'error', *, chunk_size: int = None) ‑> Callable[[Tuple[heat.core.dndarray.DNDarray]], Tuple[heat.core.dndarray.DNDarray]]`
+: Apply a function to a DNDarray in a vectorized way.
+    `heat.vmap` returns a callable that can be applied to DNDarrays.
+    Vectorization will automatically take place along the split axis/axes of the DNDarray(s);
+    therefore, unlike in PyTorch, there is no argument `in_dims`.
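+
+    For instance, a minimal sketch (the function and shapes are illustrative only; `ht` denotes Heat imported as `import heat as ht`):
+
+    >>> import heat as ht
+    >>> f = ht.vmap(lambda t: t.sum(-1))  # sums over the last axis of each sample
+    >>> x = ht.rand(4, 3, split=0)
+    >>> y = f(x)  # DNDarray of shape (4,), still split along axis 0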
+
+    What we here refer to as the "split axis/dimension" in Heat terminology is often called the "batch axis/dimension" in PyTorch terminology.
+
+    Parameters
+    ----------
+    func : callable
+        The function to apply in a vmapped way to the DNDarray(s). It must take PyTorch tensor(s) as positional arguments.
+        Additional parameters, not to be vmapped over, can be passed as keyword arguments. The callable returned
+        by `heat.vmap` will also accept these keyword arguments.
+    out_dims : int or tuple of int, optional
+        The dimensions of the output(s) that are mapped over; identical to the split dimension(s) of the output(s).
+        Default is 0.
+    randomness : {'error', 'different', 'same'}, optional
+        Determines how to handle randomness in the function to be vmapped. This argument is directly passed to the underlying PyTorch vmaps;
+        see the corresponding PyTorch documentation for more information and the note below.
+        If 'error' (default), an error is raised if the function to be mapped contains randomness.
+    chunk_size : int, optional
+        The size of the chunks to use for the process-local computation.
+        If None (default), a single PyTorch vmap is applied over the process-local chunks of data. If not None, the process-local PyTorch vmap
+        is computed `chunk_size` samples at a time. Note that `chunk_size=1` is equivalent to computing the process-local PyTorch vmaps with a for-loop.
+        If you run into memory issues computing the vmap, please try a non-None chunk_size.
+
+    Note
+    ----
+    This function is a wrapper around PyTorch's `torch.vmap` function. In essence, a PyTorch vmap is applied to the input function `func` on each MPI process separately.
+    This process-local PyTorch-vmapped function is then applied to the process-local chunks of the input DNDarray(s).
+
+    Please note that the options 'same' and 'different' for `randomness` will result in behaviour different from that of PyTorch, as (at least currently)
+    no action is taken to synchronize randomness across the MPI processes.
diff --git a/doc/api/heat/datasets/index.md b/doc/api/heat/datasets/index.md
new file mode 100644
index 0000000000..38414c0d2e
--- /dev/null
+++ b/doc/api/heat/datasets/index.md
@@ -0,0 +1,3 @@
+Module heat.datasets
+====================
+Make heat.datasets available as a module.
diff --git a/doc/api/heat/decomposition/dmd.md b/doc/api/heat/decomposition/dmd.md
new file mode 100644
index 0000000000..f372d60a37
--- /dev/null
+++ b/doc/api/heat/decomposition/dmd.md
@@ -0,0 +1,174 @@
+Module heat.decomposition.dmd
+=============================
+Module implementing the Dynamic Mode Decomposition (DMD) algorithm.
+
+Classes
+-------
+
+`DMD(svd_solver: str | None = 'full', svd_rank: int | None = None, svd_tol: float | None = None)`
+: Dynamic Mode Decomposition (DMD), plain vanilla version with SVD-based implementation.
+
+    The time series for which the DMD shall be computed must be provided as a 2-D DNDarray of shape (n_features, n_timesteps).
+    Please note that this deviates from Heat's convention that data sets are handled as 2-D arrays with the feature axis being the second axis.
+
+    Parameters
+    ----------
+    svd_solver : str, optional
+        Specifies the algorithm to use for the singular value decomposition (SVD). Options are 'full' (default), 'hierarchical', and 'randomized'.
+    svd_rank : int, optional
+        The rank to which SVD shall be truncated. For `'full'` SVD, `svd_rank = None` together with `svd_tol = None` (default) will result in no truncation.
+        For `svd_solver='full'`, at most one of `svd_rank` or `svd_tol` may be specified.
+        For `svd_solver='hierarchical'`, either `svd_rank` (rank to truncate to) or `svd_tol` (tolerance to truncate to) must be specified.
+        For `svd_solver='randomized'`, `svd_rank` must be specified and determines the rank to truncate to.
+    svd_tol : float, optional
+        The tolerance to which SVD shall be truncated. For `'full'` SVD, `svd_tol = None` together with `svd_rank = None` (default) will result in no truncation.
+        For `svd_solver='hierarchical'`, either `svd_tol` (accuracy to truncate to) or `svd_rank` (rank to truncate to) must be specified.
+        For `svd_solver='randomized'`, `svd_tol` is meaningless and must be None.
+
+    Attributes
+    ----------
+    svd_solver : str
+        The algorithm used for the singular value decomposition (SVD).
+    svd_rank : int
+        The rank to which SVD shall be truncated.
+    svd_tol : float
+        The tolerance to which SVD shall be truncated.
+    rom_basis_ : DNDarray
+        The reduced order model basis.
+    rom_transfer_matrix_ : DNDarray
+        The reduced order model transfer matrix.
+    rom_eigenvalues_ : DNDarray
+        The reduced order model eigenvalues.
+    rom_eigenmodes_ : DNDarray
+        The reduced order model eigenmodes ("DMD modes").
+
+    Notes
+    -----
+    We follow the "exact DMD" method as described in [1], Sect. 2.2.
+
+    References
+    ----------
+    [1] J. L. Proctor, S. L. Brunton, and J. N. Kutz, "Dynamic Mode Decomposition with Control," SIAM Journal on Applied Dynamical Systems, vol. 15, no. 1, pp. 142-161, 2016.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.RegressionMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray) ‑> Self`
+    : Fits the DMD model to the given data.
+
+        Parameters
+        ----------
+        X : DNDarray
+            The time series data to fit the DMD model to. Must be of shape (n_features, n_timesteps).
+
+    `predict(self, X: heat.core.dndarray.DNDarray, steps: int | List[int]) ‑> heat.core.dndarray.DNDarray`
+    : Predicts future states given the current state(s) and returns them all as an array of shape (n_steps, n_features).
+
+        This function avoids a time-stepping loop (i.e., repeated calls to 'predict_next') and computes the future states in one go.
+        To do so, the number of future times to predict must be of moderate size, as an array of shape (n_steps, self.n_modes_, self.n_modes_) must fit into memory.
+        Moreover, it must be ensured that:
+
+        - the array of initial states is either not split or split along the batch axis (axis 1), and the feature axis is small (i.e., self.rom_basis_ is not split)
+
+        Parameters
+        ----------
+        X : DNDarray
+            The current state(s) for the prediction. Must have the same number of features as the training data, but can be batched for multiple current states,
+            i.e., X can be of shape (n_features,) or (n_current_states, n_features).
+        steps : int or List[int]
+            if int: predictions at time steps 0, 1, ..., steps-1 are computed
+            if List[int]: predictions at the time steps given in the list are computed
+
+    `predict_next(self, X: heat.core.dndarray.DNDarray, n_steps: int = 1) ‑> heat.core.dndarray.DNDarray`
+    : Predicts and returns the state(s) after n_steps time steps for a given current state(s).
+
+        Parameters
+        ----------
+        X : DNDarray
+            The current state(s) for the prediction. Must have the same number of features as the training data, but can be batched for multiple current states,
+            i.e., X can be of shape (n_features,) or (n_features, n_current_states).
+            The output will have the same shape as the input.
+        n_steps : int, optional
+            The number of steps to predict into the future. Default is 1, i.e., the next time step is predicted.
+
+`DMDc(svd_solver: str | None = 'full', svd_rank: int | None = None, svd_tol: float | None = None)`
+: Dynamic Mode Decomposition with Control (DMDc), plain vanilla version with SVD-based implementation.
+
+    The time series of states and controls must be provided as 2-D DNDarrays of shapes (n_state_features, n_timesteps) and (n_control_features, n_timesteps), respectively.
+    Please note that this deviates from Heat's convention that data sets are handled as 2-D arrays with the feature axis being the second axis.
+
+    Parameters
+    ----------
+    svd_solver : str, optional
+        Specifies the algorithm to use for the singular value decomposition (SVD). Options are 'full' (default), 'hierarchical', and 'randomized'.
+    svd_rank : int, optional
+        The rank to which SVD of the states shall be truncated. For `'full'` SVD, `svd_rank = None` together with `svd_tol = None` (default) will result in no truncation.
+        For `svd_solver='full'`, at most one of `svd_rank` or `svd_tol` may be specified.
+        For `svd_solver='hierarchical'`, either `svd_rank` (rank to truncate to) or `svd_tol` (tolerance to truncate to) must be specified.
+        For `svd_solver='randomized'`, `svd_rank` must be specified and determines the rank to truncate to.
+    svd_tol : float, optional
+        The tolerance to which SVD of the states shall be truncated. For `'full'` SVD, `svd_tol = None` together with `svd_rank = None` (default) will result in no truncation.
+        For `svd_solver='hierarchical'`, either `svd_tol` (accuracy to truncate to) or `svd_rank` (rank to truncate to) must be specified.
+        For `svd_solver='randomized'`, `svd_tol` is meaningless and must be None.
+
+    Attributes
+    ----------
+    svd_solver : str
+        The algorithm used for the singular value decomposition (SVD).
+    svd_rank : int
+        The rank to which SVD shall be truncated.
+    svd_tol : float
+        The tolerance to which SVD shall be truncated.
+    rom_basis_ : DNDarray
+        The reduced order model basis.
+    rom_transfer_matrix_ : DNDarray
+        The reduced order model transfer matrix.
+    rom_control_matrix_ : DNDarray
+        The reduced order model control matrix.
+    rom_eigenvalues_ : DNDarray
+        The reduced order model eigenvalues.
+    rom_eigenmodes_ : DNDarray
+        The reduced order model eigenmodes ("DMD modes").
+
+    Notes
+    -----
+    We follow the approach described in [1], Sects. 3.3 and 3.4.
+    In the case that svd_rank is prescribed, the rank of the SVD of the full system matrix is set to svd_rank + n_control_features; cf. https://github.com/dynamicslab/pykoopman
+    for the same approach.
+
+    References
+    ----------
+    [1] J. L. Proctor, S. L. Brunton, and J. N. Kutz, "Dynamic Mode Decomposition with Control," SIAM Journal on Applied Dynamical Systems, vol. 15, no. 1, pp. 142-161, 2016.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.RegressionMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray, C: heat.core.dndarray.DNDarray) ‑> Self`
+    : Fits the DMDc model to the given data.
+
+        Parameters
+        ----------
+        X : DNDarray
+            The time series data of states to fit the DMD model to. Must be of shape (n_state_features, n_timesteps).
+        C : DNDarray
+            The time series of control inputs to fit the DMD model to. Must be of shape (n_control_features, n_timesteps).
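+
+        Examples
+        --------
+        A minimal sketch with synthetic data (shapes and parameters are illustrative only):
+
+        >>> import heat as ht
+        >>> X = ht.rand(5, 10, split=1)  # 5 state features, 10 time steps
+        >>> C = ht.rand(2, 10, split=1)  # 2 control features, 10 time steps
+        >>> dmdc = DMDc(svd_solver="full", svd_rank=3)
+        >>> dmdc.fit(X, C)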
+
+    `predict(self, X: heat.core.dndarray.DNDarray, C: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Predicts and returns future states given the current state(s) ``X`` and control trajectory ``C``.
+
+        Parameters
+        ----------
+        X : DNDarray
+            The current state(s) for the prediction. Must have the same number of features as the training data, but can be batched for multiple current states,
+            i.e., X can be of shape (n_state_features,) or (n_batch, n_state_features).
+        C : DNDarray
+            The control trajectory for the prediction. Must have the same number of control features as the training data, i.e., C must be of shape
+            (n_control_features,) for a single time step, or (n_control_features, n_timesteps).
diff --git a/doc/api/heat/decomposition/index.md b/doc/api/heat/decomposition/index.md
new file mode 100644
index 0000000000..34a0f74a8f
--- /dev/null
+++ b/doc/api/heat/decomposition/index.md
@@ -0,0 +1,9 @@
+Module heat.decomposition
+=========================
+Add the decomposition functions to the ht.decomposition namespace
+
+Sub-modules
+-----------
+* heat.decomposition.dmd
+* heat.decomposition.pca
+* heat.decomposition.tests
diff --git a/doc/api/heat/decomposition/pca.md b/doc/api/heat/decomposition/pca.md
new file mode 100644
index 0000000000..764fa8a86f
--- /dev/null
+++ b/doc/api/heat/decomposition/pca.md
@@ -0,0 +1,165 @@
+Module heat.decomposition.pca
+=============================
+Module implementing decomposition techniques, such as PCA.
+
+Classes
+-------
+
+`IncrementalPCA(n_components: int | None = None, copy: bool = True, whiten: bool = False, batch_size: int | None = None)`
+: Incremental Principal Component Analysis (PCA).
+
+    This class allows for incremental updates of the PCA model. This is especially useful for large data sets that do not fit into memory.
+
+    An example of how to apply this class is given in, e.g., `benchmarks/cb/decomposition.py`.
+
+    Parameters
+    ----------
+    n_components : int, optional
+        Number of components to keep. If `n_components` is not set, all components are kept (default).
+    copy : bool, default=True
+        In-place operations are not yet supported. Please set `copy=True`.
+    whiten : bool, default=False
+        Not yet supported.
+    batch_size : int, optional
+        Currently not needed and only added for API consistency and possible future extensions.
+
+    Attributes
+    ----------
+    components_ : DNDarray of shape (n_components, n_features)
+        Principal axes in feature space, representing the directions of maximum variance in the data. The components are sorted by `explained_variance_`.
+    singular_values_ : DNDarray of shape (n_components,)
+        The singular values corresponding to each of the selected components.
+    mean_ : DNDarray of shape (n_features,)
+        Per-feature empirical mean, estimated from the training set.
+    n_components_ : int
+        The estimated number of components.
+    n_samples_seen_ : int
+        Number of samples processed so far.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X, y=None) ‑> Self`
+    : Not yet implemented; please use `.partial_fit` instead.
+        If you would like to see this method implemented, please open an issue on GitHub and suggest how you would like it implemented.
+
+    `inverse_transform(self, X: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Transform data back to its original space.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_samples, n_components)
+            Data set to be transformed back.
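+
+        Examples
+        --------
+        A round-trip sketch (illustrative shapes; the model must have been fitted first):
+
+        >>> import heat as ht
+        >>> X = ht.rand(100, 4, split=0)
+        >>> pca = IncrementalPCA(n_components=2)
+        >>> pca.partial_fit(X)
+        >>> X_reduced = pca.transform(X)  # shape (100, 2)
+        >>> X_restored = pca.inverse_transform(X_reduced)  # back to shape (100, 4)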
+
+    `partial_fit(self, X: heat.core.dndarray.DNDarray, y=None)`
+    : A single step of incrementally building up the PCA.
+        The input X is the current batch of data to be added to the existing PCA.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Apply dimensionality reduction based on PCA to X.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_samples, n_features)
+            Data set to be transformed.
+
+`PCA(n_components: int | float | None = None, copy: bool = True, whiten: bool = False, svd_solver: str = 'hierarchical', tol: float | None = None, iterated_power: int | str = 0, n_oversamples: int = 10, power_iteration_normalizer: str = 'qr', random_state: int | None = None)`
+: Principal Component Analysis (PCA).
+
+    Linear dimensionality reduction using Singular Value Decomposition of the data to project it to a lower dimensional space.
+    The input data is centered but not scaled for each feature before applying the SVD.
+
+    Parameters
+    ----------
+    n_components : int, float, None, default=None
+        Number of components to keep. If n_components is not set, all components are kept.
+        If n_components is an integer, it specifies the number of components to keep.
+        If n_components is a float between 0 and 1, it specifies the fraction of variance explained by the components to keep.
+    copy : bool, default=True
+        In-place operations are not yet supported. Please set copy=True.
+    whiten : bool, default=False
+        Not yet supported.
+    svd_solver : {'full', 'hierarchical', 'randomized'}, default='hierarchical'
+        'full' : Full SVD is performed. In general, this is more accurate, but also slower. So far, this is only supported for tall-skinny or short-fat data.
+        'hierarchical' : Hierarchical SVD, i.e., an algorithm for computing an approximate, truncated SVD, is performed. Only available for data split along axis no. 0.
+        'randomized' : Randomized SVD is performed.
+    tol : float, default=None
+        Not yet necessary as iterative methods for PCA are not yet implemented.
+    iterated_power : int, default=0
+        if svd_solver='randomized', this parameter is the number of iterations for the power method.
+        Choosing `iterated_power > 0` can lead to better results in the case of slowly decaying singular values but is computationally more expensive.
+    n_oversamples : int, default=10
+        if svd_solver='randomized', this parameter is the number of additional random vectors to sample the range of X so that the range of X can be approximated more accurately.
+    power_iteration_normalizer : {'qr'}, default='qr'
+        if svd_solver='randomized', this parameter is the normalization form of the iterated power method. So far, only QR is supported.
+    random_state : int, default=None
+        if svd_solver='randomized', this parameter allows setting the seed of the random number generator.
+
+    Attributes
+    ----------
+    components_ : DNDarray of shape (n_components, n_features)
+        Principal axes in feature space, representing the directions of maximum variance in the data. The components are sorted by explained_variance_.
+    explained_variance_ : DNDarray of shape (n_components,)
+        The amount of variance explained by each of the selected components.
+        Not supported by svd_solver='hierarchical' and svd_solver='randomized'.
+    explained_variance_ratio_ : DNDarray of shape (n_components,)
+        Percentage of variance explained by each of the selected components.
+        Not supported by svd_solver='hierarchical' and svd_solver='randomized'.
+    total_explained_variance_ratio_ : float
+        The percentage of total variance explained by the selected components together.
+        For svd_solver='hierarchical', a lower estimate for this quantity is provided; see :func:`ht.linalg.hsvd_rtol` and :func:`ht.linalg.hsvd_rank` for details.
+        Not supported by svd_solver='randomized'.
+    singular_values_ : DNDarray of shape (n_components,)
+        The singular values corresponding to each of the selected components.
+        Not supported by svd_solver='hierarchical' and svd_solver='randomized'.
+    mean_ : DNDarray of shape (n_features,)
+        Per-feature empirical mean, estimated from the training set.
+    n_components_ : int
+        The estimated number of components.
+    n_samples_ : int
+        Number of samples in the training data.
+    noise_variance_ : float
+        Not yet implemented.
+
+    Notes
+    -----
+    Hierarchical SVD (`svd_solver = "hierarchical"`) computes an approximate, truncated SVD. Thus, the results are not exact, in general, unless the
+    truncation rank chosen is larger than the actual rank (matrix rank) of the underlying data; see :func:`ht.linalg.hsvd_rank` and :func:`ht.linalg.hsvd_rtol` for details.
+    Randomized SVD (`svd_solver = "randomized"`) is a stochastic algorithm that computes an approximate, truncated SVD.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray, y=None) ‑> Self`
+    : Fit the PCA model with data X.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_samples, n_features)
+            Data set of which PCA has to be computed.
+        y : Ignored
+            Not used, present for API consistency by convention.
+
+    `inverse_transform(self, X: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Transform data back to its original space.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_samples, n_components)
+            Data set to be transformed back.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Apply dimensionality reduction based on PCA to X.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_samples, n_features)
+            Data set to be transformed.
diff --git a/doc/api/heat/decomposition/tests/index.md b/doc/api/heat/decomposition/tests/index.md
new file mode 100644
index 0000000000..f1465eae88
--- /dev/null
+++ b/doc/api/heat/decomposition/tests/index.md
@@ -0,0 +1,7 @@
+Module heat.decomposition.tests
+===============================
+
+Sub-modules
+-----------
+* heat.decomposition.tests.test_dmd
+* heat.decomposition.tests.test_pca
diff --git a/doc/api/heat/decomposition/tests/test_dmd.md b/doc/api/heat/decomposition/tests/test_dmd.md
new file mode 100644
index 0000000000..5edad35c5d
--- /dev/null
+++ b/doc/api/heat/decomposition/tests/test_dmd.md
@@ -0,0 +1,157 @@
+Module heat.decomposition.tests.test_dmd
+========================================
+
+Classes
+-------
+
+`TestDMD(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+ + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_dmd_correctness_split0(self)` + : + + `test_dmd_correctness_split1(self)` + : + + `test_dmd_fit_catch_wrong(self)` + : + + `test_dmd_functionality_split0_full(self)` + : + + `test_dmd_functionality_split0_hierarchical(self)` + : + + `test_dmd_functionality_split0_randomized(self)` + : + + `test_dmd_functionality_split1_full(self)` + : + + `test_dmd_functionality_split1_hierarchical(self)` + : + + `test_dmd_functionality_split1_randomized(self)` + : + + `test_dmd_predict_catch_wrong(self)` + : + + `test_dmd_setup_catch_wrong(self)` + : + +`TestDMDc(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. 
Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_dmdc_correctness_split0(self)` + : + + `test_dmdc_correctness_split1(self)` + : + + `test_dmdc_fit_catch_wrong(self)` + : + + `test_dmdc_functionality_split0_full(self)` + : + + `test_dmdc_functionality_split0_hierarchical(self)` + : + + `test_dmdc_functionality_split0_randomized(self)` + : + + `test_dmdc_functionality_split1_full(self)` + : + + `test_dmdc_functionality_split1_hierarchical(self)` + : + + `test_dmdc_functionality_split1_randomized(self)` + : + + `test_dmdc_predict_catch_wrong(self)` + : + + `test_dmdc_setup_catch_wrong(self)` + : diff --git a/doc/api/heat/decomposition/tests/test_pca.md b/doc/api/heat/decomposition/tests/test_pca.md new file mode 100644 index 0000000000..131d5c2ab5 --- /dev/null +++ b/doc/api/heat/decomposition/tests/test_pca.md @@ -0,0 +1,121 @@ +Module heat.decomposition.tests.test_pca +======================================== + +Classes +------- + +`TestIncrementalPCA(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_incrementalpca_catch_wrong_inputs(self)` + : + + `test_incrementalpca_full_rank_reached_split0(self)` + : + + `test_incrementalpca_setup(self)` + : + + `test_incrementalpca_truncation_happens_split1(self)` + : + +`TestPCA(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_pca_randomized(self)` + : + + `test_pca_setup(self)` + : + + `test_pca_with_full_rank(self)` + : + + `test_pca_with_full_rtol(self)` + : + + `test_pca_with_hiearchical_rtol(self)` + : + + `test_pca_with_hierarchical_rank(self)` + : diff --git a/doc/api/heat/fft/fft.md b/doc/api/heat/fft/fft.md new file mode 100644 index 0000000000..d7d14bf753 --- /dev/null +++ b/doc/api/heat/fft/fft.md @@ -0,0 +1,586 @@ +Module heat.fft.fft +=================== +Provides a collection of Discrete Fast Fourier Transforms (DFFT) and their inverses. + +Functions +--------- + +`fft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the one-dimensional discrete Fourier Transform over the specified axis in an M-dimensional + array by means of the Fast Fourier Transform (FFT). By default, the last axis is transformed, while the remaining + axes are left unchanged. + + Parameters + ---------- + x : DNDarray + Input array, can be complex. WARNING: If x is 1-D and distributed, the entire array is copied on each MPI process. See Notes. + n : int, optional + Length of the transformed axis of the output. If not given, the length is assumed to be the length of the input + along the axis specified by `axis`. If `n` is smaller than the length of the input, the input is truncated. If `n` is + larger, the input is padded with zeros. Default: None. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is used, or the only axis if `x` has only one + dimension. Default: -1. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". 
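+
+    Examples
+    --------
+    A minimal sketch (values illustrative only):
+
+    >>> import heat as ht
+    >>> x = ht.arange(8, dtype=ht.float32)
+    >>> y = ht.fft.fft(x)  # complex DNDarray of the same shape as x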
+ + See Also + -------- + :func:`ifft` : inverse 1-dimensional FFT + :func:`fft2` : 2-dimensional FFT + :func:`fftn` : N-dimensional FFT + :func:`rfft` : 1-dimensional FFT of a real signal + :func:`hfft` : 1-dimensional FFT of a Hermitian symmetric sequence + :func:`fftfreq` : frequency bins for given FFT parameters + :func:`rfftfreq` : frequency bins for real FFT + + Notes + ----- + This function requires MPI communication if the input array is transformed along the distribution axis. + If the input array is 1-D and distributed, this function copies the entire array on each MPI process! i.e. if the array is very large, you might run out of memory. + Hint: if you are looping through a batch of 1-D arrays to transform them, consider stacking them into a 2-D DNDarray and transforming them in one go (see :func:`fft2`). + +`fft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the 2-dimensional discrete Fourier Transform over the specified axes in an M-dimensional + array by means of the Fast Fourier Transform (FFT). By default, the last two axes are transformed, while the + remaining axes are left unchanged. + + Parameters + ---------- + x : DNDarray + Input array, can be complex + s : Tuple[int, int], optional + Shape of the output along the transformed axes. (default is x.shape) + axes : Tuple[int, int], optional + Axes over which to compute the FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is also + not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. + (default is (-2, -1)) + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`ifft2` : inverse 2-dimensional FFT + :func:`fft` : 1-dimensional FFT + :func:`fftn` : N-dimensional FFT + :func:`rfft2` : 2-dimensional FFT of a real signal + :func:`hfft2` : 2-dimensional FFT of a Hermitian symmetric sequence + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`fftfreq(n: int, d: int | float = 1.0, dtype: Type | None = None, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: mpi4py.MPI.Comm | None = None) ‑> heat.core.dndarray.DNDarray` +: Return the Discrete Fourier Transform sample frequencies for a signal of size ``n``. + + The returned ``DNDarray`` contains the frequency bin centers in cycles per unit of the sample spacing (with zero + at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. + + Parameters + ---------- + n : int + Window length. + d : Union[int, float], optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + dtype : Type, optional + The desired data type of the output. Defaults to `ht.float32`. + split : int, optional + The axis along which to split the result. Can be None or 0, as the output is 1-dimensional. Defaults to None, i.e. non-distributed output. + device : str or Device, optional + The device on which to place the output. If not given, the output is placed on the current device. + comm : MPI.Comm, optional + The MPI communicator to use for distributing the output. If not given, the default communicator is used. 
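+
+    Examples
+    --------
+    A minimal sketch (sample spacing illustrative only):
+
+    >>> import heat as ht
+    >>> freqs = ht.fft.fftfreq(8, d=0.1)  # frequency bins in cycles per unit of the sample spacing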
+ + See Also + -------- + :func:`rfftfreq` : frequency bins for :func:`rfft` + +`fftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, ...] = None, axes: Tuple[int, ...] = None, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the N-dimensional discrete Fourier Transform. + + This function computes the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional + array by means of the Fast Fourier Transform (FFT). + + Parameters + ---------- + x : DNDarray + Input array, can be complex + s : Tuple[int, ...], optional + Shape of the output along the transformed axes. (default is x.shape) + axes : Tuple[int, ...], optional + Axes over which to compute the FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is also + not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. + (default is None) + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`ifftn` : inverse N-dimensional FFT + :func:`fft` : 1-dimensional FFT + :func:`fft2` : 2-dimensional FFT + :func:`rfftn` : N-dimensional FFT of a real signal + :func:`hfftn` : N-dimensional FFT of a Hermitian symmetric sequence + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`fftshift(x: heat.core.dndarray.DNDarray, axes: int | Iterable[int] | None = None) ‑> heat.core.dndarray.DNDarray` +: Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). Note that ``y[0]`` is the Nyquist component + only if ``len(x)`` is even. + + Parameters + ---------- + x : DNDarray + Input array + axes : int or Iterable[int], optional + Axes over which to shift. Default is None, which shifts all axes. + + See Also + -------- + :func:`ifftshift` : The inverse of `fftshift`. + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is shifted. + +`hfft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the one-dimensional discrete Fourier Transform of a Hermitian symmetric signal. + + This function computes the one-dimensional discrete Fourier Transform over the specified axis in an M-dimensional + array by means of the Fast Fourier Transform (FFT). By default, the last axis is transformed, while the remaining + axes are left unchanged. The input signal is assumed to be Hermitian-symmetric, i.e. `x[..., i] = x[..., -i].conj()`. + + Parameters + ---------- + x : DNDarray + Input array + n : int, optional + Length of the transformed axis of the output. + If `n` is not None, the input array is either zero-padded or trimmed to length `n` before the transform. + Default: `2 * (x.shape[axis] - 1)`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last axis is used, or the only axis if x has only one + dimension. Default: -1. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". 
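+
+    Examples
+    --------
+    A minimal sketch (a trivially Hermitian-symmetric input is used for illustration):
+
+    >>> import heat as ht
+    >>> x = ht.zeros(5, dtype=ht.complex64)
+    >>> y = ht.fft.hfft(x)  # real output of length 2 * (5 - 1) = 8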
+ + See Also + -------- + :func:`ihfft` : inverse 1-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfft2` : 2-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfftn` : N-dimensional FFT of a Hermitian-symmetric sequence + :func:`fft` : 1-dimensional FFT + :func:`rfft` : 1-dimensional FFT of a real signal + + Notes + ----- + This function requires MPI communication if the input array is transformed along the distribution axis. + +`hfft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the 2-dimensional discrete Fourier Transform of a Hermitian symmetric signal. + + This function computes the 2-dimensional discrete Fourier Transform over the specified axes in an M-dimensional + array by means of the Fast Fourier Transform (FFT). By default, the last two axes are transformed, while the + remaining axes are left unchanged. The input signal is assumed to be Hermitian-symmetric, i.e. `x[..., i] = x[..., -i].conj()`. + + Parameters + ---------- + x : DNDarray + Input array + s : Tuple[int, int], optional + Shape of the signal along the transformed axes. If `s` is specified, the input array is either zero-padded or trimmed to length `s` before the transform. + If `s` is not given, the last dimension defaults to even output: `s[-1] = 2 * (x.shape[-1] - 1)`. + axes : Tuple[int, int], optional + Axes over which to compute the FFT. If not given, the last two dimensions are transformed. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default: (-2, -1). + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`ihfft2` : inverse 2-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfft` : 1-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfftn` : N-dimensional FFT of a Hermitian-symmetric sequence + :func:`fft2` : 2-dimensional FFT + :func:`rfft2` : 2-dimensional FFT of a real signal + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`hfftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, ...] = None, axes: Tuple[int, ...] = None, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the N-dimensional discrete Fourier Transform of a Hermitian symmetric signal. + + This function computes the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional + array by means of the Fast Fourier Transform (FFT). By default, all axes are transformed. + + Parameters + ---------- + x : DNDarray + Input array + s : Tuple[int, ...], optional + Shape of the signal along the transformed axes. If `s` is specified, the input array is either zero-padded or trimmed to length `s` before the transform. + If `s` is not given, the last dimension defaults to even output: `s[-1] = 2 * (x.shape[-1] - 1)`. + axes : Tuple[int, ...], optional + Axes over which to compute the FFT. If not given, all dimensions are transformed. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default: None. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". 
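+
+    Examples
+    --------
+    A minimal sketch (illustrative shape):
+
+    >>> import heat as ht
+    >>> x = ht.zeros((4, 3), dtype=ht.complex64)
+    >>> y = ht.fft.hfftn(x)  # real output; the last axis expands to 2 * (3 - 1) = 4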
+ + See Also + -------- + :func:`ihfftn` : inverse N-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfft` : 1-dimensional FFT of a Hermitian-symmetric sequence + :func:`hfft2` : 2-dimensional FFT of a Hermitian-symmetric sequence + :func:`fftn` : N-dimensional FFT + :func:`rfftn` : N-dimensional FFT of a real signal + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`ifft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the one-dimensional inverse discrete Fourier Transform. + + Parameters + ---------- + x : DNDarray + Input array, can be complex + n : int, optional + Length of the transformed axis of the output. If not given, the length is taken to be the length of the input + along the axis specified by `axis`. If `n` is smaller than the length of the input, the input is cropped. If `n` is + larger, the input is padded with zeros. Default: None. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last axis is used, or the only axis if x has only one dimension. Default: -1. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`fft` : forward 1-dimensional FFT + :func:`ifft2` : inverse 2-dimensional FFT + :func:`ifftn` : inverse N-dimensional FFT + :func:`irfft` : inverse 1-dimensional FFT of a real sequence + :func:`ihfft` : inverse 1-dimensional FFT of a Hermitian symmetric sequence + + Notes + ----- + This function requires MPI communication if the input array is transformed along the distribution axis. + If the input array is 1-D and distributed, this function copies the entire array on each MPI process! i.e. if the array is very large, you might run out of memory. + Hint: if you are looping through a batch of 1-D arrays to transform them, consider stacking them into a 2-D DNDarray and transforming them all at once (see :func:`ifft2`). + +`ifft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the 2-dimensional inverse discrete Fourier Transform. + + Parameters + ---------- + x : DNDarray + Input array, can be complex + s : Tuple[int, int], optional + Shape of the output along the transformed axes. (default is x.shape) + axes : Tuple[int, int], optional + Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is + also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default: (-2, -1). + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`fft2` : forward 2-dimensional FFT + :func:`ifft` : inverse 1-dimensional FFT + :func:`ifftn` : inverse N-dimensional FFT + :func:`irfft2` : inverse 2-dimensional FFT of a real sequence + :func:`ihfft2` : inverse 2-dimensional FFT of a Hermitian symmetric sequence + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`ifftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, ...] 
= None, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the N-dimensional inverse discrete Fourier Transform. + + Parameters + ---------- + x : DNDarray + Input array, can be complex + s : Tuple[int, ...], optional + Shape of the output along the transformed axes. (default is x.shape) + axes : Tuple[int, ...], optional + Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is + also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default: None. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`fftn` : forward N-dimensional FFT + :func:`ifft` : inverse 1-dimensional FFT + :func:`ifft2` : inverse 2-dimensional FFT + :func:`irfftn` : inverse N-dimensional FFT of a real sequence + :func:`ihfftn` : inverse N-dimensional FFT of a Hermitian symmetric sequence + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is transformed. + +`ifftshift(x: heat.core.dndarray.DNDarray, axes: int | Iterable[int] | None = None) ‑> heat.core.dndarray.DNDarray` +: The inverse of fftshift. + + Parameters + ---------- + x : DNDarray + Input array + axes : int or Iterable[int], optional + Axes over which to shift. Default is None, which shifts all axes. + + See Also + -------- + :func:`fftshift` : Shift the zero-frequency component to the center of the spectrum. + + Notes + ----- + This function requires MPI communication if the input array is distributed and the split axis is shifted. + +`ihfft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the one-dimensional inverse discrete Fourier Transform of a real signal. The output is Hermitian-symmetric. + + Parameters + ---------- + x : DNDarray + Input array, must be real + n : int, optional + Length of the transformed axis of the output. If not given, the length is taken to be the length of the input + along the axis specified by `axis`. If `n` is smaller than the length of the input, the input is cropped. If `n` is + larger, the input is padded with zeros. Default: None. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last axis is used, or the only axis if x has only one dimension. Default: -1. + norm : str, optional + Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward". + + See Also + -------- + :func:`hfft` : 1-dimensional FFT of a Hermitian-symmetric sequence + :func:`ihfft2` : inverse 2-dimensional FFT of a Hermitian-symmetric sequence + :func:`ihfftn` : inverse N-dimensional FFT of a Hermitian-symmetric sequence + :func:`rfft` : 1-dimensional FFT of a real signal + :func:`irfft` : inverse 1-dimensional FFT of a real sequence + + Notes + ----- + This function requires MPI communication if the input array is transformed along the distribution axis. + +`ihfft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray` +: Compute the inverse of a 2-dimensional discrete Fourier Transform of a Hermitian-symmetric signal. The output is Hermitian-symmetric. Requires torch >= 1.11.0. 
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, must be real
+    s : Tuple[int, int], optional
+        Shape of the output along the transformed axes. (default is x.shape)
+    axes : Tuple[int, int], optional
+        Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default is (-2, -1).
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    See Also
+    --------
+    :func:`hfft2` : 2-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`ihfft` : inverse 1-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`ihfftn` : inverse N-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`rfft2` : 2-dimensional FFT of a real signal
+    :func:`irfft2` : inverse 2-dimensional FFT of a real sequence
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
+
+`ihfftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, ...] = None, axes: Tuple[int, ...] = None, norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse of an N-dimensional discrete Fourier Transform of a Hermitian-symmetric signal. The output is Hermitian-symmetric. Requires torch >= 1.11.0.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, must be real
+    s : Tuple[int, ...], optional
+        Shape of the output along the transformed axes. (default is x.shape)
+    axes : Tuple[int, ...], optional
+        Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default: None.
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    See Also
+    --------
+    :func:`hfftn` : N-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`ihfft` : inverse 1-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`ihfft2` : inverse 2-dimensional FFT of a Hermitian-symmetric sequence
+    :func:`rfftn` : N-dimensional FFT of a real signal
+    :func:`irfftn` : inverse N-dimensional FFT of a real sequence
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
+
+`irfft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse of a one-dimensional discrete Fourier Transform of a real signal. The output is real.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, can be complex
+    n : int, optional
+        Length of the transformed axis of the output. If not given, the length is taken to be the length of the input
+        along the axis specified by `axis`. If `n` is smaller than the length of the input, the input is cropped. If `n` is
+        larger, the input is padded with zeros. Default: None.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last axis is used, or the only axis if x has only one dimension. Default: -1.
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
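+
+    Examples
+    --------
+    A minimal round-trip sketch (real input of even length for illustration):
+
+    >>> import heat as ht
+    >>> x = ht.rand(8)
+    >>> y = ht.fft.irfft(ht.fft.rfft(x), n=8)  # recovers x up to floating-point error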
+
+    See Also
+    --------
+    :func:`irfft2` : inverse 2-dimensional FFT
+    :func:`irfftn` : inverse N-dimensional FFT
+    :func:`rfft` : 1-dimensional FFT of a real signal
+    :func:`hfft` : 1-dimensional FFT of a Hermitian symmetric sequence
+    :func:`fft` : 1-dimensional FFT
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is transformed along the distribution axis.
+    If the input array is 1-D and distributed, this function copies the entire array on each MPI process! i.e. if the array is very large, you might run out of memory.
+    Hint: if you are looping through a batch of 1-D arrays to transform them, consider stacking them into a 2-D DNDarray and transforming them all at once (see :func:`irfft2`).
+
+`irfft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse of a 2-dimensional discrete real Fourier Transform. The output is real.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, can be complex
+    s : Tuple[int, int], optional
+        Shape of the output along the transformed axes.
+    axes : Tuple[int, int], optional
+        Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. Default is (-2, -1).
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    See Also
+    --------
+    :func:`irfft` : inverse 1-dimensional FFT
+    :func:`irfftn` : inverse N-dimensional FFT
+    :func:`rfft2` : 2-dimensional FFT of a real signal
+    :func:`hfft2` : 2-dimensional FFT of a Hermitian symmetric sequence
+    :func:`fft2` : 2-dimensional FFT
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
+
+`irfftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, ...] = None, norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the inverse of an N-dimensional discrete Fourier Transform of a real signal.
+    The output is real.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, assumed to be Hermitian-symmetric along the transformed axes, with the last transformed axis only containing the positive half of the frequencies.
+    s : Tuple[int, ...], optional
+        Shape of the output along the transformed axes. If ``s`` is not specified, the last transformed axis is reconstructed in full, i.e. `s[-1] = 2 * (x.shape[axes[-1]] - 1)`.
+    axes : Tuple[int, ...], optional
+        Axes over which to compute the inverse FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet.
+        (default is None)
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
+
+`rfft(x: heat.core.dndarray.DNDarray, n: int = None, axis: int = -1, norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the one-dimensional discrete Fourier Transform of real input. The output is Hermitian-symmetric.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, must be real.
+    n : int, optional
+        Length of the transformed axis of the output. If not given, the length is taken to be the length of the input
+        along the axis specified by `axis`. If `n` is smaller than the length of the input, the input is cropped. If `n` is
+        larger, the input is padded with zeros. Default: None.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is used, or the only axis if x has only one dimension. Default: -1.
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is transformed along the distribution axis.
+    If the input array is 1-D and distributed, this function copies the entire array on each MPI process, i.e. if the array is very large, you might run out of memory.
+    Hint: if you are looping through a batch of 1-D arrays to transform them, consider stacking them into a 2-D DNDarray and transforming them all at once (see :func:`rfft2`).
+
+`rfft2(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, int] = (-2, -1), norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the 2-dimensional discrete Fourier Transform of real input. The output is Hermitian-symmetric.
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, must be real.
+    s : Tuple[int, int], optional
+        Shape of the output along the transformed axes. (default is x.shape)
+    axes : Tuple[int, int], optional
+        Axes over which to compute the FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. (default is (-2, -1))
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
+
+`rfftfreq(n: int, d: int | float = 1.0, dtype: Type | None = None, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: mpi4py.MPI.Comm | None = None) ‑> heat.core.dndarray.DNDarray`
+: Return the Discrete Fourier Transform sample frequencies.
+
+    The returned float DNDarray contains the frequency bin centers in cycles per unit of the sample spacing (with zero
+    at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second.
+
+    Parameters
+    ----------
+    n : int
+        Window length.
+    d : Union[int, float], optional
+        Sample spacing (inverse of the sampling rate). Defaults to 1.
+    dtype : Type, optional
+        The desired data type of the output. Defaults to `float32`.
+    split : int, optional
+        The axis along which to split the result. If not given, the result is not split.
+    device : str or Device, optional
+        The device on which to place the output. If not given, the output is placed on the current device.
+    comm : MPI.Comm, optional
+        The MPI communicator to use for distributing the output. If not given, the default communicator is used.
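+
+    A small numeric sketch (hedged: it assumes the documented defaults; the values follow from `f_k = k / (n * d)`):
+
+    >>> import heat as ht
+    >>> ht.fft.rfftfreq(8, d=0.1)  # 8 samples at 10 Hz -> frequencies [0.0, 1.25, 2.5, 3.75, 5.0]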
+`rfftn(x: heat.core.dndarray.DNDarray, s: Tuple[int, int] = None, axes: Tuple[int, ...] = None, norm: str = None) ‑> heat.core.dndarray.DNDarray`
+: Compute the N-dimensional discrete Fourier Transform of real input. By default, all axes are transformed, with the real transform
+    performed over the last axis, while the remaining transforms are complex. The output is Hermitian-symmetric, with the last transformed axis having length `s[-1] // 2 + 1` (the positive part of the spectrum).
+
+    Parameters
+    ----------
+    x : DNDarray
+        Input array, must be real.
+    s : Tuple[int, ...], optional
+        Shape of the output along the transformed axes.
+    axes : Tuple[int, ...], optional
+        Axes over which to compute the FFT. If not given, the last `len(s)` axes are used, or all axes if `s` is
+        also not specified. Repeated transforms over an axis, i.e. repeated indices in ``axes``, are not supported yet. (default is None)
+    norm : str, optional
+        Normalization mode: 'forward', 'backward', or 'ortho'. Indicates in what direction the forward/backward pair of transforms is normalized. Default is "backward".
+
+    Notes
+    -----
+    This function requires MPI communication if the input array is distributed and the split axis is transformed.
diff --git a/doc/api/heat/fft/index.md b/doc/api/heat/fft/index.md
new file mode 100644
index 0000000000..1d149b9346
--- /dev/null
+++ b/doc/api/heat/fft/index.md
@@ -0,0 +1,8 @@
+Module heat.fft
+===============
+Import the FFT functions into the fft namespace
+
+Sub-modules
+-----------
+* heat.fft.fft
+* heat.fft.tests
diff --git a/doc/api/heat/fft/tests/index.md b/doc/api/heat/fft/tests/index.md
new file mode 100644
index 0000000000..b642abd95c
--- /dev/null
+++ b/doc/api/heat/fft/tests/index.md
@@ -0,0 +1,6 @@
+Module heat.fft.tests
+=====================
+
+Sub-modules
+-----------
+* heat.fft.tests.test_fft
diff --git a/doc/api/heat/fft/tests/test_fft.md b/doc/api/heat/fft/tests/test_fft.md
new file mode 100644
index 0000000000..75f9536fdb
--- /dev/null
+++ b/doc/api/heat/fft/tests/test_fft.md
@@ -0,0 +1,81 @@
+Module heat.fft.tests.test_fft
+==============================
+
+Classes
+-------
+
+`TestFFT(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+    the instance's assertion methods fail; test methods raising this
+    exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+    objects used in assert methods) will be printed on failure in *addition*
+    to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+    by assert methods using difflib. It is looked up as an instance
+    attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_fft2_ifft2(self)`
+    :
+
+    `test_fft_ifft(self)`
+    :
+
+    `test_fftfreq_rfftfreq(self)`
+    :
+
+    `test_fftn_ifftn(self)`
+    :
+
+    `test_fftshift_ifftshift(self)`
+    :
+
+    `test_hfft2_ihfft2(self)`
+    :
+
+    `test_hfft_ihfft(self)`
+    :
+
+    `test_hfftn_ihfftn(self)`
+    :
+
+    `test_rfft2_irfft2(self)`
+    :
+
+    `test_rfft_irfft(self)`
+    :
+
+    `test_rfftn_irfftn(self)`
+    :
diff --git a/doc/api/heat/graph/index.md b/doc/api/heat/graph/index.md
new file mode 100644
index 0000000000..a564841c93
--- /dev/null
+++ b/doc/api/heat/graph/index.md
@@ -0,0 +1,8 @@
+Module heat.graph
+=================
+Import the graph functions into the graph namespace
+
+Sub-modules
+-----------
+* heat.graph.laplacian
+* heat.graph.tests
diff --git a/doc/api/heat/graph/laplacian.md b/doc/api/heat/graph/laplacian.md
new file mode 100644
index 0000000000..4fbf25837d
--- /dev/null
+++ b/doc/api/heat/graph/laplacian.md
@@ -0,0 +1,48 @@
+Module heat.graph.laplacian
+===========================
+Module for graph-based classes
+
+Classes
+-------
+
+`Laplacian(similarity: Callable, weighted: bool = True, definition: str = 'norm_sym', mode: str = 'fully_connected', threshold_key: str = 'upper', threshold_value: float = 1.0, neighbours: int = 10)`
+: Graph Laplacian from a dataset
+
+    Parameters
+    ----------
+    similarity : Callable
+        Metric function that defines similarity between vertices. Should accept a data matrix :math:`n \times f` as input and
+        return an :math:`n \times n` similarity matrix. Additional required parameters can be passed via a lambda function.
+    definition : str
+        Type of Laplacian
+
+        - ``'simple'``: Laplacian matrix for simple graphs :math:`L = D - A`
+
+        - ``'norm_sym'``: Symmetric normalized Laplacian :math:`L^{sym} = I - D^{-1/2} A D^{-1/2}`
+
+        - ``'norm_rw'``: Random walk normalized Laplacian :math:`L^{rw} = D^{-1} L = I - D^{-1} A`
+
+    mode : str
+        How to calculate adjacency from the similarity matrix
+
+        - ``'fully_connected'`` is fully-connected, so :math:`A = S`
+
+        - ``'eNeighbour'`` is the epsilon neighbourhood, with :math:`A_{ij} = 0` if :math:`S_{ij} > upper` or
+          :math:`S_{ij} < lower`; for eNeighbour an upper or lower boundary needs to be set
+
+    threshold_key : str
+        ``'upper'`` or ``'lower'``, defining the type of threshold for the epsilon-neighborhood
+    threshold_value : float
+        Boundary value for the epsilon-neighborhood
+    neighbours : int
+        Number of nearest neighbors to be considered for the adjacency definition. Currently not implemented.
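+
+    A minimal construction sketch (hedged: ``ht.spatial.rbf`` is assumed to be available as the similarity metric, and the data layout is purely illustrative):
+
+    >>> import heat as ht
+    >>> data = ht.rand(100, 4, split=0)
+    >>> sim = lambda x: ht.spatial.rbf(x, x)
+    >>> lap = ht.graph.Laplacian(sim, definition="norm_sym", mode="fully_connected")
+    >>> L = lap.construct(data)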
+
+    ### Methods
+
+    `construct(self, X: DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Compute the Laplacian matrix of the dataset ``X`` according to the specified Laplacian definition
+
+        Parameters
+        ----------
+        X : DNDarray
+            The data matrix, Shape = (n_samples, n_features)
diff --git a/doc/api/heat/graph/tests/index.md b/doc/api/heat/graph/tests/index.md
new file mode 100644
index 0000000000..0fa66c447a
--- /dev/null
+++ b/doc/api/heat/graph/tests/index.md
@@ -0,0 +1,6 @@
+Namespace heat.graph.tests
+==========================
+
+Sub-modules
+-----------
+* heat.graph.tests.test_laplacian
diff --git a/doc/api/heat/graph/tests/test_laplacian.md b/doc/api/heat/graph/tests/test_laplacian.md
new file mode 100644
index 0000000000..1dcb9e45fb
--- /dev/null
+++ b/doc/api/heat/graph/tests/test_laplacian.md
@@ -0,0 +1,51 @@
+Module heat.graph.tests.test_laplacian
+======================================
+
+Classes
+-------
+
+`TestLaplacian(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+    the instance's assertion methods fail; test methods raising this
+    exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+    objects used in assert methods) will be printed on failure in *addition*
+    to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+    by assert methods using difflib. It is looked up as an instance
+    attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_laplacian(self)`
+    :
diff --git a/doc/api/heat/index.md b/doc/api/heat/index.md
new file mode 100644
index 0000000000..b742d80dbb
--- /dev/null
+++ b/doc/api/heat/index.md
@@ -0,0 +1,23 @@
+Module heat
+===========
+Add modules/namespaces to the heat namespace
+
+Sub-modules
+-----------
+* heat.classification
+* heat.cli
+* heat.cluster
+* heat.core
+* heat.datasets
+* heat.decomposition
+* heat.fft
+* heat.graph
+* heat.naive_bayes
+* heat.nn
+* heat.optim
+* heat.preprocessing
+* heat.regression
+* heat.sparse
+* heat.spatial
+* heat.tests
+* heat.utils
diff --git a/doc/api/heat/naive_bayes/gaussianNB.md b/doc/api/heat/naive_bayes/gaussianNB.md
new file mode 100644
index 0000000000..acf73a6379
--- /dev/null
+++ b/doc/api/heat/naive_bayes/gaussianNB.md
@@ -0,0 +1,167 @@
+Module heat.naive_bayes.gaussianNB
+==================================
+Distributed Gaussian Naive-Bayes classifier.
+
+Classes
+-------
+
+`GaussianNB(priors=None, var_smoothing=1e-09)`
+: Gaussian Naive Bayes (GaussianNB), based on `scikit-learn.naive_bayes.GaussianNB `_.
+    Can perform online updates to model parameters via method :func:`partial_fit`.
+    For details on the algorithm used to update feature means and variance online,
+    see Chan, Golub, and LeVeque 1983 [1].
+
+    Parameters
+    ----------
+    priors : DNDarray
+        Prior probabilities of the classes, with shape ``(n_classes,)``. If specified, the priors are not
+        adjusted according to the data.
+    var_smoothing : float, optional
+        Portion of the largest variance of all features that is added to
+        variances for calculation stability.
+
+    Attributes
+    ----------
+    class_count_ : DNDarray
+        Number of training samples observed in each class. Shape = ``(n_classes,)``
+    class_prior_ : DNDarray
+        Probability of each class. Shape = ``(n_classes,)``
+    classes_ : DNDarray
+        Class labels known to the classifier. Shape = ``(n_classes,)``
+    epsilon_ : float
+        Absolute additive value to variances
+    sigma_ : DNDarray
+        Variance of each feature per class. Shape = ``(n_classes, n_features)``
+    theta_ : DNDarray
+        Mean of each feature per class. Shape = ``(n_classes, n_features)``
+
+    References
+    ----------
+    [1] Chan, Tony F., Golub, Gene H., and Leveque, Randall J., "Algorithms for Computing the Sample Variance: Analysis
+    and Recommendations", The American Statistician, 37:3, pp. 242-247, 1983
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> X = ht.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=ht.float32)
+    >>> Y = ht.array([1, 1, 1, 2, 2, 2])
+    >>> from heat.naive_bayes import GaussianNB
+    >>> clf = GaussianNB()
+    >>> clf.fit(X, Y)
+
+    >>> print(clf.predict(ht.array([[-0.8, -1]])))
+    tensor([1])
+    >>> clf_pf = GaussianNB()
+    >>> clf_pf.partial_fit(X, Y, ht.unique(Y, sorted=True))
+
+    >>> print(clf_pf.predict(ht.array([[-0.8, -1]])))
+    tensor([1])
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.ClassificationMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, x: DNDarray, y: DNDarray, sample_weight: Optional[DNDarray] = None)`
+    : Fit Gaussian Naive Bayes according to ``x`` and ``y``
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training set, where n_samples is the number of samples
+            and n_features is the number of features. Shape = (n_samples, n_features)
+        y : DNDarray
+            Labels for training set. Shape = (n_samples, )
+        sample_weight : DNDarray, optional
+            Weights applied to individual samples (1. for unweighted). Shape = (n_samples, )
+
+    `logsumexp(self, a: DNDarray, axis: Optional[Union[int, Tuple[int, ...]]] = None, b: Optional[DNDarray] = None, keepdims: bool = False, return_sign: bool = False) ‑> heat.core.dndarray.DNDarray`
+    : Adapted to HeAT from scikit-learn.
+        Compute the log of the sum of exponentials of input elements. The result, ``np.log(np.sum(np.exp(a)))``,
+        is calculated in a numerically more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
+        is returned.
+
+        Parameters
+        ----------
+        a : DNDarray
+            Input array.
+        axis : None or int or Tuple [int,...], optional
+            Axis or axes over which the sum is taken. By default ``axis`` is ``None``,
+            and all elements are summed.
+        keepdims : bool, optional
+            If this is set to ``True``, the axes which are reduced are left in the
+            result as dimensions with size one. With this option, the result
+            will broadcast correctly against the original array.
+        b : DNDarray, optional
+            Scaling factor for ``exp(a)``; must be of the same shape as ``a`` or
+            broadcastable to ``a``. These values may be negative in order to
+            implement subtraction.
+        return_sign : bool, optional
+            If this is set to ``True``, the result will be a pair containing sign
+            information; if ``False``, results that are negative will be returned
+            as ``NaN``.
+            #TODO: currently raises a not-implemented error.
+        sgn : DNDarray, NOT IMPLEMENTED YET
+            #TODO If return_sign is True, this will be an array of floating-point
+            numbers matching res and +1, 0, or -1 depending on the sign
+            of the result. If ``False``, only one result is returned.
+
+    `partial_fit(self, x: DNDarray, y: DNDarray, classes: Optional[DNDarray] = None, sample_weight: Optional[DNDarray] = None)`
+    : Adapted to HeAT from scikit-learn.
+        Incremental fit on a batch of samples.
+        This method is expected to be called several times consecutively
+        on different chunks of a dataset so as to implement out-of-core
+        or online learning.
+        This is especially useful when the whole dataset is too big to fit in
+        memory at once.
+        This method has some performance and numerical stability overhead,
+        hence it is better to call :func:`partial_fit` on chunks of data that are
+        as large as possible (as long as they fit in the memory budget) to
+        hide the overhead.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Training set, where `n_samples` is the number of samples and
+            `n_features` is the number of features. Shape = (n_samples, n_features)
+        y : DNDarray
+            Labels for training set. Shape = (n_samples,)
+        classes : DNDarray, optional
+            List of all the classes that can possibly appear in the ``y`` vector.
+            Must be provided at the first call to :func:`partial_fit`, can be omitted
+            in subsequent calls. Shape = ``(n_classes,)``
+        sample_weight : DNDarray, optional
+            Weights applied to individual samples (1. for unweighted). Shape = (n_samples,)
+
+    `predict(self, x: DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Adapted to HeAT from scikit-learn.
+        Perform classification on a tensor of test data ``x``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data with shape (n_samples, n_features)
+
+    `predict_log_proba(self, x: DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Adapted to HeAT from scikit-learn.
+        Return log-probability estimates of the samples for each class in
+        the model. The columns correspond to the classes in sorted
+        order, as they appear in the attribute ``classes_``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data. Shape = (n_samples, n_features).
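+
+    A short continuation of the class example above (hedged: output shapes follow the documented ``(n_samples, n_classes)`` convention):
+
+    >>> probs = clf.predict_proba(X)  # shape (n_samples, n_classes)
+    >>> log_probs = clf.predict_log_proba(X)  # elementwise log of the probabilities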
+
+    `predict_proba(self, x: DNDarray) ‑> heat.core.dndarray.DNDarray`
+    : Adapted to HeAT from scikit-learn.
+        Return probability estimates for the samples of the test tensor ``x`` for each class in
+        the model. The columns correspond to the classes in sorted
+        order, as they appear in the attribute ``classes_``.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data. Shape = (n_samples, n_features).
diff --git a/doc/api/heat/naive_bayes/index.md b/doc/api/heat/naive_bayes/index.md
new file mode 100644
index 0000000000..f71bd2b314
--- /dev/null
+++ b/doc/api/heat/naive_bayes/index.md
@@ -0,0 +1,8 @@
+Module heat.naive_bayes
+=======================
+Add the GaussianNB classifier to the ht.naive_bayes namespace
+
+Sub-modules
+-----------
+* heat.naive_bayes.gaussianNB
+* heat.naive_bayes.tests
diff --git a/doc/api/heat/naive_bayes/tests/index.md b/doc/api/heat/naive_bayes/tests/index.md
new file mode 100644
index 0000000000..f96c20422c
--- /dev/null
+++ b/doc/api/heat/naive_bayes/tests/index.md
@@ -0,0 +1,6 @@
+Module heat.naive_bayes.tests
+=============================
+
+Sub-modules
+-----------
+* heat.naive_bayes.tests.test_gaussiannb
diff --git a/doc/api/heat/naive_bayes/tests/test_gaussiannb.md b/doc/api/heat/naive_bayes/tests/test_gaussiannb.md
new file mode 100644
index 0000000000..820a28d4b1
--- /dev/null
+++ b/doc/api/heat/naive_bayes/tests/test_gaussiannb.md
@@ -0,0 +1,60 @@
+Module heat.naive_bayes.tests.test_gaussiannb
+=============================================
+
+Classes
+-------
+
+`TestGaussianNB(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+    the instance's assertion methods fail; test methods raising this
+    exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+    objects used in assert methods) will be printed on failure in *addition*
+    to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+    by assert methods using difflib. It is looked up as an instance
+    attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_classifier(self)`
+    :
+
+    `test_exception(self)`
+    :
+
+    `test_fit_iris(self)`
+    :
+
+    `test_get_and_set_params(self)`
+    :
diff --git a/doc/api/heat/nn/data_parallel.md b/doc/api/heat/nn/data_parallel.md
new file mode 100644
index 0000000000..e25c09f608
--- /dev/null
+++ b/doc/api/heat/nn/data_parallel.md
@@ -0,0 +1,76 @@
+Module heat.nn.data_parallel
+============================
+General data parallel neural network classes.
+
+Classes
+-------
+
+`DataParallel(module: torch.nn.modules.module.Module, comm: heat.core.communication.MPICommunication, optimizer: heat.optim.dp_optimizer.DataParallelOptimizer | List | Tuple, blocking_parameter_updates: bool = False)`
+: Implements data parallelism across multiple processes. This means that the same model will be run locally
+    on each process. Creation of the model is similar to PyTorch, the only changes are using HeAT layers (ht.nn.layer)
+    in the initialization of the network/optimizer. If there is not a HeAT layer, it will fall back to the PyTorch layer
+    of the same name. The same is true for the optimizer. It is possible to use more than one optimizer, but
+    communication during parameter updates is then limited to blocking. The same limitation takes effect when passing an
+    optimizer that does not deal exactly with the model's set of parameters. For the given model, both the
+    ``__init__()`` and ``forward()`` functions must be defined in the class defining the network.
+
+    An example of this is shown in `examples/mnist.py `_.
+
+    It is highly recommended that a HeAT DataLoader is used, see :func:`ht.utils.data.DataLoader `.
+    The default communications scheme for this is blocking. The blocking scheme will average the model parameters during
+    the backwards step, synchronizing them before the next model iteration.
+
+    Using more than one optimizer forces parameter updates to use blocking MPI communications.
+
+    Attributes
+    ----------
+    module : torch.nn.Module
+        The local module
+    comm : MPICommunication
+        Communicator to use
+    optimizer : heat.DataParallelOptimizer, List, Tuple
+        Individual or sequence of DataParallelOptimizers to be used
+    blocking_parameter_updates : bool, optional
+        Flag indicating the usage of blocking communications for parameter updates
+        Default: non-blocking updates (``False``)
+
+    Initialize internal Module state, shared by both nn.Module and ScriptModule.
+
+    ### Ancestors (in MRO)
+
+    * torch.nn.modules.module.Module
+
+    ### Methods
+
+    `forward(self, *inputs: tuple, **kwargs: dict) ‑> torch.Tensor`
+    : Do the forward step for the network, receive the parameters from the last backward step.
+
+`DataParallelMultiGPU(module: torch.nn.modules.module.Module, optimizer: heat.optim.dp_optimizer.DASO, comm: heat.core.communication.MPICommunication = )`
+: Creates data parallel networks local to each node using PyTorch's distributed class. This does NOT
+    do any global synchronizations. To make optimal use of this structure, use :func:`ht.optim.DASO `.
+
+    Notes
+    -----
+    The PyTorch distributed process group must already exist before this class is initialized.
+
+    Parameters
+    ----------
+    module: torch.nn.Module
+        an implemented PyTorch model
+    optimizer: optim.DASO
+        A DASO optimizer. Other optimizers are not yet implemented. The DASO optimizer should be
+        defined prior to calling this class.
+    comm: MPICommunication, optional
+        A global communicator.
+        Default: :func:`MPICommunication `
+
+    Initialize internal Module state, shared by both nn.Module and ScriptModule.
+
+    ### Ancestors (in MRO)
+
+    * torch.nn.modules.module.Module
+
+    ### Methods
+
+    `forward(self, *inputs: Tuple, **kwargs: Dict) ‑> torch.Tensor`
+    : Calls the forward method for the torch model
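+
+A minimal wiring sketch (hedged: ``Net`` is a hypothetical ``torch.nn.Module``, and ``ht.MPI_WORLD`` is assumed to be the default global communicator):
+
+>>> import heat as ht
+>>> import torch
+>>> t_net = Net()  # any torch model
+>>> t_opt = torch.optim.SGD(t_net.parameters(), lr=0.1)
+>>> dp_opt = ht.optim.DataParallelOptimizer(t_opt, blocking=False)
+>>> net = ht.nn.DataParallel(t_net, ht.MPI_WORLD, dp_opt)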
diff --git a/doc/api/heat/nn/functional.md b/doc/api/heat/nn/functional.md
new file mode 100644
index 0000000000..d21de15a8b
--- /dev/null
+++ b/doc/api/heat/nn/functional.md
@@ -0,0 +1,11 @@
+Module heat.nn.functional
+=========================
+File containing the heat.nn.functional submodule
+
+Functions
+---------
+
+`func_getattr(name)`
+: When an attribute is requested from the heat.nn.functional module, this first attempts to resolve it
+    as a Heat nn function of that name; if there is no such Heat function, it falls back to the
+    torch.nn.functional attribute of the same name.
diff --git a/doc/api/heat/nn/index.md b/doc/api/heat/nn/index.md
new file mode 100644
index 0000000000..a0172b80b2
--- /dev/null
+++ b/doc/api/heat/nn/index.md
@@ -0,0 +1,84 @@
+Module heat.nn
+==============
+Neural network submodule.
+
+It contains data parallel specific nn modules. It also includes all of the modules in the torch.nn namespace.
+
+Sub-modules
+-----------
+* heat.nn.data_parallel
+* heat.nn.functional
+* heat.nn.tests
+
+Classes
+-------
+
+`DataParallel(module: torch.nn.modules.module.Module, comm: heat.core.communication.MPICommunication, optimizer: heat.optim.dp_optimizer.DataParallelOptimizer | List | Tuple, blocking_parameter_updates: bool = False)`
+: Implements data parallelism across multiple processes. This means that the same model will be run locally
+    on each process. Creation of the model is similar to PyTorch, the only changes are using HeAT layers (ht.nn.layer)
+    in the initialization of the network/optimizer. If there is not a HeAT layer, it will fall back to the PyTorch layer
+    of the same name. The same is true for the optimizer. It is possible to use more than one optimizer, but
+    communication during parameter updates is then limited to blocking. The same limitation takes effect when passing an
+    optimizer that does not deal exactly with the model's set of parameters. For the given model, both the
+    ``__init__()`` and ``forward()`` functions must be defined in the class defining the network.
+
+    An example of this is shown in `examples/mnist.py `_.
+
+    It is highly recommended that a HeAT DataLoader is used, see :func:`ht.utils.data.DataLoader `.
+    The default communications scheme for this is blocking. The blocking scheme will average the model parameters during
+    the backwards step, synchronizing them before the next model iteration.
+
+    Using more than one optimizer forces parameter updates to use blocking MPI communications.
+
+    Attributes
+    ----------
+    module : torch.nn.Module
+        The local module
+    comm : MPICommunication
+        Communicator to use
+    optimizer : heat.DataParallelOptimizer, List, Tuple
+        Individual or sequence of DataParallelOptimizers to be used
+    blocking_parameter_updates : bool, optional
+        Flag indicating the usage of blocking communications for parameter updates
+        Default: non-blocking updates (``False``)
+
+    Initialize internal Module state, shared by both nn.Module and ScriptModule.
+
+    ### Ancestors (in MRO)
+
+    * torch.nn.modules.module.Module
+
+    ### Methods
+
+    `forward(self, *inputs: tuple, **kwargs: dict) ‑> torch.Tensor`
+    : Do the forward step for the network, receive the parameters from the last backward step.
+
+`DataParallelMultiGPU(module: torch.nn.modules.module.Module, optimizer: heat.optim.dp_optimizer.DASO, comm: heat.core.communication.MPICommunication = )`
+: Creates data parallel networks local to each node using PyTorch's distributed class. This does NOT
+    do any global synchronizations. To make optimal use of this structure, use :func:`ht.optim.DASO `.
+
+    Notes
+    -----
+    The PyTorch distributed process group must already exist before this class is initialized.
+
+    Parameters
+    ----------
+    module: torch.nn.Module
+        an implemented PyTorch model
+    optimizer: optim.DASO
+        A DASO optimizer. Other optimizers are not yet implemented. The DASO optimizer should be
+        defined prior to calling this class.
+    comm: MPICommunication, optional
+        A global communicator.
+        Default: :func:`MPICommunication `
+
+    Initialize internal Module state, shared by both nn.Module and ScriptModule.
+
+    ### Ancestors (in MRO)
+
+    * torch.nn.modules.module.Module
+
+    ### Methods
+
+    `forward(self, *inputs: Tuple, **kwargs: Dict) ‑> torch.Tensor`
+    : Calls the forward method for the torch model
diff --git a/doc/api/heat/nn/tests/index.md b/doc/api/heat/nn/tests/index.md
new file mode 100644
index 0000000000..121712763c
--- /dev/null
+++ b/doc/api/heat/nn/tests/index.md
@@ -0,0 +1,7 @@
+Module heat.nn.tests
+====================
+
+Sub-modules
+-----------
+* heat.nn.tests.test_data_parallel
+* heat.nn.tests.test_nn
diff --git a/doc/api/heat/nn/tests/test_data_parallel.md b/doc/api/heat/nn/tests/test_data_parallel.md
new file mode 100644
index 0000000000..5c2bc53f90
--- /dev/null
+++ b/doc/api/heat/nn/tests/test_data_parallel.md
@@ -0,0 +1,50 @@
+Module heat.nn.tests.test_data_parallel
+=======================================
+
+Classes
+-------
+
+`TestDataParallel(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+    the instance's assertion methods fail; test methods raising this
+    exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+    objects used in assert methods) will be printed on failure in *addition*
+    to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+    by assert methods using difflib.
It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * unittest.case.TestCase + + ### Methods + + `test_data_parallel(self)` + : diff --git a/doc/api/heat/nn/tests/test_nn.md b/doc/api/heat/nn/tests/test_nn.md new file mode 100644 index 0000000000..542128f72b --- /dev/null +++ b/doc/api/heat/nn/tests/test_nn.md @@ -0,0 +1,95 @@ +Module heat.nn.tests.test_nn +============================ + +Classes +------- + +`TestFunctional(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * unittest.case.TestCase + + ### Methods + + `test_functional_getattr(self)` + : + +`TestNN(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. 
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+    the instance's assertion methods fail; test methods raising this
+    exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+    objects used in assert methods) will be printed on failure in *addition*
+    to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+    by assert methods using difflib. It is looked up as an instance
+    attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_nn_getattr(self)`
+    :
diff --git a/doc/api/heat/optim/dp_optimizer.md b/doc/api/heat/optim/dp_optimizer.md
new file mode 100644
index 0000000000..be02a4a1da
--- /dev/null
+++ b/doc/api/heat/optim/dp_optimizer.md
@@ -0,0 +1,177 @@
+Module heat.optim.dp_optimizer
+==============================
+MPI-enabled data parallel optimizers
+
+Classes
+-------
+
+`DASO(local_optimizer: torch.optim.optimizer.Optimizer, total_epochs: int, comm: heat.core.communication.MPICommunication = , warmup_epochs: int = 4, cooldown_epochs: int = 4, scheduler:  = None, stability_level: float = 0.05, max_global_skips: int = 8, sending_chunk_size: int = 10000000, downcast_type: torch.dtype = torch.bfloat16, use_mpi_groups: bool = True, skip_reduction_factor: int = 2, local_skip_factor: int = 4, verbose: bool = False)`
+: Optimizer wrapper to use the Distributed Asynchronous and Selective Optimization (DASO) method.
+
+    This optimizer uses a local torch optimizer combined with the :func:`nn.DataParallelMultiGPU `
+    to create a local DPNN on each node from that node's GPUs. Those networks then communicate
+    globally via MPI groups, each of which contains a single GPU from each node.
+
+    DASO uses both local and global synchronization operations. Local synchronization operations are intended to be
+    done very frequently while global synchronizations are conducted asynchronously as the next batches are
+    computed.
+
+    This implementation requires that all nodes have the same number of GPUs.
+
+    There are four phases to training:
+
+    1. Initialization: steps 1 to 8 below
+    2. Warmup phase: a blocking averaging update occurs for the global synchronization step
+    3. Cycling phase: for the global synchronization, the data is sent after a number of batches; the number of batches between synchronizations is referred to as `global_skips`. After the data is sent, a number of batches pass before it is received (`batches_to_wait`). Both of these cycle downward from `max_global_skips` for the global skips, and 1/4th of this value for `batches_to_wait`. When both values are equal to 1 and the loss is stable, they are reset to the initial values and then decay again.
+    4. Cooldown phase: a blocking averaging update occurs for the global synchronization step
+
+    An example usage of this can be found in `heat/examples/nn/imagenet-DASO.py `_.
+
+    The recommended checklist for using this class is as follows:
+
+    1. initialize the local PyTorch process group and set the default device of the local GPUs.
+    2. define the torch network
+    3. define the `local_optimizer` -> a torch optimizer of your choice (tested with SGD)
+    4. optional: choose a learning rate scheduler. This is only needed for schedulers whose `step` function is supposed to be called instead of the optimizer's
+    5. initialize DASO with the local optimizers and parameters
+    6. initialize :func:`nn.DataParallelMultiGPU ` with the torch network and DASO
+    7. If using automatic mixed precision (:class:`torch.cuda.amp`), initialize the gradient scaler and add it to DASO (:func:`add_scaler`)
+    8. ensure that the DataLoaders evenly distribute the data between all the processes. This can be done by using the `torch.utils.data.distributed.DistributedSampler `_ with the `num_replicas` and `rank` parameters
+    9. call `daso_optimizer.epoch_loss_logic(training_loss)` at the end of each epoch
+    10. set the number of batches per epoch (`daso_optimizer.last_batch = number_of_batches`)
+    11. ensure that the step function used in training is that of the DASO optimizer
+
+    Parameters
+    ----------
+    local_optimizer: torch.optim.Optimizer
+        This optimizer handles the optimization of the local NN. Example: `torch.optim.SGD`. \n
+        This can be any optimizer, although tests were only completed with SGD. Other optimizers may show
+        unexpected behavior.
+    total_epochs: int
+        The total number of epochs for training. Needed to determine when to enter the cooldown phase.
+    comm: MPICommunication, optional
+        The MPI communicator to use for training. \n
+        Default: :func:`MPI_WORLD `
+    warmup_epochs: int, optional
+        The number of epochs to complete with a blocking averaging operation after each batch before entering
+        the cycling phase.\n
+        Default: 4
+    cooldown_epochs: int, optional
+        The number of epochs with blocking averaging operations after each batch at the end of training.\n
+        Default: 4
+    scheduler: torch.optim.lr_scheduler, optional
+        Local PyTorch learning rate scheduler. This must be used in the case that the scheduler's `step` function
+        is supposed to be called instead of the optimizer's `step` function.\n
+        Default: None
+    stability_level: float, optional
+        This can be viewed as the percent change threshold that the loss must exceed to be judged as improving.
+        When the loss is within this percent change for 2 epochs, then it is judged as stable.\n
+        Default: 0.05
+    max_global_skips: int, optional
+        The maximum number of batches between the beginning of a global synchronization process.\n
+        Default: 8
+    sending_chunk_size: int, optional
+        During the global synchronization step, the network parameters are split into chunks of data to overlap
+        communication and computation. This value is the maximum chunk size.\n
+        Default: 10,000,000
+    downcast_type: torch.dtype, optional
+        Options: [torch.bfloat16, torch.half, torch.float]
+        When the network parameters are sent during the global synchronization step, they are cast down to
+        a smaller dtype, by default `torch.bfloat16`. Smaller torch dtypes are not implemented.\n
+        Default: torch.bfloat16
+    use_mpi_groups: bool, optional
+        Use MPI groups to divide the global communicator. If True, use MPI GROUPs, otherwise, use MPI SPLIT.\n
+        Default: True
+    skip_reduction_factor: int, optional
+        How much to reduce the global/local skips by when the loss has stabilized.\n
+        Default: 2
+    local_skip_factor: int, optional
+        How many local skips occur per global skip, i.e. number of local skips = global_skips // local_skip_factor.\n
+        Default: 4
+    verbose: bool, optional
+        If true, print out a collection of debug messages.\n
+        Default: False
+
+    ### Methods
+
+    `add_scaler(self, scaler: torch.cuda.amp.grad_scaler.GradScaler) ‑> None`
+    : Create a reference to torch's `torch.cuda.amp.GradScaler `_ used in torch's automatic mixed
+        precision.
+
+        Parameters
+        ----------
+        scaler: torch.cuda.amp.GradScaler
+            the gradient scaler to be used
+
+    `epoch_loss_logic(self, loss: torch.Tensor | int | float, loss_globally_averaged: bool = False) ‑> None`
+    : Function controlling the number of batches between global synchronizations and the batches to wait before
+        receiving the sent parameters. The warm-up and cool-down phases are also controlled here.
+
+        This function should be called at the end of each epoch with the training loss value at the end of the epoch.
+
+        The number of batches between local synchronizations can also be modified here with minor code adjustments.
+
+        Parameters
+        ----------
+        loss: torch.Tensor or float
+            Loss value of the current epoch
+        loss_globally_averaged: bool, optional
+            True if the loss is already globally averaged
+
+    `print0(self, *args, **kwargs) ‑> None`
+    : Print a message on rank 0 if the class parameter `verbose` is set.
+
+    `reset(self) ‑> None`
+    : Reset the optimizer to its base state.
+
+    `set_model(self, model: torch.nn.modules.module.Module) ‑> None`
+    : Set the local model for the optimizer.
+        This should be called during the init of :func:`nn.DataParallelMultiGPU `.
+        However, this can also be called manually.
+
+        Parameters
+        ----------
+        model: torch.nn.Module
+            the local torch model.
+
+    `step(self) ‑> None`
+    : Perform a single optimization step.
+        This will perform the `step` operations of the local optimizer,
+        local learning rate scheduler (if defined), and the gradient scaler used in automatic mixed
+        precision (if defined).
+
+        Also in the step is the logic used for when to send and receive the global/local synchronizations.
+        Global syncs occur on batches for which the batch number modulo the `global_skip` number is 0.
+        If `batches_to_wait` > 0, the next batches have only local syncs. After that number of batches,
+        the data sent during the global sync phase is received.
+
+        Local synchronization can also be turned off if desired by increasing `local_skips` above 1.
+
+        Notes
+        -----
+        self.last_batch must be set!
+
+    `zero_grad(self) ‑> None`
+    : Reset gradients of local optimizer's parameters.
+
+`DataParallelOptimizer(torch_optimizer: torch.optim.optimizer.Optimizer, blocking: bool = False)`
+: Uses a torch.optim.Optimizer for data parallelism. It should be used in combination with the DataParallel (DP) class.
+    To optimize a DP module, the DP optimizer has to be passed to the DP module during its initialization.
+    See :func:`nn.DataParallel ` for a basic example of usage.
+
+    Attributes
+    ----------
+    torch_optimizer : torch.optim.Optimizer
+        the wrapped Torch optimizer
+    blocking : bool
+        Whether to use blocking communications; typically overwritten by :func:`nn.DataParallel `
+
+    ### Methods
+
+    `step(self) ‑> None`
+    : Force the torch optimizer to update the model parameters. In blocking mode, the optimizer updates the parameters
+        immediately; in non-blocking mode, the parameters are updated during the next forward pass.
+
+    `zero_grad(self) ‑> None`
+    : Reset gradients of the optimizer's parameters.
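+
+A condensed sketch of the checklist above (hedged: ``Net``, ``train_one_epoch``, and ``num_batches`` are hypothetical placeholders, and the local torch process group is assumed to be initialized already):
+
+>>> import heat as ht
+>>> import torch
+>>> t_net = Net().cuda()  # steps 1-2: process group set up, torch network defined
+>>> t_opt = torch.optim.SGD(t_net.parameters(), lr=0.1)  # step 3
+>>> daso = ht.optim.DASO(local_optimizer=t_opt, total_epochs=90)  # step 5
+>>> net = ht.nn.DataParallelMultiGPU(t_net, daso)  # step 6
+>>> daso.last_batch = num_batches  # step 10
+>>> for epoch in range(90):
+...     loss = train_one_epoch(net, daso)  # training loop calls daso.step() per batch (step 11)
...     daso.epoch_loss_logic(loss)  # step 9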
diff --git a/doc/api/heat/optim/index.md b/doc/api/heat/optim/index.md new file mode 100644 index 0000000000..54a8eae386 --- /dev/null +++ b/doc/api/heat/optim/index.md @@ -0,0 +1,1549 @@ +Module heat.optim +================= +Optimizer module. + +It contains data parallel specific optimizers and learning rate schedulers. It also includes all of the +optimizers and learning rate schedulers in the torch namespace + +Sub-modules +----------- +* heat.optim.dp_optimizer +* heat.optim.lr_scheduler +* heat.optim.tests +* heat.optim.utils + +Classes +------- + +`ASGD(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.01, lambd: float = 0.0001, alpha: float = 0.75, t0: float = 1000000.0, weight_decay: float = 0, foreach: bool | None = None, maximize: bool = False, differentiable: bool = False, capturable: bool = False)` +: Implements Averaged Stochastic Gradient Descent. + + It has been proposed in `Acceleration of stochastic approximation by + averaging`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-2) + lambd (float, optional): decay term (default: 1e-4) + alpha (float, optional): power for eta update (default: 0.75) + t0 (float, optional): point at which to start averaging (default: 1e6) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + + .. _Acceleration of stochastic approximation by averaging: + https://dl.acm.org/citation.cfm?id=131098 + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`Adadelta(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 1.0, rho: float = 0.9, eps: float = 1e-06, weight_decay: float = 0, foreach: bool | None = None, *, capturable: bool = False, maximize: bool = False, differentiable: bool = False)` +: Implements Adadelta algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, + \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)}, + \: \lambda \text{ (weight decay)} \\ + &\textbf{initialize} : v_0 \leftarrow 0 \: \text{ (square avg)}, + \: u_0 \leftarrow 0 \: \text{ (accumulate variables)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} v_t \leftarrow v_{t-1} \rho + g^2_t (1 - \rho) \\ + &\hspace{5mm}\Delta x_t \leftarrow \frac{\sqrt{u_{t-1} + + \epsilon }}{ \sqrt{v_t + \epsilon} }g_t \hspace{21mm} \\ + &\hspace{5mm} u_t \leftarrow u_{t-1} \rho + + \Delta x^2_t (1 - \rho) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \Delta x_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): coefficient that scale delta before it is applied + to the parameters (default: 1.0) + rho (float, optional): coefficient used for computing a running average + of squared gradients (default: 0.9). A higher value of `rho` will + result in a slower average, which can be helpful for preventing + oscillations in the learning process. + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-6). + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + + .. _ADADELTA\: An Adaptive Learning Rate Method: + https://arxiv.org/abs/1212.5701 + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
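+
+Since heat.optim re-exports the torch optimizers, plain torch-style usage applies; a hedged sketch (the model and data here are purely illustrative):
+
+>>> import heat as ht
+>>> import torch
+>>> model = torch.nn.Linear(4, 2)
+>>> opt = ht.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9)
+>>> loss = model(torch.randn(8, 4)).sum()
+>>> loss.backward()
+>>> opt.step()
+>>> opt.zero_grad()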
+ +`Adafactor(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.01, beta2_decay: float = -0.8, eps: tuple[float | None, float] = (None, 0.001), d: float = 1.0, weight_decay: float = 0.0, *, foreach: bool | None = None, maximize: bool = False)` +: Implements Adafactor algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{(lr)}, \: \tau + \text{(}\beta_2\text{ decay)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, \\ + &\hspace{15mm} \: \epsilon_1, \epsilon_2 \text{ (epsilons)}, \: d \text{(clipping threshold)}, \\ + &\hspace{15mm} \: \lambda \text{(weight decay)}, + \: \textit{maximize} \\ + &\textbf{initialize} : \: R_0 \leftarrow 0 \text{ (second moment row factor)}, \\ + &\hspace{23mm} \: C_0 \leftarrow 0 \text{ (second moment col factor)}, \\ + &\hspace{23mm} \: \widehat{V}_0 \leftarrow 0 \text{ (second moment for vectors)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}G_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}G_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\widehat{\beta}_{2_t} \leftarrow 1 - t^{\tau} \\ + &\hspace{5mm}\rho_t \leftarrow min(lr, \frac{1}{\sqrt{t}}) \\ + &\hspace{5mm}\alpha_t \leftarrow max(\epsilon_2, + \text{RMS}(\theta_{t-1}))\rho_t \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{5mm}\textbf{if} \: \text{dim}(G_t) > 1: \\ + &\hspace{10mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+ + (1-\widehat{\beta}_{2_t})(G_t \odot G_t) \cdot 1_m \\ + &\hspace{10mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+ + (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t) \\ + &\hspace{10mm}\widehat{V}_t \leftarrow + \frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)} \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\widehat{V}_t \leftarrow \widehat{\beta}_{2_t}\widehat{V}_{t-1}+ + (1-\widehat{\beta}_{2_t}) \cdot (G_t \odot G_t) \\ + &\hspace{5mm}U_t \leftarrow + \frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)} \\ + &\hspace{5mm}\widehat{U}_t \leftarrow \frac{U_t}{max(1, \frac{\text{RMS}(U_t)}{d})} \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \alpha_t \widehat{U}_t \\ + + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): unlike other optimizers, Adafactor does not require a + learning rate, and Shazeer, Noam, and Mitchell Stern do not use lr at all. + Deviating from the paper, this implementation uses lr for applying weight + decay and as the maximum value for relative step size rho_t. Note that in + the paper, a constant of 0.01 is used as the maximum value for relative + step size, and so we set 0.01 as the default value. (default: 1e-2) + beta2_decay (float, optional): the decay rate of beta2. beta2 standardly refers + to the coefficient used for computing the running average of the gradient + squared. 
(default: -0.8)
+        eps (Tuple[float, float], optional): epsilon1 is the term added to the denominator
+            of the update calculation to improve numerical stability. This use of epsilon1
+            deviates from the algorithm written in the paper! See note below for more details.
+            epsilon2 is the term used to avoid having too small a weight update when applying
+            parameter scaling. (default: (None, 1e-3))
+        d (float, optional): the clipping threshold, used to avoid larger-than-desired
+            updates. (default: 1.0)
+        weight_decay (float, optional): weight decay coefficient (default: 0.0)
+        foreach (bool, optional): whether foreach implementation of optimizer is used. Note
+            that the foreach implementation uses ~ sizeof(params) more peak memory than the
+            for-loop version due to the intermediates being a tensorlist vs just one tensor.
+            As Adafactor is commonly used when memory is prohibitive, Adafactor will default
+            to the slower single tensor for-loop implementation unless this flag is explicitly
+            True. This behavior is contrary to other optimizers, which will attempt defaulting
+            to foreach on CUDA for faster runtime. (default: None)
+        maximize (bool, optional): maximize the objective with respect to the
+            params, instead of minimizing (default: False)
+    .. Note::
+        The implementation of Adafactor subtly differs from Shazeer, Noam, and Mitchell Stern
+        and implementations in some other frameworks with its use of learning rate and
+        :math:`\epsilon_1`.
+
+        Regarding the learning rate hyperparameter: Shazeer, Noam, and Mitchell Stern do not
+        use lr at all, as the stated algorithm uses :math:`\rho_t` and update clipping to
+        affect the step size.
+
+        This implementation allows `lr` to influence the maximum value for :math:`\rho_t`:
+
+        .. math::
+            \begin{aligned}
+                &\hspace{5mm}\rho_t \leftarrow min(lr, \frac{1}{\sqrt{t}})
+            \end{aligned}
+
+        This differs from Shazeer, Noam, and Mitchell Stern, who use a constant of 0.01 as
+        the maximum value of :math:`\rho_t`:
+
+        .. math::
+            \begin{aligned}
+                &\hspace{5mm}\rho_t \leftarrow min(0.01, \frac{1}{\sqrt{t}})
+            \end{aligned}
+
+        Shazeer, Noam, and Mitchell Stern do not enforce an opinion on how weight decay should
+        be computed, and so we use the learning rate as a coefficient for decoupled weight
+        decay, similar to what is suggested in `Decoupled Weight Decay Regularization`_.
+
+        Regarding the use of :math:`\epsilon_1`: The implementation attempts to replicate the
+        presumed intention of Shazeer, Noam, and Mitchell Stern to use :math:`\epsilon_1` as
+        a stabilizing term when the squared gradient becomes small.
+
+        This stabilization can be written as
+
+        .. math::
+            \begin{aligned}
+                &\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+
+                    (1-\widehat{\beta}_{2_t})(G_t \odot G_t + 1_n \cdot 1^\top_m) \cdot 1_m \\
+                &\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+
+                    (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + 1_n \cdot 1^\top_m) \\
+                &\hspace{5mm}\widehat{V}_t \leftarrow
+                    \frac{R_t \cdot C_t}{max(1^\top_n \cdot R_t, \epsilon_1)} \\
+                &\hspace{5mm}U_t \leftarrow \frac{G_t}{max(\sqrt{\widehat{V}_t}, \epsilon_1)} \\
+            \end{aligned}
+
+        where the row and column factors of gradient squared :math:`R_t` and :math:`C_t`
+        are left alone, and we apply :math:`\epsilon_1` at the final calculation of
+        the variance estimate :math:`\widehat{V}_t` and for the update :math:`U_t`.
+ + This is in contrast to Shazeer, Noam, and Mitchell Stern and other frameworks which + apply :math:`\epsilon_1` to both row and column factors of the squared gradient, but + not in the calculations after: + + .. math:: + \begin{aligned} + &\hspace{5mm}R_t \leftarrow \widehat{\beta}_{2_t}R_{t-1}+ + (1-\widehat{\beta}_{2_t})(G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m) \cdot 1_m \\ + &\hspace{5mm}C_t \leftarrow \widehat{\beta}_{2_t}C_{t-1}+ + (1-\widehat{\beta}_{2_t}) 1^\top_n \cdot (G_t \odot G_t + \epsilon_1 1_n \cdot 1^\top_m) \\ + &\hspace{5mm}\widehat{V}_t \leftarrow \frac{R_t \cdot C_t}{1^\top_n \cdot R_t} \\ + &\hspace{5mm}U_t \leftarrow \frac{G_t}{\sqrt{\widehat{V}_t}} \\ + \end{aligned} + + + .. _Adafactor\: Adaptive Learning Rates with Sublinear Memory Cost: + https://arxiv.org/pdf/1804.04235 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`Adagrad(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.01, lr_decay: float = 0, weight_decay: float = 0, initial_accumulator_value: float = 0, eps: float = 1e-10, foreach: bool | None = None, *, maximize: bool = False, differentiable: bool = False, fused: bool | None = None)` +: Implements Adagrad algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{12mm} \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\ + &\textbf{initialize} : state\_sum_0 \leftarrow \tau \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \tilde{\gamma} \leftarrow \gamma / (1 +(t-1) \eta) \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}state\_sum_t \leftarrow state\_sum_{t-1} + g^2_t \\ + &\hspace{5mm}\theta_t \leftarrow + \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning + and Stochastic Optimization`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-2) + lr_decay (float, optional): learning rate decay (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + initial_accumulator_value (float, optional): initial value of the + sum of squares of gradients (default: 0) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + foreach (bool, optional): whether foreach implementation of optimizer + is used. 
If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + fused (bool, optional): whether the fused implementation (CPU only) is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None). Please note that the fused implementations does not + support sparse or complex gradients. + .. _Adaptive Subgradient Methods for Online Learning and Stochastic + Optimization: http://jmlr.org/papers/v12/duchi11a.html + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `share_memory(self)` + : + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`Adam(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.001, betas: tuple[float | torch.Tensor, float | torch.Tensor] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, amsgrad: bool = False, *, foreach: bool | None = None, maximize: bool = False, capturable: bool = False, differentiable: bool = False, fused: bool | None = None, decoupled_weight_decay: bool = False)` +: Implements Adam algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad}, + \:\textit{maximize}, \: \epsilon \text{ (epsilon)} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0\leftarrow 0 \text{ (second moment)},\: v_0^{max}\leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t) \\ + &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_weight_decay (bool, optional): if True, this optimizer is + equivalent to AdamW and the algorithm will not accumulate weight + decay in the momentum nor variance. (default: False) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. 
If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + fused (bool, optional): whether the fused implementation is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation, with fused being theoretically fastest with both + vertical and horizontal fusion. As such, if the user has not specified either + flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. Why not fused? Since the fused + implementation is relatively new, we want to give it sufficient bake-in time. + To specify fused, pass True for fused. To force running the for-loop + implementation, pass False for either foreach or fused. + .. Note:: + A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`. + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Descendants + + * torch.optim.adamw.AdamW + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`AdamW(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.001, betas: tuple[float | torch.Tensor, float | torch.Tensor] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0.01, amsgrad: bool = False, *, maximize: bool = False, foreach: bool | None = None, capturable: bool = False, differentiable: bool = False, fused: bool | None = None)` +: Implements AdamW algorithm, where weight decay does not accumulate in the momentum nor variance. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 + \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, + \: \epsilon \text{ (epsilon)} \\ + &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, + \: \textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0 + \text{ ( second moment)}, \: v_0^{max}\leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t) \\ + &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. 
Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + fused (bool, optional): whether the fused implementation is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation, with fused being theoretically fastest with both + vertical and horizontal fusion. As such, if the user has not specified either + flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. Why not fused? Since the fused + implementation is relatively new, we want to give it sufficient bake-in time. + To specify fused, pass True for fused. To force running the for-loop + implementation, pass False for either foreach or fused. + .. Note:: + A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`. + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + ### Ancestors (in MRO) + + * torch.optim.adam.Adam + * torch.optim.optimizer.Optimizer + +`Adamax(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.002, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, foreach: bool | None = None, *, maximize: bool = False, differentiable: bool = False, capturable: bool = False)` +: Implements Adamax algorithm (a variant of Adam based on infinity norm). + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}, + \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \epsilon \text{ (epsilon)} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}if \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. 
When using named_parameters,
+            all parameters in all groups should be named
+        lr (float, Tensor, optional): learning rate (default: 2e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        foreach (bool, optional): whether foreach implementation of optimizer
+            is used. If unspecified by the user (so foreach is None), we will try to use
+            foreach over the for-loop implementation on CUDA, since it is usually
+            significantly more performant. Note that the foreach implementation uses
+            ~ sizeof(params) more peak memory than the for-loop version due to the intermediates
+            being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer
+            parameters through the optimizer at a time or switch this flag to False (default: None)
+        maximize (bool, optional): maximize the objective with respect to the
+            params, instead of minimizing (default: False)
+        differentiable (bool, optional): whether autograd should
+            occur through the optimizer step in training. Otherwise, the step()
+            function runs in a torch.no_grad() context. Setting to True can impair
+            performance, so leave it False if you don't intend to run autograd
+            through this instance (default: False)
+        capturable (bool, optional): whether this instance is safe to
+            capture in a CUDA graph. Passing True can impair ungraphed performance,
+            so if you don't intend to graph capture this instance, leave it False
+            (default: False)
+
+    .. _Adam\: A Method for Stochastic Optimization:
+        https://arxiv.org/abs/1412.6980
+
+    ### Ancestors (in MRO)
+
+    * torch.optim.optimizer.Optimizer
+
+    ### Methods
+
+    `step(self, closure=None)`
+    : Perform a single optimization step.
+
+        Args:
+            closure (Callable, optional): A closure that reevaluates the model
+                and returns the loss.
+
+`LBFGS(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 1, max_iter: int = 20, max_eval: int | None = None, tolerance_grad: float = 1e-07, tolerance_change: float = 1e-09, history_size: int = 100, line_search_fn: str | None = None)`
+: Implements L-BFGS algorithm.
+
+    Heavily inspired by `minFunc
+    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_.
+
+    .. warning::
+        This optimizer doesn't support per-parameter options and parameter
+        groups (there can be only one).
+
+    .. warning::
+        Right now all parameters have to be on a single device. This will be
+        improved in the future.
+
+    .. note::
+        This is a very memory intensive optimizer (it requires additional
+        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
+        try reducing the history size, or use a different algorithm.
+
+    Args:
+        params (iterable): iterable of parameters to optimize. Parameters must be real.
+        lr (float, optional): learning rate (default: 1)
+        max_iter (int, optional): maximal number of iterations per optimization step
+            (default: 20)
+        max_eval (int, optional): maximal number of function evaluations per optimization
+            step (default: max_iter * 1.25).
+        tolerance_grad (float, optional): termination tolerance on first order optimality
+            (default: 1e-7).
+        tolerance_change (float, optional): termination tolerance on function
+            value/parameter changes (default: 1e-9).
+        history_size (int, optional): update history size (default: 100).
+ line_search_fn (str, optional): either 'strong_wolfe' or None (default: None). + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure)` + : Perform a single optimization step. + + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. + +`NAdam(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.002, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, momentum_decay: float = 0.004, decoupled_weight_decay: bool = False, *, foreach: bool | None = None, maximize: bool = False, capturable: bool = False, differentiable: bool = False)` +: Implements NAdam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)}, + \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)} \\ + &\hspace{13mm} \: \textit{decoupled\_weight\_decay}, \:\textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{t \psi} \big) \\ + &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex] + & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i}) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. 
When using named_parameters,
+            all parameters in all groups should be named
+        lr (float, Tensor, optional): learning rate (default: 2e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its square (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        momentum_decay (float, optional): momentum decay (default: 4e-3)
+        decoupled_weight_decay (bool, optional): whether to decouple the weight
+            decay as in AdamW to obtain NAdamW. If True, the algorithm does not
+            accumulate weight decay in the momentum nor variance. (default: False)
+        foreach (bool, optional): whether foreach implementation of optimizer
+            is used. If unspecified by the user (so foreach is None), we will try to use
+            foreach over the for-loop implementation on CUDA, since it is usually
+            significantly more performant. Note that the foreach implementation uses
+            ~ sizeof(params) more peak memory than the for-loop version due to the intermediates
+            being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer
+            parameters through the optimizer at a time or switch this flag to False (default: None)
+        maximize (bool, optional): maximize the objective with respect to the
+            params, instead of minimizing (default: False)
+        capturable (bool, optional): whether this instance is safe to
+            capture in a CUDA graph. Passing True can impair ungraphed performance,
+            so if you don't intend to graph capture this instance, leave it False
+            (default: False)
+        differentiable (bool, optional): whether autograd should
+            occur through the optimizer step in training. Otherwise, the step()
+            function runs in a torch.no_grad() context. Setting to True can impair
+            performance, so leave it False if you don't intend to run autograd
+            through this instance (default: False)
+
+    .. _Incorporating Nesterov Momentum into Adam:
+        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
+    .. _Decoupled Weight Decay Regularization:
+        https://arxiv.org/abs/1711.05101
+
+    ### Ancestors (in MRO)
+
+    * torch.optim.optimizer.Optimizer
+
+    ### Methods
+
+    `step(self, closure=None)`
+    : Perform a single optimization step.
+
+        Args:
+            closure (Callable, optional): A closure that reevaluates the model
+                and returns the loss.
+
+`Optimizer(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], defaults: dict[str, typing.Any])`
+: Base class for all optimizers.
+
+    .. warning::
+        Parameters need to be specified as collections that have a deterministic
+        ordering that is consistent between runs. Examples of objects that don't
+        satisfy those properties are sets and iterators over values of dictionaries.
+
+    Args:
+        params (iterable): an iterable of :class:`torch.Tensor` s or
+            :class:`dict` s. Specifies what Tensors should be optimized.
+        defaults (dict): a dict containing default values of optimization
+            options (used when a parameter group doesn't specify them).
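+
+    As an illustration of parameter groups and how ``defaults`` fills in unspecified
+    options, a construction sketch (the ``model`` with ``base`` and ``classifier``
+    submodules is hypothetical):
+
+        >>> # xdoctest: +SKIP
+        >>> optimizer = torch.optim.SGD(
+        ...     [
+        ...         {"params": model.base.parameters(), "lr": 1e-2},
+        ...         {"params": model.classifier.parameters()},
+        ...     ],
+        ...     lr=1e-3,
+        ...     momentum=0.9,
+        ... )
+        >>> # the second group specifies no lr, so it falls back to the default lr=1e-3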
+ + ### Descendants + + * torch.optim.Adafactor + * torch.optim.adadelta.Adadelta + * torch.optim.adagrad.Adagrad + * torch.optim.adam.Adam + * torch.optim.adamax.Adamax + * torch.optim.asgd.ASGD + * torch.optim.lbfgs.LBFGS + * torch.optim.nadam.NAdam + * torch.optim.radam.RAdam + * torch.optim.rmsprop.RMSprop + * torch.optim.rprop.Rprop + * torch.optim.sgd.SGD + * torch.optim.sparse_adam.SparseAdam + + ### Class variables + + `OptimizerPostHook: TypeAlias` + : + + `OptimizerPreHook: TypeAlias` + : + + ### Static methods + + `profile_hook_step(func: Callable[~_P, ~R]) ‑> Callable[~_P, ~R]` + : + + ### Methods + + `add_param_group(self, param_group: dict[str, typing.Any]) ‑> None` + : Add a param group to the :class:`Optimizer` s `param_groups`. + + This can be useful when fine tuning a pre-trained network as frozen layers can be made + trainable and added to the :class:`Optimizer` as training progresses. + + Args: + param_group (dict): Specifies what Tensors should be optimized along with group + specific optimization options. + + `load_state_dict(self, state_dict: dict[str, typing.Any]) ‑> None` + : Load the optimizer state. + + Args: + state_dict (dict): optimizer state. Should be an object returned + from a call to :meth:`state_dict`. + + .. note:: + The names of the parameters (if they exist under the "param_names" key of each param group + in :meth:`state_dict`) will not affect the loading process. + To use the parameters' names for custom cases (such as when the parameters in the loaded state dict + differ from those initialized in the optimizer), + a custom ``register_load_state_dict_pre_hook`` should be implemented to adapt the loaded dict + accordingly. + If ``param_names`` exist in loaded state dict ``param_groups`` they will be saved and override + the current names, if present, in the optimizer state. If they do not exist in loaded state dict, + the optimizer ``param_names`` will remain unchanged. + + `register_load_state_dict_post_hook(self, hook: Callable[[ForwardRef('Optimizer')], None], prepend: bool = False) ‑> torch.utils.hooks.RemovableHandle` + : Register a load_state_dict post-hook which will be called after + :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the + following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + The hook will be called with argument ``self`` after calling + ``load_state_dict`` on ``self``. The registered hook can be used to + perform post-processing after ``load_state_dict`` has loaded the + ``state_dict``. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided post ``hook`` will be fired before + all the already registered post-hooks on ``load_state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + post-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `register_load_state_dict_pre_hook(self, hook: Callable[[ForwardRef('Optimizer'), dict[str, Any]], dict[str, Any] | None], prepend: bool = False) ‑> torch.utils.hooks.RemovableHandle` + : Register a load_state_dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.load_state_dict` is called. 
It should have the + following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The ``optimizer`` argument is the optimizer instance being used and the + ``state_dict`` argument is a shallow copy of the ``state_dict`` the user + passed in to ``load_state_dict``. The hook may modify the state_dict inplace + or optionally return a new one. If a state_dict is returned, it will be used + to be loaded into the optimizer. + + The hook will be called with argument ``self`` and ``state_dict`` before + calling ``load_state_dict`` on ``self``. The registered hook can be used to + perform pre-processing before the ``load_state_dict`` call is made. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``load_state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `register_state_dict_post_hook(self, hook: Callable[[ForwardRef('Optimizer'), dict[str, Any]], dict[str, Any] | None], prepend: bool = False) ‑> torch.utils.hooks.RemovableHandle` + : Register a state dict post-hook which will be called after :meth:`~torch.optim.Optimizer.state_dict` is called. + + It should have the following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The hook will be called with arguments ``self`` and ``state_dict`` after generating + a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally + return a new one. The registered hook can be used to perform post-processing + on the ``state_dict`` before it is returned. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided post ``hook`` will be fired before + all the already registered post-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + post-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `register_state_dict_pre_hook(self, hook: Callable[[ForwardRef('Optimizer')], None], prepend: bool = False) ‑> torch.utils.hooks.RemovableHandle` + : Register a state dict pre-hook which will be called before :meth:`~torch.optim.Optimizer.state_dict` is called. + + It should have the following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``. + The registered hook can be used to perform pre-processing before the ``state_dict`` + call is made. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. 
(default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `register_step_post_hook(self, hook: Callable[[Self, tuple[Any, ...], dict[str, Any]], None]) ‑> torch.utils.hooks.RemovableHandle` + : Register an optimizer step post hook which will be called after optimizer step. + + It should have the following signature:: + + hook(optimizer, args, kwargs) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `register_step_pre_hook(self, hook: Callable[[Self, tuple[Any, ...], dict[str, Any]], tuple[tuple[Any, ...], dict[str, Any]] | None]) ‑> torch.utils.hooks.RemovableHandle` + : Register an optimizer step pre hook which will be called before optimizer step. + + It should have the following signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + The ``optimizer`` argument is the optimizer instance being used. If + args and kwargs are modified by the pre-hook, then the transformed + values are returned as a tuple containing the new_args and new_kwargs. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + + `state_dict(self) ‑> dict[str, typing.Any]` + : Return the state of the optimizer as a :class:`dict`. + + It contains two entries: + + * ``state``: a Dict holding current optimization state. Its content + differs between optimizer classes, but some common characteristics + hold. For example, state is saved per parameter, and the parameter + itself is NOT saved. ``state`` is a Dictionary mapping parameter ids + to a Dict with state corresponding to each parameter. + * ``param_groups``: a List containing all parameter groups where each + parameter group is a Dict. Each parameter group contains metadata + specific to the optimizer, such as learning rate and weight decay, + as well as a List of parameter IDs of the parameters in the group. + If a param group was initialized with ``named_parameters()`` the names + content will also be saved in the state dict. + + NOTE: The parameter IDs may look like indices but they are just IDs + associating state with param_group. When loading from a state_dict, + the optimizer will zip the param_group ``params`` (int IDs) and the + optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to + match state WITHOUT additional verification. + + A returned state dict might look something like: + + .. code-block:: text + + { + 'state': { + 0: {'momentum_buffer': tensor(...), ...}, + 1: {'momentum_buffer': tensor(...), ...}, + 2: {'momentum_buffer': tensor(...), ...}, + 3: {'momentum_buffer': tensor(...), ...} + }, + 'param_groups': [ + { + 'lr': 0.01, + 'weight_decay': 0, + ... + 'params': [0] + 'param_names' ['param0'] (optional) + }, + { + 'lr': 0.001, + 'weight_decay': 0.5, + ... + 'params': [1, 2, 3] + 'param_names': ['param1', 'layer.weight', 'layer.bias'] (optional) + } + ] + } + + `step(self, closure: Callable[[], float] | None = None) ‑> float | None` + : Perform a single optimization step to update parameter. + + Args: + closure (Callable): A closure that reevaluates the model and + returns the loss. 
Optional for most optimizers. + + `zero_grad(self, set_to_none: bool = True) ‑> None` + : Reset the gradients of all optimized :class:`torch.Tensor` s. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + This will in general have lower memory footprint, and can modestly improve performance. + However, it changes certain behaviors. For example: + 1. When the user tries to access a gradient and perform manual ops on it, + a None attribute or a Tensor full of 0s will behave differently. + 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s + are guaranteed to be None for params that did not receive a gradient. + 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None + (in one case it does the step with a gradient of 0 and in the other it skips + the step altogether). + +`RAdam(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.001, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, weight_decay: float = 0, decoupled_weight_decay: bool = False, *, foreach: bool | None = None, maximize: bool = False, capturable: bool = False, differentiable: bool = False)` +: Implements RAdam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2 + \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \: + \lambda \text{ (weightdecay)}, \:\textit{maximize} \\ + &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)}, \\ + &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{6mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{12mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{6mm}\textbf{else} \\ + &\hspace{12mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{6mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{6mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t} \\ + &\hspace{12mm}\textbf{else} \\ + &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t} \\ + &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} - + 2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1.ex] + &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\ + &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon } \\ + &\hspace{12mm} r_t \leftarrow + \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t \\ + &\hspace{6mm}\textbf{else} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_. 
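+
+    A minimal usage sketch (``model`` is assumed to be defined elsewhere; the
+    hyperparameter values are illustrative only, and ``decoupled_weight_decay``
+    is discussed below):
+
+        >>> # xdoctest: +SKIP
+        >>> optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3)
+        >>> # AdamW-style decoupled weight decay, i.e. RAdamW:
+        >>> optimizer = torch.optim.RAdam(
+        ...     model.parameters(), lr=1e-3, weight_decay=1e-2, decoupled_weight_decay=True
+        ... )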
+ + This implementation provides an option to use either the original weight_decay implementation as in Adam + (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied + to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False + (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which + corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information + about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_. + + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_weight_decay (bool, optional): whether to decouple the weight + decay as in AdamW to obtain RAdamW. If True, the algorithm does not + accumulate weight decay in the momentum nor variance. (default: False) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + + .. _On the variance of the adaptive learning rate and beyond: + https://arxiv.org/abs/1908.03265 + .. _author's implementation: + https://github.com/LiyuanLucasLiu/RAdam + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`RMSprop(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.01, alpha: float = 0.99, eps: float = 1e-08, weight_decay: float = 0, momentum: float = 0, centered: bool = False, capturable: bool = False, foreach: bool | None = None, maximize: bool = False, differentiable: bool = False)` +: Implements RMSprop algorithm. 
+
+    .. math::
+        \begin{aligned}
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{input} : \alpha \text{ (alpha)}, \: \gamma \text{ (lr)},
+                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
+            &\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},
+                \: centered, \: \epsilon \text{ (epsilon)} \\
+            &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
+                \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex]
+            &\rule{110mm}{0.4pt} \\
+            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
+            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
+            &\hspace{5mm}if \: \lambda \neq 0 \\
+            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
+            &\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t
+                \hspace{8mm} \\
+            &\hspace{5mm} \tilde{v_t} \leftarrow v_t \\
+            &\hspace{5mm}if \: centered \\
+            &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\
+            &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\
+            &\hspace{5mm}if \: \mu > 0 \\
+            &\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
+                g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\
+            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\
+            &\hspace{5mm} else \\
+            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} -
+                \gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\
+            &\rule{110mm}{0.4pt} \\[-1.ex]
+            &\bf{return} \: \theta_t \\[-1.ex]
+            &\rule{110mm}{0.4pt} \\[-1.ex]
+        \end{aligned}
+
+    For further details regarding the algorithm we refer to the
+    `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_
+    by G. Hinton and, for the centered version, to `Generating Sequences
+    With Recurrent Neural Networks <https://arxiv.org/abs/1308.0850>`_.
+    The implementation here takes the square root of the gradient average before
+    adding epsilon (note that TensorFlow interchanges these two operations). The effective
+    learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
+    is the scheduled learning rate and :math:`v` is the weighted moving average
+    of the squared gradient.
+
+    Args:
+        params (iterable): iterable of parameters or named_parameters to optimize
+            or iterable of dicts defining parameter groups. When using named_parameters,
+            all parameters in all groups should be named
+        lr (float, Tensor, optional): learning rate (default: 1e-2)
+        alpha (float, optional): smoothing constant (default: 0.99)
+        eps (float, optional): term added to the denominator to improve
+            numerical stability (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        momentum (float, optional): momentum factor (default: 0)
+        centered (bool, optional): if ``True``, compute the centered RMSProp,
+            the gradient is normalized by an estimation of its variance
+        capturable (bool, optional): whether this instance is safe to
+            capture in a CUDA graph. Passing True can impair ungraphed performance,
+            so if you don't intend to graph capture this instance, leave it False
+            (default: False)
+        foreach (bool, optional): whether foreach implementation of optimizer
+            is used. If unspecified by the user (so foreach is None), we will try to use
+            foreach over the for-loop implementation on CUDA, since it is usually
+            significantly more performant. Note that the foreach implementation uses
+            ~ sizeof(params) more peak memory than the for-loop version due to the intermediates
+            being a tensorlist vs just one tensor.
If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`Rprop(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.01, etas: tuple[float, float] = (0.5, 1.2), step_sizes: tuple[float, float] = (1e-06, 50), *, capturable: bool = False, foreach: bool | None = None, maximize: bool = False, differentiable: bool = False)` +: Implements the resilient backpropagation algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta) + \text{ (objective)}, \\ + &\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min} + \text{ (step sizes)} \\ + &\textbf{initialize} : g^0_{prev} \leftarrow 0, + \: \eta_0 \leftarrow \text{lr (learning rate)} \\ + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\ + &\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+}, + \Gamma_{max}) \\ + &\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-}, + \Gamma_{min}) \\ + &\hspace{15mm} g^i_t \leftarrow 0 \\ + &\hspace{10mm} \textbf{else} \: \\ + &\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\ + &\hspace{5mm}g_{prev} \leftarrow g_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to the paper + `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm + `_. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. When using named_parameters, + all parameters in all groups should be named + lr (float, optional): learning rate (default: 1e-2) + etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that + are multiplicative increase and decrease factors + (default: (0.5, 1.2)) + step_sizes (Tuple[float, float], optional): a pair of minimal and + maximal allowed step sizes (default: (1e-6, 50)) + capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False) + foreach (bool, optional): whether foreach implementation of optimizer + is used. 
If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + +`SGD(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.001, momentum: float = 0, dampening: float = 0, weight_decay: float | torch.Tensor = 0, nesterov: bool = False, *, maximize: bool = False, foreach: bool | None = None, differentiable: bool = False, fused: bool | None = None)` +: Implements stochastic gradient descent (optionally with momentum). + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)}, + \:\textit{ nesterov,}\:\textit{ maximize} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}\textbf{if} \: \mu \neq 0 \\ + &\hspace{10mm}\textbf{if} \: t > 1 \\ + &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} \textbf{b}_t \leftarrow g_t \\ + &\hspace{10mm}\textbf{if} \: \textit{nesterov} \\ + &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t \\ + &\hspace{10mm}\textbf{else} \\[-1.ex] + &\hspace{15mm} g_t \leftarrow \textbf{b}_t \\ + &\hspace{5mm}\textbf{if} \: \textit{maximize} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t \\[-1.ex] + &\hspace{5mm}\textbf{else} \\[-1.ex] + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + Nesterov momentum is based on the formula from + `On the importance of initialization and momentum in deep learning`__. + + Args: + params (iterable): iterable of parameters or named_parameters to optimize + or iterable of dicts defining parameter groups. 
When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-3) + momentum (float, optional): momentum factor (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + nesterov (bool, optional): enables Nesterov momentum. Only applicable + when momentum is non-zero. (default: False) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None) + differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False) + fused (bool, optional): whether the fused implementation is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation, with fused being theoretically fastest with both + vertical and horizontal fusion. As such, if the user has not specified either + flag (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. Why not fused? Since the fused + implementation is relatively new, we want to give it sufficient bake-in time. + To specify fused, pass True for fused. To force running the for-loop + implementation, pass False for either foreach or fused. + + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + + __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf + + .. note:: + The implementation of SGD with Momentum/Nesterov subtly differs from + Sutskever et al. and implementations in some other frameworks. + + Considering the specific case of Momentum, the update can be written as + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + g_{t+1}, \\ + p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, + \end{aligned} + + where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the + parameters, gradient, velocity, and momentum respectively. + + This is in contrast to Sutskever et al. and + other frameworks which employ an update of the form + + .. math:: + \begin{aligned} + v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\ + p_{t+1} & = p_{t} - v_{t+1}. + \end{aligned} + + The Nesterov version is analogously modified. + + Moreover, the initial value of the momentum buffer is set to the + gradient value at the first step. This is in contrast to some other + frameworks that initialize it to all zeros. 
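+
+    A minimal sketch contrasting the two momentum conventions in the note above
+    (the tensors ``p``, ``g``, ``v`` and the constants are illustrative, not part of the API):
+
+    >>> # xdoctest: +SKIP
+    >>> mu, lr = 0.9, 0.1
+    >>> v = mu * v + g  # PyTorch convention: velocity accumulates the raw gradient
+    >>> p = p - lr * v  # the learning rate scales the velocity
+    >>> # Sutskever et al. convention, for comparison:
+    >>> # v = mu * v + lr * g;  p = p - v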
+
+    ### Ancestors (in MRO)
+
+    * torch.optim.optimizer.Optimizer
+
+    ### Methods
+
+    `step(self, closure=None)`
+    : Perform a single optimization step.
+
+        Args:
+            closure (Callable, optional): A closure that reevaluates the model
+                and returns the loss.
+
+`SparseAdam(params: Iterable[torch.Tensor] | Iterable[dict[str, typing.Any]] | Iterable[tuple[str, torch.Tensor]], lr: float | torch.Tensor = 0.001, betas: tuple[float, float] = (0.9, 0.999), eps: float = 1e-08, maximize: bool = False)`
+: SparseAdam implements a masked version of the Adam algorithm
+    suitable for sparse gradients. Currently, due to implementation constraints (explained
+    below), SparseAdam is only intended for a narrow subset of use cases, specifically
+    parameters of a dense layout with gradients of a sparse layout. This occurs in a
+    special case where the module backwards produces grads already in a sparse layout.
+    One example NN module that behaves as such is ``nn.Embedding(sparse=True)``.
+
+    SparseAdam approximates the Adam algorithm by masking out the parameter and moment
+    updates corresponding to the zero values in the gradients. Whereas the Adam algorithm
+    will update the first moment, the second moment, and the parameters based on all values
+    of the gradients, SparseAdam only updates the moments and parameters corresponding
+    to the non-zero values of the gradients.
+
+    A simplified way of thinking about the `intended` implementation is as such:
+
+    1. Create a mask of the non-zero values in the sparse gradients. For example,
+       if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1].
+    2. Apply this mask over the running moments and do computation on only the
+       non-zero values.
+    3. Apply this mask over the parameters and only apply an update on non-zero values.
+
+    In actuality, we use sparse layout Tensors to optimize this approximation, which means the
+    more gradients that are masked by not being materialized, the more performant the optimization.
+    Since we rely on using sparse layout tensors, we infer that any materialized value in the
+    sparse layout is non-zero and we do NOT actually verify that all values are not zero!
+    It is important to not conflate a semantically sparse tensor (a tensor where many
+    of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse``
+    returns ``True``). The SparseAdam approximation is intended for `semantically` sparse
+    tensors and the sparse layout is only an implementation detail. A clearer implementation
+    would be to use MaskedTensors, but those are experimental.
+
+
+    .. note::
+
+        If you suspect your gradients are semantically sparse (but do not have sparse
+        layout), this variant may not be the best for you. Ideally, you want to avoid
+        materializing anything that is suspected to be sparse in the first place, since
+        needing to convert all your grads from dense layout to sparse layout may outweigh
+        the performance gain. Here, using Adam may be the best alternative, unless you
+        can easily rig up your module to output sparse grads similar to
+        ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do
+        so by manually overriding your parameters' ``.grad`` fields with their sparse
+        equivalents before calling ``.step()``.
+
+
+    Args:
+        params (iterable): iterable of parameters or named_parameters to optimize
+            or iterable of dicts defining parameter groups.
When using named_parameters, + all parameters in all groups should be named + lr (float, Tensor, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + + ### Ancestors (in MRO) + + * torch.optim.optimizer.Optimizer + + ### Methods + + `step(self, closure=None)` + : Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. diff --git a/doc/api/heat/optim/lr_scheduler.md b/doc/api/heat/optim/lr_scheduler.md new file mode 100644 index 0000000000..d444bb0e49 --- /dev/null +++ b/doc/api/heat/optim/lr_scheduler.md @@ -0,0 +1,871 @@ +Module heat.optim.lr_scheduler +============================== +Learning rate schedulers in the heat namespace + +Classes +------- + +`ChainedScheduler(schedulers: Sequence[torch.optim.lr_scheduler.LRScheduler], optimizer: torch.optim.optimizer.Optimizer | None = None)` +: Chains a list of learning rate schedulers. + + Takes in a sequence of chainable learning rate schedulers and calls their + step() functions consecutively in just one call to step(). + + Args: + schedulers (sequence): sequence of chained schedulers. + optimizer (Optimizer, optional): Wrapped optimizer. Default: None. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 1. for all groups + >>> # lr = 0.09 if epoch == 0 + >>> # lr = 0.081 if epoch == 1 + >>> # lr = 0.729 if epoch == 2 + >>> # lr = 0.6561 if epoch == 3 + >>> # lr = 0.59049 if epoch >= 4 + >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2) + >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9) + >>> scheduler = ChainedScheduler([scheduler1, scheduler2], optimizer=optimizer) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `load_state_dict(self, state_dict)` + : Load the scheduler's state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The wrapped scheduler states will also be saved. + + `step(self)` + : Perform a step. + +`ConstantLR(optimizer: torch.optim.optimizer.Optimizer, factor: float = 0.3333333333333333, total_iters: int = 5, last_epoch: int = -1)` +: Multiply the learning rate of each parameter group by a small constant factor. + + The multiplication is done until the number of epoch reaches a pre-defined milestone: total_iters. + Notice that such multiplication of the small constant factor can + happen simultaneously with other changes to the learning rate from outside this scheduler. + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + factor (float): The number we multiply learning rate until the milestone. Default: 1./3. + total_iters (int): The number of steps that the scheduler multiplies the learning rate by the factor. + Default: 5. 
+ last_epoch (int): The index of the last epoch. Default: -1. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.025 if epoch == 1 + >>> # lr = 0.025 if epoch == 2 + >>> # lr = 0.025 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = ConstantLR(optimizer, factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate of each parameter group. + +`CosineAnnealingLR(optimizer: torch.optim.optimizer.Optimizer, T_max: int, eta_min: float = 0.0, last_epoch: int = -1)` +: Set the learning rate of each parameter group using a cosine annealing schedule. + + The :math:`\eta_{max}` is set to the initial lr and + :math:`T_{cur}` is the number of epochs since the last restart in SGDR: + + .. math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + When last_epoch=-1, sets initial lr as lr. Notice that because the schedule + is defined recursively, the learning rate can be simultaneously modified + outside this scheduler by other operators. If the learning rate is set + solely by this scheduler, the learning rate at each step becomes: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only + implements the cosine annealing part of SGDR, and not the restarts. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_max (int): Maximum number of iterations. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + + .. _SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Retrieve the learning rate of each parameter group. + +`CosineAnnealingWarmRestarts(optimizer: torch.optim.optimizer.Optimizer, T_0: int, T_mult: int = 1, eta_min: float = 0.0, last_epoch: int = -1)` +: Set the learning rate of each parameter group using a cosine annealing schedule. + + The :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` + is the number of epochs since the last restart and :math:`T_{i}` is the number + of epochs between two warm restarts in SGDR: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right) + + When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`. + When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`. + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_0 (int): Number of iterations until the first restart. + T_mult (int, optional): A factor by which :math:`T_{i}` increases after a restart. Default: 1. + eta_min (float, optional): Minimum learning rate. Default: 0. + last_epoch (int, optional): The index of the last epoch. Default: -1. 
+ + .. _SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the initial learning rate. + + `step(self, epoch=None)` + : Step could be called after every batch update. + + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> iters = len(dataloader) + >>> for epoch in range(20): + >>> for i, sample in enumerate(dataloader): + >>> inputs, labels = sample['inputs'], sample['labels'] + >>> optimizer.zero_grad() + >>> outputs = net(inputs) + >>> loss = criterion(outputs, labels) + >>> loss.backward() + >>> optimizer.step() + >>> scheduler.step(epoch + i / iters) + + This function can be called in an interleaved way. + + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> for epoch in range(20): + >>> scheduler.step() + >>> scheduler.step(26) + >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) + +`CyclicLR(optimizer: torch.optim.optimizer.Optimizer, base_lr: float | list[float], max_lr: float | list[float], step_size_up: int = 2000, step_size_down: int | None = None, mode: Literal['triangular', 'triangular2', 'exp_range'] = 'triangular', gamma: float = 1.0, scale_fn: Callable[[float], float] | None = None, scale_mode: Literal['cycle', 'iterations'] = 'cycle', cycle_momentum: bool = True, base_momentum: float = 0.8, max_momentum: float = 0.9, last_epoch: int = -1)` +: Sets the learning rate of each parameter group according to cyclical learning rate policy (CLR). + + The policy cycles the learning rate between two boundaries with a constant frequency, + as detailed in the paper `Cyclical Learning Rates for Training Neural Networks`_. + The distance between the two boundaries can be scaled on a per-iteration + or per-cycle basis. + + Cyclical learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This class has three built-in policies, as put forth in the paper: + + * "triangular": A basic triangular cycle without amplitude scaling. + * "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. + * "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}` + at each cycle iteration. + + This implementation was adapted from the github repo: `bckenstler/CLR`_ + + Args: + optimizer (Optimizer): Wrapped optimizer. + base_lr (float or list): Initial learning rate which is the + lower boundary in the cycle for each parameter group. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_lr - base_lr). + The lr at any cycle is the sum of base_lr + and some scaling of the amplitude; therefore + max_lr may not actually be reached depending on + scaling function. + step_size_up (int): Number of training iterations in the + increasing half of a cycle. Default: 2000 + step_size_down (int): Number of training iterations in the + decreasing half of a cycle. If step_size_down is None, + it is set to step_size_up. Default: None + mode (str): One of {triangular, triangular2, exp_range}. + Values correspond to policies detailed above. + If scale_fn is not None, this argument is ignored. 
+ Default: 'triangular' + gamma (float): Constant in 'exp_range' scaling function: + gamma**(cycle iterations) + Default: 1.0 + scale_fn (function): Custom scaling policy defined by a single + argument lambda function, where + 0 <= scale_fn(x) <= 1 for all x >= 0. + If specified, then 'mode' is ignored. + Default: None + scale_mode (str): {'cycle', 'iterations'}. + Defines whether scale_fn is evaluated on + cycle number or cycle iterations (training + iterations since start of cycle). + Default: 'cycle' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.8 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + The momentum at any cycle is the difference of max_momentum + and some scaling of the amplitude; therefore + base_momentum may not actually be reached depending on + scaling function. Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.9 + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + + .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + .. _bckenstler/CLR: https://github.com/bckenstler/CLR + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Calculate the learning rate at batch index. + + This function treats `self.last_epoch` as the last batch index. + + If `self.cycle_momentum` is ``True``, this function has a side effect of + updating the optimizer's momentum. + + `load_state_dict(self, state_dict)` + : Load the scheduler's state. + + `scale_fn(self, x) ‑> float` + : Get the scaling policy. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + +`ExponentialLR(optimizer: torch.optim.optimizer.Optimizer, gamma: float, last_epoch: int = -1)` +: Decays the learning rate of each parameter group by gamma every epoch. + + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + gamma (float): Multiplicative factor of learning rate decay. + last_epoch (int): The index of last epoch. Default: -1. + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate of each parameter group. 
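+
+    A minimal usage sketch, assuming an existing ``optimizer``; with
+    ``gamma=0.9`` the learning rate shrinks by 10% each epoch:
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> scheduler = ExponentialLR(optimizer, gamma=0.9)
+        >>> for epoch in range(100):
+        >>>     train(...)
+        >>>     validate(...)
+        >>>     scheduler.step()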
+ +`LRScheduler(optimizer: torch.optim.optimizer.Optimizer, last_epoch: int = -1)` +: Adjusts the learning rate during optimization. + + ### Descendants + + * torch.optim.lr_scheduler.ChainedScheduler + * torch.optim.lr_scheduler.ConstantLR + * torch.optim.lr_scheduler.CosineAnnealingLR + * torch.optim.lr_scheduler.CosineAnnealingWarmRestarts + * torch.optim.lr_scheduler.CyclicLR + * torch.optim.lr_scheduler.ExponentialLR + * torch.optim.lr_scheduler.LambdaLR + * torch.optim.lr_scheduler.LinearLR + * torch.optim.lr_scheduler.MultiStepLR + * torch.optim.lr_scheduler.MultiplicativeLR + * torch.optim.lr_scheduler.OneCycleLR + * torch.optim.lr_scheduler.PolynomialLR + * torch.optim.lr_scheduler.ReduceLROnPlateau + * torch.optim.lr_scheduler.SequentialLR + * torch.optim.lr_scheduler.StepLR + * torch.optim.lr_scheduler._LRScheduler + * torch.optim.swa_utils.SWALR + + ### Methods + + `get_last_lr(self) ‑> list[float]` + : Return last computed learning rate by current scheduler. + + `get_lr(self) ‑> list[float]` + : Compute learning rate using chainable form of the scheduler. + + `load_state_dict(self, state_dict: dict[str, typing.Any])` + : Load the scheduler's state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + + `step(self, epoch: int | None = None)` + : Perform a step. + +`LambdaLR(optimizer: torch.optim.optimizer.Optimizer, lr_lambda: Callable[[int], float] | list[typing.Callable[[int], float]], last_epoch: int = -1)` +: Sets the initial learning rate. + + The learning rate of each parameter group is set to the initial lr + times a given function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer has two groups. + >>> lambda1 = lambda epoch: epoch // 30 + >>> lambda2 = lambda epoch: 0.95 ** epoch + >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute learning rate. + + `load_state_dict(self, state_dict)` + : Load the scheduler's state. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. 
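+
+    A minimal checkpointing sketch for the note above, assuming an existing
+    ``optimizer`` and ``scheduler`` (``PATH`` is an illustrative file name):
+
+    >>> # xdoctest: +SKIP
+    >>> torch.save({"optim": optimizer.state_dict(),
+    ...             "sched": scheduler.state_dict()}, PATH)
+    >>> ckpt = torch.load(PATH)
+    >>> optimizer.load_state_dict(ckpt["optim"])
+    >>> scheduler.load_state_dict(ckpt["sched"])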
+ +`LinearLR(optimizer: torch.optim.optimizer.Optimizer, start_factor: float = 0.3333333333333333, end_factor: float = 1.0, total_iters: int = 5, last_epoch: int = -1)` +: Decays the learning rate of each parameter group by linearly changing small multiplicative factor. + + The multiplication is done until the number of epoch reaches a pre-defined milestone: total_iters. + Notice that such decay can happen simultaneously with other changes to the learning rate + from outside this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + start_factor (float): The number we multiply learning rate in the first epoch. + The multiplication factor changes towards end_factor in the following epochs. + Default: 1./3. + end_factor (float): The number we multiply learning rate at the end of linear changing + process. Default: 1.0. + total_iters (int): The number of iterations that multiplicative factor reaches to 1. + Default: 5. + last_epoch (int): The index of the last epoch. Default: -1. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.03125 if epoch == 1 + >>> # lr = 0.0375 if epoch == 2 + >>> # lr = 0.04375 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = LinearLR(optimizer, start_factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate. + +`MultiStepLR(optimizer: torch.optim.optimizer.Optimizer, milestones: Iterable[int], gamma: float = 0.1, last_epoch: int = -1)` +: Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones. + + Notice that such decay can happen simultaneously with other changes to the learning rate + from outside this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + milestones (list): List of epoch indices. Must be increasing. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 80 + >>> # lr = 0.0005 if epoch >= 80 + >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate of each parameter group. + +`MultiplicativeLR(optimizer: torch.optim.optimizer.Optimizer, lr_lambda: Callable[[int], float] | list[typing.Callable[[int], float]], last_epoch: int = -1)` +: Multiply the learning rate of each parameter group by the factor given in the specified function. + + When last_epoch=-1, set initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. 
+ + Example: + >>> # xdoctest: +SKIP + >>> lmbda = lambda epoch: 0.95 + >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate of each parameter group. + + `load_state_dict(self, state_dict)` + : Load the scheduler's state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + +`OneCycleLR(optimizer: torch.optim.optimizer.Optimizer, max_lr: float | list[float], total_steps: int | None = None, epochs: int | None = None, steps_per_epoch: int | None = None, pct_start: float = 0.3, anneal_strategy: Literal['cos', 'linear'] = 'cos', cycle_momentum: bool = True, base_momentum: float | list[float] = 0.85, max_momentum: float | list[float] = 0.95, div_factor: float = 25.0, final_div_factor: float = 10000.0, three_phase: bool = False, last_epoch: int = -1)` +: Sets the learning rate of each parameter group according to the 1cycle learning rate policy. + + The 1cycle policy anneals the learning rate from an initial learning rate to some maximum + learning rate and then from that maximum learning rate to some minimum learning rate much + lower than the initial learning rate. + This policy was initially described in the paper `Super-Convergence: + Very Fast Training of Neural Networks Using Large Learning Rates`_. + + The 1cycle learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This scheduler is not chainable. + + Note also that the total number of steps in the cycle can be determined in one + of two ways (listed in order of precedence): + + #. A value for total_steps is explicitly provided. + #. A number of epochs (epochs) and a number of steps per epoch + (steps_per_epoch) are provided. + In this case, the number of total steps is inferred by + total_steps = epochs * steps_per_epoch + + You must either provide a value for total_steps or provide a value for both + epochs and steps_per_epoch. + + The default behaviour of this scheduler follows the fastai implementation of 1cycle, which + claims that "unpublished work has shown even better results by using only two phases". To + mimic the behaviour of the original paper instead, set ``three_phase=True``. + + Args: + optimizer (Optimizer): Wrapped optimizer. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. + total_steps (int): The total number of steps in the cycle. Note that + if a value is not provided here, then it must be inferred by providing + a value for epochs and steps_per_epoch. + Default: None + epochs (int): The number of epochs to train for. This is used along + with steps_per_epoch in order to infer the total number of steps in the cycle + if a value for total_steps is not provided. + Default: None + steps_per_epoch (int): The number of steps per epoch to train for. This is + used along with epochs in order to infer the total number of steps in the + cycle if a value for total_steps is not provided. 
+ Default: None + pct_start (float): The percentage of the cycle (in number of steps) spent + increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: "cos" for cosine annealing, "linear" for + linear annealing. + Default: 'cos' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.85 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.95 + div_factor (float): Determines the initial learning rate via + initial_lr = max_lr/div_factor + Default: 25 + final_div_factor (float): Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor + Default: 1e4 + three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the + learning rate according to 'final_div_factor' instead of modifying the second + phase (the first two phases will be symmetrical about the step indicated by + 'pct_start'). + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + + Example: + >>> # xdoctest: +SKIP + >>> data_loader = torch.utils.data.DataLoader(...) + >>> optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> optimizer.step() + >>> scheduler.step() + + + .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: + https://arxiv.org/abs/1708.07120 + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate of each parameter group. + +`PolynomialLR(optimizer: torch.optim.optimizer.Optimizer, total_iters: int = 5, power: float = 1.0, last_epoch: int = -1)` +: Decays the learning rate of each parameter group using a polynomial function in the given total_iters. + + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. + power (float): The power of the polynomial. Default: 1.0. + + Example: + >>> # xdoctest: +SKIP("undefined vars") + >>> # Assuming optimizer uses lr = 0.001 for all groups + >>> # lr = 0.001 if epoch == 0 + >>> # lr = 0.00075 if epoch == 1 + >>> # lr = 0.00050 if epoch == 2 + >>> # lr = 0.00025 if epoch == 3 + >>> # lr = 0.0 if epoch >= 4 + >>> scheduler = PolynomialLR(optimizer, total_iters=4, power=1.0) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) 
+ >>> scheduler.step() + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Methods + + `get_lr(self)` + : Compute the learning rate. + +`ReduceLROnPlateau(optimizer: torch.optim.optimizer.Optimizer, mode: Literal['min', 'max'] = 'min', factor: float = 0.1, patience: int = 10, threshold: float = 0.0001, threshold_mode: Literal['rel', 'abs'] = 'rel', cooldown: int = 0, min_lr: float | list[float] = 0, eps: float = 1e-08)` +: Reduce learning rate when a metric has stopped improving. + + Models often benefit from reducing the learning rate by a factor + of 2-10 once learning stagnates. This scheduler reads a metrics + quantity and if no improvement is seen for a 'patience' number + of epochs, the learning rate is reduced. + + Args: + optimizer (Optimizer): Wrapped optimizer. + mode (str): One of `min`, `max`. In `min` mode, lr will + be reduced when the quantity monitored has stopped + decreasing; in `max` mode it will be reduced when the + quantity monitored has stopped increasing. Default: 'min'. + factor (float): Factor by which the learning rate will be + reduced. new_lr = lr * factor. Default: 0.1. + patience (int): The number of allowed epochs with no improvement after + which the learning rate will be reduced. + For example, consider the case of having no patience (`patience = 0`). + In the first epoch, a baseline is established and is always considered good as there's no previous baseline. + In the second epoch, if the performance is worse than the baseline, + we have what is considered an intolerable epoch. + Since the count of intolerable epochs (1) is greater than the patience level (0), + the learning rate is reduced at the end of this epoch. + From the third epoch onwards, the learning rate continues to be reduced at the end of each epoch + if the performance is worse than the baseline. If the performance improves or remains the same, + the learning rate is not adjusted. + Default: 10. + threshold (float): Threshold for measuring the new optimum, + to only focus on significant changes. Default: 1e-4. + threshold_mode (str): One of `rel`, `abs`. In `rel` mode, + dynamic_threshold = best * ( 1 + threshold ) in 'max' + mode or best * ( 1 - threshold ) in `min` mode. + In `abs` mode, dynamic_threshold = best + threshold in + `max` mode or best - threshold in `min` mode. Default: 'rel'. + cooldown (int): Number of epochs to wait before resuming + normal operation after lr has been reduced. Default: 0. + min_lr (float or list): A scalar or a list of scalars. A + lower bound on the learning rate of all param groups + or each group respectively. Default: 0. + eps (float): Minimal decay applied to lr. If the difference + between new and old lr is smaller than eps, the update is + ignored. Default: 1e-8. + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = ReduceLROnPlateau(optimizer, 'min') + >>> for epoch in range(10): + >>> train(...) + >>> val_loss = validate(...) + >>> # Note that step should be called after validate() + >>> scheduler.step(val_loss) + + ### Ancestors (in MRO) + + * torch.optim.lr_scheduler.LRScheduler + + ### Instance variables + + `in_cooldown` + : + + ### Methods + + `is_better(self, a, best)` + : + + `load_state_dict(self, state_dict)` + : Load the scheduler's state. + + `state_dict(self)` + : Return the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. 
+
+    `step(self, metrics: SupportsFloat, epoch=None)`
+    : Perform a step.
+
+`SequentialLR(optimizer: torch.optim.optimizer.Optimizer, schedulers: list[torch.optim.lr_scheduler.LRScheduler], milestones: list[int], last_epoch: int = -1)`
+: Contains a list of schedulers expected to be called sequentially during the optimization process.
+
+    Specifically, the schedulers will be called according to the milestone points, which should provide exact
+    intervals by which each scheduler should be called at a given epoch.
+
+    Args:
+        optimizer (Optimizer): Wrapped optimizer.
+        schedulers (list): List of chained schedulers.
+        milestones (list): List of integers that reflects milestone points.
+        last_epoch (int): The index of last epoch. Default: -1.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # Assuming optimizer uses lr = 1. for all groups
+        >>> # lr = 0.1 if epoch == 0
+        >>> # lr = 0.1 if epoch == 1
+        >>> # lr = 0.9 if epoch == 2
+        >>> # lr = 0.81 if epoch == 3
+        >>> # lr = 0.729 if epoch == 4
+        >>> scheduler1 = ConstantLR(optimizer, factor=0.1, total_iters=2)
+        >>> scheduler2 = ExponentialLR(optimizer, gamma=0.9)
+        >>> scheduler = SequentialLR(optimizer, schedulers=[scheduler1, scheduler2], milestones=[2])
+        >>> for epoch in range(100):
+        >>>     train(...)
+        >>>     validate(...)
+        >>>     scheduler.step()
+
+    ### Ancestors (in MRO)
+
+    * torch.optim.lr_scheduler.LRScheduler
+
+    ### Methods
+
+    `load_state_dict(self, state_dict)`
+    : Load the scheduler's state.
+
+        Args:
+            state_dict (dict): scheduler state. Should be an object returned
+                from a call to :meth:`state_dict`.
+
+    `recursive_undo(self, sched=None)`
+    : Recursively undo any step performed by the initialisation of
+        schedulers.
+
+    `state_dict(self)`
+    : Return the state of the scheduler as a :class:`dict`.
+
+        It contains an entry for every variable in self.__dict__ which
+        is not the optimizer.
+        The wrapped scheduler states will also be saved.
+
+    `step(self)`
+    : Perform a step.
+
+`StepLR(optimizer: torch.optim.optimizer.Optimizer, step_size: int, gamma: float = 0.1, last_epoch: int = -1)`
+: Decays the learning rate of each parameter group by gamma every step_size epochs.
+
+    Notice that such decay can happen simultaneously with other changes to the learning rate
+    from outside this scheduler. When last_epoch=-1, sets initial lr as lr.
+
+    Args:
+        optimizer (Optimizer): Wrapped optimizer.
+        step_size (int): Period of learning rate decay.
+        gamma (float): Multiplicative factor of learning rate decay.
+            Default: 0.1.
+        last_epoch (int): The index of last epoch. Default: -1.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # Assuming optimizer uses lr = 0.05 for all groups
+        >>> # lr = 0.05 if epoch < 30
+        >>> # lr = 0.005 if 30 <= epoch < 60
+        >>> # lr = 0.0005 if 60 <= epoch < 90
+        >>> # ...
+        >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1)
+        >>> for epoch in range(100):
+        >>>     train(...)
+        >>>     validate(...)
+        >>>     scheduler.step()
+
+    ### Ancestors (in MRO)
+
+    * torch.optim.lr_scheduler.LRScheduler
+
+    ### Methods
+
+    `get_lr(self)`
+    : Compute the learning rate of each parameter group.
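+
+    Whichever scheduler is used, the currently active learning rate can be read
+    back through the base-class ``get_last_lr()`` documented above; a minimal
+    sketch, assuming an existing ``optimizer`` and ``scheduler``:
+
+    >>> # xdoctest: +SKIP
+    >>> for epoch in range(100):
+    >>>     train(...)
+    >>>     scheduler.step()
+    >>>     print(epoch, scheduler.get_last_lr())  # one entry per param group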
diff --git a/doc/api/heat/optim/tests/index.md b/doc/api/heat/optim/tests/index.md new file mode 100644 index 0000000000..3a5452b68a --- /dev/null +++ b/doc/api/heat/optim/tests/index.md @@ -0,0 +1,8 @@ +Module heat.optim.tests +======================= + +Sub-modules +----------- +* heat.optim.tests.test_dp_optimizer +* heat.optim.tests.test_optim +* heat.optim.tests.test_utils diff --git a/doc/api/heat/optim/tests/test_dp_optimizer.md b/doc/api/heat/optim/tests/test_dp_optimizer.md new file mode 100644 index 0000000000..3526eb2e54 --- /dev/null +++ b/doc/api/heat/optim/tests/test_dp_optimizer.md @@ -0,0 +1,51 @@ +Module heat.optim.tests.test_dp_optimizer +========================================= + +Classes +------- + +`TestDASO(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_daso(self)` + : diff --git a/doc/api/heat/optim/tests/test_optim.md b/doc/api/heat/optim/tests/test_optim.md new file mode 100644 index 0000000000..d79622aebb --- /dev/null +++ b/doc/api/heat/optim/tests/test_optim.md @@ -0,0 +1,97 @@ +Module heat.optim.tests.test_optim +================================== + +Classes +------- + +`TestLRScheduler(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. 
Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_lr_scheduler_callthrough(self)` + : + +`TestOptim(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_optim_getattr(self)`
+    :
diff --git a/doc/api/heat/optim/tests/test_utils.md b/doc/api/heat/optim/tests/test_utils.md
new file mode 100644
index 0000000000..341d2984a2
--- /dev/null
+++ b/doc/api/heat/optim/tests/test_utils.md
@@ -0,0 +1,51 @@
+Module heat.optim.tests.test_utils
+==================================
+
+Classes
+-------
+
+`TestUtils(methodName='runTest')`
+: A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+      the instance's assertion methods fail; test methods raising this
+      exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+      objects used in assert methods) will be printed on failure in *addition*
+      to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+      by assert methods using difflib. It is looked up as an instance
+      attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_DetectMetricPlateau(self)`
+    :
diff --git a/doc/api/heat/optim/utils.md b/doc/api/heat/optim/utils.md
new file mode 100644
index 0000000000..0462d15cd5
--- /dev/null
+++ b/doc/api/heat/optim/utils.md
@@ -0,0 +1,89 @@
+Module heat.optim.utils
+=======================
+Utility functions for the heat optimizers
+
+Classes
+-------
+
+`DetectMetricPlateau(mode: str | None = 'min', patience: int | None = 10, threshold: float | None = 0.0001, threshold_mode: str | None = 'rel', cooldown: int | None = 0)`
+: Determine when a metric has stopped improving.
+    This class reads a metrics quantity and, if no improvement
+    is seen for a 'patience' number of epochs, a plateau is reported.
+
+    Adapted from ``torch.optim.lr_scheduler.ReduceLROnPlateau``.
+
+    Args:
+        mode: str, optional
+            One of `min`, `max`.
+            In `min` mode, the quantity monitored is determined to have plateaued when
+            it stops decreasing. In `max` mode, the quantity monitored is determined to
+            have plateaued when it stops increasing.\n
+            Default: 'min'.
+        patience: int, optional
+            Number of epochs to wait before determining if there is a plateau.
+            For example, if `patience = 2`, then we will ignore the first 2 epochs
+            with no improvement, and will only determine if there is a plateau after the
+            3rd epoch if the loss still hasn't improved then.\n
+            Default: 10.
+        threshold: float, optional
+            Threshold for measuring the new optimum to only focus on significant changes.\n
+            Default: 1e-4.
+        threshold_mode: str, optional
+            One of `rel`, `abs`. In `rel` mode,
+            dynamic_threshold = best * ( 1 + threshold ) in 'max'
+            mode or best * ( 1 - threshold ) in `min` mode.
+            In `abs` mode, dynamic_threshold = best + threshold in
+            `max` mode or best - threshold in `min` mode.\n
+            Default: 'rel'.
+        cooldown: int, optional
+            Number of epochs to wait before resuming
+            normal operation after lr has been reduced.\n
+            Default: 0.
+
+    ### Instance variables
+
+    `in_cooldown: bool`
+    : Test if the class is in the cooldown period
+
+    ### Methods
+
+    `get_state(self) ‑> Dict`
+    : Get a dictionary of the class parameters. This is useful for checkpointing.
+
+    `is_better(self, a: float, best: float) ‑> bool`
+    : Test if the given value is better than the current best value. The best value is adjusted with the threshold.
+
+        Parameters
+        ----------
+        a: float
+            the metric value
+        best: float
+            the current best value for the metric
+
+        Returns
+        -------
+        boolean indicating if the metric is improving
+
+    `reset(self) ‑> None`
+    : Resets num_bad_epochs counter and cooldown counter.
+
+    `set_state(self, dic: Dict) ‑> None`
+    : Load a dictionary with the status of the class. Typically used in checkpointing.
+
+        Parameters
+        ----------
+        dic: Dictionary
+            contains the values to be set as the class parameters
+
+    `test_if_improving(self, metrics: torch.Tensor) ‑> bool`
+    : Test if the metrics are improving. If the metrics are better than the adjusted best value, they
+        are set as the best for future testing.
+
+        Parameters
+        ----------
+        metrics: torch.Tensor
+            the metrics to test
+
+        Returns
+        -------
+        True if the metrics are better than the best, False otherwise
diff --git a/doc/api/heat/preprocessing/index.md b/doc/api/heat/preprocessing/index.md
new file mode 100644
index 0000000000..5280610c77
--- /dev/null
+++ b/doc/api/heat/preprocessing/index.md
@@ -0,0 +1,8 @@
+Module heat.preprocessing
+=========================
+Add the preprocessing functions to the ht.preprocessing namespace
+
+Sub-modules
+-----------
+* heat.preprocessing.preprocessing
+* heat.preprocessing.tests
diff --git a/doc/api/heat/preprocessing/preprocessing.md b/doc/api/heat/preprocessing/preprocessing.md
new file mode 100644
index 0000000000..04abff5e04
--- /dev/null
+++ b/doc/api/heat/preprocessing/preprocessing.md
@@ -0,0 +1,316 @@
+Module heat.preprocessing.preprocessing
+=======================================
+Module implementing basic data preprocessing techniques
+
+Classes
+-------
+
+`MaxAbsScaler(*, copy: bool = True)`
+: MaxAbsScaler: scale each feature of a given data set linearly by its maximum absolute value. The underlying data set to be scaled is
+    assumed to be stored as a 2D-`DNDarray` of shape (n_datapoints, n_features); this routine is similar to
+    `sklearn.preprocessing.MaxAbsScaler`.
+
+    Each feature is scaled individually such that the maximal absolute value of each feature after transformation will be 1.0.
+    No shifting/centering is applied.
+
+    Parameters
+    ----------
+    copy : bool, default=True
+        ``copy=False`` enables in-place transformation.
+
+    Attributes
+    ----------
+    scale_ : DNDarray of shape (n_features,)
+        Per feature relative scaling of the data.
+
+    max_abs_ : DNDarray of shape (n_features,)
+        Per feature maximum absolute value of the input data.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray) ‑> Self`
+    : Fit MaxAbsScaler to input data ``X``: compute the parameters to be used for later scaling.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            The data set to which the scaler shall be fitted.
+
+    `inverse_transform(self, Y: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    : Apply the inverse of :meth:``transform``, i.e. scale the input data ``Y`` back to the original representation.
+
+        Parameters
+        ----------
+        Y : DNDarray of shape (n_datapoints, n_features)
+            The data set to be transformed back.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    : Scale the data with the MaxAbsScaler.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            The data set to be scaled.
+
+`MinMaxScaler(feature_range: Tuple[float, float] = (0.0, 1.0), *, copy: bool = True, clip: bool = False)`
+: Min-Max-Scaler: transforms the features by scaling each feature (affine) linearly to the prescribed range;
+    similar to `sklearn.preprocessing.MinMaxScaler`.
+    The data set to be scaled must be stored as 2D-`DNDarray` of shape (n_datapoints, n_features).
+
+    Each feature is scaled and translated individually such that it is in the given range on the input data set,
+    e.g. between zero and one (default).
+
+    Parameters
+    ----------
+    feature_range : tuple (min, max), default=(0, 1)
+        Desired range of transformed features.
+
+    copy : bool, default=True
+        ``copy = False`` means in-place transformations whenever possible.
+
+    clip : Not yet supported.
+        Raises ``NotImplementedError``.
+
+    Attributes
+    ----------
+    min_ : DNDarray of shape (n_features,)
+        translation required per feature
+
+    scale_ : DNDarray of shape (n_features,)
+        scaling required per feature
+
+    data_min_ : DNDarray of shape (n_features,)
+        minimum per feature in the input data set
+
+    data_max_ : DNDarray of shape (n_features,)
+        maximum per feature in the input data set
+
+    data_range_ : DNDarray of shape (n_features,)
+        range per feature in the input data set
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray) ‑> Self`
+    : Fit the MinMaxScaler: i.e. compute the parameters required for later scaling.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            data set to which scaler shall be fitted.
+
+    `inverse_transform(self, Y: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    : Apply the inverse of :meth:``transform``.
+
+        Parameters
+        ----------
+        Y : DNDarray of shape (n_datapoints, n_features)
+            Data set to be transformed back.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    : Transform input data with MinMaxScaler: i.e. scale features of ``X`` according to feature_range.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            Data set to be transformed.
+
+`Normalizer(norm: str = 'l2', *, copy: bool = True)`
+:   Normalizer: each data point of a data set is scaled to unit norm independently.
+    The data set to be scaled must be stored as a 2D-`DNDarray` of shape (n_datapoints, n_features); the
+    Normalizer therefore scales each row to unit norm. This object is similar to `sklearn.preprocessing.Normalizer`.
+
+    Parameters
+    ----------
+    norm : {'l1', 'l2', 'max'}, default='l2'
+        The norm to use to normalize the data points. ``norm='max'`` refers to the :math:`\ell^\infty`-norm.
+
+    copy : bool, default=True
+        ``copy=False`` enables in-place normalization.
+
+    Attributes
+    ----------
+    None
+
+    Notes
+    -----
+    Normalizer is :term:`stateless` and, consequently, :meth:``fit`` is only a dummy that does not need to be called before :meth:``transform``.
+    Since :meth:``transform`` is not bijective, there is no back-transformation :meth:``inverse_transform``.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray) ‑> Self`
+    :   Since ``Normalizer`` is stateless, this function is only a dummy.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    :   Apply the Normalizer transformation: scales each data point of the input data set ``X`` to unit norm (w.r.t. ``norm``).
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            The data set to be normalized.
+
+        copy : bool, default=None
+            ``copy=False`` enables in-place transformation.
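+
+    Examples
+    --------
+    A minimal usage sketch (assumes the default ``norm='l2'``; array values are illustrative):
+    >>> import heat as ht
+    >>> X = ht.array([[3.0, 4.0], [1.0, 0.0]], split=0)
+    >>> # fit is a no-op for the stateless Normalizer; transform rescales each row to unit norm,
+    >>> # e.g. the first row becomes [0.6, 0.8]
+    >>> Y = ht.preprocessing.Normalizer().transform(X)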
+
+`RobustScaler(*, with_centering: bool = True, with_scaling: bool = True, quantile_range: Tuple[float, float] = (25.0, 75.0), copy: bool = True, unit_variance: bool = False, sketched: bool = False, sketch_size: float | None = 1.0)`
+:   Scales the features of a given data set making use of statistics
+    that are robust to outliers: it removes the median and scales the data according to
+    the quantile range (defaults to the IQR: Interquartile Range); this routine is similar
+    to ``sklearn.preprocessing.RobustScaler``.
+
+    By default, the "true" median and IQR of the entire data set are computed; however, the argument
+    `sketched` allows switching to a faster but less accurate version that computes the
+    median and IQR only on a random subset of the data set (a "sketch") of size `sketch_size`.
+
+    The underlying data set to be scaled must be stored as a 2D-`DNDarray` of shape (n_datapoints, n_features).
+    Each feature is centered and scaled independently.
+
+    Parameters
+    ----------
+    with_centering : bool, default=True
+        If `True`, data are centered before scaling.
+
+    with_scaling : bool, default=True
+        If `True`, scale the data to the prescribed interquantile range.
+
+    quantile_range : tuple (q_min, q_max), 0.0 <= q_min < q_max <= 100.0, default=(25.0, 75.0)
+        Quantile range used to calculate `scale_`; the default is the
+        IQR given by ``q_min=25`` and ``q_max=75``.
+
+    copy : bool, default=True
+        ``copy=False`` enables in-place transformations.
+
+    unit_variance : not yet supported.
+        Raises ``NotImplementedError``.
+
+    sketched : bool, default=False
+        If `True`, use a sketch of the data set to compute the median and IQR.
+        This is faster but less accurate. The size of the sketch is determined by the argument `sketch_size`.
+
+    sketch_size : float, default=1./ht.MPI_WORLD.size
+        Fraction of the data set to be used for the sketch if `sketched=True`. The default value is 1/N, where N is the number of MPI processes.
+        Ignored if `sketched=False`.
+
+    Attributes
+    ----------
+    center_ : DNDarray of shape (n_features,)
+        Feature-wise median value of the given data set.
+
+    iqr_ : DNDarray of shape (n_features,)
+        Length of the interquantile range for each feature.
+
+    scale_ : array of floats
+        Feature-wise inverse of ``iqr_``.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray) ‑> Self`
+    :   Fit the RobustScaler to the given data set, i.e. compute the parameters required for the transformation.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            Data to which the scaler should be fitted.
+
+    `inverse_transform(self, Y: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    :   Apply the inverse of :meth:``transform``.
+
+        Parameters
+        ----------
+        Y : DNDarray of shape (n_datapoints, n_features)
+            Data to be back-transformed.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    :   Transform the given data with the RobustScaler.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            Data set to be transformed.
+
+`StandardScaler(*, copy: bool = True, with_mean: bool = True, with_std: bool = True)`
+:   Standardization of features to mean 0 and variance 1 by an affine linear transformation; similar to `sklearn.preprocessing.StandardScaler`.
+    The data set to be scaled must be stored as a 2D-`DNDarray` of shape (n_datapoints, n_features).
+    Shifting to mean 0 and scaling to variance 1 is applied to each feature independently.
+
+    Parameters
+    ----------
+    copy : bool, default=True
+        If False, try to avoid a copy and scale in place instead.
+
+    with_mean : bool, default=True
+        If True, center the data (i.e. mean = 0) before scaling.
+
+    with_std : bool, default=True
+        If True, scale the data to variance = 1.
+
+    Attributes
+    ----------
+    scale_ : DNDarray of shape (n_features,) or None
+        Per feature relative scaling of the data to achieve unit
+        variance. Set to ``None`` (no variance scaling applied) if ``var_`` is ``None`` or below machine precision.
+
+    mean_ : DNDarray of shape (n_features,) or None
+        The mean value for each feature. Equal to ``None`` when ``with_mean=False``.
+
+    var_ : DNDarray of shape (n_features,) or None
+        Feature-wise variance of the given data. Equal to ``None`` when ``with_std=False``.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.TransformMixin
+    * heat.core.base.BaseEstimator
+
+    ### Methods
+
+    `fit(self, X: heat.core.dndarray.DNDarray, sample_weight: heat.core.dndarray.DNDarray | None = None) ‑> Self`
+    :   Fit the ``StandardScaler`` to the given data ``X``, i.e. compute the mean and standard deviation of ``X`` to be used for later scaling.
+
+        Parameters
+        ----------
+        X : DNDarray of shape (n_datapoints, n_features)
+            Data used to compute the mean and standard deviation for later feature-wise scaling.
+
+        sample_weight : Not yet supported.
+            Raises ``NotImplementedError``.
+
+    `inverse_transform(self, Y: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    :   Scale the data back to the original representation, i.e. apply the inverse of :meth:``transform`` to the input ``Y``.
+
+        Parameters
+        ----------
+        Y : DNDarray of shape (n_datapoints, n_features)
+            Data to be scaled back.
+        copy : bool, default=None
+            Copy the input ``Y`` or not.
+
+    `transform(self, X: heat.core.dndarray.DNDarray) ‑> Self | heat.core.dndarray.DNDarray`
+    :   Applies standardization to the input data ``X`` by centering and scaling w.r.t. the mean and std previously computed and saved in the ``StandardScaler`` with :meth:``fit``.
+ + Parameters + ---------- + X : DNDarray (n_datapoints, n_features) + The data set to be standardized. + copy : bool, default=None + Copy the input ``X`` or not. diff --git a/doc/api/heat/preprocessing/tests/index.md b/doc/api/heat/preprocessing/tests/index.md new file mode 100644 index 0000000000..1536da70fd --- /dev/null +++ b/doc/api/heat/preprocessing/tests/index.md @@ -0,0 +1,6 @@ +Module heat.preprocessing.tests +=============================== + +Sub-modules +----------- +* heat.preprocessing.tests.test_preprocessing diff --git a/doc/api/heat/preprocessing/tests/test_preprocessing.md b/doc/api/heat/preprocessing/tests/test_preprocessing.md new file mode 100644 index 0000000000..6ca8bdfc4e --- /dev/null +++ b/doc/api/heat/preprocessing/tests/test_preprocessing.md @@ -0,0 +1,238 @@ +Module heat.preprocessing.tests.test_preprocessing +================================================== + +Classes +------- + +`TestMaxAbsScaler(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_MaxAbsScaler(self)` + : + +`TestMinMaxScaler(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. 
+ + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_MinMaxScaler(self)` + : + +`TestNormalizer(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_Normalizer(self)` + : + +`TestRobustScaler(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. 
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_RobustScaler(self)` + : + + `test_robust_scaler_sketched(self)` + : + +`TestStandardScaler(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_StandardScaler(self)`
+    :
diff --git a/doc/api/heat/regression/index.md b/doc/api/heat/regression/index.md
new file mode 100644
index 0000000000..2a4e4bb243
--- /dev/null
+++ b/doc/api/heat/regression/index.md
@@ -0,0 +1,8 @@
+Module heat.regression
+======================
+Include regression algorithms in the heat namespace
+
+Sub-modules
+-----------
+* heat.regression.lasso
+* heat.regression.tests
diff --git a/doc/api/heat/regression/lasso.md b/doc/api/heat/regression/lasso.md
new file mode 100644
index 0000000000..3cd05b127b
--- /dev/null
+++ b/doc/api/heat/regression/lasso.md
@@ -0,0 +1,105 @@
+Module heat.regression.lasso
+============================
+Implementation of LASSO regression
+
+Classes
+-------
+
+`Lasso(lam: float | None = 0.1, max_iter: int | None = 100, tol: float | None = 1e-06)`
+:   ``Least absolute shrinkage and selection operator`` (LASSO), a linear model with L1 regularization. The optimization
+    objective for Lasso is:
+
+    .. math:: E(w) = \frac{1}{2 m} ||y - Xw||^2_2 + \lambda ||w\_||_1
+
+    with
+
+    .. math:: w\_=(w_1,w_2,...,w_n), w=(w_0,w_1,w_2,...,w_n),
+    .. math:: y \in M(m \times 1), w \in M(n \times 1), X \in M(m \times n)
+
+    Parameters
+    ----------
+    lam : float, optional
+        Constant that multiplies the L1 term. Default value: 0.1. ``lam = 0.`` is equivalent to an ordinary
+        least squares (OLS) fit. For numerical reasons, using ``lam = 0.`` with the ``Lasso`` object is not advised.
+    max_iter : int, optional
+        The maximum number of iterations. Default value: 100
+    tol : float, optional
+        The tolerance for the optimization. Default value: 1e-6
+
+    Attributes
+    ----------
+    __theta : array, shape (n_features + 1,)
+        Parameter vector w; the first element is the intercept.
+    coef_ : array, shape (n_features,) | (n_targets, n_features)
+        parameter vector (w in the cost function formula)
+    intercept_ : float | array, shape (n_targets,)
+        independent term in the decision function.
+    n_iter_ : int or None | array-like, shape (n_targets,)
+        number of iterations run by the coordinate descent solver to reach the specified tolerance.
+
+    Examples
+    --------
+    >>> X = ht.random.randn(10, 4, split=0)
+    >>> y = ht.random.randn(10, 1, split=0)
+    >>> estimator = ht.regression.lasso.Lasso(max_iter=100, tol=None)
+    >>> estimator.fit(X, y)
+
+    Initialize lasso parameters
+
+    ### Ancestors (in MRO)
+
+    * heat.core.base.RegressionMixin
+    * heat.core.base.BaseEstimator
+
+    ### Instance variables
+
+    `coef_: heat.core.dndarray.DNDarray | None`
+    :   Returns the coefficients
+
+    `intercept_: heat.core.dndarray.DNDarray | None`
+    :   Returns the bias term
+
+    `lam: float`
+    :   Returns the regularization term lambda
+
+    `theta`
+    :   Returns the parameter vector theta (intercept and coefficients)
+
+    ### Methods
+
+    `fit(self, x: heat.core.dndarray.DNDarray, y: heat.core.dndarray.DNDarray) ‑> None`
+    :   Fit the lasso model with coordinate descent
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data, Shape = (n_samples, n_features)
+        y : DNDarray
+            Labels, Shape = (n_samples,)
+
+    `predict(self, x: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    :   Apply the lasso model to input data. The first row of the parameter vector corresponds to the intercept.
+
+        Parameters
+        ----------
+        x : DNDarray
+            Input data, Shape = (n_samples, n_features)
+
+    `rmse(self, gt: heat.core.dndarray.DNDarray, yest: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray`
+    :   Root mean square error (RMSE)
+
+        Parameters
+        ----------
+        gt : DNDarray
+            Ground-truth values, Shape = (1,)
+        yest : DNDarray
+            Estimated values, Shape = (1,)
+
+    `soft_threshold(self, rho: heat.core.dndarray.DNDarray) ‑> heat.core.dndarray.DNDarray | float`
+    :   Soft threshold operator
+
+        Parameters
+        ----------
+        rho : DNDarray
+            Input model data, Shape = (1,)
+
+        Returns
+        -------
+        out : DNDarray or float
+            Thresholded model data, Shape = (1,)
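+
+Usage sketch
+------------
+A hedged end-to-end sketch of the workflow documented above (the inputs are random, so any printed error value is illustrative only):
+
+>>> import heat as ht
+>>> X = ht.random.randn(10, 4, split=0)
+>>> y = ht.random.randn(10, 1, split=0)
+>>> estimator = ht.regression.lasso.Lasso(lam=0.1, max_iter=100)
+>>> estimator.fit(X, y)
+>>> y_est = estimator.predict(X)
+>>> # root mean square error between the labels and the predictions
+>>> err = estimator.rmse(y, y_est)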
diff --git a/doc/api/heat/regression/tests/index.md b/doc/api/heat/regression/tests/index.md
new file mode 100644
index 0000000000..1f5592c5d8
--- /dev/null
+++ b/doc/api/heat/regression/tests/index.md
@@ -0,0 +1,6 @@
+Module heat.regression.tests
+============================
+
+Sub-modules
+-----------
+* heat.regression.tests.test_lasso
diff --git a/doc/api/heat/regression/tests/test_lasso.md b/doc/api/heat/regression/tests/test_lasso.md
new file mode 100644
index 0000000000..eaa4b6c500
--- /dev/null
+++ b/doc/api/heat/regression/tests/test_lasso.md
@@ -0,0 +1,60 @@
+Module heat.regression.tests.test_lasso
+=======================================
+
+Classes
+-------
+
+`TestLasso(methodName='runTest')`
+:   A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+        the instance's assertion methods fail; test methods raising this
+        exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+        objects used in assert methods) will be printed on failure in *addition*
+        to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+        by assert methods using difflib. It is looked up as an instance
+        attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_exceptions(self)` + : + + `test_get_and_set_params(self)` + : + + `test_lasso(self)` + : + + `test_regressor(self)` + : diff --git a/doc/api/heat/sparse/arithmetics.md b/doc/api/heat/sparse/arithmetics.md new file mode 100644 index 0000000000..08c84af61f --- /dev/null +++ b/doc/api/heat/sparse/arithmetics.md @@ -0,0 +1,68 @@ +Module heat.sparse.arithmetics +============================== +Arithmetic functions for Dcsr_matrices + +Functions +--------- + +`add(t1: DCSR_matrix, t2: DCSR_matrix, orientation: str = 'row') ‑> heat.sparse.dcsx_matrix.DCSR_matrix` +: Element-wise addition of values from two operands, commutative. + Takes the first and second operand (scalar or :class:`~heat.sparse.DCSR_matrix`) whose elements are to be added + as argument and returns a ``DCSR_matrix`` containing the results of element-wise addition of ``t1`` and ``t2``. + + Parameters + ---------- + t1: DCSR_matrix + The first operand involved in the addition + t2: DCSR_matrix + The second operand involved in the addition + orientation: str, optional + The orientation of the operation. Options: 'row' or 'col' + Default: 'row' + + Examples + -------- + >>> heat_sparse_csr + (indptr: tensor([0, 2, 3]), indices: tensor([0, 2, 2]), data: tensor([1., 2., 3.]), dtype=ht.float32, device=cpu:0, split=0) + >>> heat_sparse_csr.todense() + DNDarray([[1., 0., 2.], + [0., 0., 3.]], dtype=ht.float32, device=cpu:0, split=0) + >>> sum_sparse = heat_sparse_csr + heat_sparse_csr + (or) + >>> sum_sparse = ht.sparse.sparse_add(heat_sparse_csr, heat_sparse_csr) + >>> sum_sparse + (indptr: tensor([0, 2, 3], dtype=torch.int32), indices: tensor([0, 2, 2], dtype=torch.int32), data: tensor([2., 4., 6.]), dtype=ht.float32, device=cpu:0, split=0) + >>> sum_sparse.todense() + DNDarray([[2., 0., 4.], + [0., 0., 6.]], dtype=ht.float32, device=cpu:0, split=0) + +`mul(t1: DCSR_matrix, t2: DCSR_matrix, orientation: str = 'row') ‑> heat.sparse.dcsx_matrix.DCSR_matrix` +: Element-wise multiplication (NOT matrix multiplication) of values from two operands, commutative. + Takes the first and second operand (scalar or :class:`~heat.sparse.DCSR_matrix`) whose elements are to be + multiplied as argument. + + Parameters + ---------- + t1: DCSR_matrix + The first operand involved in the multiplication + t2: DCSR_matrix + The second operand involved in the multiplication + orientation: str, optional + The orientation of the operation. 
Options: 'row' or 'col'
+        Default: 'row'
+
+    Examples
+    --------
+    >>> heat_sparse_csr
+    (indptr: tensor([0, 2, 3]), indices: tensor([0, 2, 2]), data: tensor([1., 2., 3.]), dtype=ht.float32, device=cpu:0, split=0)
+    >>> heat_sparse_csr.todense()
+    DNDarray([[1., 0., 2.],
+              [0., 0., 3.]], dtype=ht.float32, device=cpu:0, split=0)
+    >>> pdt_sparse = heat_sparse_csr * heat_sparse_csr
+    (or)
+    >>> pdt_sparse = ht.sparse.sparse_mul(heat_sparse_csr, heat_sparse_csr)
+    >>> pdt_sparse
+    (indptr: tensor([0, 2, 3]), indices: tensor([0, 2, 2]), data: tensor([1., 4., 9.]), dtype=ht.float32, device=cpu:0, split=0)
+    >>> pdt_sparse.todense()
+    DNDarray([[1., 0., 4.],
+              [0., 0., 9.]], dtype=ht.float32, device=cpu:0, split=0)
diff --git a/doc/api/heat/sparse/dcsx_matrix.md b/doc/api/heat/sparse/dcsx_matrix.md
new file mode 100644
index 0000000000..64d35796ca
--- /dev/null
+++ b/doc/api/heat/sparse/dcsx_matrix.md
@@ -0,0 +1,78 @@
+Module heat.sparse.dcsx_matrix
+==============================
+Provides DCSR_matrix and DCSC_matrix, distributed compressed sparse matrices
+
+Classes
+-------
+
+`DCSC_matrix(array: torch.Tensor, gnnz: int, gshape: Tuple[int, ...], dtype: datatype, split: Union[int, None], device: Device, comm: Communication, balanced: bool)`
+:   Distributed Compressed Sparse Column Matrix. It is composed of
+    PyTorch sparse_csc_tensors local to each process.
+
+    Parameters
+    ----------
+    array : torch.Tensor (layout ==> torch.sparse_csc)
+        Local sparse array
+    gnnz: int
+        Total number of non-zero elements across all processes
+    gshape : Tuple[int,...]
+        The global shape of the array
+    dtype : datatype
+        The datatype of the array
+    split : int or None
+        If split is not None, it denotes the axis on which the array is divided between processes.
+        DCSC_matrix only supports distribution along axis 1.
+    device : Device
+        The device on which the local arrays reside (CPU or GPU)
+    comm : Communication
+        The communications object for sending and receiving data
+    balanced: bool or None
+        Describes whether the data are evenly distributed across processes.
+
+    ### Ancestors (in MRO)
+
+    * heat.sparse.dcsx_matrix.__DCSX_matrix
+
+    ### Instance variables
+
+    `lindices: torch.Tensor`
+    :   Local indices of the ``DCSC_matrix``
+
+    `lindptr: torch.Tensor`
+    :   Local indptr of the ``DCSC_matrix``
+
+`DCSR_matrix(array: torch.Tensor, gnnz: int, gshape: Tuple[int, ...], dtype: datatype, split: Union[int, None], device: Device, comm: Communication, balanced: bool)`
+:   Distributed Compressed Sparse Row Matrix. It is composed of
+    PyTorch sparse_csr_tensors local to each process.
+
+    Parameters
+    ----------
+    array : torch.Tensor (layout ==> torch.sparse_csr)
+        Local sparse array
+    gnnz: int
+        Total number of non-zero elements across all processes
+    gshape : Tuple[int,...]
+        The global shape of the array
+    dtype : datatype
+        The datatype of the array
+    split : int or None
+        If split is not None, it denotes the axis on which the array is divided between processes.
+        DCSR_matrix only supports distribution along axis 0.
+    device : Device
+        The device on which the local arrays reside (CPU or GPU)
+    comm : Communication
+        The communications object for sending and receiving data
+    balanced: bool or None
+        Describes whether the data are evenly distributed across processes.
+ + ### Ancestors (in MRO) + + * heat.sparse.dcsx_matrix.__DCSX_matrix + + ### Instance variables + + `lindices: torch.Tensor` + : Local indices of the ``DCSR_matrix`` + + `lindptr: torch.Tensor` + : Local indptr of the ``DCSR_matrix`` diff --git a/doc/api/heat/sparse/factories.md b/doc/api/heat/sparse/factories.md new file mode 100644 index 0000000000..7870aebda6 --- /dev/null +++ b/doc/api/heat/sparse/factories.md @@ -0,0 +1,136 @@ +Module heat.sparse.factories +============================ +Provides high-level DCSR_matrix initialization functions + +Functions +--------- + +`sparse_csc_matrix(obj: Iterable, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, is_split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> heat.sparse.dcsx_matrix.DCSC_matrix` +: Create a :class:`~heat.sparse.DCSC_matrix`. + + Parameters + ---------- + obj : array_like + A tensor or array, any object exposing the array interface, an object whose ``__array__`` method returns an + array, or any (nested) sequence. Sparse tensor that needs to be distributed. + dtype : datatype, optional + The desired data-type for the sparse matrix. If not given, then the type will be determined as the minimum type required + to hold the objects in the sequence. This argument can only be used to ‘upcast’ the array. For downcasting, use + the :func:`~heat.sparse.DCSC_matrix.astype` method. + split : int or None, optional + The axis along which the passed array content ``obj`` is split and distributed in memory. DCSC_matrix only supports + distribution along axis 1. Mutually exclusive with ``is_split``. + is_split : int or None, optional + Specifies the axis along which the local data portions, passed in obj, are split across all machines. DCSC_matrix only + supports distribution along axis 1. Useful for interfacing with other distributed-memory code. The shape of the global + array is automatically inferred. Mutually exclusive with ``split``. + device : str or Device, optional + Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on (i.e. globally set default + device). + comm : Communication, optional + Handle to the nodes holding distributed array chunks. + + Raises + ------ + ValueError + If split and is_split parameters are not one of 1 or None. 
+
+    Examples
+    --------
+    Create a :class:`~heat.sparse.DCSC_matrix` from :class:`torch.Tensor` (layout ==> torch.sparse_csc)
+    >>> indptr = torch.tensor([0, 2, 3, 6])
+    >>> indices = torch.tensor([0, 2, 2, 0, 1, 2])
+    >>> data = torch.tensor([1.0, 4.0, 5.0, 2.0, 3.0, 6.0], dtype=torch.float)
+    >>> torch_sparse_csc = torch.sparse_csc_tensor(indptr, indices, data)
+    >>> heat_sparse_csc = ht.sparse.sparse_csc_matrix(torch_sparse_csc, split=1)
+    >>> heat_sparse_csc
+    (indptr: tensor([0, 2, 3, 6]), indices: tensor([0, 2, 2, 0, 1, 2]), data: tensor([1., 4., 5., 2., 3., 6.]), dtype=ht.float32, device=cpu:0, split=1)
+
+    Create a :class:`~heat.sparse.DCSC_matrix` from :class:`scipy.sparse.csc_matrix`
+    >>> scipy_sparse_csc = scipy.sparse.csc_matrix((data, indices, indptr))
+    >>> heat_sparse_csc = ht.sparse.sparse_csc_matrix(scipy_sparse_csc, split=1)
+    >>> heat_sparse_csc
+    (indptr: tensor([0, 2, 3, 6], dtype=torch.int32), indices: tensor([0, 2, 2, 0, 1, 2], dtype=torch.int32), data: tensor([1., 4., 5., 2., 3., 6.]), dtype=ht.float32, device=cpu:0, split=1)
+
+    Create a :class:`~heat.sparse.DCSC_matrix` using data that is already distributed (with `is_split`)
+    >>> indptrs = [torch.tensor([0, 2, 3]), torch.tensor([0, 3])]
+    >>> indices = [torch.tensor([0, 2, 2]), torch.tensor([0, 1, 2])]
+    >>> data = [torch.tensor([1, 4, 5], dtype=torch.float),
+                torch.tensor([2, 3, 6], dtype=torch.float)]
+    >>> rank = ht.MPI_WORLD.rank
+    >>> local_indptr = indptrs[rank]
+    >>> local_indices = indices[rank]
+    >>> local_data = data[rank]
+    >>> local_torch_sparse_csc = torch.sparse_csc_tensor(local_indptr, local_indices, local_data)
+    >>> heat_sparse_csc = ht.sparse.sparse_csc_matrix(local_torch_sparse_csc, is_split=1)
+    >>> heat_sparse_csc
+    (indptr: tensor([0, 2, 3, 6]), indices: tensor([0, 2, 2, 0, 1, 2]), data: tensor([1., 4., 5., 2., 3., 6.]), dtype=ht.float32, device=cpu:0, split=1)
+
+    Create a :class:`~heat.sparse.DCSC_matrix` from List
+    >>> ht.sparse.sparse_csc_matrix([[0, 0, 1], [1, 0, 2], [0, 0, 3]])
+    (indptr: tensor([0, 1, 1, 4]), indices: tensor([1, 0, 1, 2]), data: tensor([1, 1, 2, 3]), dtype=ht.int64, device=cpu:0, split=None)
+
+`sparse_csr_matrix(obj: Iterable, dtype: Type[heat.core.types.datatype] | None = None, split: int | None = None, is_split: int | None = None, device: heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None) ‑> heat.sparse.dcsx_matrix.DCSR_matrix`
+:   Create a :class:`~heat.sparse.DCSR_matrix`.
+
+    Parameters
+    ----------
+    obj : array_like
+        A tensor or array, any object exposing the array interface, an object whose ``__array__`` method returns an
+        array, or any (nested) sequence. Sparse tensor that needs to be distributed.
+    dtype : datatype, optional
+        The desired data-type for the sparse matrix. If not given, the type will be determined as the minimum type required
+        to hold the objects in the sequence. This argument can only be used to ‘upcast’ the array. For downcasting, use
+        the :func:`~heat.sparse.DCSR_matrix.astype` method.
+    split : int or None, optional
+        The axis along which the passed array content ``obj`` is split and distributed in memory. DCSR_matrix only supports
+        distribution along axis 0. Mutually exclusive with ``is_split``.
+    is_split : int or None, optional
+        Specifies the axis along which the local data portions, passed in obj, are split across all machines. DCSR_matrix only
+        supports distribution along axis 0. Useful for interfacing with other distributed-memory code.
The shape of the global + array is automatically inferred. Mutually exclusive with ``split``. + device : str or Device, optional + Specifies the :class:`~heat.core.devices.Device` the array shall be allocated on (i.e. globally set default + device). + comm : Communication, optional + Handle to the nodes holding distributed array chunks. + + Raises + ------ + ValueError + If split and is_split parameters are not one of 0 or None. + + Examples + -------- + Create a :class:`~heat.sparse.DCSR_matrix` from :class:`torch.Tensor` (layout ==> torch.sparse_csr) + >>> indptr = torch.tensor([0, 2, 3, 6]) + >>> indices = torch.tensor([0, 2, 2, 0, 1, 2]) + >>> data = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.float) + >>> torch_sparse_csr = torch.sparse_csr_tensor(indptr, indices, data) + >>> heat_sparse_csr = ht.sparse.sparse_csr_matrix(torch_sparse_csr, split=0) + >>> heat_sparse_csr + (indptr: tensor([0, 2, 3, 6]), indices: tensor([0, 2, 2, 0, 1, 2]), data: tensor([1., 2., 3., 4., 5., 6.]), dtype=ht.float32, device=cpu:0, split=0) + + Create a :class:`~heat.sparse.DCSR_matrix` from :class:`scipy.sparse.csr_matrix` + >>> scipy_sparse_csr = scipy.sparse.csr_matrix((data, indices, indptr)) + >>> heat_sparse_csr = ht.sparse.sparse_csr_matrix(scipy_sparse_csr, split=0) + >>> heat_sparse_csr + (indptr: tensor([0, 2, 3, 6], dtype=torch.int32), indices: tensor([0, 2, 2, 0, 1, 2], dtype=torch.int32), data: tensor([1., 2., 3., 4., 5., 6.]), dtype=ht.float32, device=cpu:0, split=0) + + Create a :class:`~heat.sparse.DCSR_matrix` using data that is already distributed (with `is_split`) + >>> indptrs = [torch.tensor([0, 2, 3]), torch.tensor([0, 3])] + >>> indices = [torch.tensor([0, 2, 2]), torch.tensor([0, 1, 2])] + >>> data = [torch.tensor([1, 2, 3], dtype=torch.float), + torch.tensor([4, 5, 6], dtype=torch.float)] + >>> rank = ht.MPI_WORLD.rank + >>> local_indptr = indptrs[rank] + >>> local_indices = indices[rank] + >>> local_data = data[rank] + >>> local_torch_sparse_csr = torch.sparse_csr_tensor(local_indptr, local_indices, local_data) + >>> heat_sparse_csr = ht.sparse.sparse_csr_matrix(local_torch_sparse_csr, is_split=0) + >>> heat_sparse_csr + (indptr: tensor([0, 2, 3, 6]), indices: tensor([0, 2, 2, 0, 1, 2]), data: tensor([1., 2., 3., 4., 5., 6.]), dtype=ht.float32, device=cpu:0, split=0) + + Create a :class:`~heat.sparse.DCSR_matrix` from List + >>> ht.sparse.sparse_csr_matrix([[0, 0, 1], [1, 0, 2], [0, 0, 3]]) + (indptr: tensor([0, 1, 3, 4]), indices: tensor([2, 0, 2, 2]), data: tensor([1, 1, 2, 3]), dtype=ht.int64, device=cpu:0, split=None) diff --git a/doc/api/heat/sparse/index.md b/doc/api/heat/sparse/index.md new file mode 100644 index 0000000000..8846299f6a --- /dev/null +++ b/doc/api/heat/sparse/index.md @@ -0,0 +1,11 @@ +Module heat.sparse +================== +add sparse heat function to the ht.sparse namespace + +Sub-modules +----------- +* heat.sparse.arithmetics +* heat.sparse.dcsx_matrix +* heat.sparse.factories +* heat.sparse.manipulations +* heat.sparse.tests diff --git a/doc/api/heat/sparse/manipulations.md b/doc/api/heat/sparse/manipulations.md new file mode 100644 index 0000000000..0f6275734a --- /dev/null +++ b/doc/api/heat/sparse/manipulations.md @@ -0,0 +1,81 @@ +Module heat.sparse.manipulations +================================ +Manipulation operations for (potentially distributed) `DCSR_matrix`. 
+
+Functions
+---------
+
+`to_dense(sparse_matrix: __DCSX_matrix, order='C', out: DNDarray = None) ‑> heat.core.dndarray.DNDarray`
+:   Convert a :class:`~heat.sparse.DCSX_matrix` to a dense :class:`~heat.core.DNDarray`.
+    The output follows the same distribution among processes as the input.
+
+    Parameters
+    ----------
+    sparse_matrix : :class:`~heat.sparse.DCSR_matrix`
+        The sparse matrix which is to be converted to a dense array
+    order: str, optional
+        Options: ``'C'`` or ``'F'``. Specifies the memory layout of the newly created `DNDarray`. Default is ``order='C'``,
+        meaning the array will be stored in row-major order (C-like). If ``order='F'``, the array will be stored in
+        column-major order (Fortran-like).
+    out : DNDarray
+        Output buffer in which the values of the dense format are stored.
+        If not specified, a new DNDarray is created.
+
+    Raises
+    ------
+    ValueError
+        If the shape of the output buffer does not match that of the input.
+    ValueError
+        If the split axis of the output buffer does not match that of the input.
+
+    Examples
+    --------
+    >>> indptr = torch.tensor([0, 2, 3, 6])
+    >>> indices = torch.tensor([0, 2, 2, 0, 1, 2])
+    >>> data = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.float)
+    >>> torch_sparse_csr = torch.sparse_csr_tensor(indptr, indices, data)
+    >>> heat_sparse_csr = ht.sparse.sparse_csr_matrix(torch_sparse_csr, split=0)
+    >>> heat_sparse_csr
+    (indptr: tensor([0, 2, 3, 6]), indices: tensor([0, 2, 2, 0, 1, 2]), data: tensor([1., 2., 3., 4., 5., 6.]), dtype=ht.float32, device=cpu:0, split=0)
+    >>> heat_sparse_csr.todense()
+    DNDarray([[1., 0., 2.],
+              [0., 0., 3.],
+              [4., 5., 6.]], dtype=ht.float32, device=cpu:0, split=0)
+
+`to_sparse_csc(array: DNDarray) ‑> heat.sparse.dcsx_matrix.DCSC_matrix`
+:   Convert the distributed array to a sparse DCSC_matrix representation.
+
+    Parameters
+    ----------
+    array : DNDarray
+        The distributed array to be converted to a sparse DCSC_matrix.
+
+    Returns
+    -------
+    DCSC_matrix
+        A sparse DCSC_matrix representation of the input DNDarray.
+
+    Examples
+    --------
+    >>> dense_array = ht.array([[1, 0, 0], [0, 0, 2], [0, 3, 0]])
+    >>> dense_array.to_sparse_csc()
+    (indptr: tensor([0, 1, 2, 3]), indices: tensor([0, 2, 1]), data: tensor([1, 3, 2]), dtype=ht.int64, device=cpu:0, split=None)
+
+`to_sparse_csr(array: DNDarray) ‑> heat.sparse.dcsx_matrix.DCSR_matrix`
+:   Convert the distributed array to a sparse DCSR_matrix representation.
+
+    Parameters
+    ----------
+    array : DNDarray
+        The distributed array to be converted to a sparse DCSR_matrix.
+
+    Returns
+    -------
+    DCSR_matrix
+        A sparse DCSR_matrix representation of the input DNDarray.
+ + Examples + -------- + >>> dense_array = ht.array([[1, 0, 0], [0, 0, 2], [0, 3, 0]]) + >>> dense_array.to_sparse_csr() + (indptr: tensor([0, 1, 2, 3]), indices: tensor([0, 2, 1]), data: tensor([1, 2, 3]), dtype=ht.int64, device=cpu:0, split=None) diff --git a/doc/api/heat/sparse/tests/index.md b/doc/api/heat/sparse/tests/index.md new file mode 100644 index 0000000000..080a6aa713 --- /dev/null +++ b/doc/api/heat/sparse/tests/index.md @@ -0,0 +1,10 @@ +Module heat.sparse.tests +======================== + +Sub-modules +----------- +* heat.sparse.tests.test_arithmetics_csr +* heat.sparse.tests.test_dcscmatrix +* heat.sparse.tests.test_dcsrmatrix +* heat.sparse.tests.test_factories +* heat.sparse.tests.test_manipulations diff --git a/doc/api/heat/sparse/tests/test_arithmetics_csr.md b/doc/api/heat/sparse/tests/test_arithmetics_csr.md new file mode 100644 index 0000000000..99f1b8834b --- /dev/null +++ b/doc/api/heat/sparse/tests/test_arithmetics_csr.md @@ -0,0 +1,54 @@ +Module heat.sparse.tests.test_arithmetics_csr +============================================= + +Classes +------- + +`TestArithmeticsCSR(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_add(self)` + : + + `test_mul(self)` + : diff --git a/doc/api/heat/sparse/tests/test_dcscmatrix.md b/doc/api/heat/sparse/tests/test_dcscmatrix.md new file mode 100644 index 0000000000..b91ab8691e --- /dev/null +++ b/doc/api/heat/sparse/tests/test_dcscmatrix.md @@ -0,0 +1,72 @@ +Module heat.sparse.tests.test_dcscmatrix +======================================== + +Classes +------- + +`TestDCSC_matrix(methodName='runTest')` +: A class whose instances are single test cases. 
+ + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_astype(self)` + : + + `test_data(self)` + : + + `test_dtype(self)` + : + + `test_indices(self)` + : + + `test_indptr(self)` + : + + `test_larray(self)` + : + + `test_nnz(self)` + : + + `test_shape(self)` + : diff --git a/doc/api/heat/sparse/tests/test_dcsrmatrix.md b/doc/api/heat/sparse/tests/test_dcsrmatrix.md new file mode 100644 index 0000000000..72f83aa28d --- /dev/null +++ b/doc/api/heat/sparse/tests/test_dcsrmatrix.md @@ -0,0 +1,72 @@ +Module heat.sparse.tests.test_dcsrmatrix +======================================== + +Classes +------- + +`TestDCSR_matrix(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. 
+ + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_astype(self)` + : + + `test_data(self)` + : + + `test_dtype(self)` + : + + `test_indices(self)` + : + + `test_indptr(self)` + : + + `test_larray(self)` + : + + `test_nnz(self)` + : + + `test_shape(self)` + : diff --git a/doc/api/heat/sparse/tests/test_factories.md b/doc/api/heat/sparse/tests/test_factories.md new file mode 100644 index 0000000000..5203b42a33 --- /dev/null +++ b/doc/api/heat/sparse/tests/test_factories.md @@ -0,0 +1,54 @@ +Module heat.sparse.tests.test_factories +======================================= + +Classes +------- + +`TestFactories(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_sparse_csc_matrix(self)`
+    :
+
+    `test_sparse_csr_matrix(self)`
+    :
diff --git a/doc/api/heat/sparse/tests/test_manipulations.md b/doc/api/heat/sparse/tests/test_manipulations.md
new file mode 100644
index 0000000000..a747d63a02
--- /dev/null
+++ b/doc/api/heat/sparse/tests/test_manipulations.md
@@ -0,0 +1,60 @@
+Module heat.sparse.tests.test_manipulations
+===========================================
+
+Classes
+-------
+
+`TestManipulations(methodName='runTest')`
+:   A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed. When instantiating such a TestCase
+    subclass, specify in the constructor arguments the name of the test method
+    that the instance is to execute.
+
+    Test authors should subclass TestCase for their own tests. Construction
+    and deconstruction of the test's environment ('fixture') can be
+    implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+    If it is necessary to override the __init__ method, the base class
+    __init__ method must always be called. It is important that subclasses
+    should not change the signature of their __init__ method, since instances
+    of the classes are instantiated automatically by parts of the framework
+    in order to be run.
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+        the instance's assertion methods fail; test methods raising this
+        exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+        objects used in assert methods) will be printed on failure in *addition*
+        to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+        by assert methods using difflib. It is looked up as an instance
+        attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_to_dense_csc(self)`
+    :
+
+    `test_to_dense_csr(self)`
+    :
+
+    `test_to_sparse_csc(self)`
+    :
+
+    `test_to_sparse_csr(self)`
+    :
diff --git a/doc/api/heat/spatial/distance.md b/doc/api/heat/spatial/distance.md
new file mode 100644
index 0000000000..8e64e15e84
--- /dev/null
+++ b/doc/api/heat/spatial/distance.md
@@ -0,0 +1,56 @@
+Module heat.spatial.distance
+============================
+Module for (pairwise) distance functions
+
+Functions
+---------
+
+`cdist(X: heat.core.dndarray.DNDarray, Y: heat.core.dndarray.DNDarray = None, quadratic_expansion: bool = False) ‑> heat.core.dndarray.DNDarray`
+:   Calculate the Euclidean distance between two DNDarrays:
+
+    .. math:: d(x,y) = \sqrt{|x-y|^2}
+
+    Returns a 2D DNDarray of size :math:`m \times n`
+
+    Parameters
+    ----------
+    X : DNDarray
+        2D array of size :math:`m \times f`
+    Y : DNDarray
+        2D array of size :math:`n \times f`
+    quadratic_expansion : bool
+        Whether to use quadratic expansion for :math:`\sqrt{|x-y|^2}` (might yield a speed-up)
+
+`manhattan(X: heat.core.dndarray.DNDarray, Y: heat.core.dndarray.DNDarray = None, expand: bool = False)`
+:   Calculate the Manhattan distance between two DNDarrays:
+
+    .. math:: d(x,y) = \sum_i{|x_i-y_i|}
+
+    Returns a 2D DNDarray of size :math:`m \times n`
+
+    Parameters
+    ----------
+    X : DNDarray
+        2D array of size :math:`m \times f`
+    Y : DNDarray
+        2D array of size :math:`n \times f`
+    expand : bool
+        Whether to use dimension expansion (might yield a speed-up)
+
+`rbf(X: heat.core.dndarray.DNDarray, Y: heat.core.dndarray.DNDarray = None, sigma: float = 1.0, quadratic_expansion: bool = False) ‑> heat.core.dndarray.DNDarray`
+:   Calculate the Gaussian distance between two DNDarrays:
+
+    .. math:: d(x,y) = \exp(-|x-y|^2 / (2\sigma^2))
+
+    Returns a 2D DNDarray of size :math:`m \times n`
+
+    Parameters
+    ----------
+    X : DNDarray
+        2D array of size :math:`m \times f`
+    Y : DNDarray
+        2D array of size :math:`n \times f`
+    sigma: float
+        Scaling factor for the Gaussian kernel
+    quadratic_expansion : bool
+        Whether to use quadratic expansion for :math:`\sqrt{|x-y|^2}` (might yield a speed-up)
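+
+Usage sketch
+------------
+A hedged sketch of the pairwise-distance API documented above (the array values are random and illustrative):
+
+>>> import heat as ht
+>>> X = ht.random.randn(4, 3, split=0)
+>>> Y = ht.random.randn(5, 3, split=0)
+>>> # cdist and manhattan return a 4 x 5 matrix of pairwise distances;
+>>> # rbf returns the corresponding Gaussian kernel values
+>>> D_euclidean = ht.spatial.cdist(X, Y)
+>>> D_manhattan = ht.spatial.manhattan(X, Y)
+>>> K_gauss = ht.spatial.rbf(X, Y, sigma=1.0)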
+
+    When subclassing TestCase, you can set these attributes:
+    * failureException: determines which exception will be raised when
+      the instance's assertion methods fail; test methods raising this
+      exception will be deemed to have 'failed' rather than 'errored'.
+    * longMessage: determines whether long messages (including repr of
+      objects used in assert methods) will be printed on failure in *addition*
+      to any explicit message passed.
+    * maxDiff: sets the maximum length of a diff in failure messages
+      by assert methods using difflib. It is looked up as an instance
+      attribute so can be configured by individual tests if required.
+
+    Create an instance of the class that will use the named test
+    method when executed. Raises a ValueError if the instance does
+    not have a method with the specified name.
+
+    ### Ancestors (in MRO)
+
+    * heat.core.tests.test_suites.basic_test.TestCase
+    * unittest.case.TestCase
+
+    ### Methods
+
+    `test_cdist(self)`
+    :
diff --git a/doc/api/heat/tests/index.md b/doc/api/heat/tests/index.md new file mode 100644 index 0000000000..8a211142cb --- /dev/null +++ b/doc/api/heat/tests/index.md @@ -0,0 +1,6 @@
+Namespace heat.tests
+====================
+
+Sub-modules
+-----------
+* heat.tests.test_cli
diff --git a/doc/api/heat/tests/test_cli.md b/doc/api/heat/tests/test_cli.md new file mode 100644 index 0000000000..39a2e9566f --- /dev/null +++ b/doc/api/heat/tests/test_cli.md @@ -0,0 +1,16 @@
+Module heat.tests.test_cli
+==========================
+
+Classes
+-------
+
+`TestCLI()`
+:
+
+    ### Methods
+
+    `test_cli_help(self, mock_parse_args)`
+    :
+
+    `test_platform_info(self, mock_get_device_properties, mock_get_device_name, mock_get_default_device, mock_device_count, mock_cuda_current_device, mock_mpi_lib_version, mock_platform)`
+    :
diff --git a/doc/api/heat/utils/data/datatools.md b/doc/api/heat/utils/data/datatools.md new file mode 100644 index 0000000000..cd59e262e5 --- /dev/null +++ b/doc/api/heat/utils/data/datatools.md @@ -0,0 +1,164 @@
+Module heat.utils.data.datatools
+================================
+Functions and classes useful for loading data into neural networks
+
+Functions
+---------
+
+`dataset_ishuffle(dataset: heat.utils.data.datatools.Dataset | torch.utils.data.dataset.Dataset, attrs: List[list])`
+:   Shuffle the given attributes of a dataset across multiple processes, using non-blocking communications.
+    This will send half of the data to rank + 1. The data must be received by the :func:`dataset_irecv` function.
+
+    This function will be called by the DataLoader automatically if ``dataset.ishuffle = True``. This is set
+    either during the definition of the class or during its initialization via a given parameter.
+
+    Parameters
+    ----------
+    dataset : Dataset
+        the dataset to shuffle
+    attrs : List[List[str, str], ... ]
+        List of lists each of which contains 2 strings. The strings are the handles corresponding to the Dataset
+        attributes corresponding to the global data DNDarray and the local data of that array, i.e. [["htdata", "data"],]
+        would shuffle the htdata around and set the correct amount of data for the ``dataset.data`` attribute. For
+        multiple parameters multiple lists are required, e.g. [["htdata", "data"], ["httargets", "targets"]]
+
+    Notes
+    -----
+    ``dataset.comm`` must be defined for this function to work.
+
+`dataset_shuffle(dataset: heat.utils.data.datatools.Dataset | torch.utils.data.dataset.Dataset, attrs: List[list])`
+:   Shuffle the given attributes of a dataset across multiple processes.
This will send half of the data to rank + 1.
+    Once the new data is received, it will be shuffled into the existing data on the process.
+    This function will be called by the DataLoader automatically if ``dataset.ishuffle = False``.
+    ``attrs`` should have the form [["data", "htdata"], ...], i.e. pairs naming the local ``torch.Tensor``
+    attribute and the corresponding global ``DNDarray`` attribute; it is assumed that all of the attrs have the
+    same dim0 shape as the local data.
+
+    Parameters
+    ----------
+    dataset : Dataset
+        the dataset to shuffle
+    attrs : List[List[str, str], ... ]
+        List of lists each of which contains 2 strings. The strings are the handles corresponding to the Dataset
+        attributes corresponding to the global data DNDarray and the local data of that array, i.e. [["data", "htdata"],]
+        would shuffle the htdata around and set the correct amount of data for the ``dataset.data`` attribute. For
+        multiple parameters multiple lists are required, e.g. [["data", "htdata"], ["targets", "httargets"]]
+
+    Notes
+    -----
+    ``dataset.comm`` must be defined for this function to work.
+
+Classes
+-------
+
+`DataLoader(dataset: torch.utils.data.dataset.Dataset | heat.utils.data.partial_dataset.PartialH5Dataset, batch_size: int = 1, num_workers: int = 0, collate_fn: Callable = None, pin_memory: bool = False, drop_last: bool = False, timeout: int | float = 0, worker_init_fn: Callable = None)`
+:   This combines either a :func:`DNDarray` or a torch ``Dataset`` with a sampler. It provides an iterable over
+    the local dataset and it will shuffle the data at the end of the iterator. If a :func:`DNDarray` is given,
+    then a :func:`Dataset` will be created internally.
+
+    Currently, this supports only map-style datasets with single-process loading. It uses the random
+    batch sampler. The rest of the ``DataLoader`` functionality mentioned in ``torch.utils.data.dataloader`` applies.
+
+    Arguments:
+        dataset : :func:`Dataset`, torch ``Dataset``, :func:`heat.utils.data.partial_dataset.PartialH5Dataset`
+            A torch dataset from which the data will be returned by the created iterator
+        batch_size : int, optional
+            How many samples per batch to load.
+            Default: 1
+        num_workers : int, optional
+            How many subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
+            Default: 0
+        collate_fn : callable, optional
+            Merges a list of samples to form a mini-batch of torch.Tensor(s). Used when using batched loading from a
+            map-style dataset.
+            Default: None
+        pin_memory : bool, optional
+            If ``True``, the data loader will copy torch.Tensors into CUDA pinned memory before returning them.
+            If your data elements are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
+            see the example in the torch documentation.
+            Default: False
+        drop_last : bool, optional
+            Set to ``True`` to drop the last incomplete batch, if the dataset size is not divisible by
+            the batch size. If ``False`` and the size of dataset is not divisible by the batch size, then
+            the last batch will be smaller.
+            Default: ``False``
+        timeout : int or float, optional
+            If positive, the timeout value for collecting a batch from workers.
Should always be non-negative.
+            Default: 0
+        worker_init_fn : callable, optional
+            If not ``None``, this will be called on each worker subprocess with the worker id
+            (an int in ``[0, num_workers - 1]``) as input, after seeding and before data loading.
+            Default: None
+
+    Attributes
+    ----------
+    dataset : :func:`Dataset`, torch ``Dataset``, :func:`heat.utils.data.partial_dataset.PartialH5Dataset`
+        The dataset created from the local data
+    DataLoader : ``torch.utils.data.dataloader``
+        The local DataLoader object. Used in the creation of the iterable and the length
+    _first_iter : bool
+        Flag indicating if the iterator created is the first one. If it is not, then the data will be shuffled before
+        the iterator is created
+    last_epoch : bool
+        Flag indicating the last epoch
+
+`Dataset(array, transforms: List | Callable | None = None, ishuffle: bool | None = False, test_set: bool | None = False)`
+:   An abstract class representing a given dataset. This inherits from torch.utils.data.Dataset.
+
+    This class is a general example for what should be done to create a Dataset. When creating a dataset all of the
+    standard attributes should be set, and the ``__getitem__``, ``__len__``, and ``Shuffle`` functions must be defined.
+
+    - ``__getitem__`` : how an item is given to the network
+    - ``__len__`` : the number of data elements to be given to the network in total
+    - ``Shuffle()`` : how the data should be shuffled between the processes. The function shown below is for a dataset composed of only data and without targets. The function :func:`dataset_shuffle` abstracts this. For this function only the dataset and a list of attributes to shuffle are given.
+    - ``Ishuffle()`` : A non-blocking version of ``Shuffle()``, this is handled in the abstract function :func:`dataset_ishuffle`. It works similarly to :func:`dataset_shuffle`.
+
+    As the amount of data across processes can be non-uniform, the dataset class will slice off the remaining elements
+    on whichever processes have more data than the others. This should be at most one element.
+    The shuffle function will shuffle all of the data on the process.
+
+    It is recommended that for ``DNDarray``s the split is either 0 or None.
+
+    Parameters
+    ----------
+    array : DNDarray
+        DNDarray for which to create the dataset
+    transforms : List or Callable, optional
+        Transformation(s) to call before a data item is returned
+    ishuffle : bool, optional
+        Flag indicating whether to use non-blocking communications for shuffling the data between epochs
+        Note: if ``True``, the ``Ishuffle()`` function must be defined within the class
+        Default: False
+
+    Attributes
+    ----------
+    These are the required attributes.
+
+    htdata : DNDarray
+        Full data
+    _cut_slice : slice
+        Slice to cut off the last element to get a uniform amount of data on each process
+    comm : MPICommunicator
+        Communication object used to send the data between processes
+    lcl_half : int
+        Half of the number of data elements on the process
+    data : torch.Tensor
+        The local data to be used in training
+    transforms : Callable
+        Transform to be called during the getitem function
+    ishuffle : bool
+        Flag indicating if non-blocking communications are used for shuffling the data between epochs
+
+    ### Ancestors (in MRO)
+
+    * torch.utils.data.dataset.Dataset
+    * typing.Generic
+
+    ### Methods
+
+    `Ishuffle(self)`
+    :   Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
+        receiving the new data, shuffle the local tensor.
+
+    `Shuffle(self)`
+    :   Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
+        receiving the new data, shuffle the local tensor.
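+
+A minimal usage sketch (shapes and names are illustrative only, assuming a ``DNDarray`` split along axis 0):
+
+```python
+import heat as ht
+from heat.utils.data.datatools import DataLoader, Dataset
+
+# distributed array: 100 samples with 8 features each
+arr = ht.random.rand(100, 8, split=0)
+
+# wrap the array in a Dataset and iterate over the local batches
+dset = Dataset(arr, ishuffle=False)
+loader = DataLoader(dset, batch_size=10)
+for batch in loader:
+    ...  # each batch is a local torch.Tensor ready for a network
+```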
diff --git a/doc/api/heat/utils/data/index.md b/doc/api/heat/utils/data/index.md new file mode 100644 index 0000000000..fe6e8511ca --- /dev/null +++ b/doc/api/heat/utils/data/index.md @@ -0,0 +1,12 @@
+Module heat.utils.data
+======================
+Add data utility functions to the ht.utils.data namespace
+
+Sub-modules
+-----------
+* heat.utils.data.datatools
+* heat.utils.data.matrixgallery
+* heat.utils.data.mnist
+* heat.utils.data.partial_dataset
+* heat.utils.data.spherical
+* heat.utils.data.tests
diff --git a/doc/api/heat/utils/data/matrixgallery.md b/doc/api/heat/utils/data/matrixgallery.md new file mode 100644 index 0000000000..dc72c60813 --- /dev/null +++ b/doc/api/heat/utils/data/matrixgallery.md @@ -0,0 +1,75 @@
+Module heat.utils.data.matrixgallery
+====================================
+Generate matrices for specific tests and functions
+
+Functions
+---------
+
+`hermitian(n: int, dtype: Type[heat.core.types.datatype] = heat.core.types.complex64, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, positive_definite: bool = False) ‑> heat.core.dndarray.DNDarray`
+:   Generates a random Hermitian matrix of size `(n,n)`. A Hermitian matrix is a complex square matrix that is equal to its conjugate transpose; for real data-types this routine
+    returns a random symmetric matrix of size `(n,n)`.
+
+    If `positive_definite=True`, the output is given by :math:`\frac{1}{n} R R^H` with :math:`R\in\mathbb{K}^{n\times n}` having entries distributed according to the standard normal distribution.
+    This corresponds to sampling a random matrix according to the so-called Wishart distribution; see, e.g., [2], and also [3] for additional information regarding the asymptotic distribution of
+    the singular values. The output matrix will be positive definite with probability 1.
+
+    If `positive_definite=False`, the output is :math:`R+R^H` with :math:`R` generated as above.
+
+    Parameters
+    ----------
+    n : int
+        size of the resulting square matrix
+    dtype : Type[datatype], optional
+        The desired data-type for the array, defaults to ht.complex64; only floating-point data-types allowed.
+        For real data-types, i.e. float32 and float64, a matrix with real entries (i.e. a symmetric one) is returned.
+    split : None or int, optional
+        The axis along which the array content is split and distributed in memory.
+    device : None or str or Device, optional
+        Specifies the device the tensor shall be allocated on, defaults to the globally set default device.
+    comm : Communication, optional
+        Handle to the nodes holding distributed parts or copies of this array.
+    positive_definite : bool, optional
+        If True, the resulting matrix is positive definite, defaults to False.
+
+    References
+    ----------
+    [1] https://en.wikipedia.org/wiki/Hermitian_matrix
+    [2] https://en.wikipedia.org/wiki/Wishart_distribution
+    [3] https://en.wikipedia.org/wiki/Marchenko%E2%80%93Pastur_distribution
+
+`parter(n: int, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, dtype: Type[heat.core.types.datatype] = heat.core.types.float32) ‑> heat.core.dndarray.DNDarray`
+:   Generates the Parter matrix, a Toeplitz matrix that has the interesting property of having its singular values cluster at
+    :math:`\pi`. The matrix has been named so by Cleve Moler in recognition of Seymour Parter's proof of this fact.
+
+    Parameters
+    ----------
+    n : int
+        size of the resulting square matrix
+    split : None or int, optional
+        The axis along which the array content is split and distributed in memory.
+    device : None or str or Device, optional
+        Specifies the device the tensor shall be allocated on, defaults to the globally set default device.
+    comm : None or Communication, optional
+        Handle to the nodes holding distributed tensor chunks.
+    dtype : Type[datatype], optional
+        The desired data-type for the array, defaults to ht.float32.
+
+    References
+    ----------
+    [1] https://blogs.mathworks.com/cleve/2019/06/24/bohemian-matrices-in-the-matlab-gallery/
+
+    [2] https://blogs.mathworks.com/cleve/2014/02/03/surprising-svd-square-waves-and-pi/
+
+    [3] Seymour V. Parter, On the distribution of the singular values of Toeplitz matrices, Linear Algebra and its
+    Applications 80, 1986, 115-130, http://www.sciencedirect.com/science/article/pii/0024379586902806
+
+`random_known_rank(m: int, n: int, r: int, quantile_function: Callable = <function <lambda>>, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, dtype: Type[heat.core.types.datatype] = heat.core.types.float32) ‑> Tuple[heat.core.dndarray.DNDarray, Tuple[heat.core.dndarray.DNDarray]]`
+:   Creates a random m x n matrix with rank r.
+    This routine uses :func:`random_known_singularvalues` with r singular values randomly chosen
+    w.r.t. the distribution with quantile function given by the input quantile_function. The default yields the exponential distribution with parameter lambda=1.
+    Unlike in :func:`random_known_singularvalues`, here the singular values of the output are sorted in descending order.
+
+`random_known_singularvalues(m: int, n: int, singular_values: heat.core.dndarray.DNDarray, split: int | None = None, device: str | heat.core.devices.Device | None = None, comm: heat.core.communication.Communication | None = None, dtype: Type[heat.core.types.datatype] = heat.core.types.float32) ‑> Tuple[heat.core.dndarray.DNDarray, Tuple[heat.core.dndarray.DNDarray]]`
+:   Creates an m x n matrix with singular values given by the entries of the input array singular_values.
+    Caveat: if the entries of `singular_values` are not sorted, the singular value decomposition of A (returned as the second output) is unsorted as well.
+    The singular vectors are chosen randomly using :func:`random_orthogonal`.
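+
+A short usage sketch (sizes are illustrative; the matrices are random, so concrete values will differ):
+
+```python
+import heat as ht
+from heat.utils.data.matrixgallery import hermitian, parter
+
+# random 4 x 4 Hermitian matrix, distributed along the rows
+H = hermitian(4, split=0)
+
+# real symmetric positive definite variant
+S = hermitian(4, dtype=ht.float64, positive_definite=True)
+
+# 100 x 100 Parter matrix, whose singular values cluster near pi
+P = parter(100)
+```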
diff --git a/doc/api/heat/utils/data/mnist.md b/doc/api/heat/utils/data/mnist.md new file mode 100644 index 0000000000..63c4ca88e1 --- /dev/null +++ b/doc/api/heat/utils/data/mnist.md @@ -0,0 +1,73 @@
+Module heat.utils.data.mnist
+============================
+File for the MNIST dataset definition in heat
+
+Classes
+-------
+
+`MNISTDataset(root: str, train: bool = True, transform: Callable = None, target_transform: Callable = None, download: bool = True, split: int = 0, ishuffle: bool = False, test_set: bool = False)`
+:   Dataset wrapper for ``torchvision.datasets.MNIST``.
+    This implements all of the required functions mentioned in :class:`heat.utils.data.Dataset`. The ``__getitem__`` and ``__len__`` functions are inherited from
+    ``torchvision.datasets.MNIST``.
+
+    Parameters
+    ----------
+    root : str
+        Directory containing the MNIST dataset
+    train : bool, optional
+        Whether the data is the training dataset or not, default is True
+    transform : Callable, optional
+        Transform to be applied to the data in the ``__getitem__`` function, default is ``None``
+    target_transform : Callable, optional
+        Transform to be applied to the targets in the ``__getitem__`` function, default is ``None``
+    download : bool, optional
+        If True (default), download the data if it does not exist in the directory
+    split : int, optional
+        On which axis to split the data when it is loaded into a ``DNDarray``
+    ishuffle : bool, optional
+        Flag indicating whether to use non-blocking communications for shuffling the data between epochs
+        Note: if True, the ``Ishuffle()`` function must be defined within the class
+        Default: ``False``
+    test_set : bool, optional
+        If this dataset is the testing set then keep all of the data local
+        Default: ``False``
+
+    Attributes
+    ----------
+    htdata : DNDarray
+        full data
+    httargets : DNDarray
+        full target data
+    comm : communication.MPICommunicator
+        heat communicator for sending data between processes
+    _cut_slice : slice
+        slice to remove the last element if all are not equal in length
+    lcl_half : int
+        integer value of half of the data on the process
+    data : torch.Tensor
+        the local data on a process
+    targets : torch.Tensor
+        the local targets on a process
+    ishuffle : bool
+        flag indicating if non-blocking communications are used for shuffling the data between epochs
+    test_set : bool
+        if this dataset is the testing set then keep all of the data local
+
+    Notes
+    -----
+    For other attributes see ``torchvision.datasets.MNIST``.
+
+    ### Ancestors (in MRO)
+
+    * torchvision.datasets.mnist.MNIST
+    * torchvision.datasets.vision.VisionDataset
+    * torch.utils.data.dataset.Dataset
+    * typing.Generic
+
+    ### Methods
+
+    `Ishuffle(self)`
+    :   Uses the :func:`datatools.dataset_ishuffle` function to shuffle the data between the processes
+
+    `Shuffle(self)`
+    :   Uses the :func:`datatools.dataset_shuffle` function to shuffle the data between the processes
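+
+A minimal usage sketch (the root directory is a placeholder; consuming the dataset through the :func:`DataLoader` from ``heat.utils.data.datatools`` is one possible setup, and the per-batch unpacking assumes the usual (image, target) items of torchvision's MNIST):
+
+```python
+import torchvision.transforms as transforms
+from heat.utils.data.datatools import DataLoader
+from heat.utils.data.mnist import MNISTDataset
+
+# training split, downloaded on demand and split along the sample axis
+mnist = MNISTDataset("data/mnist", train=True, transform=transforms.ToTensor(), download=True)
+
+loader = DataLoader(mnist, batch_size=64)
+for images, labels in loader:
+    ...  # local torch.Tensor batches of images and targets
+```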
diff --git a/doc/api/heat/utils/data/partial_dataset.md b/doc/api/heat/utils/data/partial_dataset.md new file mode 100644 index 0000000000..5b721ffa7e --- /dev/null +++ b/doc/api/heat/utils/data/partial_dataset.md @@ -0,0 +1,76 @@
+Module heat.utils.data.partial_dataset
+======================================
+Tool for using a dataset which will not fit in memory with neural networks
+
+Classes
+-------
+
+`PartialH5DataLoaderIter(loader)`
+:   Iterator to be used with :func:`PartialH5Dataset`. It closely mirrors the standard torch iterator while loading
+    new data to replace the loaded batches automatically. It also pre-fetches the batches and begins their
+    preparation, collation, and device setting in the background.
+
+`PartialH5Dataset(file: str, comm: heat.core.communication.MPICommunication = MPI_WORLD, dataset_names: str | List[str] = 'data', transforms: List[Callable] = None, use_gpu: bool = True, validate_set: bool = False, initial_load: int = 7000, load_length: int = 1000)`
+:   Create a Dataset object for a dataset which loads portions of data from an HDF5 file. Very similar to
+    :func:`heat.utils.data.datatools.Dataset`. This will create two threads, one for loading the data from the
+    target file, and one for converting items before being passed to the network. The conversion is done by the
+    iterator.
+    A portion of the data of length ``initial_load`` is loaded upon initialization, the rest of the data is loaded
+    after the loaded data is returned by :func:`PartialH5DataLoaderIter`. This iterator will be used by the HeAT
+    :func:`heat.utils.data.datatools.DataLoader` automatically with this type of dataset.
+
+    Notes
+    -----
+    H5 datasets require the GIL to load data. This can be a bottleneck if data needs to be loaded multiple times (e.g.
+    the case for using this dataset). It is recommended to find another way to preprocess the data and avoid using
+    H5 files for this reason.
+
+    Parameters
+    ----------
+    file : str
+        H5 file to use
+    comm : MPICommunication
+        Global MPI communicator generated by HeAT
+    dataset_names : Union[str, List[str]], optional
+        Name/s of dataset/s to load from ``file``. If a string is given, it will be the only dataset loaded.
+        Default is "data".
+    transforms : List[Callable], optional
+        Transforms to apply to the data after it is retrieved from the loaded data and before it is passed to the
+        network. This should be a list of Callable torch functions for each item returned by the ``__getitem__``
+        function of the individual dataset. If a list element is ``None`` then no transform will be applied to the
+        corresponding element returned by ``__getitem__``. I.e. if ``__getitem__`` returns an image and a label
+        then the list would look like this: ``transforms = [image_transforms, None]``. If this is ``None``, no
+        transforms will be applied to any elements. Default is ``None``.
+    use_gpu : bool, optional
+        Use GPUs if available. Defaults to True.
+    validate_set : bool, optional
+        Load the entire dataset onto each node upon initialization and skip loading in the iterator.
+        This is typically the case needed for validation sets when the network should be tested against the whole
+        dataset. Default is False.
+    initial_load : int, optional
+        How many elements to load from the file in the 0th dimension. Default is 7000 elements
+    load_length : int, optional
+        How many elements to load from the file in the iterator. Default is 1000 elements
+
+    ### Ancestors (in MRO)
+
+    * torch.utils.data.dataset.Dataset
+    * typing.Generic
+
+    ### Methods
+
+    `Ishuffle(self)`
+    :   Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
+        receiving the new data, shuffle the local tensor.
+
+        Not implemented for partial dataset
+
+    `Shuffle(self)`
+    :   Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
+        receiving the new data, shuffle the local tensor.
+
+        Not implemented for partial dataset
+
+    `thread_replace_converted_batches(self)`
+    :   Replace the elements of the dataset with newly loaded elements. :func:`PartialH5DataLoaderIter` will
+        put the used indices in the ``used_indices`` attribute. This object is reset to an empty list after
+        these elements are overwritten with new data.
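+
+A minimal usage sketch (the file name and dataset keys are placeholders for an HDF5 file of your own):
+
+```python
+from heat.utils.data.datatools import DataLoader
+from heat.utils.data.partial_dataset import PartialH5Dataset
+
+# stream a large HDF5 file in portions instead of loading it all at once
+pdset = PartialH5Dataset(
+    "big_data.h5",
+    dataset_names=["data", "labels"],
+    initial_load=7000,
+    load_length=1000,
+)
+
+# the DataLoader picks the partial-dataset iterator automatically
+loader = DataLoader(pdset, batch_size=32)
+for batch in loader:
+    ...
+```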
diff --git a/doc/api/heat/utils/data/spherical.md b/doc/api/heat/utils/data/spherical.md new file mode 100644 index 0000000000..fb3efae90e --- /dev/null +++ b/doc/api/heat/utils/data/spherical.md @@ -0,0 +1,48 @@
+Module heat.utils.data.spherical
+================================
+Create a spherical dataset.
+
+Functions
+---------
+
+`create_clusters(n_samples, n_features, n_clusters, cluster_mean, cluster_std, cluster_weight=None, device=None)`
+:   Creates a DNDarray of shape (n_samples, n_features), split=0, and dtype=ht.float32, that is balanced (i.e. a
+    roughly equal number of samples on each process).
+    The data set consists of n_clusters clusters, each of which is sampled from a multivariate normal distribution
+    with mean cluster_mean[k,:] and covariance matrix cluster_std[k,:,:].
+    The clusters are of the same size (quantitatively) and distributed evenly over the processes, unless
+    cluster_weight is specified.
+
+    Parameters
+    ----------
+    n_samples : int
+        Number of overall samples
+    n_features : int
+        Number of features
+    n_clusters : int
+        Number of clusters
+    cluster_mean : torch.Tensor of shape (n_clusters, n_features)
+        Featurewise mean (center) of each cluster; of course not the true mean, but rather the mean according to
+        which the elements of the cluster are sampled.
+    cluster_std : torch.Tensor of shape (n_clusters, n_features, n_features), or (n_clusters,)
+        Featurewise standard deviation of each cluster from the mean value; of course not the true std, but rather
+        the std according to which the elements of the cluster are sampled.
+        If the shape is (n_clusters,), the std is assumed to be the same in each direction for each cluster.
+    cluster_weight : torch.Tensor of shape (n_clusters,), optional
+        On each process, cluster_weight is assumed to be a torch.Tensor whose entries add up to 1. The i-th entry
+        of cluster_weight on process p specifies which share of the samples on process p is sampled according to
+        the distribution of cluster i. Thus, this parameter allows distributing the n_clusters clusters unevenly
+        over the processes.
+        If None, each cluster is distributed evenly over all processes.
+    device : str, optional
+        The device on which the data is stored. If None, the default device is used.
+
+`create_spherical_dataset(num_samples_cluster, radius=1.0, offset=4.0, dtype=heat.core.types.float32, random_state=1)`
+:   Creates k=4 spherical clusters in 3D space along the space diagonal
+
+    Parameters
+    ----------
+    num_samples_cluster : int
+        Number of samples per cluster. Each process will create num_samples_cluster // MPI_WORLD.size elements for
+        each cluster
+    radius : float
+        Radius of the sphere
+    offset : float
+        Shift of the clusters along the axes. The 4 clusters will be positioned centered around c1=(offset, offset, offset),
+        c2=(2*offset, 2*offset, 2*offset), c3=(-offset, -offset, -offset) and c4=(2*offset, -2*offset, -2*offset)
+    dtype : ht.datatype
+        Dataset dtype
+    random_state : int
+        Seed of the torch random number generator
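+
+A short usage sketch (parameter values are illustrative):
+
+```python
+import torch
+from heat.utils.data.spherical import create_clusters, create_spherical_dataset
+
+# four spherical clusters along the space diagonal, 100 samples per cluster
+data = create_spherical_dataset(100, radius=1.0, offset=4.0)
+
+# two custom Gaussian clusters in 2D, with scalar standard deviations
+means = torch.tensor([[0.0, 0.0], [5.0, 5.0]])
+stds = torch.tensor([1.0, 0.5])
+clusters = create_clusters(1000, 2, 2, means, stds)
+```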
diff --git a/doc/api/heat/utils/data/tests/index.md b/doc/api/heat/utils/data/tests/index.md new file mode 100644 index 0000000000..ec53e0cbf1 --- /dev/null +++ b/doc/api/heat/utils/data/tests/index.md @@ -0,0 +1,8 @@
+Module heat.utils.data.tests
+============================
+
+Sub-modules
+-----------
+* heat.utils.data.tests.test_matrixgallery
+* heat.utils.data.tests.test_partial_dataset
+* heat.utils.data.tests.test_spherical
diff --git a/doc/api/heat/utils/data/tests/test_matrixgallery.md b/doc/api/heat/utils/data/tests/test_matrixgallery.md new file mode 100644 index 0000000000..4968fdbe9c --- /dev/null +++ b/doc/api/heat/utils/data/tests/test_matrixgallery.md @@ -0,0 +1,63 @@
+Module heat.utils.data.tests.test_matrixgallery
+===============================================
+
+Classes
+-------
+
+`TestMatrixgallery(methodName='runTest')`
+:   A class whose instances are single test cases.
+
+    By default, the test code itself should be placed in a method named
+    'runTest'.
+
+    If the fixture may be used for many test cases, create as
+    many test methods as are needed.
When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_hermitian(self)` + : + + `test_parter(self)` + : + + `test_random_known_rank(self)` + : + + `test_random_known_singularvalues(self)` + : + + `test_random_orthogonal(self)` + : diff --git a/doc/api/heat/utils/data/tests/test_partial_dataset.md b/doc/api/heat/utils/data/tests/test_partial_dataset.md new file mode 100644 index 0000000000..2719ec29fc --- /dev/null +++ b/doc/api/heat/utils/data/tests/test_partial_dataset.md @@ -0,0 +1,50 @@ +Module heat.utils.data.tests.test_partial_dataset +================================================= + +Classes +------- + +`TestPartialDataset(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. 
+ * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. + + ### Ancestors (in MRO) + + * unittest.case.TestCase + + ### Methods + + `test_partial_h5_dataset(self)` + : diff --git a/doc/api/heat/utils/data/tests/test_spherical.md b/doc/api/heat/utils/data/tests/test_spherical.md new file mode 100644 index 0000000000..87ee491a2c --- /dev/null +++ b/doc/api/heat/utils/data/tests/test_spherical.md @@ -0,0 +1,54 @@ +Module heat.utils.data.tests.test_spherical +=========================================== + +Classes +------- + +`TestCreateClusters(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * heat.core.tests.test_suites.basic_test.TestCase + * unittest.case.TestCase + + ### Methods + + `test_create_cluster(self)` + : + + `test_if_errors_are_catched(self)` + : diff --git a/doc/api/heat/utils/index.md b/doc/api/heat/utils/index.md new file mode 100644 index 0000000000..2fe9bca518 --- /dev/null +++ b/doc/api/heat/utils/index.md @@ -0,0 +1,9 @@ +Module heat.utils +================= +Add the utility functions to the ht.utils namespace + +Sub-modules +----------- +* heat.utils.data +* heat.utils.tests +* heat.utils.vision_transforms diff --git a/doc/api/heat/utils/tests/index.md b/doc/api/heat/utils/tests/index.md new file mode 100644 index 0000000000..abb29320d0 --- /dev/null +++ b/doc/api/heat/utils/tests/index.md @@ -0,0 +1,6 @@ +Module heat.utils.tests +======================= + +Sub-modules +----------- +* heat.utils.tests.test_vision_transforms diff --git a/doc/api/heat/utils/tests/test_vision_transforms.md b/doc/api/heat/utils/tests/test_vision_transforms.md new file mode 100644 index 0000000000..d781d4bce1 --- /dev/null +++ b/doc/api/heat/utils/tests/test_vision_transforms.md @@ -0,0 +1,50 @@ +Module heat.utils.tests.test_vision_transforms +============================================== + +Classes +------- + +`TestVisionTransforms(methodName='runTest')` +: A class whose instances are single test cases. + + By default, the test code itself should be placed in a method named + 'runTest'. + + If the fixture may be used for many test cases, create as + many test methods as are needed. When instantiating such a TestCase + subclass, specify in the constructor arguments the name of the test method + that the instance is to execute. + + Test authors should subclass TestCase for their own tests. Construction + and deconstruction of the test's environment ('fixture') can be + implemented by overriding the 'setUp' and 'tearDown' methods respectively. + + If it is necessary to override the __init__ method, the base class + __init__ method must always be called. It is important that subclasses + should not change the signature of their __init__ method, since instances + of the classes are instantiated automatically by parts of the framework + in order to be run. + + When subclassing TestCase, you can set these attributes: + * failureException: determines which exception will be raised when + the instance's assertion methods fail; test methods raising this + exception will be deemed to have 'failed' rather than 'errored'. + * longMessage: determines whether long messages (including repr of + objects used in assert methods) will be printed on failure in *addition* + to any explicit message passed. + * maxDiff: sets the maximum length of a diff in failure messages + by assert methods using difflib. It is looked up as an instance + attribute so can be configured by individual tests if required. + + Create an instance of the class that will use the named test + method when executed. Raises a ValueError if the instance does + not have a method with the specified name. 
+ + ### Ancestors (in MRO) + + * unittest.case.TestCase + + ### Methods + + `test_vision_transforms_getattr(self)` + : diff --git a/doc/api/heat/utils/vision_transforms.md b/doc/api/heat/utils/vision_transforms.md new file mode 100644 index 0000000000..dd1233c3f3 --- /dev/null +++ b/doc/api/heat/utils/vision_transforms.md @@ -0,0 +1,3 @@ +Module heat.utils.vision_transforms +=================================== +File with the available transforms for images diff --git a/doc/case_studies.md b/doc/case_studies.md new file mode 100644 index 0000000000..8343346336 --- /dev/null +++ b/doc/case_studies.md @@ -0,0 +1,25 @@ +
+# Case Studies
+
+## TerrSysMP
+
+The IBG‑3 at Research Centre Juelich uses Heat to postprocess TerrSysMP hydrological flow simulations.
+
+## Rocket Science
+
+The German Aerospace Center (DLR) uses Heat to analyze combustion phases of rockets in high‑speed camera videos.
+
+## Protein Simulations
+
+At Karlsruhe Institute of Technology, researchers identify protein folding states in MD simulations with Heat.
diff --git a/doc/documentation_howto.md b/doc/documentation_howto.md new file mode 100644 index 0000000000..8170ce6da8 --- /dev/null +++ b/doc/documentation_howto.md @@ -0,0 +1,141 @@ +# Writing Heat Documentation + +Heat’s documentation is now built entirely with **MkDocs** and the Material theme, with API reference pages generated from the source code. This guide explains how to build the docs locally and how to write consistent, high‑quality docstrings. + +## Prerequisites + +The documentation stack consists of: + +- MkDocs with the Material theme for the static site. +- pdoc for auto‑generated API reference pages under `doc/api/heat/...`. +- Standard Markdown (plus a few MkDocs/Material extensions such as admonitions and fenced code blocks). + +Install the documentation dependencies into your virtual environment: + +```bash +pip install -e . +pip install -r doc/requirements.txt +``` + +Typical requirements include `mkdocs`, `mkdocs-material`, `mkdocstrings-python`, `mkdocs-git-revision-date-localized-plugin`, and `pdoc`. + +All MkDocs configuration lives in `mkdocs.yml`, and the Markdown sources are under `doc/`. + +## Building the documentation + +There are two steps: regenerate the API reference and build the MkDocs site. + + I. From the project root, regenerate the API docs: + + ```bash + PYTHONPATH=. pdoc --skip-errors --force --output-dir doc/api heat + ``` + + This recreates `doc/api/heat/...` and produces one Markdown page per module, class, and function, including subpackages and tests. + + II. Build or serve the MkDocs site: + + ```bash + mkdocs serve # local preview at http://127.0.0.1:8000 + # or + mkdocs build # static site output in site/ + ``` + +MkDocs uses the navigation specified in `mkdocs.yml` to organize tutorials, guides, and the API Reference sidebar, fully replacing the previous Sphinx + autoapi setup. + +- The API navigation is maintained manually in `mkdocs.yml`. When new modules are added or removed, update the `API Reference` section so the sidebar matches the generated `doc/api/heat/...` pages. + +## Docstring guidelines + +Docstrings continue to follow the **NumPy** style, with reStructuredText‑like section headings (Parameters, Returns, Notes, Examples, …), but the surrounding site is now pure Markdown. pdoc renders these docstrings into the API pages, so clarity and consistency matter. + +### Docstring content + +- Write clear, concise descriptions that explain behavior and intent, not just types. +- Use type hints for all parameters and return types whenever possible. +- Cross‑reference major Heat classes (`DNDarray`, `Communication`, `Device`, `data_type`) by importing them in the module and referring to them by name in the text (pdoc will link them where possible). +- In narrative text, refer to Heat arrays as “array” and reserve “tensor” for PyTorch tensors. +- Use code formatting for function names, parameters, literals, and exceptions, for example `add`, `dtype`, `None`, `True`, `NotImplementedError`. +- Use math formatting (LaTeX inside `\( … \)` or `\[ … \]`) for formulas in docstrings or Markdown pages. + +### Docstring format + +A standard function should look like this: + +```python +def foo(x: DNDarray, y: str, k: int = 0) -> DNDarray: + """ + One-line summary of what the function does. + + A longer description can explain details, edge cases, or provide a short narrative + about how the function should be used. + + Parameters + ---------- + x : DNDarray + Description of x. + y : str + Description of y. Can be either 'a', 'b' or 'c'. 
+    k : int, optional
+        Description of k. Default is 0.
+
+    Notes
+    -----
+    Additional background, algorithmic details, or caveats.
+
+    References
+    ----------
+    [1] Webpage or paper reference.
+    [2] Additional literature as needed.
+
+    Warnings
+    --------
+    Important usage warnings or behavioral quirks.
+
+    Raises
+    ------
+    ValueError
+        Describe when this is raised.
+    RuntimeError
+        Describe when this is raised.
+
+    See Also
+    --------
+    other_function : Brief explanation of the relationship.
+
+    Examples
+    --------
+    >>> import heat as ht
+    >>> T = ht.array([[1, 2], [3, 4]], dtype=ht.float32)
+    >>> ht.add(T, 2)
+    DNDarray([[3., 4.],
+              [5., 6.]], dtype=ht.float32, device=cpu:0)
+    """
+```
+
+For classes, place the docstring directly under the `class` definition rather than in `__init__`, so that initialization parameters and attributes are captured correctly.
+
+### Parameter and example conventions
+
+- Define default values in the **Parameters** section (for example, "Default is 0") rather than in separate notes.
+- Shape information goes at the end of the parameter description, e.g. `Shape = (x, y, ...)`.
+- For classes, describe initialization parameters in an **Attributes** section.
+- When listing alternative types, separate them with `or`, not commas (for example, `int or None`).
+- For complex type hints (`Union`, `List`, `Tuple`, etc.), follow the standard `typing` module conventions.
+
+Examples:
+
+- Group related examples into a single **Examples** block; use blank lines only when there is a clear distinction between examples.
+- Do not add a colon after the **Examples** heading.
+- Avoid inline comments inside doctest blocks; move explanatory text into **Notes** instead.
+
+## Writing Markdown pages
+
+All narrative documentation (tutorials, guides, case studies) is now written in Markdown under `doc/`.
+
+- Use standard Markdown headings (`#`, `##`, `###`) and fenced code blocks (```python).
+- Prefer Markdown links for internal navigation, for example `[API Reference](../api/heat/core/arithmetics.md)`, with correct relative paths from the current page.
+- HTML is allowed for advanced layout (tooltips, custom cards), but ensure all tags are properly closed and paths remain relative so they work on Read the Docs.
+- Images should use repository‑relative paths under `doc/`, not raw GitHub URLs, to keep builds portable.
+
+By keeping docstrings NumPy‑style and Markdown pages consistent with these guidelines, Heat's MkDocs site remains readable, maintainable, and fully synchronized with the source code and API surface.
diff --git a/doc/getting_started.md b/doc/getting_started.md new file mode 100644 index 0000000000..0aa44edd5c --- /dev/null +++ b/doc/getting_started.md @@ -0,0 +1,101 @@
+# Getting Started
+
+Heat is a Python package for accelerated and distributed tensor computations. Internally, it is based on [PyTorch](https://pytorch.org/). Consequently, all operating systems that support Python and PyTorch also support a Heat installation. Currently, this list contains at least Linux, MacOS and Windows. However, most of our development is done under Linux and interoperability should therefore be optimal.
+
+## Prerequisites
+
+### Python
+
+Heat requires Python 3.7 or greater. You can check your Python version by running:
+
+```bash
+python3 --version
+```
+
+If you do not have a recent installation on your system, you may want to upgrade it.
+
+[Ubuntu](https://ubuntu.com/)/[Debian](https://www.debian.org/)/[Mint](https://www.linuxmint.com/)
+
+```bash
+sudo apt-get update && sudo apt-get install python3
+```
+
+[Fedora](https://getfedora.org/)/[CentOS](https://www.centos.org/)/[RHEL](https://www.redhat.com/de/technologies/linux-platforms/enterprise-linux)
+
+```bash
+sudo dnf update python3
+```
+
+If you have no administrator privileges on your system, because you are working on a cluster for example, make sure to check its *user guide*, the module system (`module spider python`), or get in touch with the administrators.
+
+### Optional Dependencies
+
+You can accelerate computations with Heat in different ways. For GPU acceleration, ensure that you have a [CUDA](https://developer.nvidia.com/cuda-zone) installation on your system. Distributed computations require an MPI stack on your computer. We recommend [MVAPICH](https://mvapich.cse.ohio-state.edu/) or [OpenMPI](https://www.open-mpi.org/). Finally, for parallel data I/O, Heat offers interfaces to [HDF5](https://www.hdfgroup.org/solutions/hdf5/) and [NetCDF](https://www.unidata.ucar.edu/software/netcdf/). You can obtain these packages using your operating system's package manager.
+
+## Installation
+
+### Virtual Environments
+
+We highly recommend using [virtual environments (venv)](https://docs.python.org/3/tutorial/venv.html) for managing your Python packages. A virtual environment is a self-contained directory tree for a particular Python version and its packages. It allows you not only to install packages, such as [pip](https://pypi.org/project/pip/), Python's package manager, without administrator privileges, but also to keep multiple package environments with different package versions in parallel.
+
+You can find the complete manual for venv in the [Python documentation](https://docs.python.org/3/tutorial/venv.html). Below is a small code snippet that creates a new virtual environment in your home directory (`~/.virtualenvs/heatenv`). The subsequent command enables the environment. You can access the Python interpreter by typing `python` and pip with `pip`.
+
+```bash
+python3 -m venv ~/.virtualenvs/heatenv
+source ~/.virtualenvs/heatenv/bin/activate
+```
+
+You can deactivate a virtual environment by executing:
+
+```bash
+deactivate
+```
+
+### pip
+
+Official Heat releases are made available on the [Python Package Index (PyPI)](https://pypi.org/). You can obtain the latest version by running:
+
+```bash
+pip install heat
+```
+
+Optionally, you can enable and install HDF5 and/or NetCDF support by adding the respective extra requirements as follows.
+
+```bash
+pip install 'heat[hdf5,netcdf]'
+```
+
+### Verification
+
+To ensure that Heat was installed correctly, you can run this tiny code snippet that creates a vector with 10 entries.
+
+```bash
+python -c "import heat as ht; print(ht.arange(10))"
+```
+
+You should see the following output:
+
+```bash
+DNDarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=ht.int32, device=cpu:0, split=None)
+```
+
+## Building From Source
+
+For most users, a Heat installation from pip is the simplest option. However, if you want to test out the latest features or contribute to Heat, you will need to build from source. First, clone our repository by running:
+
+```bash
+git clone https://github.com/helmholtz-analytics/heat.git
+```
+
+Afterwards, change to the cloned source code directory and run the setup scripts.
+ +```bash +$ cd heat +$ pip install -e '.[hdf5, netcdf]' +``` + +## Support Channels + +We use [StackOverflow](https://stackoverflow.com/tags/pyheat/) as a forum for questions about Heat. +If you do not find an answer to your question, then please ask a new question there and be sure to +tag it with "pyheat". diff --git a/doc/source/_static/images/GSoC-Horizontal.svg b/doc/images/GSoC-Horizontal.svg similarity index 100% rename from doc/source/_static/images/GSoC-Horizontal.svg rename to doc/images/GSoC-Horizontal.svg diff --git a/doc/source/_static/images/bsp.svg b/doc/images/bsp.svg similarity index 100% rename from doc/source/_static/images/bsp.svg rename to doc/images/bsp.svg diff --git a/doc/source/_static/images/clustering.png b/doc/images/clustering.png similarity index 100% rename from doc/source/_static/images/clustering.png rename to doc/images/clustering.png diff --git a/doc/source/_static/images/clustering_kmeans.png b/doc/images/clustering_kmeans.png similarity index 100% rename from doc/source/_static/images/clustering_kmeans.png rename to doc/images/clustering_kmeans.png diff --git a/doc/source/_static/images/data.png b/doc/images/data.png similarity index 100% rename from doc/source/_static/images/data.png rename to doc/images/data.png diff --git a/doc/source/_static/images/dlr_logo.svg b/doc/images/dlr_logo.svg similarity index 100% rename from doc/source/_static/images/dlr_logo.svg rename to doc/images/dlr_logo.svg diff --git a/doc/source/_static/images/fzj_logo.svg b/doc/images/fzj_logo.svg similarity index 100% rename from doc/source/_static/images/fzj_logo.svg rename to doc/images/fzj_logo.svg diff --git a/doc/source/_static/images/hSVD_bench_rank5.png b/doc/images/hSVD_bench_rank5.png similarity index 100% rename from doc/source/_static/images/hSVD_bench_rank5.png rename to doc/images/hSVD_bench_rank5.png diff --git a/doc/source/_static/images/hSVD_bench_rank50.png b/doc/images/hSVD_bench_rank50.png similarity index 100% rename from doc/source/_static/images/hSVD_bench_rank50.png rename to doc/images/hSVD_bench_rank50.png diff --git a/doc/source/_static/images/hSVD_bench_rank500.png b/doc/images/hSVD_bench_rank500.png similarity index 100% rename from doc/source/_static/images/hSVD_bench_rank500.png rename to doc/images/hSVD_bench_rank500.png diff --git a/doc/source/_static/images/heat_split_array.png b/doc/images/heat_split_array.png similarity index 100% rename from doc/source/_static/images/heat_split_array.png rename to doc/images/heat_split_array.png diff --git a/doc/source/_static/images/heat_split_array.svg b/doc/images/heat_split_array.svg similarity index 100% rename from doc/source/_static/images/heat_split_array.svg rename to doc/images/heat_split_array.svg diff --git a/doc/source/_static/images/heatvsdask_strong_smalldata_without.png b/doc/images/heatvsdask_strong_smalldata_without.png similarity index 100% rename from doc/source/_static/images/heatvsdask_strong_smalldata_without.png rename to doc/images/heatvsdask_strong_smalldata_without.png diff --git a/doc/source/_static/images/heatvsdask_weak_smalldata_without.png b/doc/images/heatvsdask_weak_smalldata_without.png similarity index 100% rename from doc/source/_static/images/heatvsdask_weak_smalldata_without.png rename to doc/images/heatvsdask_weak_smalldata_without.png diff --git a/doc/source/_static/images/helmholtz_logo.svg b/doc/images/helmholtz_logo.svg similarity index 100% rename from doc/source/_static/images/helmholtz_logo.svg rename to doc/images/helmholtz_logo.svg diff --git 
a/doc/source/_static/images/jsc_logo.png b/doc/images/jsc_logo.png similarity index 100% rename from doc/source/_static/images/jsc_logo.png rename to doc/images/jsc_logo.png diff --git a/doc/source/_static/images/jupyter.png b/doc/images/jupyter.png similarity index 100% rename from doc/source/_static/images/jupyter.png rename to doc/images/jupyter.png diff --git a/doc/source/_static/images/kit_logo.svg b/doc/images/kit_logo.svg similarity index 100% rename from doc/source/_static/images/kit_logo.svg rename to doc/images/kit_logo.svg diff --git a/doc/source/_static/images/local_laptop.png b/doc/images/local_laptop.png similarity index 100% rename from doc/source/_static/images/local_laptop.png rename to doc/images/local_laptop.png diff --git a/doc/source/_static/images/logo.png b/doc/images/logo.png similarity index 100% rename from doc/source/_static/images/logo.png rename to doc/images/logo.png diff --git a/doc/source/_static/images/logo_emblem.png b/doc/images/logo_emblem.png similarity index 100% rename from doc/source/_static/images/logo_emblem.png rename to doc/images/logo_emblem.png diff --git a/doc/source/_static/images/logo_emblem.svg b/doc/images/logo_emblem.svg similarity index 100% rename from doc/source/_static/images/logo_emblem.svg rename to doc/images/logo_emblem.svg diff --git a/doc/source/_static/images/logo_white.png b/doc/images/logo_white.png similarity index 100% rename from doc/source/_static/images/logo_white.png rename to doc/images/logo_white.png diff --git a/doc/source/_static/images/logo_white.svg b/doc/images/logo_white.svg similarity index 100% rename from doc/source/_static/images/logo_white.svg rename to doc/images/logo_white.svg diff --git a/doc/source/_static/images/nhr_verein_logo.jpg b/doc/images/nhr_verein_logo.jpg similarity index 100% rename from doc/source/_static/images/nhr_verein_logo.jpg rename to doc/images/nhr_verein_logo.jpg diff --git a/doc/source/_static/images/perun_logo.svg b/doc/images/perun_logo.svg similarity index 100% rename from doc/source/_static/images/perun_logo.svg rename to doc/images/perun_logo.svg diff --git a/doc/source/_static/images/split_array.png b/doc/images/split_array.png similarity index 100% rename from doc/source/_static/images/split_array.png rename to doc/images/split_array.png diff --git a/doc/source/_static/images/split_array.svg b/doc/images/split_array.svg similarity index 100% rename from doc/source/_static/images/split_array.svg rename to doc/images/split_array.svg diff --git a/doc/source/_static/images/tutorial_clustering.svg b/doc/images/tutorial_clustering.svg similarity index 100% rename from doc/source/_static/images/tutorial_clustering.svg rename to doc/images/tutorial_clustering.svg diff --git a/doc/source/_static/images/tutorial_dpnn.svg b/doc/images/tutorial_dpnn.svg similarity index 100% rename from doc/source/_static/images/tutorial_dpnn.svg rename to doc/images/tutorial_dpnn.svg diff --git a/doc/source/_static/images/tutorial_logo.svg b/doc/images/tutorial_logo.svg similarity index 100% rename from doc/source/_static/images/tutorial_logo.svg rename to doc/images/tutorial_logo.svg diff --git a/doc/source/_static/images/tutorial_split_dndarray.svg b/doc/images/tutorial_split_dndarray.svg similarity index 100% rename from doc/source/_static/images/tutorial_split_dndarray.svg rename to doc/images/tutorial_split_dndarray.svg diff --git a/doc/source/_static/images/weak_scaling_gpu_terrabyte.png b/doc/images/weak_scaling_gpu_terrabyte.png similarity index 100% rename from 
doc/source/_static/images/weak_scaling_gpu_terrabyte.png rename to doc/images/weak_scaling_gpu_terrabyte.png diff --git a/doc/index.md b/doc/index.md new file mode 100644 index 0000000000..4d06c5867e --- /dev/null +++ b/doc/index.md @@ -0,0 +1,23 @@ +# Heat — The Helmholtz Analytics Toolkit + +**Release:** {{ release }} + +Heat is a distributed tensor framework for high‑performance data analytics. + + +## Quick links + +> - [Introduction](introduction.md) +> - [Getting Started](getting_started.md) +> - [Heat Tutorials](tutorials/tutorial.md) +> - [Case Studies](case_studies.md) +> - [Documentation How-To](documentation_howto.md) + + +Explore the [API Reference](./api/heat/core/index.md), once your environment can import the package. + +--- + +Also visit us on [GitHub](https://github.com/helmholtz-analytics/heat) for more examples, documentation, code, and contributions. + +--- diff --git a/doc/source/introduction.rst b/doc/introduction.md similarity index 67% rename from doc/source/introduction.rst rename to doc/introduction.md index e318f1a973..c2d05760c9 100644 --- a/doc/source/introduction.rst +++ b/doc/introduction.md @@ -1,8 +1,6 @@ -Introduction -============ +# Introduction -Goal ----- +## Goal The goal of Heat is to fill the gap between machine learning libraries that have a strong focus on exploiting GPUs for performance, and traditional, distributed @@ -12,9 +10,8 @@ distributed tensor library with machine learning methods based on it. Among other things, the implementation will allow us to tackle use cases that would otherwise exceed memory limits of a single node. -Features --------- +## Features - * high-performance n-dimensional tensors - * CPU, GPU and distributed computation using MPI - * powerful machine learning methods using above mentioned tensors +> - High-performance n-dimensional tensors +> - CPU, GPU and distributed computation using MPI +> - Powerful machine learning methods using above mentioned tensors diff --git a/doc/make.bat b/doc/make.bat deleted file mode 100644 index 747ffb7b30..0000000000 --- a/doc/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. 
- echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..e822837fbd --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,8 @@ +mkdocs +mkdocs-material +mkdocstrings +mkdocstrings-python +mkdocs-git-revision-date-localized-plugin +mkdocs-macros-plugin +mkdocs-jupyter>=0.24.6 +pdoc3 diff --git a/doc/source/_static/.DS_Store b/doc/source/_static/.DS_Store deleted file mode 100644 index 887c1a0bea..0000000000 Binary files a/doc/source/_static/.DS_Store and /dev/null differ diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css deleted file mode 100644 index 553e7e5ea3..0000000000 --- a/doc/source/_static/css/custom.css +++ /dev/null @@ -1,7477 +0,0 @@ -/* sphinx_rtd_theme version 0.4.3 | MIT license */ - - -/* Built 20190212 16:02 */ -:root { - --blue: #007bff; - --indigo: #6610f2; - --purple: #6f42c1; - --pink: #e83e8c; - --red: #dc3545; - --orange: #fd7e14; - --yellow: #ffc107; - --green: #28a745; - --teal: #20c997; - --cyan: #17a2b8; - --white: #fff; - --gray: #6c757d; - --gray-dark: #343a40; - --primary: #007bff; - --secondary: #6c757d; - --success: #28a745; - --info: #17a2b8; - --warning: #ffc107; - --danger: #dc3545; - --light: #f8f9fa; - --dark: #343a40; - --hblue: #005aa0; - --hgreen: #8CB423; - --hgrey: #5a696e; - --hgrey-light: #e1e4e5; - --hred: #D23264; - --horange: #f0781e; - --horange-light: #fbe4d2; - --breakpoint-xs: 0; - --breakpoint-sm: 576px; - --breakpoint-md: 768px; - --breakpoint-lg: 992px; - --breakpoint-xl: 1200px; - --font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; - --font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; -} - -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box -} - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -nav, -section { - display: block -} - -audio, -canvas, -video { - display: inline-block; - *display: inline; - *zoom: 1 -} - -audio:not([controls]) { - display: none -} - -[hidden] { - display: none -} - -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box -} - -html { - font-size: 100%; - -webkit-text-size-adjust: 100%; - -ms-text-size-adjust: 100% -} - -body { - margin: 0 -} - -a:hover, -a:active { - outline: 0 -} - -abbr[title] { - border-bottom: 1px dotted -} - -b, -strong { - font-weight: bold -} - -blockquote { - margin: 0 -} - -dfn { - font-style: italic -} - -ins { - background: #ff9; - color: #000; - text-decoration: none -} - -mark { - background: #ff0; - color: #000; - font-style: italic; - font-weight: bold -} - -pre, -code, -.rst-content tt, -.rst-content code, -kbd, -samp { - font-family: monospace, serif; - _font-family: "courier new", monospace; - font-size: 1em -} - -pre { - white-space: pre -} - -q { - quotes: none -} - -q:before, -q:after { - content: ""; - content: none -} - -small { - font-size: 85% -} - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline -} - -sup { - top: -0.5em -} - -sub { - bottom: -0.25em -} - -ul, -ol, -dl { - 
margin: 0; - padding: 0; - list-style: none; - list-style-image: none -} - -li { - list-style: none -} - -dd { - margin: 0 -} - -img { - border: 0; - -ms-interpolation-mode: bicubic; - vertical-align: middle; - max-width: 100% -} - -svg:not(:root) { - overflow: hidden -} - -figure { - margin: 0 -} - -form { - margin: 0 -} - -fieldset { - border: 0; - margin: 0; - padding: 0 -} - -label { - cursor: pointer -} - -legend { - border: 0; - *margin-left: -7px; - padding: 0; - white-space: normal -} - -button, -input, -select, -textarea { - font-size: 100%; - margin: 0; - vertical-align: baseline; - *vertical-align: middle -} - -button, -input { - line-height: normal -} - -button, -input[type="button"], -input[type="reset"], -input[type="submit"] { - cursor: pointer; - -webkit-appearance: button; - *overflow: visible -} - -button[disabled], -input[disabled] { - cursor: default -} - -input[type="checkbox"], -input[type="radio"] { - box-sizing: border-box; - padding: 0; - *width: 13px; - *height: 13px -} - -input[type="search"] { - -webkit-appearance: textfield; - -moz-box-sizing: content-box; - -webkit-box-sizing: content-box; - box-sizing: content-box -} - -input[type="search"]::-webkit-search-decoration, -input[type="search"]::-webkit-search-cancel-button { - -webkit-appearance: none -} - -button::-moz-focus-inner, -input::-moz-focus-inner { - border: 0; - padding: 0 -} - -textarea { - overflow: auto; - vertical-align: top; - resize: vertical -} - -table { - border-collapse: collapse; - border-spacing: 0 -} - -td { - vertical-align: top -} - -.chromeframe { - margin: .2em 0; - background: #ccc; - color: #000; - padding: .2em 0 -} - -.ir { - display: block; - border: 0; - text-indent: -999em; - overflow: hidden; - background-color: transparent; - background-repeat: no-repeat; - text-align: left; - direction: ltr; - *line-height: 0 -} - -.ir br { - display: none -} - -.hidden { - display: none !important; - visibility: hidden -} - -.visuallyhidden { - border: 0; - clip: rect(0 0 0 0); - height: 1px; - margin: -1px; - overflow: hidden; - padding: 0; - position: absolute; - width: 1px -} - -.visuallyhidden.focusable:active, -.visuallyhidden.focusable:focus { - clip: auto; - height: auto; - margin: 0; - overflow: visible; - position: static; - width: auto -} - -.invisible { - visibility: hidden -} - -.relative { - position: relative -} - -big, -small { - font-size: 100% -} - -@media print { - html, - body, - section { - background: none !important - } - * { - box-shadow: none !important; - text-shadow: none !important; - filter: none !important; - -ms-filter: none !important - } - a, - a:visited { - text-decoration: underline - } - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: "" - } - pre, - blockquote { - page-break-inside: avoid - } - thead { - display: table-header-group - } - tr, - img { - page-break-inside: avoid - } - img { - max-width: 100% !important - } - @page { - margin: .5cm - } - p, - h2, - .rst-content .toctree-wrapper p.caption, - h3 { - orphans: 3; - widows: 3 - } - h2, - .rst-content .toctree-wrapper p.caption, - h3 { - page-break-after: avoid - } -} - -.fa:before, -.wy-menu-vertical li span.toctree-expand:before, -.wy-menu-vertical li.on a span.toctree-expand:before, -.wy-menu-vertical li.current>a span.toctree-expand:before, -.rst-content .admonition-title:before, -.rst-content h1 .headerlink:before, -.rst-content h2 .headerlink:before, -.rst-content h3 .headerlink:before, -.rst-content h4 .headerlink:before, -.rst-content h5 .headerlink:before, 
-.rst-content h6 .headerlink:before, -.rst-content dl dt .headerlink:before, -.rst-content p.caption .headerlink:before, -.rst-content table>caption .headerlink:before, -.rst-content .code-block-caption .headerlink:before, -.rst-content tt.download span:first-child:before, -.rst-content code.download span:first-child:before, -.icon:before, -.wy-dropdown .caret:before, -.wy-inline-validate.wy-inline-validate-success .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-info .wy-input-context:before, -.wy-alert, -.rst-content .note, -.rst-content .attention, -.rst-content .caution, -.rst-content .danger, -.rst-content .error, -.rst-content .hint, -.rst-content .important, -.rst-content .tip, -.rst-content .warning, -.rst-content .seealso, -.rst-content .admonition-todo, -.rst-content .admonition, -.btn, -input[type="text"], -input[type="password"], -input[type="email"], -input[type="url"], -input[type="date"], -input[type="month"], -input[type="time"], -input[type="datetime"], -input[type="datetime-local"], -input[type="week"], -input[type="number"], -input[type="search"], -input[type="tel"], -input[type="color"], -select, -textarea, -.wy-menu-vertical li.on a, -.wy-menu-vertical li.current>a, -.wy-side-nav-search>a, -.wy-side-nav-search .wy-dropdown>a, -.wy-nav-top a { - -webkit-font-smoothing: antialiased -} - -.clearfix { - *zoom: 1 -} - -.clearfix:before, -.clearfix:after { - display: table; - content: "" -} - -.clearfix:after { - clear: both -} - - -/*! - * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */ - -@font-face { - font-family: 'FontAwesome'; - src: url("../fonts/fontawesome-webfont.eot?v=4.7.0"); - src: url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"), url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"), url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"), url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"), url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg"); - font-weight: normal; - font-style: normal -} - -.fa, -.wy-menu-vertical li span.toctree-expand, -.wy-menu-vertical li.on a span.toctree-expand, -.wy-menu-vertical li.current>a span.toctree-expand, -.rst-content .admonition-title, -.rst-content h1 .headerlink, -.rst-content h2 .headerlink, -.rst-content h3 .headerlink, -.rst-content h4 .headerlink, -.rst-content h5 .headerlink, -.rst-content h6 .headerlink, -.rst-content dl dt .headerlink, -.rst-content p.caption .headerlink, -.rst-content table>caption .headerlink, -.rst-content .code-block-caption .headerlink, -.rst-content tt.download span:first-child, -.rst-content code.download span:first-child, -.icon { - display: inline-block; - font: normal normal normal 14px/1 FontAwesome; - font-size: inherit; - text-rendering: auto; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale -} - -.fa-lg { - font-size: 1.3333333333em; - line-height: .75em; - vertical-align: -15% -} - -.fa-2x { - font-size: 2em -} - -.fa-3x { - font-size: 3em -} - -.fa-4x { - font-size: 4em -} - -.fa-5x { - font-size: 5em -} - -.fa-fw { - width: 1.2857142857em; - text-align: center -} - -.fa-ul { - padding-left: 0; - margin-left: 2.1428571429em; - list-style-type: none -} - -.fa-ul>li { - position: relative -} - 
-.fa-li { - position: absolute; - left: -2.1428571429em; - width: 2.1428571429em; - top: .1428571429em; - text-align: center -} - -.fa-li.fa-lg { - left: -1.8571428571em -} - -.fa-border { - padding: .2em .25em .15em; - border: solid 0.08em #eee; - border-radius: .1em -} - -.fa-pull-left { - float: left -} - -.fa-pull-right { - float: right -} - -.fa.fa-pull-left, -.wy-menu-vertical li span.fa-pull-left.toctree-expand, -.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand, -.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand, -.rst-content .fa-pull-left.admonition-title, -.rst-content h1 .fa-pull-left.headerlink, -.rst-content h2 .fa-pull-left.headerlink, -.rst-content h3 .fa-pull-left.headerlink, -.rst-content h4 .fa-pull-left.headerlink, -.rst-content h5 .fa-pull-left.headerlink, -.rst-content h6 .fa-pull-left.headerlink, -.rst-content dl dt .fa-pull-left.headerlink, -.rst-content p.caption .fa-pull-left.headerlink, -.rst-content table>caption .fa-pull-left.headerlink, -.rst-content .code-block-caption .fa-pull-left.headerlink, -.rst-content tt.download span.fa-pull-left:first-child, -.rst-content code.download span.fa-pull-left:first-child, -.fa-pull-left.icon { - margin-right: .3em -} - -.fa.fa-pull-right, -.wy-menu-vertical li span.fa-pull-right.toctree-expand, -.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand, -.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand, -.rst-content .fa-pull-right.admonition-title, -.rst-content h1 .fa-pull-right.headerlink, -.rst-content h2 .fa-pull-right.headerlink, -.rst-content h3 .fa-pull-right.headerlink, -.rst-content h4 .fa-pull-right.headerlink, -.rst-content h5 .fa-pull-right.headerlink, -.rst-content h6 .fa-pull-right.headerlink, -.rst-content dl dt .fa-pull-right.headerlink, -.rst-content p.caption .fa-pull-right.headerlink, -.rst-content table>caption .fa-pull-right.headerlink, -.rst-content .code-block-caption .fa-pull-right.headerlink, -.rst-content tt.download span.fa-pull-right:first-child, -.rst-content code.download span.fa-pull-right:first-child, -.fa-pull-right.icon { - margin-left: .3em -} - -.pull-right { - float: right -} - -.pull-left { - float: left -} - -.fa.pull-left, -.wy-menu-vertical li span.pull-left.toctree-expand, -.wy-menu-vertical li.on a span.pull-left.toctree-expand, -.wy-menu-vertical li.current>a span.pull-left.toctree-expand, -.rst-content .pull-left.admonition-title, -.rst-content h1 .pull-left.headerlink, -.rst-content h2 .pull-left.headerlink, -.rst-content h3 .pull-left.headerlink, -.rst-content h4 .pull-left.headerlink, -.rst-content h5 .pull-left.headerlink, -.rst-content h6 .pull-left.headerlink, -.rst-content dl dt .pull-left.headerlink, -.rst-content p.caption .pull-left.headerlink, -.rst-content table>caption .pull-left.headerlink, -.rst-content .code-block-caption .pull-left.headerlink, -.rst-content tt.download span.pull-left:first-child, -.rst-content code.download span.pull-left:first-child, -.pull-left.icon { - margin-right: .3em -} - -.fa.pull-right, -.wy-menu-vertical li span.pull-right.toctree-expand, -.wy-menu-vertical li.on a span.pull-right.toctree-expand, -.wy-menu-vertical li.current>a span.pull-right.toctree-expand, -.rst-content .pull-right.admonition-title, -.rst-content h1 .pull-right.headerlink, -.rst-content h2 .pull-right.headerlink, -.rst-content h3 .pull-right.headerlink, -.rst-content h4 .pull-right.headerlink, -.rst-content h5 .pull-right.headerlink, -.rst-content h6 .pull-right.headerlink, -.rst-content dl dt .pull-right.headerlink, 
-.rst-content p.caption .pull-right.headerlink, -.rst-content table>caption .pull-right.headerlink, -.rst-content .code-block-caption .pull-right.headerlink, -.rst-content tt.download span.pull-right:first-child, -.rst-content code.download span.pull-right:first-child, -.pull-right.icon { - margin-left: .3em -} - -.fa-spin { - -webkit-animation: fa-spin 2s infinite linear; - animation: fa-spin 2s infinite linear -} - -.fa-pulse { - -webkit-animation: fa-spin 1s infinite steps(8); - animation: fa-spin 1s infinite steps(8) -} - -@-webkit-keyframes fa-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg) - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg) - } -} - -@keyframes fa-spin { - 0% { - -webkit-transform: rotate(0deg); - transform: rotate(0deg) - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg) - } -} - -.fa-rotate-90 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)"; - -webkit-transform: rotate(90deg); - -ms-transform: rotate(90deg); - transform: rotate(90deg) -} - -.fa-rotate-180 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)"; - -webkit-transform: rotate(180deg); - -ms-transform: rotate(180deg); - transform: rotate(180deg) -} - -.fa-rotate-270 { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)"; - -webkit-transform: rotate(270deg); - -ms-transform: rotate(270deg); - transform: rotate(270deg) -} - -.fa-flip-horizontal { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)"; - -webkit-transform: scale(-1, 1); - -ms-transform: scale(-1, 1); - transform: scale(-1, 1) -} - -.fa-flip-vertical { - -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"; - -webkit-transform: scale(1, -1); - -ms-transform: scale(1, -1); - transform: scale(1, -1) -} - -:root .fa-rotate-90, -:root .fa-rotate-180, -:root .fa-rotate-270, -:root .fa-flip-horizontal, -:root .fa-flip-vertical { - filter: none -} - -.fa-stack { - position: relative; - display: inline-block; - width: 2em; - height: 2em; - line-height: 2em; - vertical-align: middle -} - -.fa-stack-1x, -.fa-stack-2x { - position: absolute; - left: 0; - width: 100%; - text-align: center -} - -.fa-stack-1x { - line-height: inherit -} - -.fa-stack-2x { - font-size: 2em -} - -.fa-inverse { - color: #fff -} - -.fa-glass:before { - content: "" -} - -.fa-music:before { - content: "" -} - -.fa-search:before, -.icon-search:before { - content: "" -} - -.fa-envelope-o:before { - content: "" -} - -.fa-heart:before { - content: "" -} - -.fa-star:before { - content: "" -} - -.fa-star-o:before { - content: "" -} - -.fa-user:before { - content: "" -} - -.fa-film:before { - content: "" -} - -.fa-th-large:before { - content: "" -} - -.fa-th:before { - content: "" -} - -.fa-th-list:before { - content: "" -} - -.fa-check:before { - content: "" -} - -.fa-remove:before, -.fa-close:before, -.fa-times:before { - content: "" -} - -.fa-search-plus:before { - content: "" -} - -.fa-search-minus:before { - content: "" -} - -.fa-power-off:before { - content: "" -} - -.fa-signal:before { - content: "" -} - -.fa-gear:before, -.fa-cog:before { - content: "" -} - -.fa-trash-o:before { - content: "" -} - -.fa-home:before, -.icon-home:before { - content: "" -} - -.fa-file-o:before { - content: "" -} - -.fa-clock-o:before { - content: "" -} - -.fa-road:before { - content: "" -} - -.fa-download:before, -.rst-content tt.download span:first-child:before, -.rst-content 
code.download span:first-child:before { - content: "" -} - -.fa-arrow-circle-o-down:before { - content: "" -} - -.fa-arrow-circle-o-up:before { - content: "" -} - -.fa-inbox:before { - content: "" -} - -.fa-play-circle-o:before { - content: "" -} - -.fa-rotate-right:before, -.fa-repeat:before { - content: "" -} - -.fa-refresh:before { - content: "" -} - -.fa-list-alt:before { - content: "" -} - -.fa-lock:before { - content: "" -} - -.fa-flag:before { - content: "" -} - -.fa-headphones:before { - content: "" -} - -.fa-volume-off:before { - content: "" -} - -.fa-volume-down:before { - content: "" -} - -.fa-volume-up:before { - content: "" -} - -.fa-qrcode:before { - content: "" -} - -.fa-barcode:before { - content: "" -} - -.fa-tag:before { - content: "" -} - -.fa-tags:before { - content: "" -} - -.fa-book:before, -.icon-book:before { - content: "" -} - -.fa-bookmark:before { - content: "" -} - -.fa-print:before { - content: "" -} - -.fa-camera:before { - content: "" -} - -.fa-font:before { - content: "" -} - -.fa-bold:before { - content: "" -} - -.fa-italic:before { - content: "" -} - -.fa-text-height:before { - content: "" -} - -.fa-text-width:before { - content: "" -} - -.fa-align-left:before { - content: "" -} - -.fa-align-center:before { - content: "" -} - -.fa-align-right:before { - content: "" -} - -.fa-align-justify:before { - content: "" -} - -.fa-list:before { - content: "" -} - -.fa-dedent:before, -.fa-outdent:before { - content: "" -} - -.fa-indent:before { - content: "" -} - -.fa-video-camera:before { - content: "" -} - -.fa-photo:before, -.fa-image:before, -.fa-picture-o:before { - content: "" -} - -.fa-pencil:before { - content: "" -} - -.fa-map-marker:before { - content: "" -} - -.fa-adjust:before { - content: "" -} - -.fa-tint:before { - content: "" -} - -.fa-edit:before, -.fa-pencil-square-o:before { - content: "" -} - -.fa-share-square-o:before { - content: "" -} - -.fa-check-square-o:before { - content: "" -} - -.fa-arrows:before { - content: "" -} - -.fa-step-backward:before { - content: "" -} - -.fa-fast-backward:before { - content: "" -} - -.fa-backward:before { - content: "" -} - -.fa-play:before { - content: "" -} - -.fa-pause:before { - content: "" -} - -.fa-stop:before { - content: "" -} - -.fa-forward:before { - content: "" -} - -.fa-fast-forward:before { - content: "" -} - -.fa-step-forward:before { - content: "" -} - -.fa-eject:before { - content: "" -} - -.fa-chevron-left:before { - content: "" -} - -.fa-chevron-right:before { - content: "" -} - -.fa-plus-circle:before { - content: "" -} - -.fa-minus-circle:before { - content: "" -} - -.fa-times-circle:before, -.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before { - content: "" -} - -.fa-check-circle:before, -.wy-inline-validate.wy-inline-validate-success .wy-input-context:before { - content: "" -} - -.fa-question-circle:before { - content: "" -} - -.fa-info-circle:before { - content: "" -} - -.fa-crosshairs:before { - content: "" -} - -.fa-times-circle-o:before { - content: "" -} - -.fa-check-circle-o:before { - content: "" -} - -.fa-ban:before { - content: "" -} - -.fa-arrow-left:before { - content: "" -} - -.fa-arrow-right:before { - content: "" -} - -.fa-arrow-up:before { - content: "" -} - -.fa-arrow-down:before { - content: "" -} - -.fa-mail-forward:before, -.fa-share:before { - content: "" -} - -.fa-expand:before { - content: "" -} - -.fa-compress:before { - content: "" -} - -.fa-plus:before { - content: "" -} - 
-.fa-minus:before { - content: "" -} - -.fa-asterisk:before { - content: "" -} - -.fa-exclamation-circle:before, -.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-info .wy-input-context:before, -.rst-content .admonition-title:before { - content: "" -} - -.fa-gift:before { - content: "" -} - -.fa-leaf:before { - content: "" -} - -.fa-fire:before, -.icon-fire:before { - content: "" -} - -.fa-eye:before { - content: "" -} - -.fa-eye-slash:before { - content: "" -} - -.fa-warning:before, -.fa-exclamation-triangle:before { - content: "" -} - -.fa-plane:before { - content: "" -} - -.fa-calendar:before { - content: "" -} - -.fa-random:before { - content: "" -} - -.fa-comment:before { - content: "" -} - -.fa-magnet:before { - content: "" -} - -.fa-chevron-up:before { - content: "" -} - -.fa-chevron-down:before { - content: "" -} - -.fa-retweet:before { - content: "" -} - -.fa-shopping-cart:before { - content: "" -} - -.fa-folder:before { - content: "" -} - -.fa-folder-open:before { - content: "" -} - -.fa-arrows-v:before { - content: "" -} - -.fa-arrows-h:before { - content: "" -} - -.fa-bar-chart-o:before, -.fa-bar-chart:before { - content: "" -} - -.fa-twitter-square:before { - content: "" -} - -.fa-facebook-square:before { - content: "" -} - -.fa-camera-retro:before { - content: "" -} - -.fa-key:before { - content: "" -} - -.fa-gears:before, -.fa-cogs:before { - content: "" -} - -.fa-comments:before { - content: "" -} - -.fa-thumbs-o-up:before { - content: "" -} - -.fa-thumbs-o-down:before { - content: "" -} - -.fa-star-half:before { - content: "" -} - -.fa-heart-o:before { - content: "" -} - -.fa-sign-out:before { - content: "" -} - -.fa-linkedin-square:before { - content: "" -} - -.fa-thumb-tack:before { - content: "" -} - -.fa-external-link:before { - content: "" -} - -.fa-sign-in:before { - content: "" -} - -.fa-trophy:before { - content: "" -} - -.fa-github-square:before { - content: "" -} - -.fa-upload:before { - content: "" -} - -.fa-lemon-o:before { - content: "" -} - -.fa-phone:before { - content: "" -} - -.fa-square-o:before { - content: "" -} - -.fa-bookmark-o:before { - content: "" -} - -.fa-phone-square:before { - content: "" -} - -.fa-twitter:before { - content: "" -} - -.fa-facebook-f:before, -.fa-facebook:before { - content: "" -} - -.fa-github:before, -.icon-github:before { - content: "" -} - -.fa-unlock:before { - content: "" -} - -.fa-credit-card:before { - content: "" -} - -.fa-feed:before, -.fa-rss:before { - content: "" -} - -.fa-hdd-o:before { - content: "" -} - -.fa-bullhorn:before { - content: "" -} - -.fa-bell:before { - content: "" -} - -.fa-certificate:before { - content: "" -} - -.fa-hand-o-right:before { - content: "" -} - -.fa-hand-o-left:before { - content: "" -} - -.fa-hand-o-up:before { - content: "" -} - -.fa-hand-o-down:before { - content: "" -} - -.fa-arrow-circle-left:before, -.icon-circle-arrow-left:before { - content: "" -} - -.fa-arrow-circle-right:before, -.icon-circle-arrow-right:before { - content: "" -} - -.fa-arrow-circle-up:before { - content: "" -} - -.fa-arrow-circle-down:before { - content: "" -} - -.fa-globe:before { - content: "" -} - -.fa-wrench:before { - content: "" -} - -.fa-tasks:before { - content: "" -} - -.fa-filter:before { - content: "" -} - -.fa-briefcase:before { - content: "" -} - -.fa-arrows-alt:before { - content: "" -} - -.fa-group:before, -.fa-users:before { - content: "" -} - 
-.fa-chain:before, -.fa-link:before, -.icon-link:before { - content: "" -} - -.fa-cloud:before { - content: "" -} - -.fa-flask:before { - content: "" -} - -.fa-cut:before, -.fa-scissors:before { - content: "" -} - -.fa-copy:before, -.fa-files-o:before { - content: "" -} - -.fa-paperclip:before { - content: "" -} - -.fa-save:before, -.fa-floppy-o:before { - content: "" -} - -.fa-square:before { - content: "" -} - -.fa-navicon:before, -.fa-reorder:before, -.fa-bars:before { - content: "" -} - -.fa-list-ul:before { - content: "" -} - -.fa-list-ol:before { - content: "" -} - -.fa-strikethrough:before { - content: "" -} - -.fa-underline:before { - content: "" -} - -.fa-table:before { - content: "" -} - -.fa-magic:before { - content: "" -} - -.fa-truck:before { - content: "" -} - -.fa-pinterest:before { - content: "" -} - -.fa-pinterest-square:before { - content: "" -} - -.fa-google-plus-square:before { - content: "" -} - -.fa-google-plus:before { - content: "" -} - -.fa-money:before { - content: "" -} - -.fa-caret-down:before, -.wy-dropdown .caret:before, -.icon-caret-down:before { - content: "" -} - -.fa-caret-up:before { - content: "" -} - -.fa-caret-left:before { - content: "" -} - -.fa-caret-right:before { - content: "" -} - -.fa-columns:before { - content: "" -} - -.fa-unsorted:before, -.fa-sort:before { - content: "" -} - -.fa-sort-down:before, -.fa-sort-desc:before { - content: "" -} - -.fa-sort-up:before, -.fa-sort-asc:before { - content: "" -} - -.fa-envelope:before { - content: "" -} - -.fa-linkedin:before { - content: "" -} - -.fa-rotate-left:before, -.fa-undo:before { - content: "" -} - -.fa-legal:before, -.fa-gavel:before { - content: "" -} - -.fa-dashboard:before, -.fa-tachometer:before { - content: "" -} - -.fa-comment-o:before { - content: "" -} - -.fa-comments-o:before { - content: "" -} - -.fa-flash:before, -.fa-bolt:before { - content: "" -} - -.fa-sitemap:before { - content: "" -} - -.fa-umbrella:before { - content: "" -} - -.fa-paste:before, -.fa-clipboard:before { - content: "" -} - -.fa-lightbulb-o:before { - content: "" -} - -.fa-exchange:before { - content: "" -} - -.fa-cloud-download:before { - content: "" -} - -.fa-cloud-upload:before { - content: "" -} - -.fa-user-md:before { - content: "" -} - -.fa-stethoscope:before { - content: "" -} - -.fa-suitcase:before { - content: "" -} - -.fa-bell-o:before { - content: "" -} - -.fa-coffee:before { - content: "" -} - -.fa-cutlery:before { - content: "" -} - -.fa-file-text-o:before { - content: "" -} - -.fa-building-o:before { - content: "" -} - -.fa-hospital-o:before { - content: "" -} - -.fa-ambulance:before { - content: "" -} - -.fa-medkit:before { - content: "" -} - -.fa-fighter-jet:before { - content: "" -} - -.fa-beer:before { - content: "" -} - -.fa-h-square:before { - content: "" -} - -.fa-plus-square:before { - content: "" -} - -.fa-angle-double-left:before { - content: "" -} - -.fa-angle-double-right:before { - content: "" -} - -.fa-angle-double-up:before { - content: "" -} - -.fa-angle-double-down:before { - content: "" -} - -.fa-angle-left:before { - content: "" -} - -.fa-angle-right:before { - content: "" -} - -.fa-angle-up:before { - content: "" -} - -.fa-angle-down:before { - content: "" -} - -.fa-desktop:before { - content: "" -} - -.fa-laptop:before { - content: "" -} - -.fa-tablet:before { - content: "" -} - -.fa-mobile-phone:before, -.fa-mobile:before { - content: "" -} - -.fa-circle-o:before { - content: "" -} - -.fa-quote-left:before { 
- content: "" -} - -.fa-quote-right:before { - content: "" -} - -.fa-spinner:before { - content: "" -} - -.fa-circle:before { - content: "" -} - -.fa-mail-reply:before, -.fa-reply:before { - content: "" -} - -.fa-github-alt:before { - content: "" -} - -.fa-folder-o:before { - content: "" -} - -.fa-folder-open-o:before { - content: "" -} - -.fa-smile-o:before { - content: "" -} - -.fa-frown-o:before { - content: "" -} - -.fa-meh-o:before { - content: "" -} - -.fa-gamepad:before { - content: "" -} - -.fa-keyboard-o:before { - content: "" -} - -.fa-flag-o:before { - content: "" -} - -.fa-flag-checkered:before { - content: "" -} - -.fa-terminal:before { - content: "" -} - -.fa-code:before { - content: "" -} - -.fa-mail-reply-all:before, -.fa-reply-all:before { - content: "" -} - -.fa-star-half-empty:before, -.fa-star-half-full:before, -.fa-star-half-o:before { - content: "" -} - -.fa-location-arrow:before { - content: "" -} - -.fa-crop:before { - content: "" -} - -.fa-code-fork:before { - content: "" -} - -.fa-unlink:before, -.fa-chain-broken:before { - content: "" -} - -.fa-question:before { - content: "" -} - -.fa-info:before { - content: "" -} - -.fa-exclamation:before { - content: "" -} - -.fa-superscript:before { - content: "" -} - -.fa-subscript:before { - content: "" -} - -.fa-eraser:before { - content: "" -} - -.fa-puzzle-piece:before { - content: "" -} - -.fa-microphone:before { - content: "" -} - -.fa-microphone-slash:before { - content: "" -} - -.fa-shield:before { - content: "" -} - -.fa-calendar-o:before { - content: "" -} - -.fa-fire-extinguisher:before { - content: "" -} - -.fa-rocket:before { - content: "" -} - -.fa-maxcdn:before { - content: "" -} - -.fa-chevron-circle-left:before { - content: "" -} - -.fa-chevron-circle-right:before { - content: "" -} - -.fa-chevron-circle-up:before { - content: "" -} - -.fa-chevron-circle-down:before { - content: "" -} - -.fa-html5:before { - content: "" -} - -.fa-css3:before { - content: "" -} - -.fa-anchor:before { - content: "" -} - -.fa-unlock-alt:before { - content: "" -} - -.fa-bullseye:before { - content: "" -} - -.fa-ellipsis-h:before { - content: "" -} - -.fa-ellipsis-v:before { - content: "" -} - -.fa-rss-square:before { - content: "" -} - -.fa-play-circle:before { - content: "" -} - -.fa-ticket:before { - content: "" -} - -.fa-minus-square:before { - content: "" -} - -.fa-minus-square-o:before, -.wy-menu-vertical li.on a span.toctree-expand:before, -.wy-menu-vertical li.current>a span.toctree-expand:before { - content: "" -} - -.fa-level-up:before { - content: "" -} - -.fa-level-down:before { - content: "" -} - -.fa-check-square:before { - content: "" -} - -.fa-pencil-square:before { - content: "" -} - -.fa-external-link-square:before { - content: "" -} - -.fa-share-square:before { - content: "" -} - -.fa-compass:before { - content: "" -} - -.fa-toggle-down:before, -.fa-caret-square-o-down:before { - content: "" -} - -.fa-toggle-up:before, -.fa-caret-square-o-up:before { - content: "" -} - -.fa-toggle-right:before, -.fa-caret-square-o-right:before { - content: "" -} - -.fa-euro:before, -.fa-eur:before { - content: "" -} - -.fa-gbp:before { - content: "" -} - -.fa-dollar:before, -.fa-usd:before { - content: "" -} - -.fa-rupee:before, -.fa-inr:before { - content: "" -} - -.fa-cny:before, -.fa-rmb:before, -.fa-yen:before, -.fa-jpy:before { - content: "" -} - -.fa-ruble:before, -.fa-rouble:before, -.fa-rub:before { - content: "" -} - -.fa-won:before, -.fa-krw:before 
{ - content: "" -} - -.fa-bitcoin:before, -.fa-btc:before { - content: "" -} - -.fa-file:before { - content: "" -} - -.fa-file-text:before { - content: "" -} - -.fa-sort-alpha-asc:before { - content: "" -} - -.fa-sort-alpha-desc:before { - content: "" -} - -.fa-sort-amount-asc:before { - content: "" -} - -.fa-sort-amount-desc:before { - content: "" -} - -.fa-sort-numeric-asc:before { - content: "" -} - -.fa-sort-numeric-desc:before { - content: "" -} - -.fa-thumbs-up:before { - content: "" -} - -.fa-thumbs-down:before { - content: "" -} - -.fa-youtube-square:before { - content: "" -} - -.fa-youtube:before { - content: "" -} - -.fa-xing:before { - content: "" -} - -.fa-xing-square:before { - content: "" -} - -.fa-youtube-play:before { - content: "" -} - -.fa-dropbox:before { - content: "" -} - -.fa-stack-overflow:before { - content: "" -} - -.fa-instagram:before { - content: "" -} - -.fa-flickr:before { - content: "" -} - -.fa-adn:before { - content: "" -} - -.fa-bitbucket:before, -.icon-bitbucket:before { - content: "" -} - -.fa-bitbucket-square:before { - content: "" -} - -.fa-tumblr:before { - content: "" -} - -.fa-tumblr-square:before { - content: "" -} - -.fa-long-arrow-down:before { - content: "" -} - -.fa-long-arrow-up:before { - content: "" -} - -.fa-long-arrow-left:before { - content: "" -} - -.fa-long-arrow-right:before { - content: "" -} - -.fa-apple:before { - content: "" -} - -.fa-windows:before { - content: "" -} - -.fa-android:before { - content: "" -} - -.fa-linux:before { - content: "" -} - -.fa-dribbble:before { - content: "" -} - -.fa-skype:before { - content: "" -} - -.fa-foursquare:before { - content: "" -} - -.fa-trello:before { - content: "" -} - -.fa-female:before { - content: "" -} - -.fa-male:before { - content: "" -} - -.fa-gittip:before, -.fa-gratipay:before { - content: "" -} - -.fa-sun-o:before { - content: "" -} - -.fa-moon-o:before { - content: "" -} - -.fa-archive:before { - content: "" -} - -.fa-bug:before { - content: "" -} - -.fa-vk:before { - content: "" -} - -.fa-weibo:before { - content: "" -} - -.fa-renren:before { - content: "" -} - -.fa-pagelines:before { - content: "" -} - -.fa-stack-exchange:before { - content: "" -} - -.fa-arrow-circle-o-right:before { - content: "" -} - -.fa-arrow-circle-o-left:before { - content: "" -} - -.fa-toggle-left:before, -.fa-caret-square-o-left:before { - content: "" -} - -.fa-dot-circle-o:before { - content: "" -} - -.fa-wheelchair:before { - content: "" -} - -.fa-vimeo-square:before { - content: "" -} - -.fa-turkish-lira:before, -.fa-try:before { - content: "" -} - -.fa-plus-square-o:before, -.wy-menu-vertical li span.toctree-expand:before { - content: "" -} - -.fa-space-shuttle:before { - content: "" -} - -.fa-slack:before { - content: "" -} - -.fa-envelope-square:before { - content: "" -} - -.fa-wordpress:before { - content: "" -} - -.fa-openid:before { - content: "" -} - -.fa-institution:before, -.fa-bank:before, -.fa-university:before { - content: "" -} - -.fa-mortar-board:before, -.fa-graduation-cap:before { - content: "" -} - -.fa-yahoo:before { - content: "" -} - -.fa-google:before { - content: "" -} - -.fa-reddit:before { - content: "" -} - -.fa-reddit-square:before { - content: "" -} - -.fa-stumbleupon-circle:before { - content: "" -} - -.fa-stumbleupon:before { - content: "" -} - -.fa-delicious:before { - content: "" -} - -.fa-digg:before { - content: "" -} - -.fa-pied-piper-pp:before { - content: "" -} - -.fa-pied-piper-alt:before { 
- content: "" -} - -.fa-drupal:before { - content: "" -} - -.fa-joomla:before { - content: "" -} - -.fa-language:before { - content: "" -} - -.fa-fax:before { - content: "" -} - -.fa-building:before { - content: "" -} - -.fa-child:before { - content: "" -} - -.fa-paw:before { - content: "" -} - -.fa-spoon:before { - content: "" -} - -.fa-cube:before { - content: "" -} - -.fa-cubes:before { - content: "" -} - -.fa-behance:before { - content: "" -} - -.fa-behance-square:before { - content: "" -} - -.fa-steam:before { - content: "" -} - -.fa-steam-square:before { - content: "" -} - -.fa-recycle:before { - content: "" -} - -.fa-automobile:before, -.fa-car:before { - content: "" -} - -.fa-cab:before, -.fa-taxi:before { - content: "" -} - -.fa-tree:before { - content: "" -} - -.fa-spotify:before { - content: "" -} - -.fa-deviantart:before { - content: "" -} - -.fa-soundcloud:before { - content: "" -} - -.fa-database:before { - content: "" -} - -.fa-file-pdf-o:before { - content: "" -} - -.fa-file-word-o:before { - content: "" -} - -.fa-file-excel-o:before { - content: "" -} - -.fa-file-powerpoint-o:before { - content: "" -} - -.fa-file-photo-o:before, -.fa-file-picture-o:before, -.fa-file-image-o:before { - content: "" -} - -.fa-file-zip-o:before, -.fa-file-archive-o:before { - content: "" -} - -.fa-file-sound-o:before, -.fa-file-audio-o:before { - content: "" -} - -.fa-file-movie-o:before, -.fa-file-video-o:before { - content: "" -} - -.fa-file-code-o:before { - content: "" -} - -.fa-vine:before { - content: "" -} - -.fa-codepen:before { - content: "" -} - -.fa-jsfiddle:before { - content: "" -} - -.fa-life-bouy:before, -.fa-life-buoy:before, -.fa-life-saver:before, -.fa-support:before, -.fa-life-ring:before { - content: "" -} - -.fa-circle-o-notch:before { - content: "" -} - -.fa-ra:before, -.fa-resistance:before, -.fa-rebel:before { - content: "" -} - -.fa-ge:before, -.fa-empire:before { - content: "" -} - -.fa-git-square:before { - content: "" -} - -.fa-git:before { - content: "" -} - -.fa-y-combinator-square:before, -.fa-yc-square:before, -.fa-hacker-news:before { - content: "" -} - -.fa-tencent-weibo:before { - content: "" -} - -.fa-qq:before { - content: "" -} - -.fa-wechat:before, -.fa-weixin:before { - content: "" -} - -.fa-send:before, -.fa-paper-plane:before { - content: "" -} - -.fa-send-o:before, -.fa-paper-plane-o:before { - content: "" -} - -.fa-history:before { - content: "" -} - -.fa-circle-thin:before { - content: "" -} - -.fa-header:before { - content: "" -} - -.fa-paragraph:before { - content: "" -} - -.fa-sliders:before { - content: "" -} - -.fa-share-alt:before { - content: "" -} - -.fa-share-alt-square:before { - content: "" -} - -.fa-bomb:before { - content: "" -} - -.fa-soccer-ball-o:before, -.fa-futbol-o:before { - content: "" -} - -.fa-tty:before { - content: "" -} - -.fa-binoculars:before { - content: "" -} - -.fa-plug:before { - content: "" -} - -.fa-slideshare:before { - content: "" -} - -.fa-twitch:before { - content: "" -} - -.fa-yelp:before { - content: "" -} - -.fa-newspaper-o:before { - content: "" -} - -.fa-wifi:before { - content: "" -} - -.fa-calculator:before { - content: "" -} - -.fa-paypal:before { - content: "" -} - -.fa-google-wallet:before { - content: "" -} - -.fa-cc-visa:before { - content: "" -} - -.fa-cc-mastercard:before { - content: "" -} - -.fa-cc-discover:before { - content: "" -} - -.fa-cc-amex:before { - content: "" -} - -.fa-cc-paypal:before { - content: "" -} - 
-.fa-cc-stripe:before { - content: "" -} - -.fa-bell-slash:before { - content: "" -} - -.fa-bell-slash-o:before { - content: "" -} - -.fa-trash:before { - content: "" -} - -.fa-copyright:before { - content: "" -} - -.fa-at:before { - content: "" -} - -.fa-eyedropper:before { - content: "" -} - -.fa-paint-brush:before { - content: "" -} - -.fa-birthday-cake:before { - content: "" -} - -.fa-area-chart:before { - content: "" -} - -.fa-pie-chart:before { - content: "" -} - -.fa-line-chart:before { - content: "" -} - -.fa-lastfm:before { - content: "" -} - -.fa-lastfm-square:before { - content: "" -} - -.fa-toggle-off:before { - content: "" -} - -.fa-toggle-on:before { - content: "" -} - -.fa-bicycle:before { - content: "" -} - -.fa-bus:before { - content: "" -} - -.fa-ioxhost:before { - content: "" -} - -.fa-angellist:before { - content: "" -} - -.fa-cc:before { - content: "" -} - -.fa-shekel:before, -.fa-sheqel:before, -.fa-ils:before { - content: "" -} - -.fa-meanpath:before { - content: "" -} - -.fa-buysellads:before { - content: "" -} - -.fa-connectdevelop:before { - content: "" -} - -.fa-dashcube:before { - content: "" -} - -.fa-forumbee:before { - content: "" -} - -.fa-leanpub:before { - content: "" -} - -.fa-sellsy:before { - content: "" -} - -.fa-shirtsinbulk:before { - content: "" -} - -.fa-simplybuilt:before { - content: "" -} - -.fa-skyatlas:before { - content: "" -} - -.fa-cart-plus:before { - content: "" -} - -.fa-cart-arrow-down:before { - content: "" -} - -.fa-diamond:before { - content: "" -} - -.fa-ship:before { - content: "" -} - -.fa-user-secret:before { - content: "" -} - -.fa-motorcycle:before { - content: "" -} - -.fa-street-view:before { - content: "" -} - -.fa-heartbeat:before { - content: "" -} - -.fa-venus:before { - content: "" -} - -.fa-mars:before { - content: "" -} - -.fa-mercury:before { - content: "" -} - -.fa-intersex:before, -.fa-transgender:before { - content: "" -} - -.fa-transgender-alt:before { - content: "" -} - -.fa-venus-double:before { - content: "" -} - -.fa-mars-double:before { - content: "" -} - -.fa-venus-mars:before { - content: "" -} - -.fa-mars-stroke:before { - content: "" -} - -.fa-mars-stroke-v:before { - content: "" -} - -.fa-mars-stroke-h:before { - content: "" -} - -.fa-neuter:before { - content: "" -} - -.fa-genderless:before { - content: "" -} - -.fa-facebook-official:before { - content: "" -} - -.fa-pinterest-p:before { - content: "" -} - -.fa-whatsapp:before { - content: "" -} - -.fa-server:before { - content: "" -} - -.fa-user-plus:before { - content: "" -} - -.fa-user-times:before { - content: "" -} - -.fa-hotel:before, -.fa-bed:before { - content: "" -} - -.fa-viacoin:before { - content: "" -} - -.fa-train:before { - content: "" -} - -.fa-subway:before { - content: "" -} - -.fa-medium:before { - content: "" -} - -.fa-yc:before, -.fa-y-combinator:before { - content: "" -} - -.fa-optin-monster:before { - content: "" -} - -.fa-opencart:before { - content: "" -} - -.fa-expeditedssl:before { - content: "" -} - -.fa-battery-4:before, -.fa-battery:before, -.fa-battery-full:before { - content: "" -} - -.fa-battery-3:before, -.fa-battery-three-quarters:before { - content: "" -} - -.fa-battery-2:before, -.fa-battery-half:before { - content: "" -} - -.fa-battery-1:before, -.fa-battery-quarter:before { - content: "" -} - -.fa-battery-0:before, -.fa-battery-empty:before { - content: "" -} - -.fa-mouse-pointer:before { - content: "" -} - -.fa-i-cursor:before { - content: 
"" -} - -.fa-object-group:before { - content: "" -} - -.fa-object-ungroup:before { - content: "" -} - -.fa-sticky-note:before { - content: "" -} - -.fa-sticky-note-o:before { - content: "" -} - -.fa-cc-jcb:before { - content: "" -} - -.fa-cc-diners-club:before { - content: "" -} - -.fa-clone:before { - content: "" -} - -.fa-balance-scale:before { - content: "" -} - -.fa-hourglass-o:before { - content: "" -} - -.fa-hourglass-1:before, -.fa-hourglass-start:before { - content: "" -} - -.fa-hourglass-2:before, -.fa-hourglass-half:before { - content: "" -} - -.fa-hourglass-3:before, -.fa-hourglass-end:before { - content: "" -} - -.fa-hourglass:before { - content: "" -} - -.fa-hand-grab-o:before, -.fa-hand-rock-o:before { - content: "" -} - -.fa-hand-stop-o:before, -.fa-hand-paper-o:before { - content: "" -} - -.fa-hand-scissors-o:before { - content: "" -} - -.fa-hand-lizard-o:before { - content: "" -} - -.fa-hand-spock-o:before { - content: "" -} - -.fa-hand-pointer-o:before { - content: "" -} - -.fa-hand-peace-o:before { - content: "" -} - -.fa-trademark:before { - content: "" -} - -.fa-registered:before { - content: "" -} - -.fa-creative-commons:before { - content: "" -} - -.fa-gg:before { - content: "" -} - -.fa-gg-circle:before { - content: "" -} - -.fa-tripadvisor:before { - content: "" -} - -.fa-odnoklassniki:before { - content: "" -} - -.fa-odnoklassniki-square:before { - content: "" -} - -.fa-get-pocket:before { - content: "" -} - -.fa-wikipedia-w:before { - content: "" -} - -.fa-safari:before { - content: "" -} - -.fa-chrome:before { - content: "" -} - -.fa-firefox:before { - content: "" -} - -.fa-opera:before { - content: "" -} - -.fa-internet-explorer:before { - content: "" -} - -.fa-tv:before, -.fa-television:before { - content: "" -} - -.fa-contao:before { - content: "" -} - -.fa-500px:before { - content: "" -} - -.fa-amazon:before { - content: "" -} - -.fa-calendar-plus-o:before { - content: "" -} - -.fa-calendar-minus-o:before { - content: "" -} - -.fa-calendar-times-o:before { - content: "" -} - -.fa-calendar-check-o:before { - content: "" -} - -.fa-industry:before { - content: "" -} - -.fa-map-pin:before { - content: "" -} - -.fa-map-signs:before { - content: "" -} - -.fa-map-o:before { - content: "" -} - -.fa-map:before { - content: "" -} - -.fa-commenting:before { - content: "" -} - -.fa-commenting-o:before { - content: "" -} - -.fa-houzz:before { - content: "" -} - -.fa-vimeo:before { - content: "" -} - -.fa-black-tie:before { - content: "" -} - -.fa-fonticons:before { - content: "" -} - -.fa-reddit-alien:before { - content: "" -} - -.fa-edge:before { - content: "" -} - -.fa-credit-card-alt:before { - content: "" -} - -.fa-codiepie:before { - content: "" -} - -.fa-modx:before { - content: "" -} - -.fa-fort-awesome:before { - content: "" -} - -.fa-usb:before { - content: "" -} - -.fa-product-hunt:before { - content: "" -} - -.fa-mixcloud:before { - content: "" -} - -.fa-scribd:before { - content: "" -} - -.fa-pause-circle:before { - content: "" -} - -.fa-pause-circle-o:before { - content: "" -} - -.fa-stop-circle:before { - content: "" -} - -.fa-stop-circle-o:before { - content: "" -} - -.fa-shopping-bag:before { - content: "" -} - -.fa-shopping-basket:before { - content: "" -} - -.fa-hashtag:before { - content: "" -} - -.fa-bluetooth:before { - content: "" -} - -.fa-bluetooth-b:before { - content: "" -} - -.fa-percent:before { - content: "" -} - -.fa-gitlab:before, -.icon-gitlab:before { - content: 
"" -} - -.fa-wpbeginner:before { - content: "" -} - -.fa-wpforms:before { - content: "" -} - -.fa-envira:before { - content: "" -} - -.fa-universal-access:before { - content: "" -} - -.fa-wheelchair-alt:before { - content: "" -} - -.fa-question-circle-o:before { - content: "" -} - -.fa-blind:before { - content: "" -} - -.fa-audio-description:before { - content: "" -} - -.fa-volume-control-phone:before { - content: "" -} - -.fa-braille:before { - content: "" -} - -.fa-assistive-listening-systems:before { - content: "" -} - -.fa-asl-interpreting:before, -.fa-american-sign-language-interpreting:before { - content: "" -} - -.fa-deafness:before, -.fa-hard-of-hearing:before, -.fa-deaf:before { - content: "" -} - -.fa-glide:before { - content: "" -} - -.fa-glide-g:before { - content: "" -} - -.fa-signing:before, -.fa-sign-language:before { - content: "" -} - -.fa-low-vision:before { - content: "" -} - -.fa-viadeo:before { - content: "" -} - -.fa-viadeo-square:before { - content: "" -} - -.fa-snapchat:before { - content: "" -} - -.fa-snapchat-ghost:before { - content: "" -} - -.fa-snapchat-square:before { - content: "" -} - -.fa-pied-piper:before { - content: "" -} - -.fa-first-order:before { - content: "" -} - -.fa-yoast:before { - content: "" -} - -.fa-themeisle:before { - content: "" -} - -.fa-google-plus-circle:before, -.fa-google-plus-official:before { - content: "" -} - -.fa-fa:before, -.fa-font-awesome:before { - content: "" -} - -.fa-handshake-o:before { - content: "" -} - -.fa-envelope-open:before { - content: "" -} - -.fa-envelope-open-o:before { - content: "" -} - -.fa-linode:before { - content: "" -} - -.fa-address-book:before { - content: "" -} - -.fa-address-book-o:before { - content: "" -} - -.fa-vcard:before, -.fa-address-card:before { - content: "" -} - -.fa-vcard-o:before, -.fa-address-card-o:before { - content: "" -} - -.fa-user-circle:before { - content: "" -} - -.fa-user-circle-o:before { - content: "" -} - -.fa-user-o:before { - content: "" -} - -.fa-id-badge:before { - content: "" -} - -.fa-drivers-license:before, -.fa-id-card:before { - content: "" -} - -.fa-drivers-license-o:before, -.fa-id-card-o:before { - content: "" -} - -.fa-quora:before { - content: "" -} - -.fa-free-code-camp:before { - content: "" -} - -.fa-telegram:before { - content: "" -} - -.fa-thermometer-4:before, -.fa-thermometer:before, -.fa-thermometer-full:before { - content: "" -} - -.fa-thermometer-3:before, -.fa-thermometer-three-quarters:before { - content: "" -} - -.fa-thermometer-2:before, -.fa-thermometer-half:before { - content: "" -} - -.fa-thermometer-1:before, -.fa-thermometer-quarter:before { - content: "" -} - -.fa-thermometer-0:before, -.fa-thermometer-empty:before { - content: "" -} - -.fa-shower:before { - content: "" -} - -.fa-bathtub:before, -.fa-s15:before, -.fa-bath:before { - content: "" -} - -.fa-podcast:before { - content: "" -} - -.fa-window-maximize:before { - content: "" -} - -.fa-window-minimize:before { - content: "" -} - -.fa-window-restore:before { - content: "" -} - -.fa-times-rectangle:before, -.fa-window-close:before { - content: "" -} - -.fa-times-rectangle-o:before, -.fa-window-close-o:before { - content: "" -} - -.fa-bandcamp:before { - content: "" -} - -.fa-grav:before { - content: "" -} - -.fa-etsy:before { - content: "" -} - -.fa-imdb:before { - content: "" -} - -.fa-ravelry:before { - content: "" -} - -.fa-eercast:before { - content: "" -} - -.fa-microchip:before { - content: "" -} - 
-.fa-snowflake-o:before { - content: "" -} - -.fa-superpowers:before { - content: "" -} - -.fa-wpexplorer:before { - content: "" -} - -.fa-meetup:before { - content: "" -} - -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - border: 0 -} - -.sr-only-focusable:active, -.sr-only-focusable:focus { - position: static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto -} - -.fa, -.wy-menu-vertical li span.toctree-expand, -.wy-menu-vertical li.on a span.toctree-expand, -.wy-menu-vertical li.current>a span.toctree-expand, -.rst-content .admonition-title, -.rst-content h1 .headerlink, -.rst-content h2 .headerlink, -.rst-content h3 .headerlink, -.rst-content h4 .headerlink, -.rst-content h5 .headerlink, -.rst-content h6 .headerlink, -.rst-content dl dt .headerlink, -.rst-content p.caption .headerlink, -.rst-content table>caption .headerlink, -.rst-content .code-block-caption .headerlink, -.rst-content tt.download span:first-child, -.rst-content code.download span:first-child, -.icon, -.wy-dropdown .caret, -.wy-inline-validate.wy-inline-validate-success .wy-input-context, -.wy-inline-validate.wy-inline-validate-danger .wy-input-context, -.wy-inline-validate.wy-inline-validate-warning .wy-input-context, -.wy-inline-validate.wy-inline-validate-info .wy-input-context { - font-family: inherit -} - -.fa:before, -.wy-menu-vertical li span.toctree-expand:before, -.wy-menu-vertical li.on a span.toctree-expand:before, -.wy-menu-vertical li.current>a span.toctree-expand:before, -.rst-content .admonition-title:before, -.rst-content h1 .headerlink:before, -.rst-content h2 .headerlink:before, -.rst-content h3 .headerlink:before, -.rst-content h4 .headerlink:before, -.rst-content h5 .headerlink:before, -.rst-content h6 .headerlink:before, -.rst-content dl dt .headerlink:before, -.rst-content p.caption .headerlink:before, -.rst-content table>caption .headerlink:before, -.rst-content .code-block-caption .headerlink:before, -.rst-content tt.download span:first-child:before, -.rst-content code.download span:first-child:before, -.icon:before, -.wy-dropdown .caret:before, -.wy-inline-validate.wy-inline-validate-success .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before, -.wy-inline-validate.wy-inline-validate-info .wy-input-context:before { - font-family: "FontAwesome"; - display: inline-block; - font-style: normal; - font-weight: normal; - line-height: 1; - text-decoration: inherit -} - -a .fa, -a .wy-menu-vertical li span.toctree-expand, -.wy-menu-vertical li a span.toctree-expand, -.wy-menu-vertical li.on a span.toctree-expand, -.wy-menu-vertical li.current>a span.toctree-expand, -a .rst-content .admonition-title, -.rst-content a .admonition-title, -a .rst-content h1 .headerlink, -.rst-content h1 a .headerlink, -a .rst-content h2 .headerlink, -.rst-content h2 a .headerlink, -a .rst-content h3 .headerlink, -.rst-content h3 a .headerlink, -a .rst-content h4 .headerlink, -.rst-content h4 a .headerlink, -a .rst-content h5 .headerlink, -.rst-content h5 a .headerlink, -a .rst-content h6 .headerlink, -.rst-content h6 a .headerlink, -a .rst-content dl dt .headerlink, -.rst-content dl dt a .headerlink, -a .rst-content p.caption .headerlink, -.rst-content p.caption a .headerlink, -a .rst-content table>caption .headerlink, -.rst-content table>caption a .headerlink, -a .rst-content 
[elided: removal-only diff hunk deleting the remainder of the project's customized Read the Docs Sphinx theme stylesheet (Heat-branded CSS variables such as --horange, --hgrey, --black). The deleted rules cover Font Awesome icon helpers, admonition/alert variants, buttons and dropdowns, form and input controls, tables, base typography and lists, breadcrumbs, the horizontal and vertical navigation menus, the fixed sidebar and search box, the .rst-versions version picker, and .rst-content figure and code-block styling. Every line in this hunk is a deletion; no lines are added.]
rgba(0, 0, 0, 0.1) !important -} - -.rst-content .section ol.loweralpha, -.rst-content .section ol.loweralpha li { - list-style: lower-alpha -} - -.rst-content .section ol.upperalpha, -.rst-content .section ol.upperalpha li { - list-style: upper-alpha -} - -.rst-content .section ol p, -.rst-content .section ul p { - margin-bottom: 2px -} - -.rst-content .section ol p:last-child, -.rst-content .section ul p:last-child { - margin-bottom: 24px -} - -.rst-content .line-block { - margin-left: 0px; - margin-bottom: 24px; - line-height: 24px -} - -.rst-content .line-block .line-block { - margin-left: 24px; - margin-bottom: 0px -} - -.rst-content .topic-title { - font-weight: bold; - margin-bottom: 12px -} - -.rst-content .toc-backref { - color: #404040 -} - -.rst-content .align-right { - float: right; - margin: 0px 0px 24px 24px -} - -.rst-content .align-left { - float: left; - margin: 0px 24px 24px 0px -} - -.rst-content .align-center { - margin: auto -} - -.rst-content .align-center:not(table) { - display: block -} - -.rst-content h1 .headerlink, -.rst-content h2 .headerlink, -.rst-content .toctree-wrapper p.caption .headerlink, -.rst-content h3 .headerlink, -.rst-content h4 .headerlink, -.rst-content h5 .headerlink, -.rst-content h6 .headerlink, -.rst-content dl dt .headerlink, -.rst-content p.caption .headerlink, -.rst-content table>caption .headerlink, -.rst-content .code-block-caption .headerlink { - visibility: hidden; - font-size: 14px -} - -.rst-content h1 .headerlink:after, -.rst-content h2 .headerlink:after, -.rst-content .toctree-wrapper p.caption .headerlink:after, -.rst-content h3 .headerlink:after, -.rst-content h4 .headerlink:after, -.rst-content h5 .headerlink:after, -.rst-content h6 .headerlink:after, -.rst-content dl dt .headerlink:after, -.rst-content p.caption .headerlink:after, -.rst-content table>caption .headerlink:after, -.rst-content .code-block-caption .headerlink:after { - content: ""; - font-family: FontAwesome -} - -.rst-content h1:hover .headerlink:after, -.rst-content h2:hover .headerlink:after, -.rst-content .toctree-wrapper p.caption:hover .headerlink:after, -.rst-content h3:hover .headerlink:after, -.rst-content h4:hover .headerlink:after, -.rst-content h5:hover .headerlink:after, -.rst-content h6:hover .headerlink:after, -.rst-content dl dt:hover .headerlink:after, -.rst-content p.caption:hover .headerlink:after, -.rst-content table>caption:hover .headerlink:after, -.rst-content .code-block-caption:hover .headerlink:after { - visibility: visible -} - -.rst-content table>caption .headerlink:after { - font-size: 12px -} - -.rst-content .centered { - text-align: center -} - -.rst-content .sidebar { - float: right; - width: 40%; - display: block; - margin: 0 0 24px 24px; - padding: 24px; - background: #f3f6f6; - border: solid 1px #e1e4e5 -} - -.rst-content .sidebar p, -.rst-content .sidebar ul, -.rst-content .sidebar dl { - font-size: 90% -} - -.rst-content .sidebar .last { - margin-bottom: 0 -} - -.rst-content .sidebar .sidebar-title { - display: block; - font-family: "Roboto Slab", "ff-tisa-web-pro", "Georgia", Arial, sans-serif; - font-weight: bold; - background: #e1e4e5; - padding: 6px 12px; - margin: -24px; - margin-bottom: 24px; - font-size: 100% -} - -.rst-content .highlighted { - background: #F1C40F; - display: inline-block; - font-weight: bold; - padding: 0 6px -} - -.highlight { - background: var(--hgrey-light) -} - -.rst-content .footnote-reference, -.rst-content .citation-reference { - vertical-align: baseline; - position: relative; - top: -0.4em; - 
line-height: 0; - font-size: 90% -} - -.rst-content table.docutils.citation, -.rst-content table.docutils.footnote { - background: none; - border: none; - color: gray -} - -.rst-content table.docutils.citation td, -.rst-content table.docutils.citation tr, -.rst-content table.docutils.footnote td, -.rst-content table.docutils.footnote tr { - border: none; - background-color: transparent !important; - white-space: normal -} - -.rst-content table.docutils.citation td.label, -.rst-content table.docutils.footnote td.label { - padding-left: 0; - padding-right: 0; - vertical-align: top -} - -.rst-content table.docutils.citation tt, -.rst-content table.docutils.citation code, -.rst-content table.docutils.footnote tt, -.rst-content table.docutils.footnote code { - color: #555 -} - -.rst-content .wy-table-responsive.citation, -.rst-content .wy-table-responsive.footnote { - margin-bottom: 0 -} - -.rst-content .wy-table-responsive.citation+:not(.citation), -.rst-content .wy-table-responsive.footnote+:not(.footnote) { - margin-top: 24px -} - -.rst-content .wy-table-responsive.citation:last-child, -.rst-content .wy-table-responsive.footnote:last-child { - margin-bottom: 24px -} - -.rst-content table.docutils th { - border-color: #e1e4e5 -} - -.rst-content table.docutils td .last, -.rst-content table.docutils td .last :last-child { - margin-bottom: 0 -} - -.rst-content table.field-list { - border: none -} - -.rst-content table.field-list td { - border: none -} - -.rst-content table.field-list td p { - font-size: inherit; - line-height: inherit -} - -.rst-content table.field-list td>strong { - display: inline-block -} - -.rst-content table.field-list .field-name { - padding-right: 10px; - text-align: left; - white-space: nowrap -} - -.rst-content table.field-list .field-body { - text-align: left -} - -.rst-content tt, -.rst-content tt, -.rst-content code { - color: #000; - font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", Courier, monospace; - padding: 1px 1px -} - -.rst-content tt big, -.rst-content tt em, -.rst-content tt big, -.rst-content code big, -.rst-content tt em, -.rst-content code em { - font-size: 100% !important; - line-height: normal -} - -.rst-content tt.literal, -.rst-content tt.literal, -.rst-content code.literal { - font-weight: normal; - color: #000000 -} - -.rst-content tt.xref, -a .rst-content tt, -.rst-content tt.xref, -.rst-content code.xref, -a .rst-content tt, -a .rst-content code { - font-weight: bold; - color: #404040 -} -.orangemarker { - color:white; - background:var(--horange); -} -.greymarker { - color:white; - background:var(--hgrey); -} -.bluemarker { - color:white; - background:var(--hblue); -} -.rst-content pre, -.rst-content kbd, -.rst-content samp { - font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", Courier, monospace -} - -.rst-content a tt, -.rst-content a tt, -.rst-content a code { - color: #2980B9 -} - -.rst-content dl { - margin-bottom: 24px -} - -.rst-content dl dt { - font-weight: bold; - margin-bottom: 12px -} - -.rst-content dl p, -.rst-content dl table{ - margin-bottom: 12px !important -} - -.rst-content dl ul, -.rst-content dl ol, - .rst-content dl li { - margin-top: 3px !important; - margin-bottom: 3px !important -} -.rst-content dl ul p, -.rst-content dl ol p, - .rst-content dl li p { - margin-top: 3px !important; - margin-bottom: 3px !important -} - -.rst-content dl ul blockquote, -.rst-content dl ol blockquote, - .rst-content dl li blockquote { - margin-bottom: 3px !important; - 
margin-left: 12px !important
-
-}
-
-.rst-content dl dd {
-    margin: 0 0 12px 24px;
-    line-height: 24px
-}
-
-.rst-content dl:not(.docutils) {
-    margin-bottom: 24px
-}
-
-.rst-content dl:not(.docutils) dt {
-    display: table;
-    margin: 6px 0;
-    font-size: 90%;
-    line-height: normal;
-    background: none;
-    border: none;
-    border-top: solid 3px var(--hgrey-light);
-    color: var(--horange);
-    padding: 6px;
-    position: relative
-}
-
-.rst-content dl:not(.docutils) dt:before {
-    color: var(--hgrey)
-}
-
-.rst-content dl:not(.docutils) dt .headerlink {
-    color: #404040;
-    font-size: 100% !important
-}
-
-.rst-content dl:not(.docutils) dl dt {
-    margin-bottom: 6px;
-    border: none;
-    border-left: solid 3px var(--horange);
-    background: var(--hgrey-light);
-    color: var(--black)
-}
-
-.rst-content dl:not(.docutils) dl dt .headerlink {
-    color: #404040;
-    font-size: 100% !important
-}
-
-.rst-content dl:not(.docutils) dt:first-child {
-    margin-top: 0
-}
-
-.rst-content dl:not(.docutils) tt,
-.rst-content dl:not(.docutils) tt,
-.rst-content dl:not(.docutils) code {
-    color: var(--black);
-    font-weight: bold
-}
-
-.rst-content dl:not(.docutils) tt.descname,
-.rst-content dl:not(.docutils) tt.descclassname,
-.rst-content dl:not(.docutils) tt.descname,
-.rst-content dl:not(.docutils) code.descname,
-.rst-content dl:not(.docutils) tt.descclassname,
-.rst-content dl:not(.docutils) code.descclassname {
-    background-color: transparent;
-    border: none;
-    padding: 0;
-    font-size: 100% !important
-}
-
-.rst-content dl:not(.docutils) tt.descname,
-.rst-content dl:not(.docutils) tt.descname,
-.rst-content dl:not(.docutils) code.descname {
-    padding-right: 2px;
-    font-weight: bold
-}
-
-.rst-content dl:not(.docutils) .optional {
-    display: inline-block;
-    padding: 0 4px;
-    color: #000;
-    font-weight: bold
-}
-
-.rst-content dl:not(.docutils) .property {
-    display: inline-block;
-    color: var(--horange);
-    text-transform: uppercase;
-    padding-right: 8px
-}
-
-.rst-content .viewcode-link,
-.rst-content .viewcode-back {
-    display: inline-block;
-    color: #27AE60;
-    font-size: 80%;
-    padding-left: 24px
-}
-
-.rst-content .viewcode-back {
-    display: block;
-    float: right
-}
-
-.rst-content p.rubric {
-    margin-bottom: 12px;
-    font-weight: bold
-}
-
-.rst-content tt.download,
-.rst-content code.download {
-    background: inherit;
-    padding: inherit;
-    font-weight: normal;
-    font-family: inherit;
-    font-size: inherit;
-    color: inherit;
-    border: inherit;
-    white-space: inherit
-}
-
-.rst-content tt.download span:first-child,
-.rst-content code.download span:first-child {
-    -webkit-font-smoothing: subpixel-antialiased
-}
-
-.rst-content tt.download span:first-child:before,
-.rst-content code.download span:first-child:before {
-    margin-right: 4px
-}
-
-.rst-content .guilabel {
-    border: 1px solid #7fbbe3;
-    background: #e7f2fa;
-    font-size: 80%;
-    font-weight: 700;
-    border-radius: 4px;
-    padding: 2.4px 6px;
-    margin: auto 2px
-}
-
-.rst-content .versionmodified {
-    font-style: italic
-}
-
-@media screen and (max-width: 480px) {
-    .rst-content .sidebar {
-        width: 100%
-    }
-}
-
-span[id*='MathJax-Span'] {
-    color: #404040
-}
-
-.math {
-    text-align: center
-}
-
-.keep-us-sustainable {
-    background-color: var(--horange);
-}
-
-@font-face {
-    font-family: "Lato";
-    src: url("../fonts/Lato/lato-regular.eot");
-    src: url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"), url("../fonts/Lato/lato-regular.woff2") format("woff2"), url("../fonts/Lato/lato-regular.woff") format("woff"), url("../fonts/Lato/lato-regular.ttf")
format("truetype"); - font-weight: 400; - font-style: normal -} - -@font-face { - font-family: "Lato"; - src: url("../fonts/Lato/lato-bold.eot"); - src: url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"), url("../fonts/Lato/lato-bold.woff2") format("woff2"), url("../fonts/Lato/lato-bold.woff") format("woff"), url("../fonts/Lato/lato-bold.ttf") format("truetype"); - font-weight: 700; - font-style: normal -} - -@font-face { - font-family: "Lato"; - src: url("../fonts/Lato/lato-bolditalic.eot"); - src: url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"), url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"), url("../fonts/Lato/lato-bolditalic.woff") format("woff"), url("../fonts/Lato/lato-bolditalic.ttf") format("truetype"); - font-weight: 700; - font-style: italic -} - -@font-face { - font-family: "Lato"; - src: url("../fonts/Lato/lato-italic.eot"); - src: url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"), url("../fonts/Lato/lato-italic.woff2") format("woff2"), url("../fonts/Lato/lato-italic.woff") format("woff"), url("../fonts/Lato/lato-italic.ttf") format("truetype"); - font-weight: 400; - font-style: italic -} - -@font-face { - font-family: "Roboto Slab"; - font-style: normal; - font-weight: 400; - src: url("../fonts/RobotoSlab/roboto-slab.eot"); - src: url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"), url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"), url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"), url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype") -} - -@font-face { - font-family: "Roboto Slab"; - font-style: normal; - font-weight: 700; - src: url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot"); - src: url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"), url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"), url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"), url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype") -} - -.tutorial { - overflow: hidden; - border-radius: 10px; - margin-top: 20px; - margin-bottom: 20px; - box-shadow: 0 0 20px rgba(0, 0, 0, 0.1); - border: 1px solid var(--hgrey-light); -} - -.tutorial:hover { - color: white !important; - background-color: var(--horange); -} - -.tutorial:visited { - color: white !important; - background-color: var(--horange); -} - -.tutorial-image { - background-color: white; - margin: 20px; - padding: 20px; - width: 25%; - float: left; - border-radius: 7px; -} - -.tutorial-text { - padding: 20px; - float: left; - width: 65%; -} - -.highlight-output>.highlight { - background-color: var(--horange-light) !important; -} - -.highlight-text>.highlight { - background-color: var(--horange-light) !important; -} - -.case { - width: 40%; - float: left; - overflow: hidden; - border-radius: 10px; - margin: 20px; - border: 1px solid var(--hgrey-light); - box-shadow: 0 0 20px rgba(0, 0, 0, 0.1); -} - -.case:hover { - color: white !important; - background-color: var(--horange); -} - -.case-image { - background-color: white; - position: relative; - height: 200px; - overflow: hidden; -} - -.case-image:hover { - background-color: white; -} - -.case-image>img { - position: absolute; - top: 0; - bottom: 0; - left: 0; - right: 0; - margin: auto; - max-height: 170px; - padding: 15px; -} - -.case-text { - height: 200px; - padding: 20px; - border-top: 1px solid var(--hgrey-light); -} diff --git 
a/doc/source/_templates/.DS_Store b/doc/source/_templates/.DS_Store deleted file mode 100644 index 177b20f39f..0000000000 Binary files a/doc/source/_templates/.DS_Store and /dev/null differ diff --git a/doc/source/_templates/autoapi/.DS_Store b/doc/source/_templates/autoapi/.DS_Store deleted file mode 100644 index 47a2ad2696..0000000000 Binary files a/doc/source/_templates/autoapi/.DS_Store and /dev/null differ diff --git a/doc/source/_templates/autoapi/python/attribute.rst b/doc/source/_templates/autoapi/python/attribute.rst deleted file mode 100644 index ebaba555ad..0000000000 --- a/doc/source/_templates/autoapi/python/attribute.rst +++ /dev/null @@ -1 +0,0 @@ -{% extends "python/data.rst" %} diff --git a/doc/source/_templates/autoapi/python/class.rst b/doc/source/_templates/autoapi/python/class.rst deleted file mode 100644 index 3c5755056a..0000000000 --- a/doc/source/_templates/autoapi/python/class.rst +++ /dev/null @@ -1,28 +0,0 @@ -{% if obj.display %} -.. py:{{ obj.type }}:: {{ obj.short_name }}{% if obj.args %}({{ obj.args }}){% endif %} - - - {% if obj.bases %} - Bases: {% for base in obj.bases %}:class:`{{ base }}`{% if not loop.last %}, {% endif %}{% endfor %} - - - {% endif %} - {% if obj.docstring %} - {{ obj.docstring|prepare_docstring|indent(3) }} - {% endif %} - {% set visible_classes = obj.classes|selectattr("display")|list %} - {% for klass in visible_classes %} - {{ klass.rendered|indent(3) }} - {% endfor %} - {% set visible_attributes = obj.attributes|selectattr("display")|list %} - - {% for attribute in visible_attributes %} - {{ attribute.rendered|indent(3) }} - {% endfor %} - {% set visible_methods = obj.methods|selectattr("display")|list %} - .. role:: raw-html(raw) - :format: html - {% for method in visible_methods %} - {{ method.rendered|indent(3) }} - {% endfor %} -{% endif %} diff --git a/doc/source/_templates/autoapi/python/data.rst b/doc/source/_templates/autoapi/python/data.rst deleted file mode 100644 index 2ce3e1efbe..0000000000 --- a/doc/source/_templates/autoapi/python/data.rst +++ /dev/null @@ -1,7 +0,0 @@ -{% if obj.display %} -.. {{ obj.type }}:: {{ obj.name }} - {%+ if obj.value is not none or obj.annotation is not none %}:annotation:{% if obj.annotation %} :{{ obj.annotation }}{% endif %}{% if obj.value is not none %} = {{ obj.value }}{% endif %}{% endif %} - - - {{ obj.docstring|prepare_docstring|indent(3) }} -{% endif %} diff --git a/doc/source/_templates/autoapi/python/exception.rst b/doc/source/_templates/autoapi/python/exception.rst deleted file mode 100644 index 92f3d38fd5..0000000000 --- a/doc/source/_templates/autoapi/python/exception.rst +++ /dev/null @@ -1 +0,0 @@ -{% extends "python/class.rst" %} diff --git a/doc/source/_templates/autoapi/python/function.rst b/doc/source/_templates/autoapi/python/function.rst deleted file mode 100644 index 829f498a4a..0000000000 --- a/doc/source/_templates/autoapi/python/function.rst +++ /dev/null @@ -1,14 +0,0 @@ -{% if obj.display %} -.. 
function:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} - - {% if sphinx_version >= (2, 1) %} - {% for property in obj.properties %} - :{{ property }}: - {% endfor %} - {% endif %} - - {% if obj.docstring %} - {{ obj.docstring|prepare_docstring|indent(3) }} - {% else %} - {% endif %} -{% endif %} diff --git a/doc/source/_templates/autoapi/python/method.rst b/doc/source/_templates/autoapi/python/method.rst deleted file mode 100644 index 6e712f38b5..0000000000 --- a/doc/source/_templates/autoapi/python/method.rst +++ /dev/null @@ -1,12 +0,0 @@ -{%- if obj.display %} -{% if sphinx_version >= (2, 1) %} -.. method:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} - -{% else %} -.. {{ obj.method_type }}:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} -{% endif %} - - {% if obj.docstring %} - {{ obj.docstring|prepare_docstring|indent(3) }} - {% endif %} -{% endif %} diff --git a/doc/source/_templates/autoapi/python/module.rst b/doc/source/_templates/autoapi/python/module.rst deleted file mode 100644 index 98dff40513..0000000000 --- a/doc/source/_templates/autoapi/python/module.rst +++ /dev/null @@ -1,97 +0,0 @@ -{% if not obj.display %} -:orphan: - -{% endif %} -:mod:`{{ obj.name.replace('.core', '') }}` -======={{ "=" * obj.name|length }} -.. py:module:: {{ obj.name }} - -{% if obj.docstring %} -.. autoapi-nested-parse:: - - {{ obj.docstring|prepare_docstring|indent(3) }} - -{% endif %} - -{% block subpackages %} -{% set visible_subpackages = obj.subpackages|selectattr("display")|list %} -{% if visible_subpackages %} -Subpackages ------------ -.. toctree:: - :titlesonly: - :maxdepth: 3 - -{% for subpackage in visible_subpackages %} - {{ subpackage.short_name }}/index.rst -{% endfor %} - - -{% endif %} -{% endblock %} -{% block submodules %} -{% set visible_submodules = obj.submodules|selectattr("display")|list %} -{% if visible_submodules %} -Submodules ----------- -.. toctree:: - :titlesonly: - :maxdepth: 1 - -{% for submodule in visible_submodules %} - {{ submodule.short_name }}/index.rst -{% endfor %} - - -{% endif %} -{% endblock %} -{% block content %} -{% if obj.all is not none %} -{% set visible_children = obj.children|selectattr("short_name", "in", obj.all)|list %} -{% elif obj.type is equalto("package") %} -{% set visible_children = obj.children|selectattr("display")|list %} -{% else %} -{% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %} -{% endif %} -{% if visible_children %} -{{ obj.type|title }} Contents -{{ "-" * obj.type|length }}--------- - -{% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %} -{% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %} -{% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %} -{% block classes %} -{% if visible_classes %} -Classes -~~~~~~~ - -.. autoapisummary:: - -{% for klass in visible_classes %} - {{ klass.id }} -{% endfor %} - - -{% endif %} -{% endblock %} - -{% block functions %} -{% if visible_functions %} -Functions -~~~~~~~~~ - -.. 
autoapisummary::
-
-{% for function in visible_functions %}
-   {{ function.id }}
-{% endfor %}
-
-
-{% endif %}
-{% endblock %}
-{% endif %}
-{% for obj_item in visible_children %}
-{{ obj_item.rendered|indent(0) }}
-{% endfor %}
-{% endif %}
-{% endblock %}
diff --git a/doc/source/_templates/autoapi/python/package.rst b/doc/source/_templates/autoapi/python/package.rst
deleted file mode 100644
index fb9a64965e..0000000000
--- a/doc/source/_templates/autoapi/python/package.rst
+++ /dev/null
@@ -1 +0,0 @@
-{% extends "python/module.rst" %}
diff --git a/doc/source/case_studies.rst b/doc/source/case_studies.rst
deleted file mode 100644
index 184e11571f..0000000000
--- a/doc/source/case_studies.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-Case Studies
-============
-
-.. container:: case
-
-    .. container:: case-image
-
-        .. image:: _static/images/fzj_logo.svg
-
-    .. container:: case-text
-
-        **TerrSysMP**
-
-        The IBG-3 at Research Centre Juelich uses Heat to postprocess TerrSysMP hydrological flow simulations.
-
-.. container:: case
-
-    .. container:: case-image
-
-        .. image:: _static/images/dlr_logo.svg
-
-    .. container:: case-text
-
-        **Rocket Science**
-
-        The German Aerospace Center (DLR) uses Heat to analyze combustion phases of rockets in high-speed camera videos.
-
-.. container:: case
-
-    .. container:: case-image
-
-        .. image:: _static/images/kit_logo.svg
-
-    .. container:: case-text
-
-        **Protein Simulations**
-
-        At Karlsruhe Institute of Technology, researchers identify protein folding states in MD simulations with Heat.
diff --git a/doc/source/conf.py b/doc/source/conf.py
deleted file mode 100644
index c2da12b04f..0000000000
--- a/doc/source/conf.py
+++ /dev/null
@@ -1,424 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-"""
-Heat documentation build configuration file, created by
-sphinx-quickstart on Wed Aug 29 09:02:49 2018.
-
-This file is execfile()d with the current directory set to its
-containing dir.
-
-Note that not all possible configuration values are present in this
-autogenerated file.
-
-All configuration values have a default; values that are commented out
-serve to show the default.
-
-If extensions (or modules to document with autodoc) are in another directory,
-add these directories to sys.path here. If the directory is relative to the
-documentation root, use os.path.abspath to make it absolute, like shown here.
-"""
-
-import os
-import sys
-
-# sys.path.insert(0, os.path.abspath('.'))
-sys.path.insert(0, os.path.abspath("../../heat"))
-
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [ - "autoapi.extension", - "sphinx.ext.autodoc", - "sphinx.ext.viewcode", - "sphinx.ext.inheritance_diagram", - "sphinx_rtd_theme", - "sphinx.ext.napoleon", - "sphinx.ext.mathjax", - "sphinx_copybutton", - "nbsphinx", -] - -# Document Python Code -autoapi_type = "python" -autoapi_dirs = ["../../heat/"] -autoapi_ignore = ["*/operations.py", "*/tests/*"] -autoapi_template_dir = "_templates/autoapi" - - -def skip_util_params(app, what, name, obj, skip, options): - """ - Undocumented Global Objects will not be rendered for the documentation - """ - if what == "data" and obj.is_undoc_member: - skip = True - return skip - - -def skip_util_functions(app, what, name, obj, skip, options): - """ - Init and doc functions will not be rendered for the documentation - """ - if "__init__" in name: - skip = True - elif "__doc__" in name and obj.is_undoc_member: - skip = True - return skip - - -def setup(sphinx): - """ - Connect the skipping functions - """ - sphinx.connect("autoapi-skip-member", skip_util_params) - sphinx.connect("autoapi-skip-member", skip_util_functions) - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] -napoleon_numpy_docstring = True -napoleon_use_ivar = True - -copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " -copybutton_prompt_is_regexp = True - -html_show_sourcelink = True - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The encoding of source files. -# -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -from datetime import datetime - -project = "Heat" -copyright = f"{datetime.now().year}, Helmholtz Analytics Framework Consortium" -author = "Helmholtz Analytics Framework Consortium" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -sys.path.insert(0, "../../heat/core") -import version as ht_version - -version = f"{ht_version.major}.{ht_version.minor}.{ht_version.micro}" -# The full version, including alpha/beta/rc tags. -if ht_version.extension: - release = f"{ht_version.major}.{ht_version.minor}.{ht_version.micro}-{ht_version.extension}" -else: - release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = "en" - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# -# today = '' -# -# Else, today_fmt is used as the format for a strftime call. -# -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["IGNORE"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). 
-# -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -html_theme_options = {"logo_only": True, "style_nav_header_background": "white"} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -# -# html_title = 'Heat v0.0.1' - -# A shorter title for the navigation bar. Default is the same as html_title. -# -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# -html_logo = "_static/images/logo_emblem.png" - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] -html_css_files = ["css/custom.css"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# -# html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. -# -# html_last_updated_fmt = None - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# -# html_additional_pages = {} - -# If false, no module index is generated. -# -# html_domain_indices = True - -# If false, no index is generated. -# -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. 
The value of this option must be the -# base URL from which the finished HTML is served. -# -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' -# -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "Heatdoc" - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "Heat.tex", - "Heat Documentation", - "Helmholtz Analytics Framework Consortium", - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# -# latex_use_parts = False - -# If true, show page references after internal links. -# -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# -# latex_appendices = [] - -# It false, will not define \strong, \code, itleref, \crossref ... but only -# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added -# packages. -# -# latex_keep_old_macro_names = True - -# If false, no module index is generated. -# -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "heat", "Heat Documentation", [author], 1)] - -# If true, show URL addresses after external links. -# -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "Heat", - "Heat Documentation", - author, - "Heat", - "One line description of project.", - "Miscellaneous", - ) -] - -# Documents to append as an appendix to all manuals. -# -# texinfo_appendices = [] - -# If false, no module index is generated. -# -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-#
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-#
-# texinfo_no_detailmenu = False
-
-# NBSPHINX
-nbsphinx_execute = "never"
-nbsphinx_thumbnails = {
-    "tutorials/notebooks/0_setup/0_setup_jsc": "_static/images/jsc_logo.png",
-    "tutorials/notebooks/0_setup/0_setup_local": "_static/images/local_laptop.png",
-    "tutorials/notebooks/0_setup/0_setup_haicore": "_static/images/nhr_verein_logo.jpg",
-    "tutorials/notebooks/1_basics": "_static/images/logo_emblem.png",
-    "tutorials/notebooks/2_internals": "_static/images/tutorial_split_dndarray.svg",
-    "tutorials/notebooks/3_loading_preprocessing": "_static/images/jupyter.png",
-    "tutorials/notebooks/4_matrix_factorizations": "_static/images/hSVD_bench_rank5.png",
-    # "tutorials/notebooks/5_clustering": "_static/images/tutorial_split_dndarray.svg",
-    "tutorials/notebooks/6_profiling": "_static/images/perun_logo.svg",
-}
diff --git a/doc/source/documentation_howto.rst b/doc/source/documentation_howto.rst
deleted file mode 100644
index a570a7cf30..0000000000
--- a/doc/source/documentation_howto.rst
+++ /dev/null
@@ -1,139 +0,0 @@
-.. role:: orangemarker
-.. role:: greymarker
-.. role:: bluemarker
-
-Writing Heat Documentation
-==========================
-
-In order to maintain proper, uniform and understandable API documentation of Heat, a few style guidelines are
-enforced. The following sections summarize the key features of Heat's API documentation.
-
-Prerequisites
--------------
-
-The Heat documentation is built using Sphinx (version 3.0.3) with a custom derivation of the Sphinx-RTD-Theme
-(defined in `_static/css/custom.css`).
-There are three main colors available for formatting:
-
-* :orangemarker:`Orange: RGB(240, 120, 30)`
-* :greymarker:`Grey: RGB(90, 105, 110)`
-* :bluemarker:`Blue: RGB(0, 90, 160)`
-
-All configuration regarding the documentation build is set in `doc/source/conf.py`.
-API documentation is generated using the `sphinx-autoapi extension `_ . This is
-done via custom templates, defined in `source/_templates/autoapi/python`.
-
-To build the documentation locally, please run the following commands in the Heat home directory:
-
-* ``pip install -r doc/requirements.txt``
-* ``sphinx-build -T -E -b html doc/source doc/build/html``
-
-The second command will build a local rendering of the documentation in the ``doc/build/html/autoapi`` folder. This
-should be checked before creating pull requests.
-
-Docstring Guidelines
---------------------
-
-Docstrings are written using the NumPy documentation style (see sphinx-contrib `napoleon
-`_ ).
-Apart from that, formatting happens via reStructuredText (reST). For a full reference on reST, see `here `_.
-
-Docstring Content
-^^^^^^^^^^^^^^^^^
-
-* Write clear and concise docstrings, especially in the function and parameter descriptions.
-* Use type hints for:
-
-  * Parameters
-  * Return types
-
-* Cross-reference major Heat classes (``DNDarray``, ``Communication``, ``Device``, ``data_type``):
-
-  * Import major classes directly (e.g. ``from .dndarray import DNDarray``, ``from .devices import Device``)
-  * In descriptions, use cross references when useful (full module path with tilde): ``:class:`~heat.core.dndarray.DNDarray```
-    or ``:function:`~heat.core.arithmetics.add```
-  * Use ``from __future__ import annotations`` for module-internal cross-referencing: see e.g.
-    `naive_bayes/gaussianNB.py: partial_fit
-
-* In narrative form, always refer to DNDarrays as arrays and not as tensors; the latter term is exclusively
-  reserved for PyTorch tensors.
-* Use code-style markdown to annotate functions, parameters or globally defined variables (e.g. ``None``, ``True``, ``NotImplementedError`` etc.) in description texts.
-* Math-style markdown can be used to typeset formulas.
-
-
-Docstring Format
-^^^^^^^^^^^^^^^^
-
-Method Template
-  The following example shows the standard formatting of a function docstring::
-
-    def foo(x: DNDarray, y: str, k: int = 0) -> DNDarray:
-        """
-        A description of the function behaviour and return value (not the type): What does the function do?
-        Any additional information can be given here, either in narrative form or in bullet points like so:
-        * Item 1 \n
-        * Item 2 \n
-
-        Parameters
-        -----------
-        x : DNDarray
-            Parameter description of x
-        y : str
-            Parameter description of y. Can be either 'a', 'b' or 'c'
-        k : int, optional
-
-        Notes
-        -----------
-        Notes on the function should be given in the "Notes" section (not in the function description).
-
-        References
-        -----------
-        [1] Webpage references \n
-        [2] Paper references. \n
-        [3] Do not use indentations at linebreaks for a reference
-
-        Warnings
-        -----------
-        Warnings on the function should be given in the "Warnings" section (not in the function description).
-
-        Raises
-        -----------
-        If the function raises any "unexpected" errors/exceptions that the user might not be aware of, these should be
-        mentioned here. This does not include standard exceptions like type errors from input sanitization or similar.
-
-        See Also
-        -----------
-        References to other functions can be given here (e.g. for aliasing).
-
-        Examples
-        ----------
-        >>> import heat as ht
-        >>> T = ht.array([[1,2],[3,4]], dtype=ht.float)
-        >>> ht.add(T, 2)
-        tensor([[3., 4.],
-                [5., 6.]])
-        >>> T + 2
-        tensor([[3., 4.],
-                [5., 6.]])
-        """
-
-For classes, the docstring goes right under the class definition (as opposed to in the ``__init__`` function). This
-way, all attributes that are passed for class initialization are documented properly, with type and default
-value annotations.
-
-Parameter Definitions
-  * Defaults are defined in the function Parameters
-  * Shape definitions go at the very end of the Parameter description in the following format: `Shape = (x, y, ...)`
-  * For classes, the initialization parameters are defined in the section ``Attributes``
-  * Different Parameter types are separated by `or`, not commas
-  * For detailed instructions on type hints for parameter and return type annotation (such as ``Union``, ``List``,
-    ``Tuple``, etc.) see `typing `_ (PEP 484)
-
-Examples
-  * Examples should only be separated by empty lines if there is a clear distinction between the example types.
-    Note that every empty line in the examples will create a new example code block. This is fine for 2-3 separated
-    blocks, but do not separate 15 different examples into individual blocks.
-  * There must not be a colon after Examples
-  * No comments in the examples (on number of processes or what the example shows). Put these in coding examples
-    under ``Notes``
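Applied to a class, these guidelines yield a docstring like the following minimal sketch; the class ``Foo`` and its attributes are hypothetical and serve illustration only, assuming the NumPy/napoleon conventions described in the file above.

.. code:: python

    from __future__ import annotations

    from heat.core.dndarray import DNDarray


    class Foo:
        """
        Brief description of what ``Foo`` does, placed directly under the class
        definition rather than under ``__init__``.

        Attributes
        ----------
        x : DNDarray
            Description of ``x``. Shape = (n_samples, n_features)
        k : int, optional
            Description of ``k``, including its default behaviour.
        """

        def __init__(self, x: DNDarray, k: int = 0) -> None:
            self.x = x
            self.k = k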
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
deleted file mode 100644
index 1749c0eb52..0000000000
--- a/doc/source/getting_started.rst
+++ /dev/null
@@ -1,113 +0,0 @@
-.. _Installation:
-
-Getting Started
-===============
-
-Heat is a Python package for accelerated and distributed tensor computations. Internally, it is based on `PyTorch `_. Consequently, all operating systems that support Python and PyTorch also support a Heat installation. Currently, this list contains at least Linux, MacOS and Windows. However, most of our development is done under Linux, and interoperability should therefore be optimal there.
-
-Prerequisites
--------------
-
-Python
-^^^^^^
-
-Heat requires Python 3.7 or greater. You can check your Python version by running:
-
-.. code:: bash
-
-    python3 --version
-
-If you do not have a recent installation on your system, you may want to upgrade it.
-
-`Ubuntu `_/`Debian `_/`Mint `_
-
-.. code:: bash
-
-    sudo apt-get update && sudo apt-get install python3
-
-`Fedora `_/`CentOS `_/`RHEL `_
-
-.. code:: bash
-
-    sudo dnf update python3
-
-If you have no administrator privileges on your system, for example because you are working on a cluster, make sure to check its *user guide*, the module system (``module spider python``), or get in touch with the administrators.
-
-Optional Dependencies
-^^^^^^^^^^^^^^^^^^^^^
-
-You can accelerate computations with Heat in different ways. For GPU acceleration, ensure that you have a `CUDA `_ installation on your system. Distributed computations require an MPI stack on your computer. We recommend `MVAPICH `_ or `OpenMPI `_. Finally, for parallel data I/O, Heat offers interfaces to `HDF5 `_ and `NetCDF `_. You can obtain these packages using your operating system's package manager.
-
-Installation
-------------
-
-Virtual Environments
-^^^^^^^^^^^^^^^^^^^^
-
-We highly recommend using `virtual environments (venv) `_ for managing your Python packages. A virtual environment is a self-contained directory tree for a particular Python version and its packages. It allows you not only to install packages without administrator privileges and to install `pip `_, Python's package manager, but also to keep multiple package environments with different package versions in parallel.
-
-You can find the complete manual for venv in the `Python documentation `_. Below is a small code snippet that creates a new virtual environment in your home directory (``~/.virtualenvs/heatenv``). The subsequent command enables the environment. You can access the Python interpreter by typing ``python`` and pip with ``pip``.
-
-.. code:: bash
-
-    python3 -m venv ~/.virtualenvs/heatenv
-    source ~/.virtualenvs/heatenv/bin/activate
-
-You can deactivate a virtual environment by executing:
-
-.. code:: bash
-
-    deactivate
-
-pip
-^^^
-
-Official Heat releases are made available on the `Python Package Index (PyPI) `_. You obtain the latest version by running:
-
-.. code:: bash
-
-    pip install heat
-
-Optionally, you can enable and install HDF5 and/or NetCDF support by adding the respective extra requirements as follows.
-
-.. code:: bash
-
-    pip install 'heat[hdf5,netcdf]'
-
-Verification
-^^^^^^^^^^^^
-
-To ensure that Heat was installed correctly, you can run this tiny code snippet that creates a vector with 10 entries.
-
-.. code:: bash
-
-    python -c "import heat as ht; print(ht.arange(10))"
-
-You should see the following output:
-
-.. code:: text
-
-    DNDarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=ht.int32, device=cpu:0, split=None)
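The installation check can also be extended to the distributed case. The following is a minimal sketch, assuming a working MPI stack and an MPI launcher such as ``mpirun``; the file name ``check_heat.py`` is just a placeholder. Save the snippet as a script and start it with, e.g., ``mpirun -n 4 python check_heat.py``.

.. code:: python

    import heat as ht

    # When launched under MPI, the vector is distributed along axis 0;
    # on a single process it simply holds all ten entries.
    x = ht.arange(10, split=0)
    print(f"rank {x.comm.rank}: local shape {x.lshape}, global shape {x.shape}")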
-Building From Source
---------------------
-
-For most users, a Heat installation from pip will be the simplest. However, if you want to test out the latest features or even want to contribute to Heat, you will need to build from source. First, clone our repository by running:
-
-.. code:: bash
-
-    git clone https://github.com/helmholtz-analytics/heat.git
-
-Afterwards, change to the cloned source code directory and run the setup scripts.
-
-.. code:: bash
-
-    $ cd heat
-    $ pip install -e '.[hdf5, netcdf]'
-
-Support Channels
-----------------
-
-We use `StackOverflow `_ as a forum for questions about Heat.
-If you do not find an answer to your question, then please ask a new question there and be sure to
-tag it with "pyheat".
diff --git a/doc/source/index.rst b/doc/source/index.rst
deleted file mode 100644
index 08b96be0f3..0000000000
--- a/doc/source/index.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. Heat documentation master file, created by
-   sphinx-quickstart on Wed Aug 29 09:02:49 2018.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Heat -- The Helmholtz Analytics Toolkit
-=======================================
-
-Release: |release|
-
-Heat is a distributed tensor framework for high performance data analytics.
-
-.. toctree::
-    :caption: Table of Contents
-    :maxdepth: 1
-
-    introduction
-    getting_started
-    tutorials/tutorials
-    case_studies
-    documentation_howto
-
-Also visit us on `GitHub `_ for more examples, docs, code and contributions.
-
-
-Indices
-=======
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/doc/source/tutorials/tutorial_30_minutes.rst b/doc/source/tutorials/tutorial_30_minutes.rst
deleted file mode 100644
index 82cfeacb2a..0000000000
--- a/doc/source/tutorials/tutorial_30_minutes.rst
+++ /dev/null
@@ -1,184 +0,0 @@
-Feel the Heat - A 30 Minutes Welcome
-====================================
-
-Heat is a flexible and seamless open-source software for high performance data analytics and machine learning in Python.
-
-Our main audience consists of scientists, engineers and AI enthusiasts with computational and numerical problems exceeding the boundaries of a laptop or workstation.
-
-Getting Started
----------------
-
-Make sure that you have Heat installed on your system. For a detailed description of your options, see our full :ref:`Getting Started ` section. For now, this should suffice:
-
-.. code:: bash
-
-    pip install heat
-
-DNDarrays
----------
-
-DNDarrays mimic NumPy's ndarray interface as closely as possible. On top of that, they can also be used to accelerate computations with GPUs or MPI in distributed cluster systems. Let's try it out:
-
-.. code:: python
-
-    import heat as ht
-
-The following creates a :math:`3\times 4` matrix with zeros.
-
-.. code:: python
-
-    ht.zeros((3,4,))
-
-Output:
-
-.. code:: text
-
-    DNDarray([[0., 0., 0., 0.],
-              [0., 0., 0., 0.],
-              [0., 0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
-
-
-Or a vector with the numbers from :math:`0` to :math:`9` in ascending order.
-
-.. code:: python
-
-    ht.arange(10)
-
-Output:
-
-.. code:: text
-
-    DNDarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=ht.int32, device=cpu:0, split=None)
-
-
-The following snippet creates a row vector with :math:`5` elements, each position filled with the value :math:`9` and the ``ht.int64`` data type.
-
-.. code:: python
-
-    ht.full((1, 5,), fill_value=9, dtype=ht.int64)
-
-Output:
-
-.. code:: text
-
-    DNDarray([[9, 9, 9, 9, 9]], dtype=ht.int64, device=cpu:0, split=None)
-
-
-Finally, let's load some user-defined data.
-
-.. note::
-
-    Heat takes care of automatically inferring the shape, i.e. the tensor dimensions, and data types from the user-provided input.
-
-.. code:: python
-
-    ht.array([[0, 1, 2], [0.1, 0.2, 3]])
-
-Output:
-
-.. code:: text
-
-    DNDarray([[0.0000, 1.0000, 2.0000],
-              [0.1000, 0.2000, 3.0000]], dtype=ht.float32, device=cpu:0, split=None)
-
-Operations
-----------
-
-Heat supports a wide range of mathematical operations, from simple element-wise functions and binary arithmetic over linear algebra to more powerful reductions. In the following example, we add two matrices of the same size.
-
-.. code:: python
-
-    ht.full((3, 4,), fill_value=9) + ht.ones((3, 4,))
-
-Output:
-
-.. code:: text
-
-    DNDarray([[10., 10., 10., 10.],
-              [10., 10., 10., 10.],
-              [10., 10., 10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
-
-Instead of operators, we can also use a functional approach.
-
-.. code:: python
-
-    ht.add(ht.full((3, 4,), fill_value=9), ht.ones((3, 4,)))
-
-Output:
-
-.. code:: text
-
-    DNDarray([[10., 10., 10., 10.],
-              [10., 10., 10., 10.],
-              [10., 10., 10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
-
-
-If there is no obvious operator for a function, you can also call a method on the ``DNDarray``.
-
-.. code:: python
-
-    ht.arange(5).sin()
-
-Output:
-
-.. code:: text
-
-    DNDarray([ 0.0000,  0.8415,  0.9093,  0.1411, -0.7568], dtype=ht.float32, device=cpu:0, split=None)
-
-Just like other numerical computation libraries, Heat supports broadcasting. It describes how two ``DNDarrays`` with different dimensions (also called shape) can still be combined in arithmetic operations, given certain constraints. For example, we can add a scalar to a matrix.
-
-.. code:: python
-
-    ht.zeros((3, 4,)) + 5.0
-
-Output:
-
-.. code:: text
-
-    DNDarray([[5., 5., 5., 5.],
-              [5., 5., 5., 5.],
-              [5., 5., 5., 5.]], dtype=ht.float32, device=cpu:0, split=None)
-
-The scalar has been element-wise repeated for every entry within the matrix. We can do the same with matrices and vectors as well.
-
-.. code:: python
-
-    ht.zeros((3, 4,)) + ht.arange(4)
-
-Output:
-
-.. code:: text
-
-    DNDarray([[0., 1., 2., 3.],
-              [0., 1., 2., 3.],
-              [0., 1., 2., 3.]], dtype=ht.float32, device=cpu:0, split=None)
-
-The vector has been repeated for every row of the left-hand side matrix. A full description of the broadcasting rules can be found in `NumPy's manual `_. Speaking of NumPy: Heat is designed as a seamless drop-in replacement for it. There still might be cases, e.g. when working with native Python code, in which you want to convert a ``DNDarray`` to an ``ndarray`` instead.
-
-.. code:: python
-
-    ht.arange(5).numpy()
-
-Output:
-
-.. code:: text
-
-    array([0, 1, 2, 3, 4], dtype=int32)
-
-And vice versa:
-
-.. code:: python
-
-    import numpy as np
-    ht.array(np.arange(5))
-
-Output:
-
-.. code:: text
-
-    DNDarray([0, 1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=None)
-
-.. seealso::
-    Read up on hundreds of other functions in our `API reference `_, or find out about them interactively by using the ``help()`` function in your Python interpreter.
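As a closing aside: all of the snippets above run just as well on distributed arrays. The following is a minimal sketch, assuming Heat was installed with a working MPI stack and the script is started with an MPI launcher such as ``mpirun -n 4 python example.py`` (the file name is a placeholder).

.. code:: python

    import heat as ht

    # split=0 distributes the rows across the participating MPI processes.
    a = ht.zeros((3, 4), split=0)
    b = a + ht.arange(4)  # broadcasting works on distributed arrays, too

    # The global shape is the same on every process; the local chunk differs.
    print(b.shape, b.split, b.lshape)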
:: - - import heat as ht - import matplotlib.pyplot as plt - - -Spherical Clouds of Datapoints ------------------------------- -For a simple demonstration of the clustering process and the differences between the algorithms, we will create an -artificial dataset, consisting of two circularly shaped clusters positioned at :math:`(x_1=2, y_1=2)` and :math:`(x_2=-2, y_2=-2)` in 2D space. -For each cluster we will sample 100 arbitrary points from a circle with radius of :math:`R = 1.0` by drawing random numbers -for the spherical coordinates :math:`( r\in [0,R], \phi \in [0,2\pi])`, translating these to cartesian coordinates -and shifting them by :math:`+2` for cluster ``c1`` and :math:`-2` for cluster ``c2``. The resulting concatenated dataset ``data`` has shape -:math:`(200, 2)` and is distributed among the ``p`` processes along axis 0 (sample axis) - -.. code:: python - - num_ele = 100 - R = 1.0 - - # Create default spherical point cloud - # Sample radius between 0 and 1, and phi between 0 and 2pi - r = ht.random.rand(num_ele, split=0) * R - phi = ht.random.rand(num_ele, split=0) * 2 * ht.constants.PI - - # Transform spherical coordinates to cartesian coordinates - x = r * ht.cos(phi) - y = r * ht.sin(phi) - - - # Stack the sampled points and shift them to locations (2,2) and (-2, -2) - cluster1 = ht.stack((x + 2, y + 2), axis=1) - cluster2 = ht.stack((x - 2, y - 2), axis=1) - - data = ht.concatenate((cluster1, cluster2), axis=0) - -Let's plot the data for illustration. In order to do so with matplotlib, we need to unsplit the data (gather it from -all processes) and transform it into a numpy array. Plotting can only be done on rank 0. - -.. code:: python - - data_np = ht.resplit(data, axis=None).numpy() - if ht.MPI_WORLD.rank == 0: - plt.plot(data_np[:,0], data_np[:,1], 'bo') - plt.show() - -This will render something like - -.. image:: ../_static/images/data.png - -Now we perform the clustering analysis with kmeans. We chose 'kmeans++' as an intelligent way of sampling the -initial centroids. - -.. code:: python - - kmeans = ht.cluster.KMeans(n_clusters=2, init="kmeans++") - labels = kmeans.fit_predict(data).squeeze() - centroids = kmeans.cluster_centers_ - - # Select points assigned to clusters c1 and c2 - c1 = data[ht.where(labels == 0), :] - c2 = data[ht.where(labels == 1), :] - # After slicing, the arrays are not distributed equally among the processes anymore; we need to balance - c1.balance_() - c2.balance_() - - print(f"Number of points assigned to c1: {c1.shape[0]} \n" - f"Number of points assigned to c2: {c2.shape[0]} \n" - f"Centroids = {centroids}") - -.. code:: text - - Number of points assigned to c1: 100 - Number of points assigned to c2: 100 - Centroids = DNDarray([[ 2.0169, 2.0713], - [-1.9831, -1.9287]], dtype=ht.float32, device=cpu:0, split=None) - -Let's plot the assigned clusters and the respective centroids: - -.. code:: python - - c1_np = c1.numpy() - c2_np = c2.numpy() - - if ht.MPI_WORLD.rank == 0: - plt.plot(c1_np[:,0], c1_np[:,1], 'x', color='#f0781e') - plt.plot(c2_np[:,0], c2_np[:,1], 'x', color='#5a696e') - plt.plot(centroids[0,0],centroids[0,1], '^', markersize=10, markeredgecolor='black', color='#f0781e' ) - plt.plot(centroids[1,0],centroids[1,1], '^', markersize=10, markeredgecolor='black',color='#5a696e') - plt.show() - -.. image:: ../_static/images/clustering.png - -We can also cluster the data with kmedians. The respective advanced initial centroid sampling is called 'kmedians++'. - -.. 
code:: python - - kmedians = ht.cluster.KMedians(n_clusters=2, init="kmedians++") - labels = kmedians.fit_predict(data).squeeze() - centroids = kmedians.cluster_centers_ - - # Select points assigned to clusters c1 and c2 - c1 = data[ht.where(labels == 0), :] - c2 = data[ht.where(labels == 1), :] - # After slicing, the arrays are not distributed equally among the processes anymore; we need to balance - c1.balance_() - c2.balance_() - - print(f"Number of points assigned to c1: {c1.shape[0]} \n" - f"Number of points assigned to c2: {c2.shape[0]} \n" - f"Centroids = {centroids}") - -Plotting the assigned clusters and the respective centroids: - -.. code:: python - - c1_np = c1.numpy() - c2_np = c2.numpy() - if ht.MPI_WORLD.rank == 0: - plt.plot(c1_np[:,0], c1_np[:,1], 'x', color='#f0781e') - plt.plot(c2_np[:,0], c2_np[:,1], 'x', color='#5a696e') - plt.plot(centroids[0,0],centroids[0,1], '^', markersize=10, markeredgecolor='black', color='#f0781e' ) - plt.plot(centroids[1,0],centroids[1,1], '^', markersize=10, markeredgecolor='black',color='#5a696e') - plt.show() - -.. image:: ../_static/images/clustering_kmeans.png - -The Iris Dataset ------------------------------- -The _iris_ dataset is a well known example for clustering analysis. It contains 4 measured features for samples from -three different types of iris flowers. A subset of 150 samples is included in formats h5, csv and netcdf in Heat, -located under 'heat/heat/datasets', and can be loaded in a distributed manner with Heat's parallel -dataloader - -.. code:: python - - iris = ht.load("heat/datasets/iris.csv", sep=";", split=0) - - -Fitting the dataset with kmeans: - -.. code:: python - - k = 3 - kmeans = ht.cluster.KMeans(n_clusters=k, init="kmeans++") - kmeans.fit(iris) - -Let's see what the results are. In theory, there are 50 samples of each of the 3 iris types - -.. code:: python - - labels = kmeans.predict(iris).squeeze() - - # Select points assigned to clusters c1 and c2 - c1 = iris[ht.where(labels == 0), :] - c2 = iris[ht.where(labels == 1), :] - c3 = iris[ht.where(labels == 2), :] - # After slicing, the arrays are not distributed equally among the processes anymore; we need to balance - c1.balance_() - c2.balance_() - c3.balance_() - - print(f"Number of points assigned to c1: {c1.shape[0]} \n" - f"Number of points assigned to c2: {c2.shape[0]} \n" - f"Number of points assigned to c3: {c3.shape[0]}") diff --git a/doc/source/tutorials/tutorial_notebook_gallery.rst b/doc/source/tutorials/tutorial_notebook_gallery.rst deleted file mode 100644 index 67c67ab40b..0000000000 --- a/doc/source/tutorials/tutorial_notebook_gallery.rst +++ /dev/null @@ -1,25 +0,0 @@ -Notebook gallery -================ - -Setup notebooks -~~~~~~~~~~~~~~~ - -Example notebooks explaining how to setup an MPI enabled notebook to work with heat in an interactive way. - -.. nbgallery:: - notebooks/0_setup/0_setup_local - notebooks/0_setup/0_setup_jsc - notebooks/0_setup/0_setup_haicore - -Example notebooks -~~~~~~~~~~~~~~~~~ - -This notebooks contain heat examples that have been used in interactive tutorials. - -.. 
nbgallery:: - notebooks/1_basics - notebooks/2_internals - notebooks/3_loading_preprocessing - notebooks/4_matrix_factorizations - notebooks/5_clustering - notebooks/6_profiling diff --git a/doc/source/tutorials/tutorial_parallel_computation.rst b/doc/source/tutorials/tutorial_parallel_computation.rst deleted file mode 100644 index 2a10777726..0000000000 --- a/doc/source/tutorials/tutorial_parallel_computation.rst +++ /dev/null @@ -1,253 +0,0 @@ -Parallel Computation -==================== - -One of Heat's strength lies in its ability to heavily parallelize computations. By default, Heat utilizes all available cores in your processor via OpenMP multi-threading. However, it is also possible to further speed-up processing with GPU acceleration or by scale-out in a cluster system using MPI. In the following tutorial you will learn how to exploit Heat's parallel processing features. - -GPU Acceleration ----------------- - -.. warning:: - Please make sure that you have an NVidia GPU in your system for the following code snippets. - -GPUs can accelerate applications running on the CPU by offloading some or all of the time consuming portions of the code. This is particularly for highly compute-intensive programs with low to medium memory consumption. From a user's perspective, the application runs faster because it's using the massively parallel processing power of the GPU to boost performance. Heat allows you to allocate ``DNDarrays`` on the GPU as follows. - -.. code:: python - - ht.arange(5, device="gpu") - -Output: - -.. code:: text - - DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cuda:0, split=None) - -As you can see the device information has changed from ``cpu:0`` to ``cuda:0``, denoting that the ``DNDarray``'s data is now residing in GPU memory of the `CUDA `_ compute backend. - -We can move data back and forth the CPU and GPU by calling the respective function on the ``DNDarray``. - -.. code:: python - - a = ht.arange(5, device="gpu") - a.cpu() - -Output: - -.. code:: text - - DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None) - -Manually, allocating or moving all ``DNDarrays`` between CPU and GPU can quickly become tedious. Hence, Heat allows you to set a default device for all computations. - -.. code:: python - - ht.use_device('gpu') - ht.zeros((3, 4,)) - -Output: - -.. code:: text - - DNDarray([[0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.]], dtype=ht.float32, device=cuda:0, split=None) - -Finally, operations on ``DNDarrays`` on GPUs can be performed as we are used to from its CPU counterpart. - -.. code:: python - - ht.use_device('gpu') - ht.full((3, 4,), fill_value=9) + ht.ones((3, 4,)) - -Output: - -.. code:: text - - DNDarray([[10., 10., 10., 10.], - [10., 10., 10., 10.], - [10., 10., 10., 10.]], dtype=ht.float32, device=cuda:0, split=None) - -Distributed Computing ---------------------- - -.. warning:: - For the following code examples, make sure you have `MPI `_ installed. - -With Heat you can even compute in distributed memory environments with multiple computation nodes, like modern high-performance cluster systems. For this, Heat makes use of the fact that operations performed on multi-dimensional arrays tend to be identical for all data items. Hence, they can be processed in data-parallel manner. Heat partitions the total number of data items equally among all processing nodes. A ``DNDarray`` assumes the role of a virtual overlay over these node-local data portions and manages them for you while offering the same interface. 
Consequently, operations can now be executed in parallel. Each processing node applies them locally to their own data chunk. If necessary, partial results are communicated and automatically combined behind the scenes for correct global results. - -.. image:: ../_static/images/split_array.svg - :align: center - :width: 80% - -Data chunking in Heat is always done along a singular axis, i.e. a one-dimensional domain decomposition. You can specify this axis by using the ``split`` parameter in operations and ``DNDarray`` creation functions. The picture above shows the result of setting different ``split`` axis on a three-dimensional volume and three processing nodes called :math:`p_0, p_1` and :math:`p_2`. A Heat ``DNDarray`` without any split, i.e. ``split=None`` (default), results in redundant copy on each computation node. - -.. note:: - In the following example we assume three execution nodes. We distinguish between them in the output by showing them as ``[node/total nodes]``. - -.. note:: - - If you're running the following examples in a distributed computation environment, please modify your program invocation from ``python ./my_script.py`` to ``mpirun -p python ./my_script.py``. By the way, invoking a Heat program like this on your laptop or workstation also works. - -.. note :: - Most of the examples throughout the documentation display the split ``DNDarrays``. This is not the default behavior when displaying a ``DNDarray``, and the full array will be printed multiple times (based on the number of processes). This behaviour can be changed using the ``ht.local_printing()`` option or by printing the local arrays for each process with the property `larray `_. - -.. code:: python - - ht.arange(5, split=0) - -Output: - -.. code:: text - - DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=0) - - [0/3] DNDarray([0, 1], dtype=ht.int32, device=cpu:0, split=0) - [1/3] DNDarray([2, 3], dtype=ht.int32, device=cpu:0, split=0) - [2/3] DNDarray([4], dtype=ht.int32, device=cpu:0, split=0) - -This can also be done along other axes for arrays with larger dimensions. - -.. code:: python - - ht.zeros((3, 4,), split=1) - -Output: - -.. code:: text - - DNDarray([[0., 0., 0., 0.], - [0., 0., 0., 0.], - [0., 0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None) - - - [0/3] DNDarray([[0., 0.], - [0., 0.], - [0., 0.]], dtype=ht.int32, device=cpu:0, split=0) - [1/3] DNDarray([[0.], - [0.], - [0.]], dtype=ht.int32, device=cpu:0, split=0) - [2/3] DNDarray([[0.], - [0.], - [0.]], dtype=ht.int32, device=cpu:0, split=0) - -As previously explained, specifying no ``split`` axis or setting it explicitly to ``None`` results in a redundant copy on each node. - -.. code:: python - - ht.arange(5, split=None) - -Output: - -.. code:: text - - DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None) - - - [0/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None) - [1/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None) - [2/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None) - -You may also modify the data partitioning of a Heat array by using the ``resplit()`` function. This allows you to repartition the data as you so choose. Please note, that this should be used sparingly and for small data amounts only, as it entails data communication over network. - -.. code:: python - - a = ht.arange(5, split=None) - a.resplit(0) - -Output: - -.. 
code:: text - - DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=0) - - [0/3] DNDarray([0, 1], dtype=ht.int32, device=cpu:0, split=0) - [1/3] DNDarray([2, 3], dtype=ht.int32, device=cpu:0, split=0) - [2/3] DNDarray([4], dtype=ht.int32, device=cpu:0, split=0) - -The result of an operation on a Heat tensor will in most cases preserve the split of the respective operands. However, in some cases the split axis might change. For example, a transpose of a Heat ``DNDarray`` will equally transpose the split axis. Furthermore, a reduction operations, e.g. `sum()` that is performed across the split axis, might remove data partitions entirely. - -.. code:: python - - a = ht.ones((3, 4,), split=0) - a.sum() - -Output: - -.. code:: text - - DNDarray(12., dtype=ht.float32, device=cpu:0, split=None) - -The previously ``split=0`` matrix is ``split=None`` after the reduction operation. Obviously, we can also perform operations between (differently) split ``DNDarrays``. - -.. code:: python - - a = ht.ones((3, 4,), split=1) - b = ht.arange(4) - - a + b - -Output: - -.. code:: text - - DNDarray([[1., 2., 3., 4.], - [1., 2., 3., 4.], - [1., 2., 3., 4.]], dtype=ht.float32, device=cpu:0, split=1) - - [0/3] DNDarray([1., 2., 3., 4.], dtype=ht.int32, device=cpu:0, split=None) - [1/3] DNDarray([1., 2., 3., 4.], dtype=ht.int32, device=cpu:0, split=None) - [2/3] DNDarray([1., 2., 3., 4.], dtype=ht.int32, device=cpu:0, split=None) - -Technical Details -^^^^^^^^^^^^^^^^^ - -On a technical level, Heat is inspired by the so-called `Bulk Synchronous Parallel (BSP) `_ processing model. Computations proceed in a series of hierarchical supersteps, each consisting of a number of node-local computations and subsequent communications. In contrast to the classical BSP model, communicated data is available immediately, rather than after the next global synchronization. In Heat, global synchronization only occurs for collective MPI calls as well as at the program start and termination. - -.. image:: ../_static/images/bsp.svg - :align: center - :width: 60% - -Distributed Interactive Interpreter -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Heat ships with a distributed interactive Python interpreter that allows you to prototype and debug distributed applications. It can be found in the Heat sources in the path `scripts/interactive.py` or you just grab it directly - -.. code:: bash - - wget https://raw.githubusercontent.com/helmholtz-analytics/heat/master/scripts/interactive.py - -You can start the distributed interactive interpreter by invoking the following command. - -.. code:: bash - - mpirun -s all -np python interactive.py - -.. note:: - - The interactive interpreter does only support a subset of all control commands. - - -Parallel Performance --------------------- - -When working with parallel and distributed computation in Heat there are some best practices for you to know about. The following list covers the major ones. - -Dos -^^^ - -* Use the high-level Heat API - * computational kernels are optimized - * Python constructs (e.g. loops) may be slow -* Split large data amounts - * often this along the 'observations/samples/time' dimension - * large intermediate matrices -* Have redundant copies (``split=None``) of small, frequently accessed matrices - -Dont's -^^^^^^ - -* Avoid extensive data copying, e.g. 
- * operations with operands of different splits (except ``None``) - * reshape() that actually change the array dimensions (adding extra dimensions with size 1 is fine) -* Overly use the GPU - * computation-intensive operations are usually a good fit - * operations extensively accessing memory only (e.g. sorting) are not diff --git a/doc/source/tutorials/tutorials.rst b/doc/source/tutorials/tutorials.rst deleted file mode 100644 index 59b68fb2bf..0000000000 --- a/doc/source/tutorials/tutorials.rst +++ /dev/null @@ -1,86 +0,0 @@ -Heat Tutorials -============== - -.. toctree:: - :hidden: - - tutorial_30_minutes - tutorial_parallel_computation - tutorial_clustering - tutorial_notebook_gallery - - -.. container:: tutorial - - .. container:: tutorial-image - - .. image:: ../_static/images/tutorial_logo.svg - :target: tutorial_30_minutes.html - - .. raw:: html - - -
-          Feel the Heat — A 30 Minutes Welcome
-          Understand the fundamentals of the Heat framework and vectorized array computing.
-          Excellent for beginners.
-
-.. container:: tutorial
-
-    .. container:: tutorial-image
-
-        .. image:: ../_static/images/tutorial_split_dndarray.svg
-            :target: tutorial_parallel_computation.html
-
-    .. raw:: html
-
-          Turn up the Heat - Parallel Computing
-          Speed up Heat even further with GPU acceleration or distributed MPI computation.
-          Well-suited for beginners.
-
-.. container:: tutorial
-
-    .. container:: tutorial-image
-
-        .. image:: ../_static/images/tutorial_clustering.svg
-            :target: tutorial_clustering.html
-
-    .. raw:: html
-
-          Keep the Heat - Cluster Analysis
-          Automatically identify clusters in your data by employing unsupervised clustering methods.
-          For intermediate analysts.
-
-.. container:: tutorial
-
-    .. container:: tutorial-image
-
-        .. image:: ../_static/images/jupyter.png
-            :target: tutorial_notebook_gallery.html
-
-    .. raw:: html
-
-          Example notebooks
-          Ideal for people who like using Jupyter notebooks or other interactive environments.
-          Excellent for beginners.
-
diff --git a/doc/styles/extra.css b/doc/styles/extra.css new file mode 100644 index 0000000000..e0884522e9 --- /dev/null +++ b/doc/styles/extra.css @@ -0,0 +1,350 @@ +.tut-icon { + width: 120px; + display: inline-block; + vertical-align: middle; + margin-right: 0.5rem; +} + +.cases { + display: grid; + grid-template-columns: 1fr; + gap: 40px; +} + +.case { + display: grid; + grid-template-columns: 96px 1fr; + gap: 40px; + align-items: center; + padding: 30px; + border: 1px solid var(--md-default-fg-color--lightest, #e5e7eb); + border-radius: 12px; + background: var(--md-default-bg-color, #fff); + box-shadow: 0 1px 2px rgb(0 0 0 / 6%); + transition: box-shadow 0.2s ease, transform 0.1s ease; +} + +.case:hover { + box-shadow: 0 6px 18px rgb(0 0 0 / 12%); + transform: translateY(-1px); +} + +.case-logo { + width: 96px; + height: 96px; + object-fit: contain; + filter: saturate(0.9) contrast(1.05); +} + +.case-title { + color: var(--md-default-fg-color--light); + font-size: 1.5em; + line-height: 1.3; + margin: 0 0 1.25em; + font-weight: 300; + letter-spacing: -0.01em; +} + +.case-body p { + margin: 0; + color: var(--md-default-fg-color--light, #6b7280); +} + +@media (max-width: 680px) { + .case { + grid-template-columns: 64px 1fr; + } + .case-logo { + width: 64px; + height: 64px; + } +} + +.md-content { + max-width: 1080px; +} + +.ht-cards { + display: grid; + grid-template-columns: repeat(2, minmax(320px, 1fr)); + gap: 40px; +} + +.ht-card { + border: 1px solid + color-mix( + in oklab, + var(--md-default-fg-color--lightest, #e5e7eb) 80%, + transparent + ); + border-radius: 14px; + background: var(--md-default-bg-color, #fff); + box-shadow: 0 2px 10px rgb(0 0 0 / 6%); + transition: box-shadow 0.2s ease, transform 0.12s ease, border-color 0.2s ease; +} + +.ht-card:hover, +.ht-card:has(.ht-card-link:focus-visible) { + box-shadow: 0 10px 24px rgb(0 0 0 / 12%); + transform: translateY(-2px); + border-color: color-mix( + in oklab, + var(--md-primary-fg-color, #3f51b5) 35%, + transparent + ); +} + +.ht-card-link { + display: flex; + flex-direction: column; + gap: 10px; + padding: 18px 20px; + text-decoration: none; + color: inherit; + border-radius: 14px; + outline: none; + height: 100%; + min-height: 220px; +} + +.ht-card-head { + display: flex; + gap: 16px; + align-items: center; + min-height: 76px; +} + +.ht-head-left { + flex: 1 1 33%; + display: flex; + align-items: center; + justify-content: center; +} +.ht-head-right { + flex: 1 1 67%; + display: flex; + align-items: center; +} + +.ht-head-logo { + width: 72px; + height: 72px; + object-fit: contain; + border-radius: 10px; + padding: 11px; + box-shadow: inset 0 0 0 1px rgb(0 0 0 / 5%); +} +.ht-head-title { + color: var(--md-default-fg-color--light); + font-size: 1.25em; + line-height: 1.3; + font-weight: 300; + letter-spacing: -0.01em; +} + +.ht-card-desc { + color: var(--md-default-fg-color--light, #6b7280); + line-height: 1.45; +} +.ht-card-mini { + margin-top: auto; + color: var(--md-default-fg-color--lighter, #9ca3af); + font-size: 15px; + font-style: italic; +} + +@media (max-width: 720px) { + .ht-cards { + grid-template-columns: 1fr; + gap: 16px; + } + .ht-card-link { + padding: 16px; + gap: 8px; + } + .ht-head-logo { + width: 64px; + height: 64px; + } + .ht-card-head { + min-height: 68px; + } + .ht-head-title { + font-size: 1.02rem; + } +} + +.centered { + display: block; + margin-left: auto; + margin-right: auto; + text-align: center; +} + +.hg-grid { + display: grid; + grid-template-columns: repeat(2, minmax(320px, 1fr)); + gap: 22px; 
+} + +.hg-card { + border: 1px solid + color-mix( + in oklab, + var(--md-default-fg-color--lightest, #e5e7eb) 80%, + transparent + ); + border-radius: 16px; + background: var(--md-default-bg-color, #fff); + box-shadow: 0 2px 10px rgb(0 0 0 / 6%); + transition: box-shadow 0.2s ease, transform 0.12s ease, border-color 0.2s ease; +} +.hg-card:hover, +.hg-card:has(.hg-link:focus-visible) { + box-shadow: 0 12px 26px rgb(0 0 0 / 12%); + transform: translateY(-2px); + border-color: color-mix( + in oklab, + var(--md-primary-fg-color, #3f51b5) 35%, + transparent + ); +} + +.hg-link { + display: flex; + flex-direction: column; + align-items: center; + gap: 12px; + padding: 22px 24px; + text-decoration: none; + color: inherit; + border-radius: 16px; + min-height: 180px; +} + +.hg-icon { + width: 6rem !important; + height: 6rem !important; + object-fit: contain; + display: block; +} + +.hg-title { + color: var(--md-default-fg-color--light); + font-size: 1.2em; + line-height: 1.3; + margin: 10px; + font-weight: 300; + letter-spacing: -0.01em; + text-align: center; +} + +@media (max-width: 720px) { + .hg-grid { + grid-template-columns: 1fr; + gap: 16px; + } + .hg-link { + padding: 18px; + gap: 10px; + min-height: 160px; + } + .hg-icon { + width: 80px; + height: 80px; + } +} + +.md-typeset .orangemaker { + color: rgb(240, 120, 30); + font-weight: 600; +} +.md-typeset .greymarker { + color: rgb(90, 105, 110); + font-weight: 600; +} +.md-typeset .bluemarker { + color: rgb(0, 90, 160); + font-weight: 600; +} +pre > code, +.md-content__inner pre code { + background: linear-gradient(135deg, #fdfdff 0%, #f8f9fa 50%, #f1f3f4 100%) !important; + border: 1px solid color-mix(in oklab, var(--md-default-fg-color--lightest) 85%, transparent) !important; + border-left: 5px solid var(--md-primary-fg-color) !important; + border-radius: 12px !important; + padding: 1.6rem !important; + margin: 2rem 0 !important; + box-shadow: + 0 4px 20px rgb(0 0 0 / 8%), + 0 0 0 1px rgb(255 255 255 / 80%) inset !important; + font-size: 0.98em !important; + line-height: 1.6 !important; + display: block !important; + max-width: 100% !important; + overflow-x: auto !important; + word-break: break-word !important; + white-space: pre-wrap !important; + box-sizing: border-box !important; + scrollbar-gutter: stable !important; +} + +pre > code::-webkit-scrollbar { height: 6px !important; } +pre > code::-webkit-scrollbar-track { + background: color-mix(in oklab, var(--md-default-bg-color) 90%, transparent) !important; + border-radius: 3px !important; +} +pre > code::-webkit-scrollbar-thumb { + background: color-mix(in oklab, var(--md-primary-fg-color) 40%, transparent) !important; + border-radius: 3px !important; +} +pre > code::-webkit-scrollbar-thumb:hover { + background: color-mix(in oklab, var(--md-primary-fg-color) 60%, transparent) !important; +} + + +[href*="/api/"] pre > code, +.api-page pre > code { + border-left-width: 6px !important; + box-shadow: + 0 8px 32px color-mix(in oklab, var(--md-primary-fg-color) 12%, transparent), + 0 0 0 1px rgb(255 255 255 / 90%) inset !important; +} + +.field-list { + background: linear-gradient(145deg, #fafbfc 0%, #f8fafc 100%) !important; + padding: 2rem !important; + border-radius: 16px !important; + box-shadow: + 0 8px 32px rgb(0 0 0 / 6%), + 0 1px 4px rgb(0 0 0 / 4%) inset !important; + margin: 2.5rem 0 !important; + border-left: 5px solid color-mix(in oklab, var(--md-default-fg-color--lighter) 70%, transparent) !important; +} + +.field-list dt { + background: linear-gradient(135deg, #e3f2fd 0%, 
#f0f7ff 100%) !important; + color: var(--md-primary-fg-color) !important; + padding: 1.1rem 1.4rem !important; + font-weight: 600 !important; + font-size: 0.97em !important; + border-radius: 10px !important; + margin: 0 0 1.2rem 0 !important; + letter-spacing: -0.01em !important; +} + +.field-list dd { + padding: 1.2rem 1.4rem !important; + border-left: 4px solid color-mix(in oklab, var(--md-primary-fg-color) 25%, transparent) !important; + margin-left: 0 !important; + background: rgba(255 255 255 / 70%) !important; + border-radius: 10px !important; + backdrop-filter: blur(10px) !important; +} + +.field-list dt:hover { + background: linear-gradient(135deg, var(--md-primary-fg-color--light-blue) 0%, #bbdefb 100%) !important; + transform: translateX(4px) scale(1.015) !important; + box-shadow: 0 6px 20px color-mix(in oklab, var(--md-primary-fg-color) 20%, transparent) !important; + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important; +} diff --git a/doc/source/tutorials/notebooks/0_setup/0_setup_conda.sh b/doc/tutorials/notebooks/0_setup/0_setup_conda.sh similarity index 100% rename from doc/source/tutorials/notebooks/0_setup/0_setup_conda.sh rename to doc/tutorials/notebooks/0_setup/0_setup_conda.sh diff --git a/doc/source/tutorials/notebooks/0_setup/0_setup_haicore.ipynb b/doc/tutorials/notebooks/0_setup/0_setup_haicore.ipynb similarity index 98% rename from doc/source/tutorials/notebooks/0_setup/0_setup_haicore.ipynb rename to doc/tutorials/notebooks/0_setup/0_setup_haicore.ipynb index 6e4662a701..95b27b9f1c 100644 --- a/doc/source/tutorials/notebooks/0_setup/0_setup_haicore.ipynb +++ b/doc/tutorials/notebooks/0_setup/0_setup_haicore.ipynb @@ -36,20 +36,17 @@ "tags": [] }, "source": [ - "\n", - "\n", - "\n", "## Introduction\n", "---\n", "
\n", "Note:\n", "This notebook expects that you will be working on the JupyterLab hosted in HAICORE, at the Karlsruhe Institute of Technology.\n", "\n", - "If you want to run the tutorial on your local machine, or on another systems, please refer to the local setup notebook in this repository for reference, or to our notebook gallery for more examples.\n", + "If you want to run the tutorial on your local machine, or on another systems, please refer to the local setup notebook in this repository for reference, or to our notebook gallery for more examples.\n", "
\n", "\n", - "
\n", - " \n", + "
\n", + " \n", "
\n", "\n", "\n", diff --git a/doc/source/tutorials/notebooks/0_setup/0_setup_jsc.ipynb b/doc/tutorials/notebooks/0_setup/0_setup_jsc.ipynb similarity index 92% rename from doc/source/tutorials/notebooks/0_setup/0_setup_jsc.ipynb rename to doc/tutorials/notebooks/0_setup/0_setup_jsc.ipynb index ee00ae6115..03cf3224c5 100644 --- a/doc/source/tutorials/notebooks/0_setup/0_setup_jsc.ipynb +++ b/doc/tutorials/notebooks/0_setup/0_setup_jsc.ipynb @@ -21,7 +21,7 @@ "metadata": {}, "source": [ "\n", - "
\n", + "
\n", " \n", "
\n", "\n", @@ -189,9 +189,9 @@ "metadata": {}, "source": [ "
\n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", "
" ] }, diff --git a/doc/source/tutorials/notebooks/0_setup/0_setup_local.ipynb b/doc/tutorials/notebooks/0_setup/0_setup_local.ipynb similarity index 100% rename from doc/source/tutorials/notebooks/0_setup/0_setup_local.ipynb rename to doc/tutorials/notebooks/0_setup/0_setup_local.ipynb diff --git a/doc/source/tutorials/notebooks/0_setup/0_setup_pip.sh b/doc/tutorials/notebooks/0_setup/0_setup_pip.sh similarity index 100% rename from doc/source/tutorials/notebooks/0_setup/0_setup_pip.sh rename to doc/tutorials/notebooks/0_setup/0_setup_pip.sh diff --git a/doc/source/tutorials/notebooks/1_basics.ipynb b/doc/tutorials/notebooks/1_basics.ipynb similarity index 99% rename from doc/source/tutorials/notebooks/1_basics.ipynb rename to doc/tutorials/notebooks/1_basics.ipynb index 73d3c48b84..d9baa88786 100644 --- a/doc/source/tutorials/notebooks/1_basics.ipynb +++ b/doc/tutorials/notebooks/1_basics.ipynb @@ -56,7 +56,7 @@ "source": [ "## Connecting to ipyparallel cluster\n", "\n", - "We have started an `ipcluster` with 4 engines at the end of the [Setup notebook](0_setup/0_setup_local.ipynb).\n", + "We have started an `ipcluster` with 4 engines at the end of the [Setup notebook](../0_setup/0_setup_local/).\n", "\n", "Let's start the interactive session with a look into the `heat` data object. But first, we need to import the `ipyparallel` client." ] @@ -3137,7 +3137,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In the [next notebook](2_internals.ipynb), let's have a look at Heat's most important internal functions." + "In the [next notebook](../2_internals/), let's have a look at Heat's most important internal functions." ] } ], diff --git a/doc/source/tutorials/notebooks/2_internals.ipynb b/doc/tutorials/notebooks/2_internals.ipynb similarity index 99% rename from doc/source/tutorials/notebooks/2_internals.ipynb rename to doc/tutorials/notebooks/2_internals.ipynb index 27f823ba78..2a5a480f66 100644 --- a/doc/source/tutorials/notebooks/2_internals.ipynb +++ b/doc/tutorials/notebooks/2_internals.ipynb @@ -44,7 +44,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If no engines are found, go back to the [Intro](0_setup/0_setup_local.ipynb) for instructions." + "If no engines are found, go back to the [Intro](../0_setup/0_setup_local/) for instructions." 
] }, { @@ -1387,9 +1387,9 @@ "source": [ "In the next notebooks, we'll show you how we use Heat's distributed-array infrastructure to scale complex data analysis workflows to large datasets and high-performance computing resources.\n", "\n", - "- [Data loading and preprocessing](3_loading_preprocessing.ipynb)\n", - "- [Matrix factorization algorithms](4_matrix_factorizations.ipynb)\n", - "- [Clustering algorithms](5_clustering.ipynb)" + "- [Data loading and preprocessing](../3_loading_preprocessing/)\n", + "- [Matrix factorization algorithms](../4_matrix_factorizations/)\n", + "- [Clustering algorithms](../5_clustering/)" ] } ], diff --git a/doc/source/tutorials/notebooks/3_loading_preprocessing.ipynb b/doc/tutorials/notebooks/3_loading_preprocessing.ipynb similarity index 99% rename from doc/source/tutorials/notebooks/3_loading_preprocessing.ipynb rename to doc/tutorials/notebooks/3_loading_preprocessing.ipynb index 9db5a38216..d8ac0424c9 100644 --- a/doc/source/tutorials/notebooks/3_loading_preprocessing.ipynb +++ b/doc/tutorials/notebooks/3_loading_preprocessing.ipynb @@ -41,7 +41,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Your ipcluster should still be running (see the [Intro](0_setup/0_setup_local.ipynb)). Let's test it:" + "Your ipcluster should still be running (see the [Intro](../0_setup/0_setup_local/)). Let's test it:" ] }, { diff --git a/doc/source/tutorials/notebooks/4_matrix_factorizations.ipynb b/doc/tutorials/notebooks/4_matrix_factorizations.ipynb similarity index 96% rename from doc/source/tutorials/notebooks/4_matrix_factorizations.ipynb rename to doc/tutorials/notebooks/4_matrix_factorizations.ipynb index 3a862220e4..d17f07aed8 100644 --- a/doc/source/tutorials/notebooks/4_matrix_factorizations.ipynb +++ b/doc/tutorials/notebooks/4_matrix_factorizations.ipynb @@ -404,9 +404,9 @@ "metadata": {}, "source": [ "
\n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", "
\n" ] }, diff --git a/doc/source/tutorials/notebooks/5_clustering.ipynb b/doc/tutorials/notebooks/5_clustering.ipynb similarity index 99% rename from doc/source/tutorials/notebooks/5_clustering.ipynb rename to doc/tutorials/notebooks/5_clustering.ipynb index 2a603bad6d..1e97d3c079 100644 --- a/doc/source/tutorials/notebooks/5_clustering.ipynb +++ b/doc/tutorials/notebooks/5_clustering.ipynb @@ -7,7 +7,7 @@ "Cluster Analysis\n", "================\n", "\n", - "This tutorial is an interactive version of our static [clustering tutorial on ReadTheDocs](https://heat.readthedocs.io/en/stable/tutorial_clustering.html). \n", + "This tutorial is an interactive version of our static [clustering tutorial on ReadTheDocs](../../tutorial_clustering/). \n", "\n", "We will demonstrate memory-distributed analysis with k-means and k-medians from the ``heat.cluster`` module. As usual, we will run the analysis on a small dataset for demonstration. We need to have an `ipcluster` running to distribute the computation.\n", "\n", @@ -463,7 +463,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Feel free to try out the other [loading options](https://heat.readthedocs.io/en/stable/autoapi/heat/core/io/index.html#heat.core.io.load) as well.\n", + "Feel free to try out the other [loading options](/api/heat/core/io/) as well.\n", "\n", "Fitting the dataset with `kmeans`:" ] diff --git a/doc/source/tutorials/notebooks/6_profiling.ipynb b/doc/tutorials/notebooks/6_profiling.ipynb similarity index 100% rename from doc/source/tutorials/notebooks/6_profiling.ipynb rename to doc/tutorials/notebooks/6_profiling.ipynb diff --git a/doc/tutorials/tutorial.md b/doc/tutorials/tutorial.md new file mode 100644 index 0000000000..88eb01bde0 --- /dev/null +++ b/doc/tutorials/tutorial.md @@ -0,0 +1,66 @@ +# Heat Tutorials + +Below are quick links to the heat tutorials. Click a card to open the page.​ + +
+ *(cards linking to the tutorial pages and the notebook gallery; card markup lost in extraction)*
diff --git a/doc/tutorials/tutorial_30_minutes.md b/doc/tutorials/tutorial_30_minutes.md
new file mode 100644
index 0000000000..315201ef13
--- /dev/null
+++ b/doc/tutorials/tutorial_30_minutes.md
@@ -0,0 +1,175 @@
+# Feel the Heat - A 30 Minutes Welcome
+
+Heat is flexible, seamless open-source software for high-performance data analytics and machine learning in Python.
+
+Our main audience is scientists, engineers, and AI enthusiasts with computational and numerical problems exceeding the boundaries of a laptop or workstation.
+
+## Getting Started
+
+Make sure that you have Heat installed on your system. For a detailed description of your options, see our full {ref}`Getting Started <getting_started>` section. For now, this should suffice:
+
+```bash
+pip install heat
+```
+
+## DNDarrays
+
+DNDarrays mimic NumPy's ndarray interface as closely as possible. On top of that, they can also be used to accelerate computations with GPUs or MPI in distributed cluster systems. Let's try it out:
+
+```python
+import heat as ht
+```
+
+The following creates a $3\times 4$ matrix with zeros.
+
+```python
+ht.zeros((3,4,))
+```
+
+Output:
+
+```text
+DNDarray([[0., 0., 0., 0.],
+          [0., 0., 0., 0.],
+          [0., 0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+Or a vector with the numbers from $0$ to $9$ in ascending order.
+
+```python
+ht.arange(10)
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=ht.int32, device=cpu:0, split=None)
+```
+
+The following snippet creates a row vector with $5$ elements, each position filled with the value $9$ and the `ht.int64` data type.
+
+```python
+ht.full((1, 5,), fill_value=9, dtype=ht.int64)
+```
+
+Output:
+
+```text
+DNDarray([[9, 9, 9, 9, 9]], dtype=ht.int64, device=cpu:0, split=None)
+```
+
+Finally, let's load some user-defined data.
+
+!!! note
+    Heat takes care of automatically inferring the shape, i.e. the tensor dimensions, and data types from the user-provided input.
+
+```python
+ht.array([[0, 1, 2], [0.1, 0.2, 3]])
+```
+
+Output:
+
+```text
+DNDarray([[0.0000, 1.0000, 2.0000],
+          [0.1000, 0.2000, 3.0000]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+## Operations
+
+Heat supports several mathematical operations, ranging from simple element-wise functions, binary arithmetic operations, and linear algebra to more powerful reductions. In the following example we add two matrices of the same size.
+
+```python
+ht.full((3, 4,), fill_value=9) + ht.ones((3, 4,))
+```
+
+Output:
+
+```text
+DNDarray([[10., 10., 10., 10.],
+          [10., 10., 10., 10.],
+          [10., 10., 10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+Instead of operators, we can also use a functional approach.
+
+```python
+ht.add(ht.full((3, 4,), fill_value=9), ht.ones((3, 4,)))
+```
+
+Output:
+
+```text
+DNDarray([[10., 10., 10., 10.],
+          [10., 10., 10., 10.],
+          [10., 10., 10., 10.]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+If there is no obvious operator for a function, you can also call a method on the `DNDarray`.
+
+```python
+ht.arange(5).sin()
+```
+
+Output:
+
+```text
+DNDarray([ 0.0000,  0.8415,  0.9093,  0.1411, -0.7568], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+Just like other numerical computation libraries, Heat supports broadcasting. It describes how two `DNDarrays` with different dimensions (also called shape) can still be combined in arithmetic operations given certain constraints. For example, we can add a scalar to a matrix.
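To make the constraint explicit before the examples: shapes are aligned from the trailing dimension, and two dimensions are compatible when they are equal or one of them is 1. Below is a plain-Python sketch of that rule; `broadcast_shape` is a hypothetical helper for illustration, not part of the Heat API.

```python
from itertools import zip_longest

def broadcast_shape(shape_a, shape_b):
    """Result shape under NumPy/Heat-style broadcasting; raises if incompatible."""
    result = []
    # align both shapes from the right; missing leading dimensions count as 1
    for a, b in zip_longest(reversed(shape_a), reversed(shape_b), fillvalue=1):
        if a != b and 1 not in (a, b):
            raise ValueError(f"incompatible dimensions: {a} vs. {b}")
        result.append(max(a, b))
    return tuple(reversed(result))

print(broadcast_shape((3, 4), ()))    # (3, 4) -- the scalar case below
print(broadcast_shape((3, 4), (4,)))  # (3, 4) -- the vector case further down
```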
+
+```python
+ht.zeros((3, 4,)) + 5.0
+```
+
+Output:
+
+```text
+DNDarray([[5., 5., 5., 5.],
+          [5., 5., 5., 5.],
+          [5., 5., 5., 5.]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+The scalar has been element-wise repeated for every entry within the matrix. We can do the same with matrices and vectors as well.
+
+```python
+ht.zeros((3, 4,)) + ht.arange(4)
+```
+
+Output:
+
+```text
+DNDarray([[0., 1., 2., 3.],
+          [0., 1., 2., 3.],
+          [0., 1., 2., 3.]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+The vector has been repeated for every row of the left-hand side matrix. A full description of the broadcasting rules can be found in [NumPy's manual](https://numpy.org/devdocs/user/theory.broadcasting.html). Speaking of NumPy: Heat is designed as a seamless drop-in replacement for it. There still might be cases, e.g. when working with native Python code, in which you want to convert a `DNDarray` to an `ndarray` instead.
+
+```python
+ht.arange(5).numpy()
+```
+
+Output:
+
+```text
+array([0, 1, 2, 3, 4], dtype=int32)
+```
+
+And vice versa:
+
+```python
+import numpy as np
+ht.array(np.arange(5))
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int64, device=cpu:0, split=None)
+```
+
+!!! seealso
+    Read up later on hundreds of other functions in our [API Reference](../api/heat/core/index.md). Or find out about them interactively by using the `help()` function in your Python interpreter.
diff --git a/doc/tutorials/tutorial_clustering.md b/doc/tutorials/tutorial_clustering.md
new file mode 100644
index 0000000000..f58115ccb5
--- /dev/null
+++ b/doc/tutorials/tutorial_clustering.md
@@ -0,0 +1,167 @@
+# Cluster Analysis
+
+This tutorial will demonstrate analysis with k-means and k-medians from the `cluster` module.
+We will use matplotlib for the visualization of data and results.
+
+```python
+import heat as ht
+import matplotlib.pyplot as plt
+```
+
+## Spherical Clouds of Datapoints
+
+For a simple demonstration of the clustering process and the differences between the algorithms, we will create an
+artificial dataset consisting of two circularly shaped clusters positioned at $(x_1=2, y_1=2)$ and $(x_2=-2, y_2=-2)$ in 2D space.
+For each cluster we will sample 100 arbitrary points from a circle with a radius of $R = 1.0$ by drawing random numbers
+for the spherical coordinates $( r\in [0,R], \phi \in [0,2\pi])$, translating these to Cartesian coordinates,
+and shifting them by $+2$ for cluster `c1` and $-2$ for cluster `c2`. The resulting concatenated dataset `data` has shape
+$(200, 2)$ and is distributed among the `p` processes along axis 0 (the sample axis).
+
+```python
+num_ele = 100
+R = 1.0
+
+# Create default spherical point cloud
+# Sample radius between 0 and 1, and phi between 0 and 2pi
+r = ht.random.rand(num_ele, split=0) * R
+phi = ht.random.rand(num_ele, split=0) * 2 * ht.constants.PI
+
+# Transform spherical coordinates to Cartesian coordinates
+x = r * ht.cos(phi)
+y = r * ht.sin(phi)
+
+# Stack the sampled points and shift them to locations (2,2) and (-2, -2)
+cluster1 = ht.stack((x + 2, y + 2), axis=1)
+cluster2 = ht.stack((x - 2, y - 2), axis=1)
+
+data = ht.concatenate((cluster1, cluster2), axis=0)
+```
+
+Let's plot the data for illustration. In order to do so with matplotlib, we need to unsplit the data (gather it from
+all processes) and transform it into a NumPy array. Plotting can only be done on rank 0.
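One practical caveat for the snippet below: on a headless cluster node, `plt.show()` has no display to attach to. Here is a hedged variant of the same gather-and-plot pattern that renders off-screen instead; the Agg backend and the `data.png` output name are choices made for this sketch, not part of the tutorial.

```python
import heat as ht
import matplotlib
matplotlib.use("Agg")  # render off-screen; no display required
import matplotlib.pyplot as plt

points = ht.random.rand(200, 2, split=0)           # stand-in for `data` above
points_np = ht.resplit(points, axis=None).numpy()  # gather the full array everywhere
if ht.MPI_WORLD.rank == 0:                         # write the figure exactly once
    plt.plot(points_np[:, 0], points_np[:, 1], "bo")
    plt.savefig("data.png", dpi=150)
```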
+
+```python
+data_np = ht.resplit(data, axis=None).numpy()
+if ht.MPI_WORLD.rank == 0:
+    plt.plot(data_np[:,0], data_np[:,1], 'bo')
+    plt.show()
+```
+
+This will render something like this:
+![Scattered data points](../images/data.png){ .centered }
+
+Now we perform the clustering analysis with k-means. We choose 'kmeans++' as an intelligent way of sampling the
+initial centroids.
+
+```python
+kmeans = ht.cluster.KMeans(n_clusters=2, init="kmeans++")
+labels = kmeans.fit_predict(data).squeeze()
+centroids = kmeans.cluster_centers_
+
+# Select points assigned to clusters c1 and c2
+c1 = data[ht.where(labels == 0), :]
+c2 = data[ht.where(labels == 1), :]
+# After slicing, the arrays are no longer distributed equally among the processes; we need to balance
+c1.balance_()
+c2.balance_()
+
+print(f"Number of points assigned to c1: {c1.shape[0]} \n"
+      f"Number of points assigned to c2: {c2.shape[0]} \n"
+      f"Centroids = {centroids}")
+```
+
+```text
+Number of points assigned to c1: 100
+Number of points assigned to c2: 100
+Centroids = DNDarray([[ 2.0169,  2.0713],
+          [-1.9831, -1.9287]], dtype=ht.float32, device=cpu:0, split=None)
+```
+
+Let's plot the assigned clusters and the respective centroids:
+
+```python
+c1_np = c1.numpy()
+c2_np = c2.numpy()
+
+if ht.MPI_WORLD.rank == 0:
+    plt.plot(c1_np[:,0], c1_np[:,1], 'x', color='#f0781e')
+    plt.plot(c2_np[:,0], c2_np[:,1], 'x', color='#5a696e')
+    plt.plot(centroids[0,0], centroids[0,1], '^', markersize=10, markeredgecolor='black', color='#f0781e')
+    plt.plot(centroids[1,0], centroids[1,1], '^', markersize=10, markeredgecolor='black', color='#5a696e')
+    plt.show()
+```
+
+![k-means cluster assignments](../images/clustering.png){ .centered }
+
+We can also cluster the data with k-medians. The respective advanced initial centroid sampling is called 'kmedians++'.
+
+```python
+kmedians = ht.cluster.KMedians(n_clusters=2, init="kmedians++")
+labels = kmedians.fit_predict(data).squeeze()
+centroids = kmedians.cluster_centers_
+
+# Select points assigned to clusters c1 and c2
+c1 = data[ht.where(labels == 0), :]
+c2 = data[ht.where(labels == 1), :]
+# After slicing, the arrays are no longer distributed equally among the processes; we need to balance
+c1.balance_()
+c2.balance_()
+
+print(f"Number of points assigned to c1: {c1.shape[0]} \n"
+      f"Number of points assigned to c2: {c2.shape[0]} \n"
+      f"Centroids = {centroids}")
+```
+
+Plotting the assigned clusters and the respective centroids:
+
+```python
+c1_np = c1.numpy()
+c2_np = c2.numpy()
+if ht.MPI_WORLD.rank == 0:
+    plt.plot(c1_np[:,0], c1_np[:,1], 'x', color='#f0781e')
+    plt.plot(c2_np[:,0], c2_np[:,1], 'x', color='#5a696e')
+    plt.plot(centroids[0,0], centroids[0,1], '^', markersize=10, markeredgecolor='black', color='#f0781e')
+    plt.plot(centroids[1,0], centroids[1,1], '^', markersize=10, markeredgecolor='black', color='#5a696e')
+    plt.show()
+```
+
+![k-medians cluster assignments](../images/clustering_kmeans.png){ .centered }
+
+## The Iris Dataset
+
+The *iris* dataset is a well-known example for clustering analysis. It contains 4 measured features for samples from
+three different types of iris flowers. A subset of 150 samples is included in HDF5, CSV, and NetCDF formats in Heat,
+located under 'heat/heat/datasets', and can be loaded in a distributed manner with Heat's parallel
+dataloader:
+
+```python
+iris = ht.load("heat/datasets/iris.csv", sep=";", split=0)
+```
+
+Fitting the dataset with k-means:
+
+```python
+k = 3
+kmeans = ht.cluster.KMeans(n_clusters=k, init="kmeans++")
+kmeans.fit(iris)
+```
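Once `fit` has returned, the learned centroids are available on the estimator, just as with the synthetic data earlier; a minimal sanity check, using the `cluster_centers_` attribute shown above:

```python
print(kmeans.cluster_centers_)  # one row per cluster, four iris features each
```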
+Let's see what the results are. In theory, there are 50 samples of each of the 3 iris types:
+
+```python
+labels = kmeans.predict(iris).squeeze()
+
+# Select points assigned to clusters c1, c2 and c3
+c1 = iris[ht.where(labels == 0), :]
+c2 = iris[ht.where(labels == 1), :]
+c3 = iris[ht.where(labels == 2), :]
+# After slicing, the arrays are no longer distributed equally among the processes; we need to balance
+c1.balance_()
+c2.balance_()
+c3.balance_()
+
+print(f"Number of points assigned to c1: {c1.shape[0]} \n"
+      f"Number of points assigned to c2: {c2.shape[0]} \n"
+      f"Number of points assigned to c3: {c3.shape[0]}")
+```
diff --git a/doc/tutorials/tutorial_notebook_gallery.md b/doc/tutorials/tutorial_notebook_gallery.md
new file mode 100644
index 0000000000..722c2d07b0
--- /dev/null
+++ b/doc/tutorials/tutorial_notebook_gallery.md
@@ -0,0 +1,72 @@
+## Setup notebooks
+Example notebooks explaining how to set up an MPI-enabled notebook to work with Heat in an interactive way.
+
+ *(cards linking to the local, JSC, and HAICORE setup notebooks; markup lost in extraction)*
+
+## Example notebooks
+These notebooks contain Heat examples that have been used in interactive tutorials.
+
+ *(cards linking to the basics, internals, loading and preprocessing, matrix factorizations, clustering, and profiling notebooks; markup lost in extraction)*
diff --git a/doc/tutorials/tutorial_parallel_computation.md b/doc/tutorials/tutorial_parallel_computation.md
new file mode 100644
index 0000000000..6a5c7b67ed
--- /dev/null
+++ b/doc/tutorials/tutorial_parallel_computation.md
@@ -0,0 +1,244 @@
+# Parallel Computation
+
+One of Heat's strengths lies in its ability to heavily parallelize computations. By default, Heat utilizes all available cores in your processor via OpenMP multi-threading. However, it is also possible to further speed up processing with GPU acceleration or by scaling out in a cluster system using MPI. In the following tutorial you will learn how to exploit Heat's parallel processing features.
+
+## GPU Acceleration
+
+!!! warning
+    Please make sure that you have an NVIDIA GPU in your system for the following code snippets.
+
+GPUs can accelerate applications running on the CPU by offloading some or all of the time-consuming portions of the code. This is particularly effective for highly compute-intensive programs with low to medium memory consumption. From a user's perspective, the application runs faster because it is using the massively parallel processing power of the GPU to boost performance. Heat allows you to allocate `DNDarrays` on the GPU as follows.
+
+```python
+ht.arange(5, device="gpu")
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cuda:0, split=None)
+```
+
+As you can see, the device information has changed from `cpu:0` to `cuda:0`, denoting that the `DNDarray`'s data is now residing in GPU memory of the [CUDA](https://developer.nvidia.com/cuda-zone) compute backend.
+
+We can move data back and forth between the CPU and GPU by calling the respective function on the `DNDarray`.
+
+```python
+a = ht.arange(5, device="gpu")
+a.cpu()
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None)
+```
+
+Manually allocating or moving all `DNDarrays` between CPU and GPU can quickly become tedious. Hence, Heat allows you to set a default device for all computations.
+
+```python
+ht.use_device('gpu')
+ht.zeros((3, 4,))
+```
+
+Output:
+
+```text
+DNDarray([[0., 0., 0., 0.],
+          [0., 0., 0., 0.],
+          [0., 0., 0., 0.]], dtype=ht.float32, device=cuda:0, split=None)
+```
+
+Finally, operations on `DNDarrays` on GPUs can be performed as we are used to from their CPU counterparts.
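If you are not sure a GPU is actually present, you can guard the following examples; here is a small sketch that falls back to the CPU. The availability check uses PyTorch, Heat's node-local compute backend, rather than Heat's own API:

```python
import heat as ht
import torch  # Heat builds on PyTorch for node-local computation

# switch to the GPU only when CUDA is actually available
ht.use_device("gpu" if torch.cuda.is_available() else "cpu")
print(ht.zeros((2, 2)))  # the repr shows the device that was actually used
```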
+
+```python
+ht.use_device('gpu')
+ht.full((3, 4,), fill_value=9) + ht.ones((3, 4,))
+```
+
+Output:
+
+```text
+DNDarray([[10., 10., 10., 10.],
+          [10., 10., 10., 10.],
+          [10., 10., 10., 10.]], dtype=ht.float32, device=cuda:0, split=None)
+```
+
+## Distributed Computing
+
+!!! warning
+    For the following code examples, make sure you have [MPI](https://computing.llnl.gov/tutorials/mpi/) installed.
+
+With Heat you can even compute in distributed memory environments with multiple computation nodes, like modern high-performance cluster systems. For this, Heat makes use of the fact that operations performed on multi-dimensional arrays tend to be identical for all data items. Hence, they can be processed in a data-parallel manner. Heat partitions the total number of data items equally among all processing nodes. A `DNDarray` assumes the role of a virtual overlay over these node-local data portions and manages them for you while offering the same interface. Consequently, operations can now be executed in parallel. Each processing node applies them locally to its own data chunk. If necessary, partial results are communicated and automatically combined behind the scenes for correct global results.
+
+![Split array](../images/split_array.svg){ .centered width="80%" }
+
+Data chunking in Heat is always done along a single axis, i.e. a one-dimensional domain decomposition. You can specify this axis by using the `split` parameter in operations and `DNDarray` creation functions. The picture above shows the result of setting different `split` axes on a three-dimensional volume and three processing nodes called $p_0, p_1$ and $p_2$. A Heat `DNDarray` without any split, i.e. `split=None` (the default), results in a redundant copy on each computation node.
+
+!!! note
+    In the following examples we assume three execution nodes. We distinguish between them in the output by showing them as `[node/total nodes]`.
+
+!!! note
+    If you're running the following examples in a distributed computation environment, please modify your program invocation from `python ./my_script.py` to `mpirun -np <number of processes> python ./my_script.py`. By the way, invoking a Heat program like this on your laptop or workstation also works.
+
+!!! note
+    Most of the examples throughout the documentation display split `DNDarrays`. This is not the default behavior when displaying a `DNDarray`: the full array will be printed multiple times (once per process). This behaviour can be changed using the `ht.local_printing()` option or by printing the local arrays for each process with the property [larray](../api/heat/core/dndarray.md).
+
+```python
+ht.arange(5, split=0)
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=0)
+
+[0/3] DNDarray([0, 1], dtype=ht.int32, device=cpu:0, split=0)
+[1/3] DNDarray([2, 3], dtype=ht.int32, device=cpu:0, split=0)
+[2/3] DNDarray([4], dtype=ht.int32, device=cpu:0, split=0)
+```
+
+This can also be done along other axes for arrays with larger dimensions.
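Whatever the split axis, each process can report what it holds locally via the `larray` property mentioned in the note above, together with its local-shape counterpart `lshape`; a minimal sketch, assuming the usual launch with three processes:

```python
import heat as ht

a = ht.arange(5, split=0)  # distributed along axis 0
rank, size = a.comm.rank, a.comm.size
print(f"[{rank}/{size}] lshape={a.lshape}, larray={a.larray}")
# e.g. on the first of three processes: [0/3] lshape=(2,), larray=tensor([0, 1], ...)
```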
+
+```python
+ht.zeros((3, 4,), split=1)
+```
+
+Output:
+
+```text
+DNDarray([[0., 0., 0., 0.],
+          [0., 0., 0., 0.],
+          [0., 0., 0., 0.]], dtype=ht.float32, device=cpu:0, split=1)
+
+[0/3] DNDarray([[0., 0.],
+          [0., 0.],
+          [0., 0.]], dtype=ht.float32, device=cpu:0, split=1)
+[1/3] DNDarray([[0.],
+          [0.],
+          [0.]], dtype=ht.float32, device=cpu:0, split=1)
+[2/3] DNDarray([[0.],
+          [0.],
+          [0.]], dtype=ht.float32, device=cpu:0, split=1)
+```
+
+As previously explained, specifying no `split` axis or setting it explicitly to `None` results in a redundant copy on each node.
+
+```python
+ht.arange(5, split=None)
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None)
+
+[0/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None)
+[1/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None)
+[2/3] DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=None)
+```
+
+You may also modify the data partitioning of a Heat array by using the `resplit()` function. This allows you to repartition the data as you choose. Please note that this should be used sparingly and for small amounts of data only, as it entails data communication over the network.
+
+```python
+a = ht.arange(5, split=None)
+a.resplit(0)
+```
+
+Output:
+
+```text
+DNDarray([0, 1, 2, 3, 4], dtype=ht.int32, device=cpu:0, split=0)
+
+[0/3] DNDarray([0, 1], dtype=ht.int32, device=cpu:0, split=0)
+[1/3] DNDarray([2, 3], dtype=ht.int32, device=cpu:0, split=0)
+[2/3] DNDarray([4], dtype=ht.int32, device=cpu:0, split=0)
+```
+
+The result of an operation on a Heat tensor will in most cases preserve the split of the respective operands. However, in some cases the split axis might change. For example, a transpose of a Heat `DNDarray` will equally transpose the split axis. Furthermore, a reduction operation, e.g. `sum()`, that is performed across the split axis might remove the data partitioning entirely.
+
+```python
+a = ht.ones((3, 4,), split=0)
+a.sum()
+```
+
+Output:
+
+```text
+DNDarray(12., dtype=ht.float32, device=cpu:0, split=None)
+```
+
+The previously `split=0` matrix is `split=None` after the reduction operation. Obviously, we can also perform operations between (differently) split `DNDarrays`.
+
+```python
+a = ht.ones((3, 4,), split=1)
+b = ht.arange(4)
+
+a + b
+```
+
+Output:
+
+```text
+DNDarray([[1., 2., 3., 4.],
+          [1., 2., 3., 4.],
+          [1., 2., 3., 4.]], dtype=ht.float32, device=cpu:0, split=1)
+
+[0/3] DNDarray([[1., 2.],
+          [1., 2.],
+          [1., 2.]], dtype=ht.float32, device=cpu:0, split=1)
+[1/3] DNDarray([[3.],
+          [3.],
+          [3.]], dtype=ht.float32, device=cpu:0, split=1)
+[2/3] DNDarray([[4.],
+          [4.],
+          [4.]], dtype=ht.float32, device=cpu:0, split=1)
+```
+
+### Technical Details
+
+On a technical level, Heat is inspired by the so-called [Bulk Synchronous Parallel (BSP)](https://en.wikipedia.org/wiki/Bulk_synchronous_parallel) processing model. Computations proceed in a series of hierarchical supersteps, each consisting of a number of node-local computations and subsequent communications. In contrast to the classical BSP model, communicated data is available immediately, rather than after the next global synchronization. In Heat, global synchronization only occurs for collective MPI calls as well as at program start and termination.
+
+![BSP processing model](../images/bsp.svg){ .centered width="60%" }
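As a concrete, hedged illustration of one such superstep: a distributed mean consists of a purely node-local pass over each chunk, followed by a single collective that merges the partial results.

```python
import heat as ht

x = ht.arange(12, split=0, dtype=ht.float32)
# each process sums only its local chunk; an MPI reduction then combines
# the partial sums into the global mean behind the scenes
print(x.mean())  # prints the global mean, 5.5, identically on every process
```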
+### Distributed Interactive Interpreter
+
+Heat ships with a distributed interactive Python interpreter that allows you to prototype and debug distributed applications. It can be found in the Heat sources under the path `scripts/interactive.py`, or you can just grab it directly:
+
+```bash
+wget https://raw.githubusercontent.com/helmholtz-analytics/heat/master/scripts/interactive.py
+```
+
+You can start the distributed interactive interpreter by invoking the following command:
+
+```bash
+mpirun -s all -np <number of processes> python interactive.py
+```
+
+!!! note
+    The interactive interpreter only supports a subset of all control commands.
+
+## Parallel Performance
+
+When working with parallel and distributed computation in Heat, there are some best practices for you to know about. The following list covers the major ones.
+
+### Dos
+
+- Use the high-level Heat API
+    - computational kernels are optimized
+    - Python constructs (e.g. loops) may be slow
+- Split large amounts of data
+    - often along the 'observations/samples/time' dimension
+    - also large intermediate matrices
+- Have redundant copies (`split=None`) of small, frequently accessed matrices
+
+### Don'ts
+
+- Extensive data copying, e.g.
+    - operations with operands of different splits (except `None`)
+    - `reshape()` calls that actually change the array dimensions (adding extra dimensions with size 1 is fine)
+- Overusing the GPU
+    - computation-intensive operations are usually a good fit
+    - operations that only access memory extensively (e.g. sorting) are not
diff --git a/heat/nn/__init__.py b/heat/nn/__init__.py
index 7da9d072a3..80f38da9b1 100644
--- a/heat/nn/__init__.py
+++ b/heat/nn/__init__.py
@@ -10,10 +10,18 @@
 from . import functional
 
+__all__ = ["functional"]
 
 if sys.version_info.minor >= 7:
     from .data_parallel import *
 
+    try:
+        from . import data_parallel as _dp
+
+        __all__.extend(getattr(_dp, "__all__", []))
+    except Exception:
+        pass
+
     functional.__getattr__ = functional.func_getattr
 
     def __getattr__(name):
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000..06c56ed8d0
--- /dev/null
+++ b/main.py
@@ -0,0 +1,23 @@
+"""MkDocs Macros hook that reads heat/core/version.py and sets env.variables['version'] and ['release'] for documentation builds."""
+
+from pathlib import Path
+import importlib.util
+
+
+def define_env(env):
+    """Populate MkDocs Macros variables 'version' and 'release' from heat/core/version.py; reads major, minor, micro and optional extension, then sets env.variables accordingly for use in templates."""
+    version_path = Path(__file__).parent / "heat" / "core" / "version.py"
+    spec = importlib.util.spec_from_file_location("heat_core_version", version_path)
+    mod = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(mod)
+
+    major = getattr(mod, "major")
+    minor = getattr(mod, "minor")
+    micro = getattr(mod, "micro")
+    extension = getattr(mod, "extension", "")
+
+    base = f"{major}.{minor}.{micro}"
+    release = f"{base}-{extension}" if extension else base
+
+    env.variables["version"] = base
+    env.variables["release"] = release
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000..8cc7a9aa42
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,279 @@
+site_name: Heat
+site_url: https://heat.readthedocs.io
+
+theme:
+  name: material
+  custom_dir: doc/styles
+  features:
+    - navigation.tabs
+    - content.code.copy
+    - search.suggest
+    - search.highlight
+
+docs_dir: doc
+
+plugins:
+  - search
+  - mkdocstrings
+  - git-revision-date-localized
+  - macros
+  - mkdocs-jupyter:
+      execute: false
+
+markdown_extensions:
+  - admonition
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed
+  - pymdownx.arithmatex:
+      generic:
+
+## Parallel Performance
+
+When working with parallel and distributed computation in Heat, there are some best practices to be aware of. The following list covers the major ones; the sketch after the list illustrates them in code.
+
+### Dos
+
+- Use the high-level Heat API
+    - the computational kernels are optimized
+    - hand-rolled Python constructs (e.g. loops) may be slow
+- Split large amounts of data
+    - often along the 'observations/samples/time' dimension
+    - this also applies to large intermediate matrices
+- Keep redundant copies (`split=None`) of small, frequently accessed matrices
+
+### Don'ts
+
+- Extensive data copying, e.g.
+    - operations with operands of different splits (except `None`)
+    - `reshape()` calls that actually change the array dimensions (adding extra dimensions with size 1 is fine)
+- Overusing the GPU
+    - computation-intensive operations are usually a good fit
+    - operations that mostly access memory (e.g. sorting) are not
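+
+A minimal sketch of these guidelines (shapes and variable names are illustrative only):
+
+```python
+import heat as ht
+
+# Do: split the large data along the sample dimension across all processes
+data = ht.random.randn(100000, 64, split=0)
+
+# Do: keep a small, frequently accessed matrix replicated on every process
+weights = ht.random.randn(64, 8, split=None)
+
+# Do: rely on the high-level API; the distributed kernel handles communication
+projected = ht.matmul(data, weights)
+
+# Don't: mix operands with different splits - Heat then has to copy data
+# between processes to align them, which can dominate the runtime
+```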
diff --git a/heat/nn/__init__.py b/heat/nn/__init__.py
index 7da9d072a3..80f38da9b1 100644
--- a/heat/nn/__init__.py
+++ b/heat/nn/__init__.py
@@ -10,10 +10,18 @@
 from . import functional
 
+__all__ = ["functional"]
 
 if sys.version_info.minor >= 7:
     from .data_parallel import *
 
+    try:
+        from . import data_parallel as _dp
+
+        __all__.extend(getattr(_dp, "__all__", []))
+    except Exception:
+        pass
+
     functional.__getattr__ = functional.func_getattr
 
 
 def __getattr__(name):
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000..06c56ed8d0
--- /dev/null
+++ b/main.py
@@ -0,0 +1,23 @@
+"""MkDocs Macros hook that reads heat/core/version.py and sets env.variables['version'] and ['release'] for documentation builds."""
+
+from pathlib import Path
+import importlib.util
+
+
+def define_env(env):
+    """Populate MkDocs Macros variables 'version' and 'release' from heat/core/version.py; reads major, minor, micro and optional extension, then sets env.variables accordingly for use in templates."""
+    version_path = Path(__file__).parent / "heat" / "core" / "version.py"
+    spec = importlib.util.spec_from_file_location("heat_core_version", version_path)
+    mod = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(mod)
+
+    major = getattr(mod, "major")
+    minor = getattr(mod, "minor")
+    micro = getattr(mod, "micro")
+    extension = getattr(mod, "extension", "")
+
+    base = f"{major}.{minor}.{micro}"
+    release = f"{base}-{extension}" if extension else base
+
+    env.variables["version"] = base
+    env.variables["release"] = release
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000..8cc7a9aa42
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,278 @@
+site_name: Heat
+site_url: https://heat.readthedocs.io
+
+theme:
+  name: material
+  custom_dir: doc/styles
+  features:
+    - navigation.tabs
+    - content.code.copy
+    - search.suggest
+    - search.highlight
+
+docs_dir: doc
+
+plugins:
+  - search
+  - mkdocstrings
+  - git-revision-date-localized
+  - macros
+  - mkdocs-jupyter:
+      execute: false
+
+markdown_extensions:
+  - admonition
+  - pymdownx.details
+  - pymdownx.superfences
+  - pymdownx.tabbed
+  - pymdownx.arithmatex:
+      generic: true
+  - attr_list
+  - md_in_html
+  - toc
+
+extra_javascript:
+  - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
+
+extra_css:
+  - styles/extra.css
+
+nav:
+  - Home: index.md
+  - Introduction: introduction.md
+  - Getting Started: getting_started.md
+  - Heat Tutorials:
+    - Overview: tutorials/tutorial.md
+    - 30 Minutes Welcome: tutorials/tutorial_30_minutes.md
+    - Parallel Computation: tutorials/tutorial_parallel_computation.md
+    - Cluster Analysis: tutorials/tutorial_clustering.md
+    - Notebook Gallery: tutorials/tutorial_notebook_gallery.md
+    - Setup:
+      - Local Setup: tutorials/notebooks/0_setup/0_setup_local.ipynb
+      - JSC Setup: tutorials/notebooks/0_setup/0_setup_jsc.ipynb
+      - HAICORE Setup: tutorials/notebooks/0_setup/0_setup_haicore.ipynb
+    - Examples:
+      - Heat Basics: tutorials/notebooks/1_basics.ipynb
+      - Internals: tutorials/notebooks/2_internals.ipynb
+      - Loading and Preprocessing: tutorials/notebooks/3_loading_preprocessing.ipynb
+      - Matrix Factorizations: tutorials/notebooks/4_matrix_factorizations.ipynb
+      - Clustering: tutorials/notebooks/5_clustering.ipynb
+      - Profiling: tutorials/notebooks/6_profiling.ipynb
+  - Documentation Guide: documentation_howto.md
+  - Case Studies: case_studies.md
+  - API Reference:
+    - Heat:
+      - Index: api/heat/index.md
+      - CLI: api/heat/cli.md
+      - Tests:
+        - Index: api/heat/tests/index.md
+        - Test CLI: api/heat/tests/test_cli.md
+
+      - Core:
+        - Index: api/heat/core/index.md
+        - Arithmetics: api/heat/core/arithmetics.md
+        - Base: api/heat/core/base.md
+        - Communication: api/heat/core/communication.md
+        - Complex Math: api/heat/core/complex_math.md
+        - Constants: api/heat/core/constants.md
+        - Devices: api/heat/core/devices.md
+        - DNDarray: api/heat/core/dndarray.md
+        - Exponential: api/heat/core/exponential.md
+        - Factories: api/heat/core/factories.md
+        - Indexing: api/heat/core/indexing.md
+        - IO: api/heat/core/io.md
+        - Linalg:
+          - Index: api/heat/core/linalg/index.md
+          - Basics: api/heat/core/linalg/basics.md
+          - Eigh: api/heat/core/linalg/eigh.md
+          - Polar: api/heat/core/linalg/polar.md
+          - QR: api/heat/core/linalg/qr.md
+          - Solver: api/heat/core/linalg/solver.md
+          - SVD: api/heat/core/linalg/svd.md
+          - SVD Tools: api/heat/core/linalg/svdtools.md
+          - Tests:
+            - Index: api/heat/core/linalg/tests/index.md
+            - Test Basics: api/heat/core/linalg/tests/test_basics.md
+            - Test Eigh: api/heat/core/linalg/tests/test_eigh.md
+            - Test Polar: api/heat/core/linalg/tests/test_polar.md
+            - Test QR: api/heat/core/linalg/tests/test_qr.md
+            - Test Solver: api/heat/core/linalg/tests/test_solver.md
+            - Test SVD: api/heat/core/linalg/tests/test_svd.md
+            - Test SVDTools: api/heat/core/linalg/tests/test_svdtools.md
+        - Logical: api/heat/core/logical.md
+        - Manipulations: api/heat/core/manipulations.md
+        - Memory: api/heat/core/memory.md
+        - Printing: api/heat/core/printing.md
+        - Random: api/heat/core/random.md
+        - Relational: api/heat/core/relational.md
+        - Rounding: api/heat/core/rounding.md
+        - Sanitation: api/heat/core/sanitation.md
+        - Signal: api/heat/core/signal.md
+        - Statistics: api/heat/core/statistics.md
+        - Stride Tricks: api/heat/core/stride_tricks.md
+        - Tests:
+          - Index: api/heat/core/tests/index.md
+          - Test Arithmetics: api/heat/core/tests/test_arithmetics.md
+          - Test Communication: api/heat/core/tests/test_communication.md
+          - Test Complex Math: api/heat/core/tests/test_complex_math.md
+          - Test Constants: api/heat/core/tests/test_constants.md
+          - Test Devices: api/heat/core/tests/test_devices.md
+          - Test DNDarray: api/heat/core/tests/test_dndarray.md
+          - Test Exponential: api/heat/core/tests/test_exponential.md
+          - Test Factories: api/heat/core/tests/test_factories.md
+          - Test Indexing: api/heat/core/tests/test_indexing.md
+          - Test IO: api/heat/core/tests/test_io.md
+          - Test Logical: api/heat/core/tests/test_logical.md
+          - Test Manipulations: api/heat/core/tests/test_manipulations.md
+          - Test Memory: api/heat/core/tests/test_memory.md
+          - Test Operations: api/heat/core/tests/test_operations.md
+          - Test Printing: api/heat/core/tests/test_printing.md
+          - Test Random: api/heat/core/tests/test_random.md
+          - Test Relational: api/heat/core/tests/test_relational.md
+          - Test Rounding: api/heat/core/tests/test_rounding.md
+          - Test Sanitation: api/heat/core/tests/test_sanitation.md
+          - Test Signal: api/heat/core/tests/test_signal.md
+          - Test Statistics: api/heat/core/tests/test_statistics.md
+          - Test Stride Tricks: api/heat/core/tests/test_stride_tricks.md
+          - Test Tiling: api/heat/core/tests/test_tiling.md
+          - Test Trigonometrics: api/heat/core/tests/test_trigonometrics.md
+          - Test Types: api/heat/core/tests/test_types.md
+          - Test Vmap: api/heat/core/tests/test_vmap.md
+          - Test Suites:
+            - Index: api/heat/core/tests/test_suites/index.md
+            - Basic Test: api/heat/core/tests/test_suites/basic_test.md
+            - Test Basic Test: api/heat/core/tests/test_suites/test_basic_test.md
+        - Tiling: api/heat/core/tiling.md
+        - Trigonometrics: api/heat/core/trigonometrics.md
+        - Types: api/heat/core/types.md
+        - Version: api/heat/core/version.md
+        - Vmap: api/heat/core/vmap.md
+
+      - Cluster:
+        - Index: api/heat/cluster/index.md
+        - BatchParallelClustering: api/heat/cluster/batchparallelclustering.md
+        - KMeans: api/heat/cluster/kmeans.md
+        - KMedians: api/heat/cluster/kmedians.md
+        - KMedoids: api/heat/cluster/kmedoids.md
+        - Spectral: api/heat/cluster/spectral.md
+        - Tests:
+          - Index: api/heat/cluster/tests/index.md
+          - Test BatchParallelClustering: api/heat/cluster/tests/test_batchparallelclustering.md
+          - Test KMeans: api/heat/cluster/tests/test_kmeans.md
+          - Test KMedians: api/heat/cluster/tests/test_kmedians.md
+          - Test KMedoids: api/heat/cluster/tests/test_kmedoids.md
+          - Test Spectral: api/heat/cluster/tests/test_spectral.md
+
+      - Classification:
+        - Index: api/heat/classification/index.md
+        - KNeighborsClassifier: api/heat/classification/kneighborsclassifier.md
+        - Tests:
+          - Index: api/heat/classification/tests/index.md
+          - Test KNN: api/heat/classification/tests/test_knn.md
+
+      - Datasets:
+        - Index: api/heat/datasets/index.md
+
+      - Decomposition:
+        - Index: api/heat/decomposition/index.md
+        - DMD: api/heat/decomposition/dmd.md
+        - PCA: api/heat/decomposition/pca.md
+        - Tests:
+          - Index: api/heat/decomposition/tests/index.md
+          - Test DMD: api/heat/decomposition/tests/test_dmd.md
+          - Test PCA: api/heat/decomposition/tests/test_pca.md
+
+      - FFT:
+        - Index: api/heat/fft/index.md
+        - FFT: api/heat/fft/fft.md
+        - Tests:
+          - Index: api/heat/fft/tests/index.md
+          - Test FFT: api/heat/fft/tests/test_fft.md
+
+      - Graph:
+        - Index: api/heat/graph/index.md
+        - Laplacian: api/heat/graph/laplacian.md
+        - Tests:
+          - Index: api/heat/graph/tests/index.md
+          - Test Laplacian: api/heat/graph/tests/test_laplacian.md
+
+      - Naive Bayes:
+        - Index: api/heat/naive_bayes/index.md
+        - GaussianNB: api/heat/naive_bayes/gaussianNB.md
+        - Tests:
+          - Index: api/heat/naive_bayes/tests/index.md
+          - Test GaussianNB: api/heat/naive_bayes/tests/test_gaussiannb.md
+
+      - Neural Networks:
+        - Index: api/heat/nn/index.md
+        - Data Parallel: api/heat/nn/data_parallel.md
+        - Functional: api/heat/nn/functional.md
+        - Tests:
+          - Index: api/heat/nn/tests/index.md
+          - Test Data Parallel: api/heat/nn/tests/test_data_parallel.md
+          - Test NN: api/heat/nn/tests/test_nn.md
+
+      - Optim:
+        - Index: api/heat/optim/index.md
+        - DP Optimizer: api/heat/optim/dp_optimizer.md
+        - LR Scheduler: api/heat/optim/lr_scheduler.md
+        - Utils: api/heat/optim/utils.md
+        - Tests:
+          - Index: api/heat/optim/tests/index.md
+          - Test DP Optimizer: api/heat/optim/tests/test_dp_optimizer.md
+          - Test Optim: api/heat/optim/tests/test_optim.md
+          - Test Utils: api/heat/optim/tests/test_utils.md
+
+      - Preprocessing:
+        - Index: api/heat/preprocessing/index.md
+        - Preprocessing: api/heat/preprocessing/preprocessing.md
+        - Tests:
+          - Index: api/heat/preprocessing/tests/index.md
+          - Test Preprocessing: api/heat/preprocessing/tests/test_preprocessing.md
+
+      - Regression:
+        - Index: api/heat/regression/index.md
+        - Lasso: api/heat/regression/lasso.md
+        - Tests:
+          - Index: api/heat/regression/tests/index.md
+          - Test Lasso: api/heat/regression/tests/test_lasso.md
+
+      - Sparse:
+        - Index: api/heat/sparse/index.md
+        - Arithmetics: api/heat/sparse/arithmetics.md
+        - DCSX Matrix: api/heat/sparse/dcsx_matrix.md
+        - Factories: api/heat/sparse/factories.md
+        - Manipulations: api/heat/sparse/manipulations.md
+        - Tests:
+          - Index: api/heat/sparse/tests/index.md
+          - Test Arithmetics CSR: api/heat/sparse/tests/test_arithmetics_csr.md
+          - Test DCSCMatrix: api/heat/sparse/tests/test_dcscmatrix.md
+          - Test DCSRMatrix: api/heat/sparse/tests/test_dcsrmatrix.md
+          - Test Factories: api/heat/sparse/tests/test_factories.md
+          - Test Manipulations: api/heat/sparse/tests/test_manipulations.md
+
+      - Spatial:
+        - Index: api/heat/spatial/index.md
+        - Distance: api/heat/spatial/distance.md
+        - Tests:
+          - Index: api/heat/spatial/tests/index.md
+          - Test Distances: api/heat/spatial/tests/test_distances.md
+
+      - Utils:
+        - Index: api/heat/utils/index.md
+        - Vision Transforms: api/heat/utils/vision_transforms.md
+        - Data:
+          - Index: api/heat/utils/data/index.md
+          - DataTools: api/heat/utils/data/datatools.md
+          - MatrixGallery: api/heat/utils/data/matrixgallery.md
+          - MNIST: api/heat/utils/data/mnist.md
+          - Partial Dataset: api/heat/utils/data/partial_dataset.md
+          - Spherical: api/heat/utils/data/spherical.md
+          - Tests:
+            - Index: api/heat/utils/data/tests/index.md
+            - Test MatrixGallery: api/heat/utils/data/tests/test_matrixgallery.md
+            - Test Partial Dataset: api/heat/utils/data/tests/test_partial_dataset.md
+            - Test Spherical: api/heat/utils/data/tests/test_spherical.md
+        - Tests:
+          - Index: api/heat/utils/tests/index.md
+          - Test Vision Transforms: api/heat/utils/tests/test_vision_transforms.md