Skip to content

Commit fced603

Browse files
Refactor probnum.typing (#599)
- Rename `IntArgType` -> `IntLike`
- Rename `FloatArgType` -> `FloatLike`
- Rename `ShapeArgType` -> `ShapeLike`
- Documentation for `DTypeLike`
- Rename `ArrayLikeGetitemArgType` -> `ArrayIndicesLike`
- Rename `ScalarArgType` -> `ScalarLike`
- Documentation for `ArrayLike`
- Rename `LinearOperatorArgType` -> `LinearOperatorLike`
- Remove `ToleranceDiffusionType` and `DenseOutputLocationArgType`
- Restructure API types
- Add annotations future import
- Rename `DTypeArgType` -> `DTypeLike`
- Fix refactoring bug
- isort fixes
1 parent: b69587b · commit: fced603

File tree

71 files changed

+505
-537
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

71 files changed

+505
-537
lines changed

docs/source/development/adding_to_the_api_documentation.ipynb

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -45,27 +45,27 @@
4545
"import probnum # pylint: disable=unused-import\n",
4646
"from probnum import linops, randvars, utils\n",
4747
"from probnum.linalg.solvers.matrixbased import SymmetricMatrixBasedSolver\n",
48-
"from probnum.typing import LinearOperatorArgType\n",
48+
"from probnum.typing import LinearOperatorLike\n",
4949
"\n",
5050
"# pylint: disable=too-many-branches\n",
5151
"\n",
5252
"\n",
5353
"def problinsolve(\n",
5454
" A: Union[\n",
55-
" LinearOperatorArgType,\n",
56-
" \"randvars.RandomVariable[LinearOperatorArgType]\",\n",
55+
" LinearOperatorLike,\n",
56+
" \"randvars.RandomVariable[LinearOperatorLike]\",\n",
5757
" ],\n",
5858
" b: Union[np.ndarray, \"randvars.RandomVariable[np.ndarray]\"],\n",
5959
" A0: Optional[\n",
6060
" Union[\n",
61-
" LinearOperatorArgType,\n",
62-
" \"randvars.RandomVariable[LinearOperatorArgType]\",\n",
61+
" LinearOperatorLike,\n",
62+
" \"randvars.RandomVariable[LinearOperatorLike]\",\n",
6363
" ]\n",
6464
" ] = None,\n",
6565
" Ainv0: Optional[\n",
6666
" Union[\n",
67-
" LinearOperatorArgType,\n",
68-
" \"randvars.RandomVariable[LinearOperatorArgType]\",\n",
67+
" LinearOperatorLike,\n",
68+
" \"randvars.RandomVariable[LinearOperatorLike]\",\n",
6969
" ]\n",
7070
" ] = None,\n",
7171
" x0: Optional[Union[np.ndarray, \"randvars.RandomVariable[np.ndarray]\"]] = None,\n",

docs/source/development/implementing_a_probnum_method.ipynb

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151
"source": [
5252
"### Method `probsolve_qp`\n",
5353
"\n",
54-
"We will now take a closer look at the interface of our 1D noisy quadratic optimization method. At a basic level `probsolve_qp` takes a function of the type `Callable[[FloatArgType], FloatArgType]`. This hints that the optimization objective is a 1D function. Our prior knowledge about the parameters $(a,b,c)$ is encoded in the random variable `fun_params0`. However, we want to also give a user the option to not specify any prior knowledge or just a guess about the parameter values, hence this argument is optional or can be an `np.ndarray`. \n",
54+
"We will now take a closer look at the interface of our 1D noisy quadratic optimization method. At a basic level `probsolve_qp` takes a function of the type `Callable[[FloatLike], FloatLike]`. This hints that the optimization objective is a 1D function. Our prior knowledge about the parameters $(a,b,c)$ is encoded in the random variable `fun_params0`. However, we want to also give a user the option to not specify any prior knowledge or just a guess about the parameter values, hence this argument is optional or can be an `np.ndarray`. \n",
5555
"\n",
5656
"The interface also has an `assume_fun` argument, which allows specification of the variant of the probabilistic numerical method to use based on the assumptions about the problem. For convenience, this can be inferred from the problem itself. The actual implementation of the PN method variant which is initialized in a modular fashion is separate from the interface and will be explained later. Finally, the actual optimization routine is called and the result is returned."
5757
]
@@ -67,7 +67,7 @@
6767
"\n",
6868
"import probnum as pn\n",
6969
"from probnum import randvars, linops\n",
70-
"from probnum.typing import FloatArgType, IntArgType\n",
70+
"from probnum.typing import FloatLike, IntLike\n",
7171
"\n",
7272
"rng = np.random.default_rng(seed=123)"
7373
]
@@ -83,14 +83,14 @@
8383
"# %load -s probsolve_qp quadopt_example/_probsolve_qp\n",
8484
"def probsolve_qp(\n",
8585
" rng: np.random.Generator,\n",
86-
" fun: Callable[[FloatArgType], FloatArgType],\n",
86+
" fun: Callable[[FloatLike], FloatLike],\n",
8787
" fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None,\n",
8888
" assume_fun: Optional[str] = None,\n",
89-
" tol: FloatArgType = 10 ** -5,\n",
90-
" maxiter: IntArgType = 10 ** 4,\n",
89+
" tol: FloatLike = 10 ** -5,\n",
90+
" maxiter: IntLike = 10 ** 4,\n",
9191
" noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None,\n",
9292
" callback: Optional[\n",
93-
" Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None]\n",
93+
" Callable[[FloatLike, FloatLike, randvars.RandomVariable], None]\n",
9494
" ] = None,\n",
9595
") -> Tuple[float, randvars.RandomVariable, randvars.RandomVariable, Dict]:\n",
9696
" \"\"\"Probabilistic 1D Quadratic Optimization.\n",
@@ -316,24 +316,24 @@
316316
"# Type aliases for quadratic optimization\n",
317317
"QuadOptPolicyType = Callable[\n",
318318
" [\n",
319-
" Callable[[FloatArgType], FloatArgType],\n",
319+
" Callable[[FloatLike], FloatLike],\n",
320320
" randvars.RandomVariable,\n",
321321
" ],\n",
322-
" FloatArgType,\n",
322+
" FloatLike,\n",
323323
"]\n",
324324
"QuadOptObservationOperatorType = Callable[\n",
325-
" [Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType\n",
325+
" [Callable[[FloatLike], FloatLike], FloatLike], FloatLike\n",
326326
"]\n",
327327
"QuadOptBeliefUpdateType = Callable[\n",
328328
" [\n",
329329
" randvars.RandomVariable,\n",
330-
" FloatArgType,\n",
331-
" FloatArgType,\n",
330+
" FloatLike,\n",
331+
" FloatLike,\n",
332332
" ],\n",
333333
" randvars.RandomVariable,\n",
334334
"]\n",
335335
"QuadOptStoppingCriterionType = Callable[\n",
336-
" [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType],\n",
336+
" [Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike],\n",
337337
" Tuple[bool, Union[str, None]],\n",
338338
"]\n",
339339
"\n",
@@ -430,7 +430,7 @@
430430
" self.stopping_criteria = stopping_criteria\n",
431431
"\n",
432432
" def has_converged(\n",
433-
" self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntArgType\n",
433+
" self, fun: Callable[[FloatLike], FloatLike], iteration: IntLike\n",
434434
" ) -> Tuple[bool, Union[str, None]]:\n",
435435
" \"\"\"Check whether the optimizer has converged.\n",
436436
"\n",
@@ -451,7 +451,7 @@
451451
"\n",
452452
" def optim_iterator(\n",
453453
" self,\n",
454-
" fun: Callable[[FloatArgType], FloatArgType],\n",
454+
" fun: Callable[[FloatLike], FloatLike],\n",
455455
" ) -> Tuple[float, float, randvars.RandomVariable]:\n",
456456
" \"\"\"Generator implementing the optimization iteration.\n",
457457
"\n",
@@ -486,7 +486,7 @@
486486
"\n",
487487
" def optimize(\n",
488488
" self,\n",
489-
" fun: Callable[[FloatArgType], FloatArgType],\n",
489+
" fun: Callable[[FloatLike], FloatLike],\n",
490490
" callback: Optional[\n",
491491
" Callable[[float, float, randvars.RandomVariable], None]\n",
492492
" ] = None,\n",
@@ -584,10 +584,10 @@
584584
"internal representation of those same objects. Canonical examples are different kinds of integer or float types, which might be passed by a user. These are all unified internally.\n",
585585
"\n",
586586
"```python\n",
587-
"IntArgType = Union[int, numbers.Integral, np.integer]\n",
588-
"FloatArgType = Union[float, numbers.Real, np.floating]\n",
587+
"IntLike = Union[int, numbers.Integral, np.integer]\n",
588+
"FloatLike = Union[float, numbers.Real, np.floating]\n",
589589
"\n",
590-
"ShapeArgType = Union[IntArgType, Iterable[IntArgType]]\n",
590+
"ShapeLike = Union[IntLike, Iterable[IntLike]]\n",
591591
"\"\"\"Type of a public API argument for supplying a shape. Values of this type should\n",
592592
"always be converted into :class:`ShapeType` using the function\n",
593593
":func:`probnum.utils.as_shape` before further internal processing.\"\"\"\n",
@@ -602,11 +602,11 @@
602602
"metadata": {},
603603
"outputs": [],
604604
"source": [
605-
"from probnum.typing import ShapeType, IntArgType, ShapeArgType\n",
605+
"from probnum.typing import ShapeType, IntLike, ShapeLike\n",
606606
"from probnum.utils import as_shape\n",
607607
"\n",
608608
"\n",
609-
"def extend_shape(shape: ShapeArgType, extension: IntArgType) -> ShapeType:\n",
609+
"def extend_shape(shape: ShapeLike, extension: IntLike) -> ShapeType:\n",
610610
" return as_shape(shape) + as_shape(extension)"
611611
]
612612
},
@@ -674,7 +674,7 @@
674674
"source": [
675675
"# %load -s explore_exploit_policy quadopt_example/policies\n",
676676
"def explore_exploit_policy(\n",
677-
" fun: Callable[[FloatArgType], FloatArgType],\n",
677+
" fun: Callable[[FloatLike], FloatLike],\n",
678678
" fun_params0: randvars.RandomVariable,\n",
679679
" rng: np.random.Generator,\n",
680680
") -> float:\n",
@@ -704,16 +704,16 @@
704704
"```python\n",
705705
"QuadOptPolicyType = Callable[\n",
706706
" [\n",
707-
" Callable[[FloatArgType], FloatArgType],\n",
707+
" Callable[[FloatLike], FloatLike],\n",
708708
" randvars.RandomVariable\n",
709709
" ],\n",
710-
" FloatArgType,\n",
710+
" FloatLike,\n",
711711
"]\n",
712712
"```\n",
713713
"The observation process for this problem is very simple. It just evaluates the objective function. \n",
714714
"```python\n",
715715
"QuadOptObservationOperatorType = Callable[\n",
716-
" [Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType\n",
716+
" [Callable[[FloatLike], FloatLike], FloatLike], FloatLike\n",
717717
"]\n",
718718
"```\n",
719719
"One can imagine a different probabilistic optimization method which evaluates the gradient as well. In this case the different observation processes would all get the function, its gradient and an evaluation point / action as arguments."
@@ -727,7 +727,7 @@
727727
"source": [
728728
"# %load -s function_evaluation quadopt_example/observation_operators\n",
729729
"def function_evaluation(\n",
730-
" fun: Callable[[FloatArgType], FloatArgType], action: FloatArgType\n",
730+
" fun: Callable[[FloatLike], FloatLike], action: FloatLike\n",
731731
") -> np.float_:\n",
732732
" \"\"\"Observe a (noisy) function evaluation of the quadratic objective.\n",
733733
"\n",
@@ -758,8 +758,8 @@
758758
"QuadOptBeliefUpdateType = Callable[\n",
759759
" [\n",
760760
" randvars.RandomVariable,\n",
761-
" FloatArgType,\n",
762-
" FloatArgType,\n",
761+
" FloatLike,\n",
762+
" FloatLike,\n",
763763
" ],\n",
764764
" randvars.RandomVariable,\n",
765765
"]\n",
@@ -776,8 +776,8 @@
776776
"# %load -s gaussian_belief_update quadopt_example/belief_updates\n",
777777
"def gaussian_belief_update(\n",
778778
" fun_params0: randvars.RandomVariable,\n",
779-
" action: FloatArgType,\n",
780-
" observation: FloatArgType,\n",
779+
" action: FloatLike,\n",
780+
" observation: FloatLike,\n",
781781
" noise_cov: Union[np.ndarray, linops.LinearOperator],\n",
782782
") -> randvars.RandomVariable:\n",
783783
" \"\"\"Update the belief over the parameters with an observation.\n",
@@ -823,7 +823,7 @@
823823
"The stopping criteria are also implemented as simple methods, which return a `bool` determining convergence and a string giving the name of the criterion.\n",
824824
"```python\n",
825825
"QuadOptStoppingCriterionType = Callable[\n",
826-
" [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType],\n",
826+
" [Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike],\n",
827827
" Tuple[bool, Union[str, None]],\n",
828828
"]\n",
829829
"```\n",
@@ -838,11 +838,11 @@
838838
"source": [
839839
"# %load -s parameter_uncertainty quadopt_example/stopping_criteria\n",
840840
"def parameter_uncertainty(\n",
841-
" fun: Callable[[FloatArgType], FloatArgType],\n",
841+
" fun: Callable[[FloatLike], FloatLike],\n",
842842
" fun_params0: randvars.RandomVariable,\n",
843-
" current_iter: IntArgType,\n",
844-
" abstol: FloatArgType,\n",
845-
" reltol: FloatArgType,\n",
843+
" current_iter: IntLike,\n",
844+
" abstol: FloatLike,\n",
845+
" reltol: FloatLike,\n",
846846
") -> Tuple[bool, Union[str, None]]:\n",
847847
" \"\"\"Termination based on numerical uncertainty about the parameters.\n",
848848
"\n",

docs/source/development/quadopt_example/_probsolve_qp.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import probnum as pn
77
import probnum.utils as _utils
88
from probnum import linops, randvars
9-
from probnum.typing import FloatArgType, IntArgType
9+
from probnum.typing import FloatLike, IntLike
1010

1111
from .belief_updates import gaussian_belief_update
1212
from .observation_operators import function_evaluation
@@ -17,14 +17,14 @@
1717

1818
def probsolve_qp(
1919
rng: np.random.Generator,
20-
fun: Callable[[FloatArgType], FloatArgType],
20+
fun: Callable[[FloatLike], FloatLike],
2121
fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None,
2222
assume_fun: Optional[str] = None,
23-
tol: FloatArgType = 10 ** -5,
24-
maxiter: IntArgType = 10 ** 4,
23+
tol: FloatLike = 10 ** -5,
24+
maxiter: IntLike = 10 ** 4,
2525
noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None,
2626
callback: Optional[
27-
Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None]
27+
Callable[[FloatLike, FloatLike, randvars.RandomVariable], None]
2828
] = None,
2929
) -> Tuple[float, randvars.RandomVariable, randvars.RandomVariable, Dict]:
3030
"""Probabilistic 1D Quadratic Optimization.

docs/source/development/quadopt_example/belief_updates.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77

88
import probnum as pn
99
from probnum import linops, randvars
10-
from probnum.typing import FloatArgType
10+
from probnum.typing import FloatLike
1111

1212

1313
def gaussian_belief_update(
1414
fun_params0: randvars.RandomVariable,
15-
action: FloatArgType,
16-
observation: FloatArgType,
15+
action: FloatLike,
16+
observation: FloatLike,
1717
noise_cov: Union[np.ndarray, linops.LinearOperator],
1818
) -> randvars.RandomVariable:
1919
"""Update the belief over the parameters with an observation.

docs/source/development/quadopt_example/observation_operators.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
import numpy as np
66

77
from probnum import utils
8-
from probnum.typing import FloatArgType
8+
from probnum.typing import FloatLike
99

1010

1111
def function_evaluation(
12-
fun: Callable[[FloatArgType], FloatArgType], action: FloatArgType
12+
fun: Callable[[FloatLike], FloatLike], action: FloatLike
1313
) -> np.float_:
1414
"""Observe a (noisy) function evaluation of the quadratic objective.
1515

docs/source/development/quadopt_example/policies.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,11 @@
55
import numpy as np
66

77
from probnum import randvars
8-
from probnum.typing import FloatArgType
8+
from probnum.typing import FloatLike
99

1010

1111
def explore_exploit_policy(
12-
fun: Callable[[FloatArgType], FloatArgType],
12+
fun: Callable[[FloatLike], FloatLike],
1313
fun_params0: randvars.RandomVariable,
1414
rng: np.random.Generator,
1515
) -> float:
@@ -31,7 +31,7 @@ def explore_exploit_policy(
3131

3232

3333
def stochastic_policy(
34-
fun: Callable[[FloatArgType], FloatArgType],
34+
fun: Callable[[FloatLike], FloatLike],
3535
fun_params0: randvars.RandomVariable,
3636
rng: np.random.Generator,
3737
) -> float:

docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import probnum as pn
88
import probnum.utils as _utils
99
from probnum import linops, randvars
10-
from probnum.typing import FloatArgType, IntArgType
10+
from probnum.typing import FloatLike, IntLike
1111

1212
from .belief_updates import gaussian_belief_update
1313
from .observation_operators import function_evaluation
@@ -17,24 +17,24 @@
1717
# Type aliases for quadratic optimization
1818
QuadOptPolicyType = Callable[
1919
[
20-
Callable[[FloatArgType], FloatArgType],
20+
Callable[[FloatLike], FloatLike],
2121
randvars.RandomVariable,
2222
],
23-
FloatArgType,
23+
FloatLike,
2424
]
2525
QuadOptObservationOperatorType = Callable[
26-
[Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType
26+
[Callable[[FloatLike], FloatLike], FloatLike], FloatLike
2727
]
2828
QuadOptBeliefUpdateType = Callable[
2929
[
3030
randvars.RandomVariable,
31-
FloatArgType,
32-
FloatArgType,
31+
FloatLike,
32+
FloatLike,
3333
],
3434
randvars.RandomVariable,
3535
]
3636
QuadOptStoppingCriterionType = Callable[
37-
[Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType],
37+
[Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike],
3838
Tuple[bool, Union[str, None]],
3939
]
4040

@@ -131,7 +131,7 @@ def __init__(
131131
self.stopping_criteria = stopping_criteria
132132

133133
def has_converged(
134-
self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntArgType
134+
self, fun: Callable[[FloatLike], FloatLike], iteration: IntLike
135135
) -> Tuple[bool, Union[str, None]]:
136136
"""Check whether the optimizer has converged.
137137
@@ -152,7 +152,7 @@ def has_converged(
152152

153153
def optim_iterator(
154154
self,
155-
fun: Callable[[FloatArgType], FloatArgType],
155+
fun: Callable[[FloatLike], FloatLike],
156156
) -> Tuple[float, float, randvars.RandomVariable]:
157157
"""Generator implementing the optimization iteration.
158158
@@ -187,7 +187,7 @@ def optim_iterator(
187187

188188
def optimize(
189189
self,
190-
fun: Callable[[FloatArgType], FloatArgType],
190+
fun: Callable[[FloatLike], FloatLike],
191191
callback: Optional[
192192
Callable[[float, float, randvars.RandomVariable], None]
193193
] = None,

0 commit comments

Comments
 (0)