diff --git a/pyadjoint/optimization/optimization.py b/pyadjoint/optimization/optimization.py
index 259c6e8b..ec26edcd 100644
--- a/pyadjoint/optimization/optimization.py
+++ b/pyadjoint/optimization/optimization.py
@@ -1,3 +1,5 @@
+from functools import partial
+
 import numpy as np

 from ..reduced_functional import ReducedFunctional
@@ -59,8 +61,8 @@ def minimize_scipy_generic(rf_np, method, bounds=None, **kwargs):
     m = [p.tape_value() for p in rf_np.controls]
     m_global = rf_np.obj_to_array(m)
     J = rf_np.__call__
-    dJ = lambda m: rf_np.derivative()
-    H = lambda x, p: rf_np.hessian(p)
+    dJ = lambda m: rf_np.derivative(apply_riesz=False)
+    H = lambda x, p: rf_np.hessian(p, apply_riesz=False)

     if "options" not in kwargs:
         kwargs["options"] = {}
@@ -153,8 +155,8 @@ def minimize_custom(rf_np, bounds=None, **kwargs):

     m_global = rf_np.obj_to_array(m)
     J = rf_np.__call__
-    dJ = lambda m: rf_np.derivative(m)
-    H = rf_np.hessian
+    dJ = partial(rf_np.derivative, apply_riesz=False)
+    H = partial(rf_np.hessian, apply_riesz=False)

     if bounds is not None:
         bounds = serialise_bounds(rf_np, bounds)
diff --git a/pyadjoint/reduced_functional_numpy.py b/pyadjoint/reduced_functional_numpy.py
index bf0d73bf..c3cdc6a2 100644
--- a/pyadjoint/reduced_functional_numpy.py
+++ b/pyadjoint/reduced_functional_numpy.py
@@ -60,12 +60,6 @@ def get_global(self, m):

     @no_annotations
     def derivative(self, adj_input=1.0, apply_riesz=True):
-
-        if not apply_riesz:
-            raise ValueError(
-                "ReducedFunctionalNumpy only returns primal gradients."
-            )
-
         dJdm = self.rf.derivative(adj_input=adj_input, apply_riesz=apply_riesz)
         dJdm = Enlist(dJdm)

@@ -80,12 +74,6 @@ def derivative(self, adj_input=1.0, apply_riesz=True):

     @no_annotations
     def hessian(self, m_dot_array, apply_riesz=True):
-
-        if not apply_riesz:
-            raise ValueError(
-                "ReducedFunctionalNumpy only returns primal gradients."
-            )
-
         # Calling derivative is needed, see i.e. examples/stokes-shape-opt
         self.derivative()
         m_copies = [control.copy_data() for control in self.controls]