1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -19,6 +19,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- 1D lumped elements (with zero lateral extent) are no longer allowed. Use a small finite lateral extent (e.g., `1e-6`) instead.
- Added `GaussianOverlapMonitor` and `AstigmaticGaussianOverlapMonitor` for decomposing electromagnetic fields onto Gaussian beam profiles.
- Added `GaussianPort` and `AstigmaticGaussianPort` for S-matrix calculations using Gaussian beam sources and overlap monitors.
- Added parallel adjoint scheduling (adjoint simulations launched concurrently with the forward simulation) for supported monitors via `config.adjoint.parallel_all_port` (requires `local_gradient=True`).

### Changed
- `ModeSortSpec.sort_key` is now required with a default of `"n_eff"` (previously optional with `None` default). `ModeSortSpec.sort_order` is now optional with a default of `None`, which automatically selects the natural order based on `sort_key` and `sort_reference`: ascending when a reference is provided (closest first), otherwise descending for `n_eff` and polarization fractions (higher values first), ascending for `k_eff` and `mode_area` (lower values first).
4 changes: 4 additions & 0 deletions docs/configuration/reference.rst
@@ -125,6 +125,10 @@ remote defaults and emits a warning reminding you to enable local gradients.
- ``"adjoint_data"``
- Yes
- Directory (relative to the working directory) where intermediate gradient artifacts are stored when ``local_gradient`` is enabled.
* - ``parallel_all_port``
- ``False``
- Yes
- Launch canonical adjoint simulations for supported monitors in parallel with the forward solve when local gradients are enabled.
* - ``gradient_precision``
- ``"single"``
- No
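A minimal sketch of opting in to the new option, assuming the adjoint settings documented in the table above are exposed as attributes of the runtime config under an `adjoint` section (the exact `td.config.adjoint.*` attribute path is an assumption; names mirror the reference table):

```python
import tidy3d as td

# Assumption: the adjoint options from docs/configuration/reference.rst are
# reachable as attributes of `td.config.adjoint`.
td.config.adjoint.local_gradient = True        # parallel adjoint scheduling requires local gradients
td.config.adjoint.parallel_all_port = True     # launch adjoint sims concurrently with the forward solve
td.config.adjoint.adjoint_data = "adjoint_data"  # directory for intermediate gradient artifacts
```

With both flags set, adjoint simulations for supported monitors are scheduled alongside the forward solve rather than after it, as described in the CHANGELOG entry above.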
13 changes: 10 additions & 3 deletions tests/test_components/autograd/test_autograd.py
@@ -282,9 +282,16 @@ def emulated_run_async_fwd(simulations, **run_kwargs) -> td.SimulationData:
     batch_data_orig, task_ids_fwd = {}, {}
     sim_fields_keys_dict = run_kwargs.pop("sim_fields_keys_dict", None)
     for task_name, simulation in simulations.items():
+        run_kwargs_task = dict(run_kwargs)
         if sim_fields_keys_dict is not None:
-            run_kwargs["sim_fields_keys"] = sim_fields_keys_dict[task_name]
-        sim_data_orig, task_name_fwd = emulated_run_fwd(simulation, task_name, **run_kwargs)
+            sim_fields_keys = sim_fields_keys_dict.get(task_name)
+            if sim_fields_keys is not None and "_parallel_adj_" not in task_name:
+                run_kwargs_task["sim_fields_keys"] = sim_fields_keys
+            else:
+                run_kwargs_task.pop("sim_fields_keys", None)
+        sim_data_orig, task_name_fwd = emulated_run_fwd(
+            simulation, task_name, **run_kwargs_task
+        )
         batch_data_orig[task_name] = sim_data_orig
         task_ids_fwd[task_name] = task_name_fwd

@@ -3005,7 +3012,7 @@ def objective(args):

     # the following will raise a warning (and fail) if the dispersive material
     # model is called without a frequency
-    with AssertLogLevel("INFO"):
+    with AssertLogLevel("WARNING"):
         grad = ag.grad(objective)(params0)

