diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index df9bdf0b..45f3f915 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,4 +1,8 @@ # .git-blame-ignore-revs # created as described in: https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view + # black-format files 07517c3353c392106cabae003d589946ea25918a +82f6df5ed34460374ce7c0fdca089d8caa570b9f +aa7050f973f36dc204ea495e105b5432223dc68d +a3dd7e5e9bc0bed404792e9b241f1639ade76f33 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..9295db3d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,30 @@ +--- +name: Bug report +about: Create a report to help us improve +title: "[BUG]" +labels: '' +assignees: '' + +--- + +**Summary and expected behavior** + +**Code for reproduction (using python script or command line interface)** +``` +# paste your code here + +``` + +**Screenshots or steps for reproduction (using napari GUI)** + +**Include relevant logs which are created next to the output dir, name of the dataset, yaml file(s) if encountering reconstruction errors.** + +**Expected behavior** +A clear and concise description of what you expected to happen. 
+ +**Environment:** + +Operating system: +Python version: +Python environment (command line, IDE, Jupyter notebook, etc): +Micro-Manager/pycromanager version: diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 00000000..f7917f4f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,12 @@ +--- +name: Documentation +about: Help us improve documentation +title: "[DOC]" +labels: '' +assignees: '' + +--- + +**Suggested improvement** + +**Optional: Pull request with better documentation** diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..9e04f113 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: "[FEATURE]" +labels: '' +assignees: '' + +--- + +**Problem** + +**Proposed solution** + + +**Alternatives you have considered, if any** + +**Additional context** +Note relevant experimental conditions or datasets diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 16645e0c..9f49a51a 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -5,8 +5,6 @@ name: lint, style, and tests on: pull_request: - branches: - - main jobs: style: @@ -77,25 +75,25 @@ jobs: run: | isort --check waveorder - tests: - needs: [style, isort] # lint - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.10", "3.11", "3.12"] + # needs: [style, isort] # lint + # runs-on: ubuntu-latest + # strategy: + # matrix: + # python-version: ["3.10", "3.11", "3.12"] - steps: - - uses: actions/checkout@v3 + # steps: + # - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} + # - uses: actions/setup-python@v4 + # with: + # python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install 
--upgrade pip - pip install ".[dev]" + # - name: Install dependencies + # run: | + # python -m pip install --upgrade pip + # pip install ".[all,dev]" - - name: Test with pytest - run: | - pytest -v --cov=./ --cov-report=xml + # - name: Test with pytest + # run: | + # pytest -v + # pytest -v --cov=./ --cov-report=xml diff --git a/.github/workflows/pytests.yml b/.github/workflows/pytests.yml deleted file mode 100644 index 8028c79d..00000000 --- a/.github/workflows/pytests.yml +++ /dev/null @@ -1,48 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: pytests - -on: [push] - -jobs: - build: - - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ['3.10', '3.11', '3.12'] - - steps: - - name: Checkout repo - uses: actions/checkout@v2 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install .[dev] - -# - name: Lint with flake8 -# run: | -# pip install flake8 -# # stop the build if there are Python syntax errors or undefined names -# flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics -# # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide -# flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - - name: Test with pytest - run: | - pytest -v - pytest --cov=./ --cov-report=xml - -# - name: Upload coverage to Codecov -# uses: codecov/codecov-action@v1 -# with: -# flags: unittest -# name: codecov-umbrella -# fail_ci_if_error: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..ef91d701 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,45 @@ +name: test + +on: [push] + +jobs: + test: + name: ${{ matrix.platform }} py${{ matrix.python-version }} + runs-on: ${{ matrix.platform }} + strategy: + matrix: + platform: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.10", "3.11"] + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + # these libraries enable testing on Qt on linux + - uses: tlambert03/setup-qt-libs@v1 + + # strategy borrowed from vispy for installing opengl libs on windows + - name: Install Windows OpenGL + if: runner.os == 'Windows' + run: | + git clone --depth 1 https://github.com/pyvista/gl-ci-helpers.git + powershell gl-ci-helpers/appveyor/install_opengl.ps1 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install setuptools tox tox-gh-actions + + # https://github.com/napari/cookiecutter-napari-plugin/commit/cb9a8c152b68473e8beabf44e7ab11fc46483b5d + - name: Test + uses: aganders3/headless-gui@v1 + with: + run: python -m tox + + - name: Coverage + uses: codecov/codecov-action@v3 diff --git a/.gitignore b/.gitignore index 29fece40..328ad665 100644 --- a/.gitignore +++ b/.gitignore @@ -143,7 +143,7 @@ dmypy.json # written by setuptools_scm */_version.py -recOrder/_version.py +waveorder/_version.py *.autosave # images @@ -151,3 +151,7 @@ recOrder/_version.py *.png *.tif[f] *.pdf + +# example 
data +/examples/data_temp/* +/logs/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 29907895..71f950ec 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,3 @@ - repos: # basic pre-commit - repo: https://github.com/pre-commit/pre-commit-hooks @@ -31,4 +30,4 @@ repos: - repo: https://github.com/psf/black rev: 25.1.0 hooks: - - id: black + - id: black \ No newline at end of file diff --git a/CITATION.cff b/CITATION.cff index dc624506..3e593ee1 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -43,8 +43,7 @@ identifiers: - type: url value: 'https://www.napari-hub.org/plugins/recOrder-napari' description: >- - recOrder-napari plugin for label-free imaging that - depends on waveOrder library + waveorder plugin for label-free imaging (TODO: update URL) - type: doi value: 10.1364/BOE.455770 description: >- diff --git a/LICENSE b/LICENSE index 5f5c2e02..fda16f73 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2019, Chan Zuckerberg Biohub +Copyright (c) 2025, Chan Zuckerberg Biohub Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/README.md b/README.md index 9a3e1d53..8ddb4739 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# waveorder +

+ Image +

[![Python package index](https://img.shields.io/pypi/v/waveorder.svg)](https://pypi.org/project/waveorder) [![PyPI monthly downloads](https://img.shields.io/pypi/dm/waveorder.svg)](https://pypistats.org/packages/waveorder) @@ -8,15 +10,21 @@ ![GitHub forks](https://img.shields.io/github/forks/mehta-lab/waveorder) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/waveorder) +Label-agnostic computational microscopy of architectural order. -This computational imaging library enables wave-optical simulation and reconstruction of optical properties that report microscopic architectural order. +# Overview -## Computational label-agnostic imaging +`waveorder` is a generalist framework for label-agnostic computational microscopy of architectural order, i.e., density, alignment, and orientation of biomolecules with the spatial resolution down to the diffraction limit. The framework implements wave-optical simulations and corresponding reconstruction algorithms for diverse label-free and fluorescence computational imaging methods that enable quantitative imaging of the architecture of dynamic cell systems. + +Our goal is to enable modular and user-friendly implementations of computational microscopy methods for dynamic imaging across the scales of organelles, cells, and tissues. + + +The framework is described in the following [preprint](https://arxiv.org/abs/2412.09775). https://github.com/user-attachments/assets/4f9969e5-94ce-4e08-9f30-68314a905db6 +
`waveorder` enables simulations and reconstructions of label-agnostic microscopy data as described in the following [preprint](https://arxiv.org/abs/2412.09775) -
Chandler et al. 2024

 @article{chandler_2024,
@@ -31,23 +39,38 @@ https://github.com/user-attachments/assets/4f9969e5-94ce-4e08-9f30-68314a905db6
 
-Specifically, `waveorder` enables simulation and reconstruction of 2D or 3D: +# Computational Microscopy Methods -1. __phase, projected retardance, and in-plane orientation__ from a polarization-diverse volumetric brightfield acquisition ([QLIPP](https://elifesciences.org/articles/55502)), + `waveorder` framework enables simulations and reconstructions of data for diverse one-photon (single-scattering based) computational microscopy methods, summarized below. -2. __phase__ from a volumetric brightfield acquisition ([2D phase](https://www.osapublishing.org/ao/abstract.cfm?uri=ao-54-28-8566)/[3D phase](https://www.osapublishing.org/ao/abstract.cfm?uri=ao-57-1-a205)), +## Label-free microscopy -3. __phase__ from an illumination-diverse volumetric acquisition ([2D](https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-23-9-11394&id=315599)/[3D](https://www.osapublishing.org/boe/fulltext.cfm?uri=boe-7-10-3940&id=349951) differential phase contrast), +### Quantitative label-free imaging with phase and polarization (QLIPP) -4. __fluorescence density__ from a widefield volumetric fluorescence acquisition (fluorescence deconvolution). +Acquisition, calibration, background correction, reconstruction, and applications of QLIPP are described in the following [E-Life Paper](https://elifesciences.org/articles/55502): -The [examples](https://github.com/mehta-lab/waveorder/tree/main/examples) demonstrate simulations and reconstruction for 2D QLIPP, 3D PODT, 3D fluorescence deconvolution, and 2D/3D PTI methods. +[![Unveiling the invisible](https://github.com/mehta-lab/recOrder/blob/main/docs/images/comms_video_screenshot.png?raw=true)](https://www.youtube.com/watch?v=JEZAaPeZhck) + +
+ Guo et al. 2020 +

+@article{guo_2020,
+	author = {Guo, Syuan-Ming and Yeh, Li-Hao and Folkesson, Jenny and Ivanov, Ivan E. and Krishnan, Anitha P. and Keefe, Matthew G. and Hashemi, Ezzat and Shin, David and Chhun, Bryant B. and Cho, Nathan H. and Leonetti, Manuel D. and Han, May H. and Nowakowski, Tomasz J. and Mehta, Shalin B.},
+	title = {Revealing architectural order with quantitative label-free imaging and deep learning},
+	journal = {eLife},
+	volume = {9},
+	pages = {e55502},
+	year = {2020},
+	doi = {10.7554/eLife.55502}
+}
+
+
-If you are interested in deploying QLIPP, phase from brightfield, or fluorescence deconvolution for label-agnostic imaging at scale, checkout our [napari plugin](https://www.napari-hub.org/plugins/recOrder-napari), [`recOrder-napari`](https://github.com/mehta-lab/recOrder). +### Permittivity tensor imaging (PTI) -## Permittivity tensor imaging +PTI provides volumetric reconstructions of mean permittivity ($\propto$ material density), differential permittivity ($\propto$ material anisotropy), 3D orientation, and optic sign. The following figure summarizes PTI acquisition and reconstruction with a small optical section of the mouse brain tissue: -Additionally, `waveorder` enabled the development of a new label-free imaging method, __permittivity tensor imaging (PTI)__, that measures density and 3D orientation of biomolecules with diffraction-limited resolution. These measurements are reconstructed from polarization-resolved images acquired with a sequence of oblique illuminations. +![Data_flow](https://github.com/mehta-lab/waveorder/blob/main/readme.png?raw=true) The acquisition, calibration, background correction, reconstruction, and applications of PTI are described in the following [paper](https://doi.org/10.1101/2020.12.15.422951) published in Nature Methods: @@ -70,18 +93,104 @@ The acquisition, calibration, background correction, reconstruction, and applica
-PTI provides volumetric reconstructions of mean permittivity ($\propto$ material density), differential permittivity ($\propto$ material anisotropy), 3D orientation, and optic sign. The following figure summarizes PTI acquisition and reconstruction with a small optical section of the mouse brain tissue: -![Data_flow](https://github.com/mehta-lab/waveorder/blob/main/readme.png?raw=true) +### Quantitative phase imaging (QPI) from defocus +__phase__ from a volumetric brightfield acquisition ([2D phase](https://www.osapublishing.org/ao/abstract.cfm?uri=ao-54-28-8566)/[3D phase](https://www.osapublishing.org/ao/abstract.cfm?uri=ao-57-1-a205)) + +![Image](https://github.com/user-attachments/assets/caf7714b-59ee-40bb-9ee4-f13db4feece6) -## Examples -The [examples](https://github.com/mehta-lab/waveorder/tree/main/examples) illustrate simulations and reconstruction for 2D QLIPP, 3D phase from brightfield, and 2D/3D PTI methods. -If you are interested in deploying QLIPP or phase from brightbrield, or fluorescence deconvolution for label-agnostic imaging at scale, checkout our [napari plugin](https://www.napari-hub.org/plugins/recOrder-napari), [`recOrder-napari`](https://github.com/mehta-lab/recOrder). +
+ Jenkins and Gaylord 2015 (2D QPI from defocus) +

+	@article{Jenkins:15,
+	author = {Micah H. Jenkins and Thomas K. Gaylord},
+	journal = {Appl. Opt.},
+	keywords = {Phase retrieval; Partial coherence in imaging; Interferometric imaging ; Imaging systems; Microlens arrays; Optical transfer functions; Phase contrast; Spatial resolution; Three dimensional imaging},
+	number = {28},
+	pages = {8566--8579},
+	publisher = {Optica Publishing Group},
+	title = {Quantitative phase microscopy via optimized inversion of the phase optical transfer function},
+	volume = {54},
+	month = {Oct},
+	year = {2015},
+	url = {https://opg.optica.org/ao/abstract.cfm?URI=ao-54-28-8566},
+	doi = {10.1364/AO.54.008566},
+}
+
+
+ +
+ Soto, Rodrigo, and Alieva 2018 (3D QPI from defocus) +

+@article{Soto:18,
+author = {Juan M. Soto and Jos\'{e} A. Rodrigo and Tatiana Alieva},
+journal = {Appl. Opt.},
+keywords = {Coherence and statistical optics; Image reconstruction techniques; Optical transfer functions; Optical inspection; Three-dimensional microscopy; Acoustooptic modulators; Illumination design; Inverse design; LED sources; Three dimensional imaging; Three dimensional reconstruction},
+number = {1},
+pages = {A205--A214},
+publisher = {Optica Publishing Group},
+title = {Optical diffraction tomography with fully and partially coherent illumination in high numerical aperture label-free microscopy \[Invited\]},
+volume = {57},
+month = {Jan},
+year = {2018},
+url = {https://opg.optica.org/ao/abstract.cfm?URI=ao-57-1-A205},
+doi = {10.1364/AO.57.00A205},
+}
+
+
+ +### QPI with differential phase contrast + __phase__ from differential phase contrast + +***Work in progress*** + +* [2D](https://www.osapublishing.org/oe/fulltext.cfm?uri=oe-23-9-11394&id=315599) DPC +* [3D](https://www.osapublishing.org/boe/fulltext.cfm?uri=boe-7-10-3940&id=349951) DPC + + +## Fluorescence microscopy + +### Widefield deconvolution microscopy +__fluorescence density__ from a widefield volumetric fluorescence acquisition. + +
+ Swedlow 2013 +

+@article{Swedlow:13,
+author = {Swedlow, John R.},
+journal = {Methods Cell Biol.},
+title = {Quantitative fluorescence microscopy and image deconvolution},
+year = {2013},
+volume = {114},
+pages = {407--26},
+doi = {10.1016/B978-0-12-407761-4.00017-8}
+}
+
+
+ +### Oblique plane light-sheet microscopy +__fluorescence density__ from oblique plane light-sheet microscopy. + +
+ Ivanov, Hirata-Miyasaki, Chandler et al. 2024 +

+@article{ivanov_2024,
+author = {Ivanov, Ivan E. and Hirata-Miyasaki, Eduardo and Chandler, Talon and Cheloor-Kovilakam, Rasmi and Liu, Ziwen and Pradeep, Soorya and Liu, Chad and Bhave, Madhura and Khadka, Sudip and Arias, Carolina and Leonetti, Manuel D. and Huang, Bo and Mehta, Shalin B.},
+title = {Mantis: High-throughput 4D imaging and analysis of the molecular and physical architecture of cells},
+journal = {PNAS Nexus},
+volume = {3},
+number = {9},
+pages = {pgae323},
+year = {2024},
+doi = {10.1093/pnasnexus/pgae323}
+}
+
+
+ ## Citation -Please cite this repository, along with the relevant preprint or paper, if you use or adapt this code. The citation information can be found by clicking "Cite this repository" button in the About section in the right sidebar. +Please cite this repository, along with the relevant publications and preprints, if you use or adapt this code. The citation information can be found by clicking "Cite this repository" button in the About section in the right sidebar. ## Installation @@ -98,21 +207,17 @@ Install `waveorder` from PyPI: pip install waveorder ``` -Use `waveorder` in your scripts: - +(Optional) Install all visualization dependencies (napari, jupyter), clone the repository, and run an example script: ```sh -python ->>> import waveorder -``` - -(Optional) Install example dependencies, clone the repository, and run an example script: -```sh -pip install waveorder[examples] +pip install "waveorder[all]" git clone https://github.com/mehta-lab/waveorder.git python waveorder/examples/models/phase_thick_3d.py ``` (M1 users) `pytorch` has [incomplete GPU support](https://github.com/pytorch/pytorch/issues/77764), so please use `export PYTORCH_ENABLE_MPS_FALLBACK=1` -to allow some operators to fallback to CPU -if you plan to use GPU acceleration for polarization reconstruction. \ No newline at end of file +to allow some operators to fallback to CPU if you plan to use GPU acceleration for polarization reconstruction. + + +## Examples +The [examples](https://github.com/mehta-lab/waveorder/tree/main/docs/examples) illustrate simulations and reconstruction for 2D QLIPP, 3D phase from brightfield, and 2D/3D PTI methods. diff --git a/docs/QLIPP.md b/docs/QLIPP.md new file mode 100644 index 00000000..bf633095 --- /dev/null +++ b/docs/QLIPP.md @@ -0,0 +1,58 @@ +# waveorder + +`waveorder` is a collection of computational imaging methods. 
It currently provides QLIPP (quantitative label-free imaging with phase and polarization), phase from defocus, and fluorescence deconvolution. + +These are the kinds of data you can acquire with `waveorder` and QLIPP: + +https://user-images.githubusercontent.com/9554101/271128301-cc71da57-df6f-401b-a955-796750a96d88.mov + +https://user-images.githubusercontent.com/9554101/271128510-aa2180af-607f-4c0c-912c-c18dc4f29432.mp4 + +## What do I need to use `waveorder` +`waveorder` is to be used alongside a conventional widefield microscope. For QLIPP, the microscope must be fitted with an analyzer and a universal polarizer: + +https://user-images.githubusercontent.com/9554101/273073475-70afb05a-1eb7-4019-9c42-af3e07bef723.mp4 + +For phase-from-defocus or fluorescence deconvolution methods, the universal polarizer is optional. + +The overall structure of `waveorder` is shown in Panel B, highlighting the structure of the graphical user interface (GUI) through a napari plugin and the command-line interface (CLI) that allows users to perform reconstructions. + +![Flow Chart](./images/waveorder_Fig1_Overview.png) + +## Software Quick Start + +(Optional but recommended) install [anaconda](https://www.anaconda.com/products/distribution) and create a virtual environment: + +```sh +conda create -y -n waveorder python=3.10 +conda activate waveorder +``` + +Install `waveorder` with acquisition dependencies +(napari with PyQt6 and pycro-manager): + +```sh +pip install waveorder[all] +``` + +Install `waveorder` without napari, QtBindings (PyQt/PySide) and pycro-manager dependencies: + +```sh +pip install waveorder +``` + +Open `napari` with `waveorder`: + +```sh +napari -w waveorder +``` + +For more help, see [`waveorder`'s documentation](https://github.com/mehta-lab/waveorder/tree/main/docs). To install `waveorder` +on a microscope, see the [microscope installation guide](https://github.com/mehta-lab/waveorder/blob/main/docs/microscope-installation-guide.md). 
+ +## Dataset + +[Slides](https://doi.org/10.5281/zenodo.5135889) and a [dataset](https://doi.org/10.5281/zenodo.5178487) shared during a workshop on QLIPP can be found on Zenodo, and the napari plugin's sample contributions (`File > Open Sample > waveorder` in napari). + +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5178487.svg)](https://doi.org/10.5281/zenodo.5178487) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5135889.svg)](https://doi.org/10.5281/zenodo.5135889) diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..526d6ff0 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,27 @@ +# Welcome to `waveorder`'s documentation + +We have organized our documentation by user type and intended task. + +## Software users + +**Reconstruct existing data:** start with the [reconstruction guide](./reconstruction-guide.md) and consult the [data schema](./data-schema.md) for `waveorder`'s data format. + +**Reconstruct with a GUI:** start with the [plugin's reconstruction guide](./napari-plugin-guide.md#reconstruction-tab). + +**Integrate `waveorder` into my software:** start with [`examples/`](./examples/) + +## Hardware users + +**Buy hardware for a polarized-light installation:** start with the [buyer's guide](./buyers-guide.md). + +**Install `waveorder` on my microscope:** start with the [microscope installation guide](./microscope-installation-guide.md). + +**Use the `napari plugin` to calibrate:** start with the [plugin guide](./napari-plugin-guide.md). + +**Understand `waveorder`'s calibration routine**: read the [calibration guide](./calibration-guide.md). + +## Software developers + +**Set up a development environment and test `waveorder`**: start with the [development guide](./development-guide.md). + +**Report an error in the documentation or code:** [open an issue](https://github.com/mehta-lab/waveorder/issues/new/choose) or [send us an email](mailto:shalin.mehta@czbiohub.org,talon.chandler@czbiohub.org). 
We appreciate your help! diff --git a/docs/__init__.py b/docs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/docs/buyers-guide.md b/docs/buyers-guide.md new file mode 100644 index 00000000..a6e5b324 --- /dev/null +++ b/docs/buyers-guide.md @@ -0,0 +1,39 @@ +# Buyer's Guide + +## Quantitative phase imaging: + +You can use a transmitted light source (LED or a lamp) and a condenser commonly available on almost all microscopes. In addition to the transmitted light imaging path, you will need a motorized stage for acquiring through-focus image stacks. + +## Quantitative polarization imaging (PolScope): + +The following list of components assumes that you already have a transmitted light source (LED or a lamp) and a condenser. + +Buyers have two options: +1. buy a complete hardware kit from the OpenPolScope project, or +2. assemble your own kit piece by piece. + +### Buy a kit from the OpenPolScope project + +- Read about the [OpenPolScope Hardware Kit](https://openpolscope.org/pages/OPS_Hardware.htm). +- Complete the [OpenPolScope information request form](https://openpolscope.org/pages/Info_Request_Form.htm). + +### Buy individual components + +The components are listed in the order in which they process light. See the build video here to see how to assemble these components on your microscope. + +https://github.com/user-attachments/assets/a0a8bffb-bf81-4401-9ace-3b4955436b57 + +| Part | Approximate Price | Notes | +|--------------------------|-------------------|-----------------------------| +| Illumination filter | $200 | We suggest [a Thorlabs CWL = 530 nm, FWHM = 10 nm notch filter](https://www.thorlabs.com/thorproduct.cfm?partnumber=FBH530-10).| +| Circular polarizer | $350 | We suggest [a Thorlabs 532 nm, left-hand circular polarizer](https://www.thorlabs.com/thorproduct.cfm?partnumber=CP1L532).| +| Liquid crystal compensator | $6,000 | Meadowlark optics LVR-42x52mm-VIS-ASSY or LVR-50x60mm-VIS-POL-ASSY. 
Although near-variants are listed in the [Meadowlark catalog](https://www.meadowlark.com/product/liquid-crystal-variable-retarder/), this is a custom part with two liquid crystals in a custom housing. [Contact Meadowlark](https://www.meadowlark.com/contact-us/) for a quote.| +| Liquid crystal control electronics | $2,000 | [Meadowlark optics D5020-20V](https://www.meadowlark.com/product/liquid-crystal-digital-interface-controller/). Choose the high-voltage 20V version. +| Liquid crystal adapter | $25-$500 | A 3D printed part that aligns the liquid crystal compensator in a microscope stand's illumination path. Check for your stand among the [OpenPolScope `.stl` files](https://github.com/amitabhverma/Microscope-LC-adapters/tree/main/stl_files) or [contact us](mailto:compmicro@czbiohub.org) for more options.| +| Circular analyzer (opposite handedness) | $350 | We suggest [a Thorlabs 532 nm, right-hand circular polarizer](https://www.thorlabs.com/thorproduct.cfm?partnumber=CP1R532).| + +If you need help selecting or assembling the components, please start an issue on this GitHub repository or contact us at compmicro@czbiohub.org. + +## Quantitative phase and polarization imaging (QLIPP): + +Combining the Z-stage and the PolScope components listed above enables joint phase and polarization imaging with `waveorder`. diff --git a/docs/calibration-guide.md b/docs/calibration-guide.md new file mode 100644 index 00000000..3692025e --- /dev/null +++ b/docs/calibration-guide.md @@ -0,0 +1,64 @@ +# Calibration guide +This guide describes `waveorder`'s calibration routine with details about its goals, parameters, and evaluation metrics. + +## Why calibrate? + +`waveorder` sends commands via Micro-Manager (or a TriggerScope) to apply voltages to the liquid crystals which modify the polarization of the light that illuminates the sample. 
`waveorder` could apply a fixed set of voltages so the user would never have to worry about these details, but this approach leads to extremely poor performance because + +- the sample, the sample holder, lenses, dichroics, and other optical elements introduce small changes in polarization, and +- the liquid crystals' voltage response drifts over time. + +Therefore, recalibrating the liquid crystals regularly (definitely between imaging sessions, often between different samples) is essential for acquiring optimal images. + +## Finding the extinction state + +Every calibration starts with a routine that finds the **extinction state**: the polarization state (and corresponding voltages) that minimizes the intensity that reaches the camera. If the analyzer is a right-hand-circular polarizer, then the extinction state is the set of voltages that correspond to left-hand-circular light in the sample. + +## Setting a goal for the remaining states: swing + +After finding the circular extinction state, the calibration routine finds the remaining states. The **swing** parameter sets the target ellipticity of the remaining states and is best understood using [the Poincare sphere](https://en.wikipedia.org/wiki/Unpolarized_light#Poincar%C3%A9_sphere), a diagram that organizes all pure polarization states onto the surface of a sphere. + + + +On the Poincare sphere, the extinction state corresponds to the north pole, and the swing value corresponds to the targeted line of [colatitude](https://en.m.wikipedia.org/wiki/File:Spherical_Coordinates_%28Colatitude,_Longitude%29.svg) for the remaining states. For example, a swing value of 0.25 (above left) sets the targeted polarization states to the states on the equator: a set of linear polarization states. Similarly, a swing value of 0.125 (above right) sets the targeted polarization states to the states on the line of colatitude 45 degrees ( $\pi$/4 radians) away from the north pole: a set of elliptical polarization states. 
+ +The Poincare sphere is also useful for calculating the ratio of intensities measured before and after an analyzer illuminated with a polarized beam. First, find the point on the Poincare sphere that corresponds to the analyzer; in our case we have a right-circular analyzer corresponding to the south pole. Next, find the point that corresponds to the polarization state of the light incident on the analyzer; this could be any arbitrary point on the Poincare sphere. To find the ratio of intensities before and after the analyzer $I/I_0$, find the great-circle angle between the two points on the Poincare sphere, $\alpha$, and calculate $I/I_0 = \cos^2(\alpha/2)$. As expected, points that are close together transmit perfectly ( $\alpha = 0$ implies $I/I_0 = 1$), while antipodal points lead to extinction ( $\alpha = \pi$ implies $I/I_0 = 0$). + +This geometric construction illustrates that all non-extinction polarization states have the same intensity after the analyzer because they live on the same line of colatitude and have the same great-circle angle to the south pole (the analyzer). We use this fact to help us find our non-extinction states. + +Practically, we find our first non-extinction state immediately using the liquid crystal manufacturer's measurements from the factory. In other words, we apply a fixed voltage offset to the extinction-state voltages to find the first non-extinction state, and this requires no iteration or optimization. To find the remaining non-extinction states, we keep the polarization orientation fixed and search through neighboring states with different ellipticity to find states that transmit the same intensity as the first non-extinction state. + +## Evaluating a calibration: extinction ratio + +At the end of a calibration we report the **extinction ratio**, the ratio of the largest and smallest intensities that the imaging system can transmit above background. 
This metric measures the quality of the entire optical path including the liquid crystals and their calibrated states, and all depolarization, scattering, or absorption caused by optical elements in the light path will reduce the extinction ratio. + +## Calculating extinction ratio from measured intensities (advanced topic) + +To calculate the extinction ratio, we could optimize the liquid crystal voltages to maximize measured intensity then calculate the ratio of that result with the earlier extinction intensity, but this approach requires a time-consuming optimization and it does not characterize the quality of the calibrated states of the liquid crystals. + +Instead, we estimate the extinction ratio from the intensities we measure during the calibration process. Specifically, we measure the black-level intensity $I_{\text{bl}}$, the extinction intensity $I_{\text{ext}}$, and the intensity under the first elliptical state $I_{\text{ellip}}(S)$ where $S$ is the swing. We proceed to algebraically express the extinction ratio in terms of these three quantities. + +We can decompose $I_{\text{ellip}}(S)$ into a constant term $I_{\text{ellip}}(0) = I_{\text{ext}}$, and a modulation term given by + +$$I_{\text{ellip}}(S) = I_{\text{mod}}\sin^2(\pi S) + I_{\text{ext}},\qquad\qquad (1)$$ +where $I_{\text{mod}}$ is the modulation depth, and the $\sin^2(\pi S)$ term can be understood using the Poincare sphere (the intensity behind the circular analyzer is proportional to $\cos^2(\alpha/2)$ and for a given swing we have $\alpha = \pi - 2\pi S$ so $\cos^2(\frac{\pi - 2\pi S}{2}) = \sin^2(\pi S)$ ). + +Next, we decompose $I_{\text{ext}}$ into the sum of two terms, the black level intensity and a leakage intensity $I_{\text{leak}}$ +$$I_{\text{ext}} = I_{\text{bl}} + I_{\text{leak}}.\qquad\qquad (2)$$ + +The following diagram clarifies our definitions and shows how the measured $I_{\text{ellip}}(S)$ depends on the swing (green line). 
+ + + +The extinction ratio is the ratio of the largest and smallest intensities that the imaging system can transmit above background, which is most easily expressed in terms of $I_{\text{mod}}$ and $I_{\text{leak}}$ +$$\text{Extinction Ratio} = \frac{I_{\text{mod}} + I_{\text{leak}}}{I_{\text{leak}}}.\qquad\qquad (3)$$ + +Substituting Eqs. (1) and (2) into Eq. (3) gives the extinction ratio in terms of the measured intensities +$$\text{Extinction Ratio} = \frac{1}{\sin^2(\pi S)}\frac{I_{\text{ellip}}(S) - I_{\text{ext}}}{I_{\text{ext}} - I_{\text{bl}}} + 1.$$ + +## Summary: `waveorder`'s step-by-step calibration procedure +1. Close the shutter, measure the black level, then reopen the shutter. +2. Find the extinction state by finding voltages that minimize the intensity that reaches the camera. +3. Use the swing value to immediately find the first elliptical state, and record the intensity on the camera. +4. For each remaining elliptical state, keep the polarization orientation fixed and optimize the voltages to match the intensity of the first elliptical state. +5. Store the voltages and calculate the extinction ratio. diff --git a/docs/data-schema.md b/docs/data-schema.md new file mode 100644 index 00000000..a92bc72f --- /dev/null +++ b/docs/data-schema.md @@ -0,0 +1,47 @@ +# Data schema + +This document defines the standard for data acquired with `waveorder`. + +## Raw directory organization + +Currently, we structure raw data in the following hierarchy: + +```text +working_directory/ # commonly YYYY_MM_DD_exp_name, but not enforced +├── polarization_calibration_0.txt +│ ... +├── polarization_calibration_.txt # i calibration repeats +│ +├── bg_0 +│ ... 
+├── bg_<j> # j background repeats
+│   ├── background.zarr
+│   ├── polarization_calibration.txt # copied into each bg folder
+│   ├── reconstruction.zarr
+│   ├── reconstruction_settings.yml # for use with `waveorder reconstruct`
+│   └── transfer_function.zarr # for use with `waveorder apply-inv-tf`
+│
+├── <acq_name>_snap_0
+├── <acq_name>_snap_1
+│   ├── raw_data.zarr
+│   ├── reconstruction.zarr
+│   ├── reconstruction_settings.yml
+│   └── transfer_function.zarr
+│ ...
+├── <acq_name>_snap_<k> # k repeats with the first acquisition name
+│   ├── raw_data.zarr
+│   ├── reconstruction.zarr
+│   ├── reconstruction_settings.yml
+│   └── transfer_function.zarr
+│ ...
+│
+├── <acq_name_l>_snap_0 # l different acquisition names
+│ ...
+├── <acq_name_l>_snap_<m> # m repeats for this acquisition name
+    ├── raw_data.zarr
+    ├── reconstruction.zarr
+    ├── reconstruction_settings.yml
+    └── transfer_function.zarr
+```
+
+Each `.zarr` contains an [OME-NGFF v0.4](https://ngff.openmicroscopy.org/0.4/) in HCS format with a single field of view. diff --git a/docs/development-guide.md b/docs/development-guide.md new file mode 100644 index 00000000..4d16fdec --- /dev/null +++ b/docs/development-guide.md @@ -0,0 +1,105 @@ +# `waveorder` development guide
+
+## Install `waveorder` for development
+
+1. Install [conda](https://github.com/conda-forge/miniforge) and create a virtual environment:
+
+    ```sh
+    conda create -y -n waveorder python=3.10
+    conda activate waveorder
+    ```
+
+2. Clone the `waveorder` directory:
+
+    ```sh
+    git clone https://github.com/mehta-lab/waveorder.git
+    ```
+
+3. Install `waveorder` in editable mode with development dependencies
+
+    ```sh
+    cd waveorder
+    pip install -e ".[all,dev]"
+    ```
+
+## Set up a development environment
+
+### Code linting
+
+We are not currently specifying a code linter as most modern Python code editors already have their own. If not, add a plugin to your editor to help catch bugs pre-commit!
+ +### Code formatting + +We use `black` to format Python code, and a specific version is installed as a development dependency. Use the `black` in the `waveorder` virtual environment, either from commandline or the editor of your choice. + +> *VS Code users*: Install the [Black Formatter](https://marketplace.visualstudio.com/items?itemName=ms-python.black-formatter) plugin. Press `^/⌘ ⇧ P` and type 'format document with...', choose the Black Formatter and start formatting! + +### Docstring style + +The [NumPy style](https://numpydoc.readthedocs.io/en/latest/format.html) docstrings are used in `waveorder`. + +> *VS Code users*: [this popular plugin](https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring) helps auto-generate most popular docstring styles (including `numpydoc`). + +## Run automated tests + +From within the `waveorder` directory run: + +```sh +pytest +``` + +Running `pytest` for the first time will download ~50 MB of test data from Zenodo, and subsequent runs will reuse the downloaded data. + +## Run manual tests + +Although many of `waveorder`'s tests are automated, many features require manual testing. The following is a summary of features that need to be tested manually before release: + +* Install a compatible version of Micro-Manager and check that `waveorder` can connect. +* Perform calibrations with and without an ROI; with and without a shutter configured in Micro-Manager, in 4- and 5-state modes; and in MM-Voltage, MM-Retardance, and DAC modes (if the TriggerScope is available). +* Test "Load Calibration" and "Calculate Extinction" buttons. +* Test "Capture Background" button. +* Test the "Acquire Birefringence" button on a background FOV. Does a background-corrected background acquisition give random orientations? +* Test the four "Acquire" buttons with varied combinations of 2D/3D, background correction settings, "Phase from BF" checkbox, and regularization parameters. 
+* Use the data you collected to test "Offline" mode reconstructions with varied combinations of parameters.
+
+## GUI development
+
+We use `QT Creator` for large parts of `waveorder`'s GUI. To modify the GUI, install `QT Creator` from [its website](https://www.qt.io/product/development-tools) or with `brew install --cask qt-creator`
+
+Open `./waveorder/plugin/gui.ui` in `QT Creator` and make your changes.
+
+Next, convert the `.ui` to a `.py` file with:
+
+```sh
+pyuic5 -x gui.ui -o gui.py
+```
+
+Note: `pyuic5` is installed alongside `PyQt5`, so you can expect to find it installed in your `waveorder` conda environment.
+
+Finally, change the `gui.py` file's imports to use `qtpy` instead of `PyQt5` to adhere to [napari plugin best practices](https://napari.org/stable/plugins/best_practices.html#don-t-include-pyside2-or-pyqt5-in-your-plugin-s-dependencies).
+On macOS, you can modify the file in place with:
+
+```sh
+sed -i '' 's/from PyQt5/from qtpy/g' gui.py
+```
+
+> This is specific for BSD `sed`, omit `''` with GNU.
+
+Note: although much of the GUI is specified in the generated `gui.py` file, the `main_widget.py` file makes extensive modifications to the GUI.
+
+## Make `git blame` ignore formatting commits
+
+**Note:** `git --version` must be `>=2.23` to use this feature.
+
+If you would like `git blame` to ignore formatting commits, run this line:
+
+```sh
+ git config --global blame.ignoreRevsFile .git-blame-ignore-revs
+```
+
+The `.git-blame-ignore-revs` file contains a list of commit hashes corresponding to formatting commits.
+If you make a formatting commit, please add the commit's hash to this file.
+
+## Pre-release checklist
+- merge `README.md` figures to `main`, then update the links to point to these uploaded figures. We do not upload figures to PyPI, so without this step the README figure will not appear on PyPI or napari-hub.
+- update version numbers and links in [the microscope dependency guide](./microscope-installation-guide.md).
diff --git a/docs/examples/README.md b/docs/examples/README.md new file mode 100644 index 00000000..96278b6d --- /dev/null +++ b/docs/examples/README.md @@ -0,0 +1,11 @@ +`waveorder` is undergoing a significant refactor, and this `examples/` folder serves as a good place to understand the current state of the repository.
+
+Most examples require `pip install waveorder[all]` for `napari` and `jupyter`. Visit the [napari installation guide](https://napari.org/dev/tutorials/fundamentals/installation.html) if napari installation fails.
+
+| Folder | Requires | Description |
+|------------------|----------------------------|-------------------------------------------------------------------------------------------------------|
+| `configs/` | `pip install waveorder[all]` | Demonstrates `waveorder`'s config-file-based command-line interface. |
+| `models/` | `pip install waveorder[all]` | Demonstrates the latest functionality of `waveorder` through simulations and reconstructions using various models. |
+| `maintenance/` | `pip install waveorder` | Examples of computational imaging methods enabled by functionality of waveorder; scripts are maintained with automated tests. |
+| `visuals/` | `pip install waveorder[all]` | Visualizations of transfer functions and Green's tensors. |
+| `deprecated/` | `pip install waveorder[all]`, complete datasets | Provides examples of real-data reconstructions; serves as documentation and is not actively maintained. | \ No newline at end of file diff --git a/docs/examples/configs/README.md b/docs/examples/configs/README.md new file mode 100644 index 00000000..c02d993a --- /dev/null +++ b/docs/examples/configs/README.md @@ -0,0 +1,70 @@ +# `waveorder` CLI examples
+
+`waveorder` uses a configuration-file-based command-line interface (CLI) to
+calculate transfer functions and apply these transfer functions to datasets.
+
+This page demonstrates `waveorder`'s CLI.
+
+## Getting started
+
+### 1.
Check your installation
+First, [install `waveorder`](../docs/software-installation-guide.md) and run
+```bash
+waveorder
+```
+in a shell. If `waveorder` is installed correctly, you will see a usage string and
+```
+waveorder: Computational Toolkit for Label-Free Imaging
+```
+
+### 2. Download and convert a test dataset
+Next, [download the test data from zenodo (47 MB)](https://zenodo.org/record/6983916/files/recOrder_test_data.zip?download=1), and convert a dataset to the latest version of `.zarr` with
+```
+cd /path/to/
+iohub convert -i /path/to/test_data/2022_08_04_recOrder_pytest_20x_04NA/2T_3P_16Z_128Y_256X_Kazansky_1/
+-o ./dataset.zarr
+```
+
+You can view the test dataset with
+```
+napari ./dataset.zarr --plugin waveorder
+```
+
+### 3. Run a reconstruction
+Run an example reconstruction with
+```
+waveorder reconstruct ./dataset.zarr/0/0/0 -c /path/to/waveorder/examples/settings/birefringence-and-phase.yml -o ./reconstruction.zarr
+```
+then view the reconstruction with
+```
+napari ./reconstruction.zarr --plugin waveorder
+```
+
+Try modifying the configuration file to see how the regularization parameter changes the results.
+
+## FAQ
+1. **Q: Which configuration file should I use?**
+
+   If you are acquiring:
+
+   **3D data with calibrated liquid-crystal polarizers via `waveorder`** use `birefringence.yml`.
+
+   **3D fluorescence data** use `fluorescence.yml`.
+
+   **3D brightfield data** use `phase.yml`.
+
+   **Multi-modal data**, start by reconstructing the individual modalities, each with a single config file and CLI call. Then combine the reconstructions by ***TODO: @Ziwen can you help me append to the zarrs to help me fix this?***
+
+2. **Q: Should I use `reconstruction_dimension` = 2 or 3?**
+
+   If your downstream processing requires 3D information or if you're unsure, then you should use `reconstruction_dimension = 3`.
If your sample is very thin compared to the depth of field of the microscope, if you're in a noise-limited regime, or if your downstream processing requires 2D information, then you should use `reconstruction_dimension = 2`. Empirically, we have found that 2D reconstructions reduce the noise in our reconstructions because it uses 3D information to make a single estimate for each pixel. + +3. **Q: What regularization parameter should I use?** + + We recommend starting with the defaults then testing over a few orders of magnitude and choosing a result that isn't too noisy or too smooth. + +### Developers note + +These configuration files are automatically generated when the tests run. See `/tests/cli_tests/test_settings.py` - `test_generate_example_settings`. + +To keep these settings up to date, run `pytest` locally when `cli/settings.py` changes. diff --git a/docs/examples/configs/birefringence-and-phase.yml b/docs/examples/configs/birefringence-and-phase.yml new file mode 100644 index 00000000..6c53dfe6 --- /dev/null +++ b/docs/examples/configs/birefringence-and-phase.yml @@ -0,0 +1,31 @@ +input_channel_names: +- State0 +- State1 +- State2 +- State3 +time_indices: all +reconstruction_dimension: 3 +birefringence: + transfer_function: + swing: 0.1 + apply_inverse: + wavelength_illumination: 0.532 + background_path: '' + remove_estimated_background: false + flip_orientation: false + rotate_orientation: false +phase: + transfer_function: + wavelength_illumination: 0.532 + yx_pixel_size: 0.325 + z_pixel_size: 2.0 + z_padding: 0 + index_of_refraction_media: 1.3 + numerical_aperture_detection: 1.2 + numerical_aperture_illumination: 0.5 + invert_phase_contrast: false + apply_inverse: + reconstruction_algorithm: Tikhonov + regularization_strength: 0.001 + TV_rho_strength: 0.001 + TV_iterations: 1 diff --git a/docs/examples/configs/birefringence.yml b/docs/examples/configs/birefringence.yml new file mode 100644 index 00000000..ed50536c --- /dev/null +++ 
b/docs/examples/configs/birefringence.yml @@ -0,0 +1,16 @@ +input_channel_names: +- State0 +- State1 +- State2 +- State3 +time_indices: all +reconstruction_dimension: 3 +birefringence: + transfer_function: + swing: 0.1 + apply_inverse: + wavelength_illumination: 0.532 + background_path: '' + remove_estimated_background: false + flip_orientation: false + rotate_orientation: false diff --git a/docs/examples/configs/fluorescence.yml b/docs/examples/configs/fluorescence.yml new file mode 100644 index 00000000..3e84d884 --- /dev/null +++ b/docs/examples/configs/fluorescence.yml @@ -0,0 +1,17 @@ +input_channel_names: +- GFP +time_indices: all +reconstruction_dimension: 3 +fluorescence: + transfer_function: + yx_pixel_size: 0.325 + z_pixel_size: 2.0 + z_padding: 0 + index_of_refraction_media: 1.3 + numerical_aperture_detection: 1.2 + wavelength_emission: 0.507 + apply_inverse: + reconstruction_algorithm: Tikhonov + regularization_strength: 0.001 + TV_rho_strength: 0.001 + TV_iterations: 1 diff --git a/docs/examples/configs/phase.yml b/docs/examples/configs/phase.yml new file mode 100644 index 00000000..381b487e --- /dev/null +++ b/docs/examples/configs/phase.yml @@ -0,0 +1,19 @@ +input_channel_names: +- BF +time_indices: all +reconstruction_dimension: 3 +phase: + transfer_function: + wavelength_illumination: 0.532 + yx_pixel_size: 0.325 + z_pixel_size: 2.0 + z_padding: 0 + index_of_refraction_media: 1.3 + numerical_aperture_detection: 1.2 + numerical_aperture_illumination: 0.5 + invert_phase_contrast: false + apply_inverse: + reconstruction_algorithm: Tikhonov + regularization_strength: 0.001 + TV_rho_strength: 0.001 + TV_iterations: 1 diff --git a/examples/documentation/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.py b/docs/examples/deprecated/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.py similarity index 100% rename from examples/documentation/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.py rename to 
docs/examples/deprecated/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.py diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_anisotropic_target.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_anisotropic_target.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_anisotropic_target.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_anisotropic_target.ipynb diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_cardiac_muscle.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiac_muscle.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_cardiac_muscle.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiac_muscle.ipynb diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_1.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_1.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_1.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_1.ipynb diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_2.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_2.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_2.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_infected_2.ipynb diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_mock.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_mock.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_cardiomyocyte_mock.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_cardiomyocyte_mock.ipynb diff --git 
a/examples/documentation/PTI_experiment/PTI_full_FOV_human_uterus.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_human_uterus.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_human_uterus.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_human_uterus.ipynb diff --git a/examples/documentation/PTI_experiment/PTI_full_FOV_mouse_brain_aco.ipynb b/docs/examples/deprecated/PTI_experiment/PTI_full_FOV_mouse_brain_aco.ipynb similarity index 100% rename from examples/documentation/PTI_experiment/PTI_full_FOV_mouse_brain_aco.ipynb rename to docs/examples/deprecated/PTI_experiment/PTI_full_FOV_mouse_brain_aco.ipynb diff --git a/examples/documentation/PTI_experiment/README.md b/docs/examples/deprecated/PTI_experiment/README.md similarity index 100% rename from examples/documentation/PTI_experiment/README.md rename to docs/examples/deprecated/PTI_experiment/README.md diff --git a/examples/documentation/QLIPP_experiment/2D_QLIPP_recon_experiment.ipynb b/docs/examples/deprecated/QLIPP_experiment/2D_QLIPP_recon_experiment.ipynb similarity index 100% rename from examples/documentation/QLIPP_experiment/2D_QLIPP_recon_experiment.ipynb rename to docs/examples/deprecated/QLIPP_experiment/2D_QLIPP_recon_experiment.ipynb diff --git a/examples/documentation/QLIPP_experiment/3D_QLIPP_recon_experiment.ipynb b/docs/examples/deprecated/QLIPP_experiment/3D_QLIPP_recon_experiment.ipynb similarity index 100% rename from examples/documentation/QLIPP_experiment/3D_QLIPP_recon_experiment.ipynb rename to docs/examples/deprecated/QLIPP_experiment/3D_QLIPP_recon_experiment.ipynb diff --git a/examples/documentation/fluorescence_deconvolution/fluorescence_deconv.ipynb b/docs/examples/deprecated/fluorescence_deconvolution/fluorescence_deconv.ipynb similarity index 100% rename from examples/documentation/fluorescence_deconvolution/fluorescence_deconv.ipynb rename to 
docs/examples/deprecated/fluorescence_deconvolution/fluorescence_deconv.ipynb diff --git a/examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py similarity index 98% rename from examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py rename to docs/examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py index 85d05f89..41831ca2 100644 --- a/examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py +++ b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Forward_2D3D.py @@ -9,9 +9,12 @@ # density and anisotropy," bioRxiv 2020.12.15.422951 (2020).``` # #################################################################### +from pathlib import Path + import matplotlib.pyplot as plt import numpy as np from numpy.fft import fftshift +from platformdirs import user_data_dir from waveorder import optics, util, waveorder_simulator from waveorder.visuals import jupyter_visuals @@ -481,8 +484,8 @@ # ################################# # Save simulations - -output_dir = "./" +temp_dirpath = Path(user_data_dir("PTI_simulation")) +temp_dirpath.mkdir(parents=True, exist_ok=True) if sample_type == "3D": output_file = "PTI_simulation_data_NA_det_147_NA_illu_140_3D_spoke_discrete_no_1528_ne_1553_no_noise_Born" @@ -491,8 +494,10 @@ else: print("sample_type needs to be 2D or 3D.") +output_path = temp_dirpath / output_file + np.savez( - output_dir + output_file, + output_path, I_meas=I_meas_noise, lambda_illu=lambda_illu, n_media=n_media, diff --git a/examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py similarity index 97% rename from examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py rename to docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py index 646cbb2b..ffadd88b 100644 --- a/examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py +++ 
b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon2D.py @@ -9,17 +9,23 @@ # density and anisotropy," bioRxiv 2020.12.15.422951 (2020).``` # #################################################################### +from pathlib import Path + import matplotlib.pyplot as plt import numpy as np from numpy.fft import fftshift +from platformdirs import user_data_dir from waveorder import optics, waveorder_reconstructor from waveorder.visuals import jupyter_visuals ## Initialization ## Load simulated images and parameters - -file_name = "./PTI_simulation_data_NA_det_147_NA_illu_140_2D_spoke_discrete_no_1528_ne_1553_no_noise_Born.npz" +temp_dirpath = Path(user_data_dir("PTI_simulation")) +file_name = ( + temp_dirpath + / "PTI_simulation_data_NA_det_147_NA_illu_140_2D_spoke_discrete_no_1528_ne_1553_no_noise_Born.npz" +) array_loaded = np.load(file_name) list_of_array_names = sorted(array_loaded) diff --git a/examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py similarity index 98% rename from examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py rename to docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py index 115458e0..8dacbc90 100644 --- a/examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py +++ b/docs/examples/maintenance/PTI_simulation/PTI_Simulation_Recon3D.py @@ -8,17 +8,23 @@ # "uPTI: uniaxial permittivity tensor imaging of intrinsic # # density and anisotropy," bioRxiv 2020.12.15.422951 (2020).``` # #################################################################### +from pathlib import Path + import matplotlib.pyplot as plt import numpy as np from numpy.fft import fftshift +from platformdirs import user_data_dir from waveorder import optics, waveorder_reconstructor from waveorder.visuals import jupyter_visuals ## Initialization ## Load simulated images and parameters - -file_name = 
"./PTI_simulation_data_NA_det_147_NA_illu_140_2D_spoke_discrete_no_1528_ne_1553_no_noise_Born.npz" +temp_dirpath = Path(user_data_dir("PTI_simulation")) +file_name = ( + temp_dirpath + / "PTI_simulation_data_NA_det_147_NA_illu_140_2D_spoke_discrete_no_1528_ne_1553_no_noise_Born.npz" +) array_loaded = np.load(file_name) list_of_array_names = sorted(array_loaded) diff --git a/examples/maintenance/PTI_simulation/PTI_formulation.html b/docs/examples/maintenance/PTI_simulation/PTI_formulation.html similarity index 100% rename from examples/maintenance/PTI_simulation/PTI_formulation.html rename to docs/examples/maintenance/PTI_simulation/PTI_formulation.html diff --git a/examples/maintenance/PTI_simulation/README.md b/docs/examples/maintenance/PTI_simulation/README.md similarity index 100% rename from examples/maintenance/PTI_simulation/README.md rename to docs/examples/maintenance/PTI_simulation/README.md diff --git a/examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py b/docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py similarity index 94% rename from examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py rename to docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py index fd7b2c41..43845335 100644 --- a/examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py +++ b/docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_forward.py @@ -10,9 +10,12 @@ ##################################################################################################### +from pathlib import Path + import matplotlib.pyplot as plt import numpy as np from numpy.fft import fftshift +from platformdirs import user_data_dir from waveorder import optics, util, waveorder_simulator from waveorder.visuals import jupyter_visuals @@ -103,7 +106,9 @@ ).astype("float64") # Save simulation -output_file = "./2D_QLIPP_simulation.npz" +temp_dirpath = Path(user_data_dir("QLIPP_simulation")) +temp_dirpath.mkdir(parents=True, exist_ok=True) +output_file = temp_dirpath / 
"2D_QLIPP_simulation.npz" np.savez( output_file, I_meas=I_meas_noise, diff --git a/examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py b/docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py similarity index 96% rename from examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py rename to docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py index f8a8583a..2b49bd6c 100644 --- a/examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py +++ b/docs/examples/maintenance/QLIPP_simulation/2D_QLIPP_recon.py @@ -10,8 +10,11 @@ # eLife 9:e55502 (2020).``` # ##################################################################################################### +from pathlib import Path + import matplotlib.pyplot as plt import numpy as np +from platformdirs import user_data_dir from waveorder import waveorder_reconstructor from waveorder.visuals import jupyter_visuals @@ -19,8 +22,8 @@ # ### Load simulated data # Load simulations - -file_name = "./2D_QLIPP_simulation.npz" +temp_dirpath = Path(user_data_dir("QLIPP_simulation")) +file_name = temp_dirpath / "2D_QLIPP_simulation.npz" array_loaded = np.load(file_name) list_of_array_names = sorted(array_loaded) diff --git a/examples/maintenance/README.md b/docs/examples/maintenance/README.md similarity index 100% rename from examples/maintenance/README.md rename to docs/examples/maintenance/README.md diff --git a/examples/models/README.md b/docs/examples/models/README.md similarity index 100% rename from examples/models/README.md rename to docs/examples/models/README.md diff --git a/examples/models/inplane_oriented_thick_pol3d.py b/docs/examples/models/inplane_oriented_thick_pol3d.py similarity index 100% rename from examples/models/inplane_oriented_thick_pol3d.py rename to docs/examples/models/inplane_oriented_thick_pol3d.py diff --git a/examples/models/inplane_oriented_thick_pol3d_vector.py b/docs/examples/models/inplane_oriented_thick_pol3d_vector.py similarity index 100% rename from 
examples/models/inplane_oriented_thick_pol3d_vector.py rename to docs/examples/models/inplane_oriented_thick_pol3d_vector.py diff --git a/examples/models/isotropic_fluorescent_thick_3d.py b/docs/examples/models/isotropic_fluorescent_thick_3d.py similarity index 100% rename from examples/models/isotropic_fluorescent_thick_3d.py rename to docs/examples/models/isotropic_fluorescent_thick_3d.py diff --git a/examples/models/isotropic_thin_3d.py b/docs/examples/models/isotropic_thin_3d.py similarity index 100% rename from examples/models/isotropic_thin_3d.py rename to docs/examples/models/isotropic_thin_3d.py diff --git a/examples/models/phase_thick_3d.py b/docs/examples/models/phase_thick_3d.py similarity index 100% rename from examples/models/phase_thick_3d.py rename to docs/examples/models/phase_thick_3d.py diff --git a/examples/visuals/plot_greens_tensor.py b/docs/examples/visuals/plot_greens_tensor.py similarity index 100% rename from examples/visuals/plot_greens_tensor.py rename to docs/examples/visuals/plot_greens_tensor.py diff --git a/examples/visuals/plot_vector_transfer_function_support.py b/docs/examples/visuals/plot_vector_transfer_function_support.py similarity index 100% rename from examples/visuals/plot_vector_transfer_function_support.py rename to docs/examples/visuals/plot_vector_transfer_function_support.py diff --git a/docs/images/HSV_legend.png b/docs/images/HSV_legend.png new file mode 100644 index 00000000..5de940a4 Binary files /dev/null and b/docs/images/HSV_legend.png differ diff --git a/docs/images/JCh_Color_legend.png b/docs/images/JCh_Color_legend.png new file mode 100644 index 00000000..eb53f5fe Binary files /dev/null and b/docs/images/JCh_Color_legend.png differ diff --git a/docs/images/JCh_legend.png b/docs/images/JCh_legend.png new file mode 100644 index 00000000..8af5efc9 Binary files /dev/null and b/docs/images/JCh_legend.png differ diff --git a/docs/images/acq_finished.png b/docs/images/acq_finished.png new file mode 100644 index 
00000000..cb07c394 Binary files /dev/null and b/docs/images/acq_finished.png differ diff --git a/docs/images/acquire_buttons.png b/docs/images/acquire_buttons.png new file mode 100644 index 00000000..6e001d46 Binary files /dev/null and b/docs/images/acquire_buttons.png differ diff --git a/docs/images/acquisition_settings.png b/docs/images/acquisition_settings.png new file mode 100644 index 00000000..9a4877bd Binary files /dev/null and b/docs/images/acquisition_settings.png differ diff --git a/docs/images/advanced.png b/docs/images/advanced.png new file mode 100644 index 00000000..ed658392 Binary files /dev/null and b/docs/images/advanced.png differ diff --git a/docs/images/cap_bg.png b/docs/images/cap_bg.png new file mode 100644 index 00000000..6dfbf70f Binary files /dev/null and b/docs/images/cap_bg.png differ diff --git a/docs/images/cli_structure.png b/docs/images/cli_structure.png new file mode 100644 index 00000000..0ddb1dd1 Binary files /dev/null and b/docs/images/cli_structure.png differ diff --git a/docs/images/comms_video_screenshot.png b/docs/images/comms_video_screenshot.png new file mode 100644 index 00000000..98607aa3 Binary files /dev/null and b/docs/images/comms_video_screenshot.png differ diff --git a/docs/images/connect_to_mm.png b/docs/images/connect_to_mm.png new file mode 100644 index 00000000..8e8fe256 Binary files /dev/null and b/docs/images/connect_to_mm.png differ diff --git a/docs/images/create_group.png b/docs/images/create_group.png new file mode 100644 index 00000000..5719ff61 Binary files /dev/null and b/docs/images/create_group.png differ diff --git a/docs/images/create_group_voltage.png b/docs/images/create_group_voltage.png new file mode 100644 index 00000000..e97dfeed Binary files /dev/null and b/docs/images/create_group_voltage.png differ diff --git a/docs/images/create_preset.png b/docs/images/create_preset.png new file mode 100644 index 00000000..6b78e125 Binary files /dev/null and b/docs/images/create_preset.png differ diff 
--git a/docs/images/create_preset_voltage.png b/docs/images/create_preset_voltage.png new file mode 100644 index 00000000..a6a648d0 Binary files /dev/null and b/docs/images/create_preset_voltage.png differ diff --git a/docs/images/general_reconstruction_settings.png b/docs/images/general_reconstruction_settings.png new file mode 100644 index 00000000..9da93ff0 Binary files /dev/null and b/docs/images/general_reconstruction_settings.png differ diff --git a/docs/images/ideal_plot.png b/docs/images/ideal_plot.png new file mode 100644 index 00000000..8cc03620 Binary files /dev/null and b/docs/images/ideal_plot.png differ diff --git a/docs/images/modulation.png b/docs/images/modulation.png new file mode 100644 index 00000000..6c5e44df Binary files /dev/null and b/docs/images/modulation.png differ diff --git a/docs/images/no-overlay.png b/docs/images/no-overlay.png new file mode 100644 index 00000000..cbb860cf Binary files /dev/null and b/docs/images/no-overlay.png differ diff --git a/docs/images/overlay-demo.png b/docs/images/overlay-demo.png new file mode 100644 index 00000000..da49c9eb Binary files /dev/null and b/docs/images/overlay-demo.png differ diff --git a/docs/images/overlay.png b/docs/images/overlay.png new file mode 100644 index 00000000..69c71358 Binary files /dev/null and b/docs/images/overlay.png differ diff --git a/docs/images/phase_reconstruction_settings.png b/docs/images/phase_reconstruction_settings.png new file mode 100644 index 00000000..967a77d4 Binary files /dev/null and b/docs/images/phase_reconstruction_settings.png differ diff --git a/docs/images/poincare_swing.svg b/docs/images/poincare_swing.svg new file mode 100644 index 00000000..d114424b --- /dev/null +++ b/docs/images/poincare_swing.svg @@ -0,0 +1,535 @@ + + + +SWING = 0.25SWING = 0.125 diff --git a/docs/images/reconstruction_birefriengence.png b/docs/images/reconstruction_birefriengence.png new file mode 100644 index 00000000..20ac93b4 Binary files /dev/null and 
b/docs/images/reconstruction_birefriengence.png differ diff --git a/docs/images/reconstruction_data.png b/docs/images/reconstruction_data.png new file mode 100644 index 00000000..5a85e570 Binary files /dev/null and b/docs/images/reconstruction_data.png differ diff --git a/docs/images/reconstruction_data_info.png b/docs/images/reconstruction_data_info.png new file mode 100644 index 00000000..9a38b441 Binary files /dev/null and b/docs/images/reconstruction_data_info.png differ diff --git a/docs/images/reconstruction_models.png b/docs/images/reconstruction_models.png new file mode 100644 index 00000000..400812f9 Binary files /dev/null and b/docs/images/reconstruction_models.png differ diff --git a/docs/images/reconstruction_queue.png b/docs/images/reconstruction_queue.png new file mode 100644 index 00000000..2a221241 Binary files /dev/null and b/docs/images/reconstruction_queue.png differ diff --git a/docs/images/run_calib.png b/docs/images/run_calib.png new file mode 100644 index 00000000..f7748333 Binary files /dev/null and b/docs/images/run_calib.png differ diff --git a/docs/images/run_port.png b/docs/images/run_port.png new file mode 100644 index 00000000..012cb1e8 Binary files /dev/null and b/docs/images/run_port.png differ diff --git a/docs/images/waveorder_Fig1_Overview.png b/docs/images/waveorder_Fig1_Overview.png new file mode 100644 index 00000000..b2244e8e Binary files /dev/null and b/docs/images/waveorder_Fig1_Overview.png differ diff --git a/docs/images/waveorder_plugin_logo.png b/docs/images/waveorder_plugin_logo.png new file mode 100644 index 00000000..ef09b340 Binary files /dev/null and b/docs/images/waveorder_plugin_logo.png differ diff --git a/docs/microscope-installation-guide.md b/docs/microscope-installation-guide.md new file mode 100644 index 00000000..e396766d --- /dev/null +++ b/docs/microscope-installation-guide.md @@ -0,0 +1,100 @@ +# Microscope Installation Guide + +This guide will walk through a complete waveorder installation consisting 
of: +1. Checking pre-requisites for compatibility. +2. Installing Meadowlark DS5020 and liquid crystals. +3. Installing and launching the latest stable version of `waveorder` via `pip`. +4. Installing a compatible version of Micro-Manager and LC device drivers. +5. Connecting `waveorder` to Micro-Manager via a `pycromanager` connection. + +## Compatibility Summary +Before you start you will need to confirm that your system is compatible with the following software: + +| Software | Version | +| :--- | :--- | +| `waveorder` | 0.4.0 | +| OS | Windows 10 | +| Micro-Manager version | [2023-04-26 (160 MB)](https://download.micro-manager.org/nightly/2.0/Windows/MMSetup_64bit_2.0.1_20230426.exe) | +| Meadowlark drivers | [USB driver (70 kB)](https://github.com/mehta-lab/recOrder/releases/download/0.4.0/usbdrvd.dll) | +| Meadowlark PC software version | 1.08 | +| Meadowlark controller firmware version | >=1.04 | + +## Install Meadowlark DS5020 and liquid crystals + +Start by installing the Meadowlark DS5020 and liquid crystals using the software on the USB stick provided by Meadowlark. You will need to install the USB drivers and CellDrive5000. + +**Check your installation versions** by opening CellDrive5000 and double clicking the Meadowlark Optics logo. Confirm that **"PC software version = 1.08" and "Controller firmware version >= 1.04".** + +If you need to change your PC software version, follow these steps: +- From "Add and remove programs", remove CellDrive5000 and "National Instruments Software". +- From "Device manager", open the "Meadowlark Optics" group, right click `mlousb`, click "Uninstall device", check "Delete the driver software for this device", and click "Uninstall". Uninstall `Meadowlark Optics D5020 LC Driver` following the same steps. +- Using the USB stick provided by Meadowlark, reinstall the USB drivers and CellDrive5000. 
+ +## Install waveorder software + +(Optional but recommended) install [anaconda](https://www.anaconda.com/products/distribution) and create a virtual environment +``` +conda create -y -n waveorder python=3.10 +conda activate waveorder +``` + +Install `waveorder` with acquisition dependencies (napari and pycro-manager): +``` +pip install waveorder[all] +``` +Check your installation: +``` +napari -w waveorder +``` +should launch napari with the waveorder plugin (may take 15 seconds on a fresh installation). + +## Install and configure Micro-Manager + +Download and install [`Micro-Manager 2.0` nightly build `20230426` (~150 MB link).](https://download.micro-manager.org/nightly/2.0/Windows/MMSetup_64bit_2.0.1_20230426.exe) + +**Note:** We have tested waveorder with `20230426`, but most features will work with newer builds. We recommend testing a minimal installation with `20230426` before testing with a different nightly build or additional device drivers. + +Before launching Micro-Manager, download the [USB driver](https://github.com/mehta-lab/recOrder/releases/download/0.4.0rc0/usbdrvd.dll) and place this file into your Micro-Manager folder (likely `C:\Program Files\Micro-Manager` or similar). + +Launch Micro-Manager, open `Devices > Hardware Configuration Wizard...`, and add the `MeadowlarkLC` device to your configuration. Confirm your installation by opening `Devices > Device Property Browser...` and confirming that `MeadowlarkLC` properties appear. + +**Upgrading users:** you will need to reinstall the Meadowlark device to your Micro-Manager configuration file, because the device driver's name has changed to from `MeadowlarkLcOpenSource` to `MeadowlarkLC`. + +### Option 1 (recommended): Voltage-mode calibration installation + Create a new channel group and add the `MeadowlarkLC-Voltage (V) LC-A` and `MeadowlarkLC-Voltage (V) LC-B` properties. 
+ +![](https://github.com/mehta-lab/recOrder/blob/main/docs/images/create_group_voltage.png) + +Add 5 presets to this group named `State0`, `State1`, `State2`, `State3`, and `State4`. You can set random voltages to add these presets, and `waveorder` will calibrate and set these voltages later. + +![](https://github.com/mehta-lab/recOrder/blob/main/docs/images/create_preset_voltage.png) + +### Option 2 (soon deprecated): retardance mode calibration installation + +Create a new channel group and add the property `MeadowlarkLC-String send to -`. + +![](https://github.com/mehta-lab/recOrder/blob/main/docs/images/create_group.png) + +Add 5 presets to this group named `State0`, `State1`, `State2`, `State3`, and `State4` and set the corresponding preset values to `state0`, `state1`, `state2`, `state3`, `state4` in the `MeadowlarkLC-String send to –`* property. + +![](https://github.com/mehta-lab/recOrder/blob/main/docs/images/create_preset.png) + +### (Optional) Enable "Phase From BF" acquisition + +If you would like to reconstruct phase from brightfield, add a Micro-Manager preset with brightfield properties (e.g. moving the polarization analyzer out the light path) and give the preset a name that contains one of the following case-insensitive keywords: + +`["bf", "brightfield", "bright", "labelfree", "label-free", "lf", "label", "phase, "ph"]` + +In `waveorder` you can select this preset using the `Acquisition Settings > BF Channel` dropdown menu. + +### Enable port access + +Finally, enable port access so that Micro-Manager can communicate with `waveorder` through the `pycromanager` bridge. To do so open Micro-Manager and navigate to `Tools > Options` and check the box that says `Run server on port 4827` + +![](https://github.com/mehta-lab/recOrder/blob/main/docs/images/run_port.png) + +## Connect `waveorder` to Micro-Manager + +From the `waveorder` window, click `Switch to Online`. 
If you see `Success`, your installation is complete and you can [proceed to the napari plugin guide](./napari-plugin-guide.md). + +If you you see `Failed`, check that Micro-Manager is open, check that you've enabled `Run server on port 4827`. If the connection continues to fail, report an issue with your stack trace for support. diff --git a/docs/napari-plugin-guide.md b/docs/napari-plugin-guide.md new file mode 100644 index 00000000..d4591ab9 --- /dev/null +++ b/docs/napari-plugin-guide.md @@ -0,0 +1,204 @@ +# Napari Plugin Guide +This guide summarizes a complete `waveorder` workflow. + +## Launch `waveorder` +Activate the `waveorder` environment +``` +conda activate waveorder +``` + +Launch `napari` with `waveorder` +``` +napari -w waveorder +``` +## Connect to Micro-Manager +Click “Connect to MM”. If the connection succeeds, proceed to calibration. If not, revisit the [microscope installation guide](./microscope-installation-guide.md). + +![](./images/connect_to_mm.png) + +For polarization imaging, start with the **Calibration** tab. For phase-from-brightfield imaging, you can skip the calibration and go to the **Aquisition / Reconstruction** tab. + +## Calibration tab +The first step in the acquisition process is to calibrate the liquid crystals and measure a background. In the `waveorder` plugin you will see the following options for controlling the calibration: + +![](./images/run_calib.png) + + +### Prepare for a calibration +Place your sample on the stage, focus on the surface of the coverslip/well, navigate to **an empty FOV**, then align the light source into **Kohler illumination** [following these steps](https://www.microscopyu.com/tutorials/kohler). + +### Choose calibration parameters +Browse for and choose a **Directory** where you calibration and background images will be saved. + +Choose a **Swing** based on the anisotropy of your sample. 
We recommend + +* ​Tissue Imaging: `swing = 0.1 - 0.05` +* Live or fixed Cells: `swing = 0.05 – 0.03` + +We recommend starting with a swing of **0.1** for tissue samples and **0.05** for cells then reducing the swing to measure smaller structures. See the [calibration guide](./calibration-guide.md) for more information about this parameter and the calibration process. + +Choose an **Illumination Scheme** to decides how many polarization states you will calibrate and use. We recommend starting with the *4-State (Ext, 0, 60, 120)* scheme as it requires one less illumination state than the *5-State* scheme. + +**Calibration Mode** is set automatically, so the default value is a good place to start. Different modes allow calibrations with voltages, retardances, or hardware sequencing. + +The **Config Group** is set automatically to the Micro-Manager configuration group that contains the `State*` presets. You can modify this option if you have multple configuration groups with these presets. + +### Run the calibration +Start a calibration with **Run Calibration**. + +The progress bar will show the progress of calibration, and it should take less than 2 minutes on most systems. + +The plot shows the intensities over time during calibration. One way to diagnose an in-progress calibration is to watch the intensity plot. An ideal plot will look similar to the following: + +![](./images/ideal_plot.png) + +Once finished, you will get a calibration assessment and an extinction value. The extinction value gives you a metric for calibration quality: the higher the extinction, the cleaner the light path and the greater the sensitivity of QLIPP. + +* **Extinction 0 – 50**: Very poor. The alignment of the universal compensator may be off or the sample chamber may be highly birefringent. + +* **Extinction 50 - 100**: Okay extinction, could be okay for tissue imaging and strong anisotropic structures. 
Most likely not suitable for cell imaging + +* **Extinction 100 - 200**: Good Extinction. These are the typical values we get on our microscopes. + +* **Extinction 200+**: Excellent. Indicates a very well-aligned and clean light path and high sensitivity of the system. + +For a deeper discussion of the calibration procedure, swing, and the extinction ratio, see the [calibration guide](./calibration-guide.md). + +### Optional: Load Calibration +The **Load Calibration** button allows earlier calibrations to be reused. Select a *polarization_calibration.txt* file and Micro-Manager's presets will be updated with these settings. `waveorder` will also collect a few images to update the extinction ratio to reflect the current condition of the light path. Once this short acquisition has finished, the user can acquire data as normal. + +This feature is useful if Micro-Manager and/or `waveorder` crashes. If the sample and imaging setup haven't changed, it is safe to reuse a calibration. Otherwise, if the sample or the microscope changes, we recommend performing a new calibration. + +### Optional: Calculate Extinction +The **Calculate Extinction** button acquires a few images and recalculates the extinction value. + +This feature is useful for checking if a new region of your sample requires a recalibration. If the sample or background varies as you move around the sample, the extinction will drop and you should recalibrate and acquire background images as close to the area you will be imaging as possible. + +### Capture Background + +The **Capture Background** button will acquire several images under each of the calibrated polarization states, average them (we recommend 5), save them to specified **Background Folder Name** within the main **Directory**, then display the result in napari layers. + +![](./images/cap_bg.png) + +It is normal to see background retardance and orientation. 
We will use these background images to correct the data we collect during our acquisitions of the sample.
The following table summarizes the data that will be acquired when an acquisition button is pressed in **2D** and **3D** acquisition modes: + +| **Acquisition** \ Acquisition Mode | 2D mode | 3D mode | +| :--- | :--- | :--- | +| **Retardance + Orientation** | CYX data | CZYX data | +| **Phase From BF** | ZYX data | ZYX data | +| **Retardance + Orientation + Phase** | CZYX data | CZYX data | + +Unless a **Retardance + Orientation** reconstruction in **2D Acquisition Mode** is requested, `waveorder` uses Micro-Manager's z-stage to acquire 3D data. **Z Start**, **Z End**, and **Z Step** are stage settings for acquiring an image volume, relative to the current position of the stage. Values are in the stage's default units, typically in micrometers. + +For example, to image a 20 um thick cell the user would focus in the middle of the cell then choose + +* **Z Start** = -12 +* **Z End** = 12 +* **Z Step** = 0.25 + +For phase reconstruction, the stack should have about two depths-of-focus above and below the edges of the sample because the reconstruction algorithm uses defocus information to more accurately reconstruct phase. + +### General Reconstruction Settings +![](./images/general_reconstruction_settings.png) + +The **Save Directory** and **Save Name** are where the acquired data (`/_snap_/raw_data.zarr`) and reconstructions (`/_snap_/reconstruction.zarr`) will be saved. + +The **Background Correction** menu has several options (each with mouseover explanations): +* **None**: No background correction is performed. +* **Measured**: Corrects sample images with a background image acquired at an empty field of view, loaded from **Background Path**, by default the most recent background acquisition. +* **Estimated**: Estimates the sample background by fitting a 2D surface to the sample images. Works well when structures are spatially distributed across the field of view and a clear background is unavailable. 
+* **Measured + Estimated**: Applies a **Measured** background correction then an **Estimated** background correction. Use to remove residual background after the sample retardance is corrected with measured background. + +The remaining parameters are used by the reconstructions: + +* **GPU ID**: Not implemented +* **Wavelength (nm)**: illumination wavelength +* **Objective NA**: numerical aperture of the objective, typically found next to magnification +* **Condenser NA**: numerical aperture of the condenser +* **Camera Pixel Size (um)**: pixel size of the camera in micrometers (e.g. 6.5 μm) +* **RI of Obj. Media**: refractive index of the objective media, typical values are 1.0 (air), 1.3 (water), 1.473 (glycerol), or 1.512 (oil) +* **Magnification**: magnification of the objective +* **Rotate Orientation (90 deg)**: rotates "Orientation" reconstructions by +90 degrees clockwise and saves the result, most useful when a known-orientation sample is available +* **Flip Orientation**: flips "Orientation" reconstructions about napari's horizontal axis before saving the result +* **Invert Phase Contrast**: inverts the phase reconstruction's contrast by flipping the positive and negative directions of the stage during the reconstruction, and saves the result + +### Phase Reconstruction Settings +![](./images/phase_reconstruction_settings.png) + +These parameters are used only by phase reconstructions + +* **Z Padding**: The number of slices to pad on either end of the stack, necessary if the sample is not fully out of focus on either end of the stack +* **Regularizer**: Choose "Tikhonov", the "TV" regularizer is not implemented +* **Strength**: The Tikhonov regularization strength, too small/large will result in reconstructions that are too noisy/smooth + +The acquired data will then be displayed in napari layers. Note that phase reconstruction is more computationally expensive and may take several minutes depending on your system. 
+ +Examples of acquiring 2D birefringence data (kidney tissue) with this snap method are below: + +![](./images/acq_finished.png) + +### Recreating reconstructions +`waveorder`'s GUI acquires data from Micro-Manager, reads the GUI to generate a configuration file, then uses a CLI to reconstruct the acquired data with the configuration file, which makes all reconstructions exactly reproducible via a CLI. See the terminal that started napari for a log of the exact CLI commands that will reproduce the results in the napari window. + +See the [reconstruction guide](./reconstruction-guide.md) for CLI usage instructions. + +## Reconstruction Tab +The **Reconstruction** tab is designed to reconstruct `birefriengence, phase, birefrignence with phase, and flurescenece` datasets that have been either acquired or coverted to `.zarr` store as well as acquisitions that are in progress. + +![](./images/reconstruction_data.png) + +The **Input Store** and **Output Directory** point to the input and output `.zarr` data locations. Once an Input Store is selected some metadata parameters can be viewed by hovering the cursor over the `info label` ⓘ. + +![](./images/reconstruction_models.png) + +A `Model` defines the reconstruction parameters. Multiple models can be run against a dataset with varying parameters. The model generates a configuration file `.yml`, then uses the CLI to reconstruct the data with the configuration file, which makes all reconstructions exactly reproducible via a CLI. +* **New**: Builds a model based on the `Checkbox` selection. +* **Load**: Allows a model to be imported using a previous reconstruction `.yml` file. +* **Clear**: This will clear all defined models. + +![](./images/reconstruction_birefriengence.png) + +Once a `New` model is built, it is pre-populated with default values that can be accessed by clicking on the ► icon and the parameters can be changed as required. 
+See the [reconstruction guide](./reconstruction-guide.md) for further information on the parameters. + +![](./images/reconstruction_queue.png) + +Once the **RUN** button is triggered, the reconstruction will proceed based on the defined model(s) concurrently. + +> [!CAUTION] +> Since the models run concurrently, it is the users responsibility to manage compute resources accordingly on a local or SLURM system. + +The `Reconstruction Queue` section will display the progress of the reconstruction in the form of text output. Once a reconstruction finishes the queue will self clear. Only in the case of any issues or error that are encountered the entry will remain. + +Once the reconstruction processing finishes, based on the option `Show after Reconstruction` the reconstructed images will show up in the napari viewer. + +## Visualizations +When an **Orientation*** layer appears at the top of the layers list, `waveorder` will automatically color it with an HSV color map that indicates the orientation. + +If the **Orientation*** layer has a matching **Retardance*** layer in the layer list, a **BirefringenceOverlay*** layer that only shows orientation colors in regions with large retardance is generated. This overlay is computed lazily (when the slider moves), and this computation can be turned off by hiding the layer (eyeball in the layer list). + +![](./images/overlay.png) + +If the **BirefringenceOverlay*** needs to be regenerated, an **Orientation*** layer can be dragged to the top of the layer list: +![](./images/no-overlay.png) + +The **Visualization** tab shows the mapping between HSV colors and the orientation, and the **Overlay Retardance Maximum** slider controls the mapping between retardance values and saturated colors in the overlay. 
+![](./images/overlay-demo.png) diff --git a/docs/reconstruction-guide.md b/docs/reconstruction-guide.md new file mode 100644 index 00000000..6751cc39 --- /dev/null +++ b/docs/reconstruction-guide.md @@ -0,0 +1,53 @@ +# Automating reconstructions + +`waveorder` uses a configuration-file-based command-line interface (CLI) to perform all reconstructions. + +## Preparing your data + +`waveorder` is compatible with OME-Zarr, a chunked next generation file format with an [open specification](https://ngff.openmicroscopy.org/0.4/). All acquisitions completed with the `waveorder` plugin will be automatically converted to `.zarr` format, and existing data can be converted using `iohub`'s `convert` utility. + +Inside a `waveorder` environment, convert a Micro-Manager TIFF sequence, OME-TIFF, or pycromanager NDTiff dataset with +``` +iohub convert ` + -i ./dataset/ ` + -o ./dataset.zarr +``` + +## How can I use `waveorder`'s CLI to perform reconstructions? +`waveorder`'s CLI is summarized in the following figure: + + +The main command `waveorder` command is composed of two subcommands: `compute-tf` and `apply-inv-tf`. + +A reconstruction can be performed with a single `reconstruct` call. For example: +``` +waveorder reconstruct ` + -i ./data.zarr/*/*/* ` + -c ./config.yml ` + -o ./reconstruction.zarr +``` +Equivalently, a reconstruction can be performed with a `compute-tf` call followed by an `apply-inv-tf` call. For example: +``` +waveorder compute-tf ` + -i ./data.zarr/0/0/0 ` + -c ./config.yml ` + -o ./tf.zarr + +waveorder apply-inv-tf + -i ./data.zarr/*/*/* ` + -t ./tf.zarr ` + -c ./config.yml ` + -o ./reconstruction.zarr +``` +Computing the transfer function is typically the most expensive part of the reconstruction, so saving a transfer function then applying it to many datasets can save time. + +## Input options + +The input `-i` flag always accepts a list of inputs, either explicitly e.g. 
`-i ./data.zarr/A/1/0 ./data.zarr/A/2/0` or through wildcards `-i ./data.zarr/*/*/*`. The positions in a high-content screening `.zarr` store are organized into `/row/col/fov` folders, so `./input.zarr/*/*/*` creates a list of all positions in a dataset. + +The `waveorder compute-tf` command accepts a list of inputs, but it only computes the transfer function for the first position in the list. The `apply-inv-tf` command accepts a list of inputs and applies the same transfer function to all of the inputs, which requires that all positions contain arrays with matching TCZYX shapes. + +## What types of reconstructions are supported? +See `/waveorder/examples/` for a list of example configuration files. + +WIP: This documentation will be expanded for each reconstruction type and parameter. diff --git a/docs/software-installation-guide.md b/docs/software-installation-guide.md new file mode 100644 index 00000000..cf8039e1 --- /dev/null +++ b/docs/software-installation-guide.md @@ -0,0 +1,28 @@ +# Software Installation Guide + +1. (Optional but recommended) install [`conda`](https://github.com/conda-forge/miniforge) and create a virtual environment + + ```sh + conda create -y -n waveorder python=3.10 + conda activate waveorder + ``` + +2. Install `waveorder`: + + ```sh + pip install waveorder + ``` + +3. To use the GUI: open `napari` with `waveorder`: + + ```sh + napari -w waveorder + ``` + +4. View command-line help by running + + ```sh + waveorder + ``` + +5. To acquire data via Micro-Manager`, follow the [microscope installation guide](./microscope-installation-guide.md). diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index f091ab36..00000000 --- a/examples/README.md +++ /dev/null @@ -1,10 +0,0 @@ -`waveorder` is undergoing a significant refactor, and this `examples/` folder serves as a good place to understand the current state of the repository. - -Some examples require `pip install waveorder[examples]` for `napari` and `jupyter`. 
Visit the [napari installation guide](https://napari.org/dev/tutorials/fundamentals/installation.html) if napari installation fails. - -| Folder | Requires | Description | -|------------------|----------------------------|-------------------------------------------------------------------------------------------------------| -| `models/` | `pip install waveorder[examples]` | Demonstrates the latest functionality of `waveorder` through simulations and reconstructions using various models. | -| `maintenance/` | `pip install waveorder` | Examples of computational imaging methods enabled by functionality of waveorder; scripts are maintained with automated tests. | -| `visuals/` | `pip install waveorder[examples]` | Visualizations of transfer functions and Green's tensors. | -| `documentation/` | `pip install waveorder`, complete datasets | Provides examples of real-data reconstructions; serves as documentation and is not actively maintained. | diff --git a/examples/documentation/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.pdf b/examples/documentation/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.pdf deleted file mode 100644 index c34cbe79..00000000 Binary files a/examples/documentation/PTI_experiment/PTI_Experiment_Recon3D_anisotropic_target_small.pdf and /dev/null differ diff --git a/examples/documentation/README.md b/examples/documentation/README.md deleted file mode 100644 index b3fbbcb9..00000000 --- a/examples/documentation/README.md +++ /dev/null @@ -1 +0,0 @@ -This folder of `documentation` examples consists of a set of data-dependent reconstructions in `/experimental_reconstructions/`. 
\ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index a9f2c685..3ec962b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,9 +2,6 @@ requires = ["setuptools", "wheel", "setuptools_scm"] build-backend = "setuptools.build_meta" -[tool.setuptools_scm] -write_to = "waveorder/_version.py" - [project] name = "waveorder" description = "Wave-optical simulations and deconvolution of optical properties" @@ -45,27 +42,70 @@ classifiers = [ "Operating System :: MacOS", ] dependencies = [ - "numpy>=1.24", + "click>=8.0.1", + "colorspacious>=1.1.2", + "importlib-metadata", + "iohub==0.1.0", + "ipywidgets>=7.5.1", "matplotlib>=3.1.1", - "scipy>=1.3.0", + "natsort>=7.1.1", + "numpy>=1.24", + "ome-zarr==0.8.3", # unpin when resolved: https://github.com/ome/napari-ome-zarr/issues/111 + "psutil", + "pyqtgraph>=0.12.3", + "pydantic==1.10.19", "pywavelets>=1.1.1", - "ipywidgets>=7.5.1", + "scipy>=1.3.0", + "submitit", "torch>=2.4.1", + "qtpy", + "wget>=3.2" ] dynamic = ["version"] [project.optional-dependencies] -dev = ["pytest", "pytest-cov", "black==25.1.0"] -examples = ["napari[all]", "jupyter"] +all = [ + "napari[pyqt6]", + "napari-ome-zarr>=0.3.2", # drag and drop convenience + "pycromanager==0.27.2", + "jupyter" +] +dev = [ + "black==25.1.0", + "hypothesis", + "pre-commit", + "pytest-cov", + "pytest-qt", + "pytest>=5.0.0", + "tox", +] [project.urls] Homepage = "https://github.com/mehta-lab/waveorder" Repository = "https://github.com/mehta-lab/waveorder" Issues = "https://github.com/mehta-lab/waveorder/issues" +[project.scripts] +waveorder = "waveorder.cli.main:cli" + +[project.entry-points."napari.manifest"] +waveorder = "waveorder:napari.yaml" + [tool.black] line-length = 79 [tool.isort] profile = "black" line_length = 79 + +[tool.setuptools_scm] +write_to = "waveorder/_version.py" + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +where = ["."] + +[tool.setuptools.package-data] +"*" = ["*.yaml"] diff --git 
a/tests/acq_tests/test_acq.py b/tests/acq_tests/test_acq.py new file mode 100644 index 00000000..1a506cc8 --- /dev/null +++ b/tests/acq_tests/test_acq.py @@ -0,0 +1,17 @@ +from unittest.mock import patch + +import numpy as np + +from waveorder.acq.acquisition_workers import _check_scale_mismatch + + +def test_check_scale_mismatch(): + warn_fn_path = "waveorder.acq.acquisition_workers.show_warning" + identity = np.array((1.0, 1.0, 1.0)) + with patch(warn_fn_path) as mock: + _check_scale_mismatch(identity, (1, 1, 1, 1, 1)) + mock.assert_not_called() + _check_scale_mismatch(identity, (1, 1, 1, 1, 1.001)) + mock.assert_not_called() + _check_scale_mismatch(identity, (1, 1, 1, 1, 1.1)) + mock.assert_called_once() diff --git a/tests/calibration_tests/test_calibration.py b/tests/calibration_tests/test_calibration.py new file mode 100644 index 00000000..627012c3 --- /dev/null +++ b/tests/calibration_tests/test_calibration.py @@ -0,0 +1,2 @@ +def test_calib_imports(): + from waveorder.calib import Calibration, Optimization diff --git a/tests/cli_tests/test_cli.py b/tests/cli_tests/test_cli.py new file mode 100644 index 00000000..feda9a5e --- /dev/null +++ b/tests/cli_tests/test_cli.py @@ -0,0 +1,11 @@ +from click.testing import CliRunner + +from waveorder.cli.main import cli + + +def test_main(): + runner = CliRunner() + result = runner.invoke(cli) + + assert result.exit_code == 0 + assert "Toolkit" in result.output diff --git a/tests/cli_tests/test_compute_tf.py b/tests/cli_tests/test_compute_tf.py new file mode 100644 index 00000000..7bd02a40 --- /dev/null +++ b/tests/cli_tests/test_compute_tf.py @@ -0,0 +1,136 @@ +import os + +from click.testing import CliRunner + +from waveorder.cli import settings +from waveorder.cli.compute_transfer_function import ( + generate_and_save_birefringence_transfer_function, + generate_and_save_fluorescence_transfer_function, + generate_and_save_phase_transfer_function, +) +from waveorder.cli.main import cli +from waveorder.io import utils + 
+ +def test_compute_transfer(tmp_path, example_plate): + recon_settings = settings.ReconstructionSettings( + input_channel_names=[f"State{i}" for i in range(4)], + reconstruction_dimension=3, + birefringence=settings.BirefringenceSettings(), + phase=settings.PhaseSettings(), + ) + config_path = tmp_path / "test.yml" + utils.model_to_yaml(recon_settings, config_path) + + output_path = tmp_path / "output.zarr" + + plate_path, _ = example_plate + runner = CliRunner() + result = runner.invoke( + cli, + [ + "compute-tf", + "-i", + str(plate_path / "A" / "1" / "0"), + "-c", + str(config_path), + "-o", + str(output_path), + ], + ) + assert result.exit_code == 0 + + +def test_compute_transfer_blank_config(): + runner = CliRunner() + for option in ("-c ", "--config-path "): + cmd = "compute-tf " + option + result = runner.invoke(cli, cmd) + assert result.exit_code == 2 + assert "Error" in result.output + + +def test_compute_transfer_blank_output(): + runner = CliRunner() + for option in ("-o ", "--output-path "): + cmd = "compute-tf " + option + result = runner.invoke(cli, cmd) + assert result.exit_code == 2 + assert "Error" in result.output + + +def test_compute_transfer_output_file(tmp_path, example_plate): + recon_settings = settings.ReconstructionSettings( + input_channel_names=["BF"], + reconstruction_dimension=3, + phase=settings.PhaseSettings(), + ) + config_path = tmp_path / "test.yml" + utils.model_to_yaml(recon_settings, config_path) + + plate_path, _ = example_plate + runner = CliRunner() + for option in ("-o", "--output-dirpath"): + for output_folder in ["test1.zarr", "test2/test.zarr"]: + output_path = tmp_path.joinpath(output_folder) + result = runner.invoke( + cli, + [ + "compute-tf", + "-i", + str(plate_path / "A" / "1" / "0"), + "-c", + str(config_path), + str(option), + str(output_path), + ], + ) + assert result.exit_code == 0 + assert str(output_path) in result.output + assert output_path.exists() + + +def 
test_stokes_matrix_write(birefringence_phase_recon_settings_function): + settings, dataset = birefringence_phase_recon_settings_function + generate_and_save_birefringence_transfer_function(settings, dataset) + assert dataset["intensity_to_stokes_matrix"] + + +def test_absorption_and_phase_write( + birefringence_phase_recon_settings_function, +): + settings, dataset = birefringence_phase_recon_settings_function + generate_and_save_phase_transfer_function(settings, dataset, (3, 4, 5)) + assert dataset["real_potential_transfer_function"] + assert dataset["imaginary_potential_transfer_function"] + assert dataset["imaginary_potential_transfer_function"].shape == ( + 1, + 1, + 3, + 4, + 5, + ) + assert "absorption_transfer_function" not in dataset + assert "phase_transfer_function" not in dataset + + +def test_phase_3dim_write(birefringence_phase_recon_settings_function): + settings, dataset = birefringence_phase_recon_settings_function + settings.reconstruction_dimension = 2 + generate_and_save_phase_transfer_function(settings, dataset, (3, 4, 5)) + assert dataset["absorption_transfer_function"] + assert dataset["phase_transfer_function"] + assert dataset["phase_transfer_function"].shape == (1, 1, 3, 4, 5) + assert "real_potential_transfer_function" not in dataset + assert "imaginary_potential_transfer_function" not in dataset + + +def test_fluorescence_write(fluorescence_recon_settings_function): + settings, dataset = fluorescence_recon_settings_function + generate_and_save_fluorescence_transfer_function( + settings, dataset, (3, 4, 5) + ) + assert dataset["optical_transfer_function"] + assert dataset["optical_transfer_function"].shape == (1, 1, 3, 4, 5) + assert "real_potential_transfer_function" not in dataset + assert "imaginary_potential_transfer_function" not in dataset diff --git a/tests/cli_tests/test_reconstruct.py b/tests/cli_tests/test_reconstruct.py new file mode 100644 index 00000000..6d7796c6 --- /dev/null +++ b/tests/cli_tests/test_reconstruct.py @@ -0,0 
+1,260 @@ +from pathlib import Path +from tempfile import TemporaryDirectory +from unittest.mock import patch + +import numpy as np +import pytest +from click.testing import CliRunner +from iohub.ngff import open_ome_zarr +from iohub.ngff_meta import TransformationMeta + +from waveorder.cli import settings +from waveorder.cli.apply_inverse_transfer_function import ( + apply_inverse_transfer_function_cli, +) +from waveorder.cli.main import cli +from waveorder.io import utils + +input_scale = [1, 2, 3, 4, 5] +# Setup options +birefringence_settings = settings.BirefringenceSettings( + transfer_function=settings.BirefringenceTransferFunctionSettings() +) + +# birefringence_option, time_indices, phase_option, dimension_option, time_length_target +all_options = [ + (birefringence_settings, [0, 3, 4], None, 2, 5), + (birefringence_settings, 0, settings.PhaseSettings(), 2, 5), + (birefringence_settings, [0, 1], None, 3, 5), + (birefringence_settings, "all", settings.PhaseSettings(), 3, 5), +] + + +@pytest.fixture(scope="session") +def tmp_input_path_zarr(): + tmp_path = TemporaryDirectory() + yield Path(tmp_path.name) / "input.zarr", Path(tmp_path.name) / "test.yml" + tmp_path.cleanup() + + +def test_reconstruct(tmp_input_path_zarr): + input_path, tmp_config_yml = tmp_input_path_zarr + # Generate input "dataset" + channel_names = [f"State{x}" for x in range(4)] + dataset = open_ome_zarr( + input_path, + layout="hcs", + mode="w", + channel_names=channel_names, + ) + + position = dataset.create_position("0", "0", "0") + position.create_zeros( + "0", + (5, 4, 4, 5, 6), + dtype=np.uint16, + transform=[TransformationMeta(type="scale", scale=input_scale)], + ) + + for i, ( + birefringence_option, + time_indices, + phase_option, + dimension_option, + time_length_target, + ) in enumerate(all_options): + if (birefringence_option is None) and (phase_option is None): + continue + + # Generate recon settings + recon_settings = settings.ReconstructionSettings( + 
input_channel_names=channel_names, + time_indices=time_indices, + reconstruction_dimension=dimension_option, + birefringence=birefringence_option, + phase=phase_option, + ) + config_path = tmp_config_yml.with_name(f"{i}.yml") + utils.model_to_yaml(recon_settings, config_path) + + # Run CLI + runner = CliRunner() + tf_path = input_path.with_name(f"tf_{i}.zarr") + runner.invoke( + cli, + [ + "compute-tf", + "-i", + str(input_path / "0" / "0" / "0"), + "-c", + str(config_path), + "-o", + str(tf_path), + ], + catch_exceptions=False, + ) + assert tf_path.exists() + + +def test_append_channel_reconstruction(tmp_input_path_zarr): + input_path, tmp_config_yml = tmp_input_path_zarr + output_path = input_path.with_name(f"output.zarr") + + # Generate input "dataset" + channel_names = [f"State{x}" for x in range(4)] + ["GFP"] + dataset = open_ome_zarr( + input_path, + layout="hcs", + mode="w", + channel_names=channel_names, + ) + position = dataset.create_position("0", "0", "0") + position.create_zeros( + "0", + (5, 5, 4, 5, 6), + dtype=np.uint16, + transform=[TransformationMeta(type="scale", scale=input_scale)], + ) + + # Generate recon settings + biref_settings = settings.ReconstructionSettings( + input_channel_names=[f"State{x}" for x in range(4)], + time_indices="all", + reconstruction_dimension=3, + birefringence=settings.BirefringenceSettings(), + phase=None, + fluorescence=None, + ) + fluor_settings = settings.ReconstructionSettings( + input_channel_names=["GFP"], + time_indices="all", + reconstruction_dimension=3, + birefringence=None, + phase=None, + fluorescence=settings.FluorescenceSettings(), + ) + biref_config_path = tmp_config_yml.with_name(f"biref.yml") + fluor_config_path = tmp_config_yml.with_name(f"fluor.yml") + + utils.model_to_yaml(biref_settings, biref_config_path) + utils.model_to_yaml(fluor_settings, fluor_config_path) + + # Apply birefringence reconstruction + runner = CliRunner() + runner.invoke( + cli, + [ + "reconstruct", + "-i", + str(input_path / 
"0" / "0" / "0"), + "-c", + str(biref_config_path), + "-o", + str(output_path), + ], + catch_exceptions=False, + ) + assert output_path.exists() + with open_ome_zarr(output_path) as dataset: + assert dataset["0/0/0"]["0"].shape[1] == 4 + + # Append fluoresncence reconstruction + runner.invoke( + cli, + [ + "reconstruct", + "-i", + str(input_path / "0" / "0" / "0"), + "-c", + str(fluor_config_path), + "-o", + str(output_path), + ], + catch_exceptions=False, + ) + assert output_path.exists() + with open_ome_zarr(output_path) as dataset: + assert dataset["0/0/0"]["0"].shape[1] == 5 + assert dataset.channel_names[-1] == "GFP_Density3D" + assert dataset.channel_names[-2] == "Pol" + + +def test_cli_apply_inv_tf_mock(tmp_input_path_zarr): + tmp_input_zarr, tmp_config_yml = tmp_input_path_zarr + tmp_config_yml = tmp_config_yml.with_name("0.yml").resolve() + tf_path = tmp_input_zarr.with_name("tf_0.zarr").resolve() + input_path = (tmp_input_zarr / "0" / "0" / "0").resolve() + result_path = tmp_input_zarr.with_name("result.zarr").resolve() + + assert tmp_config_yml.exists() + assert tf_path.exists() + assert input_path.exists() + assert not result_path.exists() + + runner = CliRunner() + with patch( + "waveorder.cli.apply_inverse_transfer_function.apply_inverse_transfer_function_cli" + ) as mock: + cmd = [ + "apply-inv-tf", + "-i", + str(input_path), + "-t", + str(tf_path), + "-c", + str(tmp_config_yml), + "-o", + str(result_path), + "-j", + str(1), + ] + result_inv = runner.invoke( + cli, + cmd, + catch_exceptions=False, + ) + mock.assert_called_with( + [input_path], + Path(tf_path), + Path(tmp_config_yml), + Path(result_path), + 1, + 1, + ) + assert result_inv.exit_code == 0 + + +def test_cli_apply_inv_tf_output(tmp_input_path_zarr, capsys): + tmp_input_zarr, tmp_config_yml = tmp_input_path_zarr + input_path = tmp_input_zarr / "0" / "0" / "0" + + for i, ( + birefringence_option, + time_indices, + phase_option, + dimension_option, + time_length_target, + ) in 
enumerate(all_options): + if (birefringence_option is None) and (phase_option is None): + continue + + result_path = tmp_input_zarr.with_name(f"result{i}.zarr").resolve() + + tf_path = tmp_input_zarr.with_name(f"tf_{i}.zarr") + tmp_config_yml = tmp_config_yml.with_name(f"{i}.yml") + + # # Check output + apply_inverse_transfer_function_cli( + [input_path], tf_path, tmp_config_yml, result_path, 1 + ) + + result_dataset = open_ome_zarr(str(result_path / "0" / "0" / "0")) + assert result_dataset["0"].shape[0] == time_length_target + assert result_dataset["0"].shape[3:] == (5, 6) + + assert result_path.exists() + captured = capsys.readouterr() + assert "submitted" in captured.out + + # Check scale transformations pass through + assert input_scale == result_dataset.scale diff --git a/tests/cli_tests/test_settings.py b/tests/cli_tests/test_settings.py new file mode 100644 index 00000000..df4f8d82 --- /dev/null +++ b/tests/cli_tests/test_settings.py @@ -0,0 +1,123 @@ +import pytest +from pydantic.v1 import ValidationError + +from waveorder.cli import settings +from waveorder.io import utils + + +def test_reconstruction_settings(): + # Test defaults + s = settings.ReconstructionSettings( + birefringence=settings.BirefringenceSettings() + ) + assert len(s.input_channel_names) == 4 + assert s.birefringence.apply_inverse.background_path == "" + assert s.phase == None + assert s.fluorescence == None + + # Test logic that "fluorescence" or ("phase" and/or "birefringence") + s = settings.ReconstructionSettings( + input_channel_names=["GFP"], + birefringence=None, + phase=None, + fluorescence=settings.FluorescenceSettings(), + ) + + assert s.fluorescence.apply_inverse.reconstruction_algorithm == "Tikhonov" + + # Not allowed to supply both phase/biref and fluorescence + with pytest.raises(ValidationError): + settings.ReconstructionSettings( + phase=settings.PhaseSettings(), + fluorescence=settings.FluorescenceSettings(), + ) + + # Test incorrect settings + with 
pytest.raises(ValidationError): + settings.ReconstructionSettings(input_channel_names=3) + + with pytest.raises(ValidationError): + settings.ReconstructionSettings(reconstruction_dimension=1) + + # Test typo + with pytest.raises(ValidationError): + settings.ReconstructionSettings( + flurescence=settings.FluorescenceSettings() + ) + + +def test_biref_tf_settings(): + settings.BirefringenceTransferFunctionSettings(swing=0.1) + + with pytest.raises(ValidationError): + settings.BirefringenceTransferFunctionSettings(swing=1.1) + + with pytest.raises(ValidationError): + settings.BirefringenceTransferFunctionSettings(scheme="Test") + + +def test_phase_tf_settings(): + settings.PhaseTransferFunctionSettings( + index_of_refraction_media=1.0, numerical_aperture_detection=0.8 + ) + + with pytest.raises(ValidationError): + settings.PhaseTransferFunctionSettings( + index_of_refraction_media=1.0, numerical_aperture_detection=1.1 + ) + + # Inconsistent units + with pytest.raises(Warning): + settings.PhaseTransferFunctionSettings( + yx_pixel_size=650, z_pixel_size=0.3 + ) + + # Extra parameter + with pytest.raises(ValidationError): + settings.PhaseTransferFunctionSettings(zyx_pixel_size=650) + + +def test_fluor_tf_settings(): + settings.FluorescenceTransferFunctionSettings( + wavelength_emission=0.500, yx_pixel_size=0.2 + ) + + with pytest.raises(Warning): + settings.FluorescenceTransferFunctionSettings( + wavelength_emission=0.500, yx_pixel_size=2000 + ) + + +def test_generate_example_settings(): + example_path = "./docs/examples/configs" + + s0 = settings.ReconstructionSettings( + birefringence=settings.BirefringenceSettings(), + phase=settings.PhaseSettings(), + ) + s1 = settings.ReconstructionSettings( + input_channel_names=["BF"], + phase=settings.PhaseSettings(), + ) + s2 = settings.ReconstructionSettings( + birefringence=settings.BirefringenceSettings(), + ) + s3 = settings.ReconstructionSettings( + input_channel_names=["GFP"], + 
fluorescence=settings.FluorescenceSettings(), + ) + file_names = [ + "birefringence-and-phase.yml", + "phase.yml", + "birefringence.yml", + "fluorescence.yml", + ] + settings_list = [s0, s1, s2, s3] + + # Save to examples folder and test roundtrip + for file_name, settings_obj in zip(file_names, settings_list): + utils.model_to_yaml(settings_obj, example_path + file_name) + settings_roundtrip = utils.yaml_to_model( + example_path + file_name, settings.ReconstructionSettings + ) + assert settings_obj.dict() == settings_roundtrip.dict() diff --git a/tests/conftest.py b/tests/conftest.py index c6101034..da9cb9cb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,13 +1,77 @@ +import os + +import numpy as np +import pytest import torch +from iohub.ngff import open_ome_zarr + +from waveorder.cli import settings def device_params(): devices = ["cpu"] if torch.cuda.is_available(): devices.append("cuda") - if torch.backends.mps.is_available(): + if ( + torch.backends.mps.is_available() + and os.getenv("GITHUB_ACTIONS") == "false" + ): devices.append("mps") return "device", devices _DEVICE = device_params() + + +@pytest.fixture(scope="function") +def example_plate(tmp_path): + plate_path = tmp_path / "input.zarr" + + position_list = ( + ("A", "1", "0"), + ("B", "1", "0"), + ("B", "2", "0"), + ) + + plate_dataset = open_ome_zarr( + plate_path, + layout="hcs", + mode="w", + channel_names=[f"State{i}" for i in range(4)] + ["BF"], + ) + + for row, col, fov in position_list: + position = plate_dataset.create_position(row, col, fov) + position.create_zeros("0", (2, 5, 4, 5, 6), dtype=np.uint16) + + yield plate_path, plate_dataset + + +@pytest.fixture(scope="function") +def birefringence_phase_recon_settings_function(tmp_path): + recon_settings = settings.ReconstructionSettings( + birefringence=settings.BirefringenceSettings(), + phase=settings.PhaseSettings(), + ) + dataset = open_ome_zarr( + tmp_path, + layout="fov", + mode="w", + channel_names=[f"State{i}" for i in 
range(4)], + ) + yield recon_settings, dataset + + +@pytest.fixture(scope="function") +def fluorescence_recon_settings_function(tmp_path): + recon_settings = settings.ReconstructionSettings( + input_channel_names=["GFP"], + fluorescence=settings.FluorescenceSettings(), + ) + dataset = open_ome_zarr( + tmp_path, + layout="fov", + mode="w", + channel_names=[f"State{i}" for i in range(4)], + ) + yield recon_settings, dataset diff --git a/tests/mmcore_tests/test_core_func.py b/tests/mmcore_tests/test_core_func.py new file mode 100644 index 00000000..c9ac39a5 --- /dev/null +++ b/tests/mmcore_tests/test_core_func.py @@ -0,0 +1,202 @@ +from typing import Callable, Tuple +from unittest.mock import MagicMock, Mock, call + +import numpy as np +import pytest +from numpy import ndarray + +# tested components +from waveorder.io.core_functions import * + +# TODO: move these to fixture or generate with Hypothesis +# dynamic range +TIFF_I_MAX = 2**16 +# image size +IMAGE_WIDTH = np.random.randint(1, 2**12) +IMAGE_HEIGHT = np.random.randint(1, 2**12) +PIXEL_COUNT = IMAGE_HEIGHT * IMAGE_WIDTH +# serialized image from the pycromanager bridge +SERIAL_IMAGE = np.random.randint(0, TIFF_I_MAX, size=(PIXEL_COUNT,)) +# LC device parameters +# TODO: parameterize this example +DEVICE_PROPERTY = ("deviceName", "propertyName") +CONFIG_GROUP = "configGroup" +CONFIG_NAME = "State0" +# LC state in native units +LC_STATE = np.random.rand(1)[0] * 10 + + +def _get_mmcore_mock(): + """Creates a mock for the `pycromanager.Core` object. + + Returns + ------- + MagicMock + MMCore mock object + """ + mmcore_mock_config = { + "getImage": Mock(return_value=SERIAL_IMAGE), + "getProperty": Mock(return_value=str(LC_STATE)), + } + return MagicMock(**mmcore_mock_config) + + +def _get_snap_manager_mock(): + """Creates a mock for the pycromanager remote Snap Live Window Manager object. 
+ + Returns + ------- + MagicMock + Mock object for `org.micromanager.internal.SnapLiveManager` via pycromanager + """ + sm = MagicMock() + get_snap_mocks = { + "getHeight": Mock(return_value=IMAGE_HEIGHT), + "getWidth": Mock(return_value=IMAGE_WIDTH), + "getRawPixels": Mock(return_value=SERIAL_IMAGE), + } + # TODO: break down these JAVA call stack chains for maintainability + sm.getDisplay.return_value.getDisplayedImages.return_value.get = Mock( + # return image object mock with H, W, and pixel values + return_value=Mock(**get_snap_mocks) + ) + sm.getDisplay.return_value.getImagePlus.return_value.getStatistics = Mock( + # return statistics object mock with the attribute "umean" + return_value=Mock(umean=SERIAL_IMAGE.mean()) + ) + return sm + + +def _is_int(data: ndarray): + """Check if the data type is integer. + + Parameters + ---------- + data + + Returns + ------- + bool + True if the data type is any integer type. + """ + return np.issubdtype(data.dtype, np.integer) + + +def _get_examples(low: float, high: float): + """Generate 4 valid and 4 invalid floating numbers for closed interval [low, high]. 
+ + Parameters + ---------- + low : float + high : float + + Returns + ------- + tuple(1d-array, 1d-array) + valid and invalid values + """ + epsilon = np.finfo(float).eps + samples = np.random.rand(4) + valid_values = samples * (high - low) + low + epsilon + invalid_values = np.array( + [ + low - samples[0], + low - samples[1], + high + samples[2], + high + samples[3], + ] + ) + return valid_values, invalid_values + + +def test_suspend_live_sm(): + """Test `waveorder.io.core_functions.suspend_live_sm`.""" + snap_manager = _get_snap_manager_mock() + with suspend_live_sm(snap_manager) as sm: + sm.setSuspended.assert_called_once_with(True) + snap_manager.setSuspended.assert_called_with(False) + + +def test_snap_and_get_image(): + """Test `waveorder.io.core_functions.snap_and_get_image`.""" + sm = _get_snap_manager_mock() + image = snap_and_get_image(sm) + assert _is_int(image), image.dtype + assert image.shape == (IMAGE_HEIGHT, IMAGE_WIDTH), image.shape + + +def test_snap_and_average(): + """Test `waveorder.io.core_functions.snap_and_average`.""" + sm = _get_snap_manager_mock() + mean = snap_and_average(sm) + np.testing.assert_almost_equal(mean, SERIAL_IMAGE.mean()) + + +def _set_lc_test( + tested_func: Callable[[object, Tuple[str, str], float], None], + value_range: Tuple[float, float], +): + mmc = _get_mmcore_mock() + valid_values, invalid_values = _get_examples(*value_range) + for value in valid_values: + tested_func(mmc, DEVICE_PROPERTY, value) + mmc.setProperty.assert_called_with( + DEVICE_PROPERTY[0], DEVICE_PROPERTY[1], str(value) + ) + for value in invalid_values: + with pytest.raises(ValueError): + tested_func(mmc, DEVICE_PROPERTY, value) + + +def test_set_lc_waves(): + """Test `waveorder.io.core_functions.set_lc_waves`.""" + _set_lc_test(set_lc_waves, (0.001, 1.6)) + + +def test_set_lc_voltage(): + """Test `waveorder.io.core_functions.set_lc_voltage`.""" + _set_lc_test(set_lc_voltage, (0.0, 20.0)) + + +def test_set_lc_daq(): + """Test 
`waveorder.io.core_functions.set_lc_daq`.""" + _set_lc_test(set_lc_daq, (0.0, 5.0)) + + +def test_get_lc(): + """Test `waveorder.io.core_functions.get_lc`.""" + mmc = _get_mmcore_mock() + state = get_lc(mmc, DEVICE_PROPERTY) + mmc.getProperty.assert_called_once_with(*DEVICE_PROPERTY) + np.testing.assert_almost_equal(state, LC_STATE) + + +def test_define_meadowlark_state(): + """Test `waveorder.io.core_functions.define_meadowlark_state`.""" + mmc = _get_mmcore_mock() + define_meadowlark_state(mmc, DEVICE_PROPERTY) + mmc.setProperty.assert_called_once_with(*DEVICE_PROPERTY, 0) + mmc.waitForDevice.assert_called_once_with(DEVICE_PROPERTY[0]) + + +def test_define_config_state(): + """Test `waveorder.io.core_functions.define_config_state`.""" + mmc = _get_mmcore_mock() + device_properties = [DEVICE_PROPERTY] * 4 + values = _get_examples(0, 10)[0].tolist() + define_config_state( + mmc, CONFIG_GROUP, CONFIG_NAME, device_properties, values + ) + expected_calls = [ + call(CONFIG_GROUP, CONFIG_NAME, *d, str(v)) + for d, v in zip(device_properties, values) + ] + got_calls = mmc.defineConfig.call_args_list + assert got_calls == expected_calls, got_calls + + +def test_set_lc_state(): + """Test `waveorder.io.core_functions.set_lc_state`.""" + mmc = _get_mmcore_mock() + set_lc_state(mmc, CONFIG_GROUP, CONFIG_NAME) + mmc.setConfig.assert_called_once_with(CONFIG_GROUP, CONFIG_NAME) diff --git a/tests/test_examples.py b/tests/test_examples.py index 3aeb868a..afc937c1 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -7,8 +7,10 @@ def _run_scripts(scripts): for script in scripts: - path = os.path.join(os.getcwd(), "examples/maintenance/", script) - completed_process = subprocess.run(["python", path]) + path = os.path.join(os.getcwd(), "docs/examples/maintenance/", script) + completed_process = subprocess.run( + [sys.executable, path], env=os.environ + ) assert completed_process.returncode == 0 @@ -17,6 +19,9 @@ def _run_scripts(scripts): # close them to make the 
tests pass. # TODO: see if we can make these run locally w/o showing matplotlib # @patch("matplotlib.pyplot.show") +@pytest.mark.skipif( + os.getenv("GITHUB_ACTIONS") == "true", reason="Skip on GitHub Actions" +) def test_qlipp_examples(): scripts = [ "QLIPP_simulation/2D_QLIPP_forward.py", @@ -25,6 +30,9 @@ def test_qlipp_examples(): _run_scripts(scripts) +@pytest.mark.skipif( + os.getenv("GITHUB_ACTIONS") == "true", reason="Skip on GitHub Actions" +) def test_pti_examples(): scripts = [ "PTI_simulation/PTI_Simulation_Forward_2D3D.py", @@ -34,18 +42,24 @@ def test_pti_examples(): _run_scripts(scripts) -@pytest.mark.skipif("napari" not in sys.modules, reason="requires napari") +@pytest.mark.skipif( + os.getenv("GITHUB_ACTIONS") == "true", + reason="Skip on GitHub Actions, requires napari", +) def test_phase_examples(): scripts = [ "isotropic_thin_3d.py", "phase_thick_3d.py", - "inplane_anisotropic_thin_pol3d.py", + "inplane_oriented_thick_pol3d.py", ] for script in scripts: - path = os.path.join(os.getcwd(), "examples/models/", script) + path = os.path.join(os.getcwd(), "docs/examples/models/", script) # examples needs two s so send input="e\ne" completed_process = subprocess.run( - ["python", path], input="e\ne", encoding="ascii" + [sys.executable, path], + input="e\ne", + encoding="ascii", + env=os.environ, ) assert completed_process.returncode == 0 diff --git a/tests/util_tests/test_create_empty.py b/tests/util_tests/test_create_empty.py new file mode 100644 index 00000000..506cb280 --- /dev/null +++ b/tests/util_tests/test_create_empty.py @@ -0,0 +1,60 @@ +from pathlib import Path + +import numpy as np +from iohub.ngff import Position, open_ome_zarr + +from waveorder.cli.utils import create_empty_hcs_zarr + + +def test_create_empty_hcs_zarr(tmp_path): + store_path = tmp_path / Path("test_store.zarr") + position_keys: list[tuple[str]] = [ + ("A", "0", "3"), + ("B", "10", "4"), + ] + shape = (1, 2, 1, 1024, 1024) + chunks = (1, 1, 1, 256, 256) + scale = (1, 1, 1, 
0.5, 0.5) + channel_names = ["Channel1", "Channel2"] + dtype = np.uint16 + plate_metadata = {"test": 2} + + create_empty_hcs_zarr( + store_path, + position_keys, + shape, + chunks, + scale, + channel_names, + dtype, + plate_metadata, + ) + + # Verify existence of positions and channels + with open_ome_zarr(store_path, mode="r") as plate: + assert plate.zattrs["test"] == 2 + for position_key in position_keys: + position = plate["/".join(position_key)] + assert isinstance(position, Position) + assert position[0].shape == shape + + # Repeat creation should not fail + more_channel_names = ["Channel3"] + create_empty_hcs_zarr( + store_path, + position_keys, + shape, + chunks, + scale, + more_channel_names, + dtype, + ) + + # Verify existence of appended channel names + channel_names += more_channel_names + for position_key in position_keys: + position_path = store_path + for element in position_key: + position_path /= element + with open_ome_zarr(position_path, mode="r") as position: + assert position.channel_names == channel_names diff --git a/tests/util_tests/test_io.py b/tests/util_tests/test_io.py new file mode 100644 index 00000000..ee645add --- /dev/null +++ b/tests/util_tests/test_io.py @@ -0,0 +1,72 @@ +import os +from pathlib import Path + +import pytest +import yaml + +from waveorder.cli import settings +from waveorder.io.utils import add_index_to_path, model_to_yaml + + +@pytest.fixture +def model(): + # Create a sample model object + return settings.ReconstructionSettings( + birefringence=settings.BirefringenceSettings() + ) + + +@pytest.fixture +def yaml_path(tmpdir): + # Create a temporary YAML file path + return os.path.join(tmpdir, "model.yaml") + + +def test_model_to_yaml(model, yaml_path): + # Call the function under test + model_to_yaml(model, yaml_path) + + # Check if the YAML file is created + assert os.path.exists(yaml_path) + + # Load the YAML file and verify its contents + with open(yaml_path, "r") as f: + yaml_data = yaml.safe_load(f) + + # 
Check if the YAML data is a dictionary + assert isinstance(yaml_data, dict) + + # Check YAML data + assert "input_channel_names" in yaml_data + + +def test_model_to_yaml_invalid_model(): + # Create an object that does not have a 'dict()' method + invalid_model = "not a model object" + + # Call the function and expect a TypeError + with pytest.raises(TypeError): + model_to_yaml(invalid_model, "model.yaml") + + +def test_add_index_to_path(tmp_path): + test_cases = [ + ("output.txt", "output_0.txt"), + ("output.txt", "output_1.txt"), + ("output.txt", "output_2.txt"), + ("output.png", "output_0.png"), + ("output.png", "output_1.png"), + ("output.png", "output_2.png"), + ("folder", "folder_0"), + ("folder", "folder_1"), + ("folder", "folder_2"), + ] + + for input_path_str, expected_output_str in test_cases: + input_path = tmp_path / Path(input_path_str) + expected_output = tmp_path / Path(expected_output_str) + + output_path = add_index_to_path(input_path) + assert output_path == expected_output + + output_path.touch() # Create a file/folder at the expected output path for testing diff --git a/tests/util_tests/test_overlays.py b/tests/util_tests/test_overlays.py new file mode 100644 index 00000000..dff1a623 --- /dev/null +++ b/tests/util_tests/test_overlays.py @@ -0,0 +1,72 @@ +import hypothesis.extra.numpy as npst +import hypothesis.strategies as st +import numpy as np +from hypothesis import given +from numpy.testing import assert_equal +from numpy.typing import NDArray + +from waveorder.io.visualization import ret_ori_overlay, ret_ori_phase_overlay + + +@st.composite +def _birefringence(draw): + shape = (2,) + tuple( + draw(st.lists(st.integers(1, 16), min_size=2, max_size=4)) + ) + dtype = draw(npst.floating_dtypes(sizes=(32, 64))) + bit_width = dtype.itemsize * 8 + retardance = draw( + npst.arrays( + dtype, + shape=shape, + elements=st.floats( + min_value=1.0000000168623835e-16, + max_value=50, + exclude_min=True, + width=bit_width, + ), + ) + ) + orientation = 
draw( + npst.arrays( + dtype, + shape=shape, + elements=st.floats( + min_value=0, + max_value=dtype.type(np.pi), + exclude_min=True, + exclude_max=True, + width=bit_width, + ), + ) + ) + + return retardance, orientation + + +@given(briefringence=_birefringence(), jch=st.booleans()) +def test_ret_ori_overlay(briefringence: tuple[NDArray, NDArray], jch: bool): + """Test waveorder.io.utils.ret_ori_overlay()""" + retardance, orientation = briefringence + retardance_copy = retardance.copy() + orientation_copy = orientation.copy() + cmap = "JCh" if jch else "HSV" + overlay = ret_ori_overlay( + np.stack((retardance, orientation)), + ret_max=np.percentile(retardance, 99), + cmap=cmap, + ) + + overlay2 = ret_ori_phase_overlay( + np.stack((retardance, orientation, retardance)), # dummy phase + ) + + # check that the function did not mutate input data + assert_equal(retardance, retardance_copy) + assert_equal(orientation, orientation_copy) + # check output properties + # output contains NaN, pending further investigation + # assert overlay.min() >= 0 + # assert overlay.max() <= 1 + assert overlay.shape == (3,) + retardance.shape + assert overlay2.shape == (3,) + retardance.shape diff --git a/tests/widget_tests/test_dock_widget.py b/tests/widget_tests/test_dock_widget.py new file mode 100644 index 00000000..f11e8c40 --- /dev/null +++ b/tests/widget_tests/test_dock_widget.py @@ -0,0 +1,9 @@ +from napari.viewer import ViewerModel + +from waveorder.plugin.main_widget import MainWidget + + +def test_dock_widget(make_napari_viewer): + viewer: ViewerModel = make_napari_viewer() + viewer.window.add_dock_widget(MainWidget(viewer)) + assert "waveorder" in list(viewer._window._dock_widgets.keys())[0] diff --git a/tests/widget_tests/test_sample_contributions.py b/tests/widget_tests/test_sample_contributions.py new file mode 100644 index 00000000..66e6bbfe --- /dev/null +++ b/tests/widget_tests/test_sample_contributions.py @@ -0,0 +1,8 @@ +from waveorder.scripts.samples import 
download_and_unzip + + +def test_download_and_unzip(): + p1, p2 = download_and_unzip("target") + + assert p1.exists() + assert p2.exists() diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..6e2d6793 --- /dev/null +++ b/tox.ini @@ -0,0 +1,33 @@ +# Modified from from cookiecutter-napari-plugin +# For more information about tox, see https://tox.readthedocs.io/en/latest/ +[tox] +envlist = py{310,311}-{linux,macos,windows} +isolated_build=true + +[gh-actions] +python = + 3.10: py310 + 3.11: py311 + +[gh-actions:env] +PLATFORM = + ubuntu-latest: linux + macos-latest: macos + windows-latest: windows + +[testenv] +platform = + macos: darwin + linux: linux + windows: win32 +passenv = + CI + GITHUB_ACTIONS + DISPLAY + XAUTHORITY + NUMPY_EXPERIMENTAL_ARRAY_FUNCTION + PYVISTA_OFF_SCREEN +extras = + dev + all +commands = pytest -v --color=yes --cov=waveorder --cov-report=xml diff --git a/waveorder/acq/__init__.py b/waveorder/acq/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/waveorder/acq/acq_functions.py b/waveorder/acq/acq_functions.py new file mode 100644 index 00000000..2ba1f8ed --- /dev/null +++ b/waveorder/acq/acq_functions.py @@ -0,0 +1,171 @@ +import glob +import json +import os +import time + +import numpy as np +from iohub import read_micromanager + +try: + from pycromanager import Studio +except: + pass + + +def generate_acq_settings( + mm, + channel_group, + channels=None, + zstart=None, + zend=None, + zstep=None, + save_dir=None, + prefix=None, + keep_shutter_open_channels=False, + keep_shutter_open_slices=False, +): + """ + This function generates a json file specific to the Micro-Manager SequenceSettings. + It has default parameters for a multi-channels z-stack acquisition but does not yet + support multi-position or multi-frame acquisitions. + + This also has default values for QLIPP Acquisition. 
Can be used as a framework for other types + of acquisitions + + Parameters + ---------- + mm: (object) MM Studio API object + scheme: (str) '4-State' or '5-State' + zstart: (float) relative starting position for the z-stack + zend: (float) relative ending position for the z-stack + zstep: (float) step size for the z-stack + save_dir: (str) path to save directory + prefix: (str) name to save the data under + + Returns + ------- + settings: (json) json dictionary conforming to MM SequenceSettings + """ + + # Get API Objects + am = mm.getAcquisitionManager() + ss = am.getAcquisitionSettings() + app = mm.app() + + # Get current SequenceSettings to modify + original_ss = ss.toJSONStream(ss) + original_json = json.loads(original_ss).copy() + + if zstart: + do_z = True + else: + do_z = False + + # Structure of the channel properties + channel_dict = { + "channelGroup": channel_group, + "config": None, + "exposure": None, + "zOffset": 0, + "doZStack": do_z, + "color": {"value": -16747854, "falpha": 0.0}, + "skipFactorFrame": 0, + "useChannel": True if channels else False, + } + + channel_list = None + if channels: + # Append all the channels with their current exposure settings + channel_list = [] + for chan in channels: + # todo: think about how to deal with missing exposure + exposure = app.getChannelExposureTime( + channel_group, chan, 10 + ) # sets exposure to 10 if not found + channel = channel_dict.copy() + channel["config"] = chan + channel["exposure"] = exposure + + channel_list.append(channel) + + # set other parameters + original_json["numFrames"] = 1 + original_json["intervalMs"] = 0 + original_json["relativeZSlice"] = True + original_json["slicesFirst"] = True + original_json["timeFirst"] = False + original_json["keepShutterOpenSlices"] = keep_shutter_open_slices + original_json["keepShutterOpenChannels"] = keep_shutter_open_channels + original_json["useAutofocus"] = False + original_json["saveMode"] = "MULTIPAGE_TIFF" + original_json["save"] = True if 
save_dir else False + original_json["root"] = save_dir if save_dir else "" + original_json["prefix"] = prefix if prefix else "Untitled" + original_json["channels"] = channel_list + original_json["zReference"] = 0.0 + original_json["channelGroup"] = channel_group + original_json["usePositionList"] = False + original_json["shouldDisplayImages"] = True + original_json["useSlices"] = do_z + original_json["useFrames"] = False + original_json["useChannels"] = True if channels else False + original_json["slices"] = ( + list(np.arange(float(zstart), float(zend + zstep), float(zstep))) + if zstart + else [] + ) + original_json["sliceZStepUm"] = zstep + original_json["sliceZBottomUm"] = zstart + original_json["sliceZTopUm"] = zend + original_json["acqOrderMode"] = 1 + + return original_json + + +def acquire_from_settings( + mm: Studio, + settings: dict, + grab_images: bool = True, + restore_settings: bool = True, +): + """Function to acquire an MDA acquisition with the native MM MDA Engine. + Assumes single position acquisition. + + Parameters + ---------- + mm : Studio + settings : dict + JSON dictionary conforming to MM SequenceSettings + grab_images : bool, optional + return the acquired array, by default True + restore_settings : bool, optional + restore MDA settings before acquisition, by default True + + Returns + ------- + NDArray + acquired images + """ + am = mm.getAcquisitionManager() + ss = am.getAcquisitionSettings() + + ss_new = ss.fromJSONStream(json.dumps(settings)) + am.runAcquisitionWithSettings(ss_new, True) + + time.sleep(3) + + if restore_settings: + am.setAcquisitionSettings(ss) + + # TODO: speed improvements in reading the data with pycromanager acquisition? 
+ if grab_images: + # get the most recent acquisition if multiple + path = os.path.join(settings["root"], settings["prefix"]) + files = glob.glob(path + "*") + index = max([int(x.split(path + "_")[1]) for x in files]) + + reader = read_micromanager( + path + f"_{index}", data_type="ometiff", extract_data=True + ) + + return reader.get_array(0) diff --git a/waveorder/acq/acquisition_workers.py b/waveorder/acq/acquisition_workers.py new file mode 100644 index 00000000..32a169a9 --- /dev/null +++ b/waveorder/acq/acquisition_workers.py @@ -0,0 +1,651 @@ +from __future__ import annotations + +import logging +import shutil +from pathlib import Path + +# type hint/check +from typing import TYPE_CHECKING + +import numpy as np +from iohub import open_ome_zarr +from iohub.convert import TIFFConverter +from napari.qt.threading import WorkerBase, WorkerBaseSignals +from napari.utils.notifications import show_warning +from qtpy.QtCore import Signal + +from waveorder.acq.acq_functions import ( + acquire_from_settings, + generate_acq_settings, +) +from waveorder.cli import settings +from waveorder.cli.apply_inverse_transfer_function import ( + apply_inverse_transfer_function_cli, +) +from waveorder.cli.compute_transfer_function import ( + compute_transfer_function_cli, +) +from waveorder.io.utils import add_index_to_path, model_to_yaml, ram_message + +# avoid runtime import error +if TYPE_CHECKING: + from waveorder.calib.Calibration import QLIPP_Calibration + from waveorder.plugin.main_widget import MainWidget + + +def _check_scale_mismatch( + recon_scale: np.array, + ngff_scale: tuple[float, float, float, float, float], +) -> None: + if not np.allclose(np.array(ngff_scale[2:]), recon_scale, rtol=1e-2): + show_warning( + f"Requested reconstruction scale = {recon_scale} " + f"and OME-Zarr metadata scale = {ngff_scale[2:]} are not equal. 
" + "waveorder's reconstruction uses the GUI's " + "Z-step, pixel size, and magnification, " + "while napari's viewer uses the input array's metadata." + ) + + +def _generate_reconstruction_config_from_gui( + reconstruction_config_path, + mode, + calib_window, + input_channel_names, +): + if mode == "birefringence" or mode == "all": + if calib_window.bg_option == "None": + background_path = "" + remove_estimated_background = False + elif calib_window.bg_option == "Measured": + background_path = str(calib_window.acq_bg_directory) + remove_estimated_background = False + elif calib_window.bg_option == "Estimated": + background_path = "" + remove_estimated_background = True + elif calib_window.bg_option == "Measured + Estimated": + background_path = str(calib_window.acq_bg_directory) + remove_estimated_background = True + + birefringence_transfer_function_settings = ( + settings.BirefringenceTransferFunctionSettings( + swing=calib_window.swing, + ) + ) + birefringence_apply_inverse_settings = ( + settings.BirefringenceApplyInverseSettings( + wavelength_illumination=calib_window.recon_wavelength + / 1000, # convert from um to nm + background_path=background_path, + remove_estimated_background=remove_estimated_background, + flip_orientation=calib_window.flip_orientation, + rotate_orientation=calib_window.rotate_orientation, + ) + ) + birefringence_settings = settings.BirefringenceSettings( + transfer_function=birefringence_transfer_function_settings, + apply_inverse=birefringence_apply_inverse_settings, + ) + else: + birefringence_settings = None + + if mode == "phase" or mode == "all": + phase_transfer_function_settings = ( + settings.PhaseTransferFunctionSettings( + wavelength_illumination=calib_window.recon_wavelength + / 1000, # um + yx_pixel_size=calib_window.ps / calib_window.mag, # um + z_pixel_size=calib_window.z_step, # um + z_padding=calib_window.pad_z, + index_of_refraction_media=calib_window.n_media, + numerical_aperture_detection=calib_window.obj_na, + 
numerical_aperture_illumination=calib_window.cond_na, + invert_phase_contrast=calib_window.invert_phase_contrast, + ) + ) + phase_apply_inverse_settings = settings.FourierApplyInverseSettings( + reconstruction_algorithm=calib_window.phase_regularizer, + regularization_strength=calib_window.ui.le_phase_strength.text(), + TV_rho_strength=calib_window.ui.le_rho.text(), + TV_iterations=calib_window.ui.le_itr.text(), + ) + phase_settings = settings.PhaseSettings( + transfer_function=phase_transfer_function_settings, + apply_inverse=phase_apply_inverse_settings, + ) + else: + phase_settings = None + + reconstruction_settings = settings.ReconstructionSettings( + input_channel_names=input_channel_names, + reconstruction_dimension=int(calib_window.acq_mode[0]), + birefringence=birefringence_settings, + phase=phase_settings, + ) + + model_to_yaml(reconstruction_settings, reconstruction_config_path) + + +class PolarizationAcquisitionSignals(WorkerBaseSignals): + """ + Custom Signals class that includes napari native signals + """ + + phase_image_emitter = Signal(tuple) + bire_image_emitter = Signal(tuple) + phase_reconstructor_emitter = Signal(object) + aborted = Signal() + + +class BFAcquisitionSignals(WorkerBaseSignals): + """ + Custom Signals class that includes napari native signals + """ + + phase_image_emitter = Signal(tuple) + phase_reconstructor_emitter = Signal(object) + aborted = Signal() + + +class BFAcquisitionWorker(WorkerBase): + """ + Class to execute a brightfield acquisition. First step is to snap the images follow by a second + step of reconstructing those images. 
+ """ + + def __init__(self, calib_window: MainWidget): + super().__init__(SignalsClass=BFAcquisitionSignals) + + # Save current state of GUI window + self.calib_window = calib_window + + # Init Properties + self.prefix = "snap" + self.dm = self.calib_window.mm.displays() + self.dim = ( + "2D" + if self.calib_window.ui.cb_acq_mode.currentIndex() == 0 + else "3D" + ) + self.img_dim = None + + save_dir = ( + self.calib_window.save_directory + if self.calib_window.save_directory + else self.calib_window.directory + ) + + if save_dir is None: + raise ValueError( + "save directory is empty, please specify a directory in the plugin" + ) + + if self.calib_window.save_name is None: + self.snap_dir = Path(save_dir) / "snap" + else: + self.snap_dir = Path(save_dir) / ( + self.calib_window.save_name + "_snap" + ) + self.snap_dir = add_index_to_path(self.snap_dir) + self.snap_dir.mkdir() + + def _check_abort(self): + if self.abort_requested: + self.aborted.emit() + raise TimeoutError("Stop Requested") + + def _check_ram(self): + """ + Show a warning if RAM < 32 GB. 
+ """ + is_warning, msg = ram_message() + if is_warning: + show_warning(msg) + else: + logging.info(msg) + + def work(self): + """ + Function that runs the 2D or 3D acquisition and reconstructs the data + """ + self._check_ram() + logging.info("Running Acquisition...") + self._check_abort() + + channel_idx = self.calib_window.ui.cb_acq_channel.currentIndex() + channel = self.calib_window.ui.cb_acq_channel.itemText(channel_idx) + channel_group = None + + groups = self.calib_window.mmc.getAvailableConfigGroups() + group_list = [] + for i in range(groups.size()): + group_list.append(groups.get(i)) + + for group in group_list: + config = self.calib_window.mmc.getAvailableConfigs(group) + for idx in range(config.size()): + if channel in config.get(idx): + channel_group = group + break + + # Create and validate reconstruction settings + self.config_path = self.snap_dir / "reconstruction_settings.yml" + + _generate_reconstruction_config_from_gui( + self.config_path, + "phase", + self.calib_window, + input_channel_names=["BF"], + ) + + # Acquire 3D stack + logging.debug("Acquiring 3D stack") + + # Generate MDA Settings + settings = generate_acq_settings( + self.calib_window.mm, + channel_group=channel_group, + channels=[channel], + zstart=self.calib_window.z_start, + zend=self.calib_window.z_end, + zstep=self.calib_window.z_step, + save_dir=str(self.snap_dir), + prefix=self.prefix, + keep_shutter_open_slices=True, + ) + + self._check_abort() + + # Acquire from MDA settings uses MM MDA GUI + # Returns (1, 4/5, Z, Y, X) array + stack = acquire_from_settings( + self.calib_window.mm, + settings, + grab_images=True, + restore_settings=True, + ) + self._check_abort() + + # Cleanup acquisition by closing window, converting to zarr, and deleting temp directory + self._cleanup_acq() + + # Reconstruct snapped images + self.n_slices = stack.shape[2] + + phase, scale = self._reconstruct() + self._check_abort() + + # Warn the user about axial + if 
self.calib_window.invert_phase_contrast: + show_warning( + "Inverting the phase contrast. This affects the visualization and saved reconstruction." + ) + + # Warn user about mismatched scales + recon_scale = np.array( + (self.calib_window.z_step,) + + 2 * (self.calib_window.ps / self.calib_window.mag,) + ) + _check_scale_mismatch(recon_scale, scale) + + logging.info("Finished Acquisition") + logging.debug("Finished Acquisition") + + # Emit the images and let thread know function is finished + self.phase_image_emitter.emit((phase, scale)) + + def _reconstruct(self): + """ + Method to reconstruct + """ + self._check_abort() + + # Create i/o paths + transfer_function_path = Path(self.snap_dir) / "transfer_function.zarr" + reconstruction_path = Path(self.snap_dir) / "reconstruction.zarr" + input_data_path = Path(self.latest_out_path) / "0" / "0" / "0" + + # TODO: skip if config files match + compute_transfer_function_cli( + input_position_dirpath=input_data_path, + config_filepath=self.config_path, + output_dirpath=transfer_function_path, + ) + + apply_inverse_transfer_function_cli( + input_position_dirpaths=[input_data_path], + transfer_function_dirpath=transfer_function_path, + config_filepath=self.config_path, + output_dirpath=reconstruction_path, + ) + + # Read reconstruction to pass to emitters + with open_ome_zarr(reconstruction_path, mode="r") as dataset: + phase = dataset["0/0/0/0"][0] + scale = dataset["0/0/0"].scale + + return phase, scale + + def _cleanup_acq(self): + # Get display windows + disps = self.dm.getAllDataViewers() + + # loop through display window and find one with matching prefix + for i in range(disps.size()): + disp = disps.get(i) + + # close the datastore and grab the path to where the data is saved + if self.prefix in disp.getName(): + dp = disp.getDataProvider() + dir_ = dp.getSummaryMetadata().getDirectory() + prefix = dp.getSummaryMetadata().getPrefix() + closed = False + disp.close() + while not closed: + closed = disp.isClosed() + 
dp.close() + + # Try to delete the data, sometime it isn't cleaned up quickly enough and will + # return an error. In this case, catch the error and then try to close again (seems to work). + try: + self.latest_out_path = self.snap_dir / "raw_data.zarr" + converter = TIFFConverter( + str(Path(dir_) / prefix), + str(self.latest_out_path), + data_type="ometiff", + grid_layout=False, + ) + converter.run() + shutil.rmtree(Path(dir_) / prefix) + except PermissionError as ex: + dp.close() + break + else: + continue + + +# TODO: Cache common OTF's on local computers and use those for reconstruction +class PolarizationAcquisitionWorker(WorkerBase): + """ + Class to execute a birefringence/phase acquisition. First step is to snap the images follow by a second + step of reconstructing those images. + """ + + def __init__( + self, calib_window: MainWidget, calib: QLIPP_Calibration, mode: str + ): + super().__init__(SignalsClass=PolarizationAcquisitionSignals) + + # Save current state of GUI window + self.calib_window = calib_window + + # Init properties + self.calib = calib + self.mode = mode + self.n_slices = None + self.prefix = "waveorderPluginSnap" + self.dm = self.calib_window.mm.displays() + self.channel_group = self.calib_window.config_group + + # Determine whether 2D or 3D acquisition is needed + if self.mode == "birefringence" and self.calib_window.acq_mode == "2D": + self.dim = "2D" + else: + self.dim = "3D" + + save_dir = ( + self.calib_window.save_directory + if self.calib_window.save_directory + else self.calib_window.directory + ) + + if save_dir is None: + raise ValueError( + "save directory is empty, please specify a directory in the plugin" + ) + + if self.calib_window.save_name is None: + self.snap_dir = Path(save_dir) / "snap" + else: + self.snap_dir = Path(save_dir) / ( + self.calib_window.save_name + "_snap" + ) + self.snap_dir = add_index_to_path(self.snap_dir) + self.snap_dir.mkdir() + + def _check_abort(self): + if self.abort_requested: + 
self.aborted.emit() + raise TimeoutError("Stop Requested") + + def _check_ram(self): + """ + Show a warning if RAM < 32 GB. + """ + is_warning, msg = ram_message() + if is_warning: + show_warning(msg) + else: + logging.info(msg) + + def work(self): + """ + Function that runs the 2D or 3D acquisition and reconstructs the data + """ + self._check_ram() + logging.info("Running Acquisition...") + + # List the Channels to acquire, if 5-state then append 5th channel + channels = ["State0", "State1", "State2", "State3"] + if self.calib.calib_scheme == "5-State": + channels.append("State4") + + self._check_abort() + + # Create and validate reconstruction settings + self.config_path = self.snap_dir / "reconstruction_settings.yml" + _generate_reconstruction_config_from_gui( + self.config_path, + self.mode, + self.calib_window, + input_channel_names=channels, + ) + + # Acquire 2D stack + if self.dim == "2D": + logging.debug("Acquiring 2D stack") + + # Generate MDA Settings + self.settings = generate_acq_settings( + self.calib_window.mm, + channel_group=self.channel_group, + channels=channels, + save_dir=str(self.snap_dir), + prefix=self.prefix, + keep_shutter_open_channels=True, + ) + self._check_abort() + # acquire images + stack = self._acquire() + + # Acquire 3D stack + else: + logging.debug("Acquiring 3D stack") + + # Generate MDA Settings + self.settings = generate_acq_settings( + self.calib_window.mm, + channel_group=self.channel_group, + channels=channels, + zstart=self.calib_window.z_start, + zend=self.calib_window.z_end, + zstep=self.calib_window.z_step, + save_dir=str(self.snap_dir), + prefix=self.prefix, + keep_shutter_open_channels=True, + keep_shutter_open_slices=True, + ) + + self._check_abort() + + # set acquisition order to channel-first + self.settings["slicesFirst"] = False + self.settings["acqOrderMode"] = 0 # TIME_POS_SLICE_CHANNEL + + # acquire images + stack = self._acquire() + + # Cleanup acquisition by closing window, converting to zarr, and deleting 
temp directory + self._cleanup_acq() + + # Reconstruct snapped images + self._check_abort() + self.n_slices = stack.shape[2] + birefringence, phase, scale = self._reconstruct() + self._check_abort() + + # Warn the user about rotations and flips + if self.calib_window.rotate_orientation: + show_warning( + "Applying a +90 degree rotation to the orientation channel. This affects the visualization and saved reconstruction." + ) + if self.calib_window.flip_orientation: + show_warning( + "Applying a flip to orientation channel. This affects the visualization and saved reconstruction." + ) + + # Warn user about mismatched scales + recon_scale = np.array( + (self.calib_window.z_step,) + + 2 * (self.calib_window.ps / self.calib_window.mag,) + ) + _check_scale_mismatch(recon_scale, scale) + + logging.info("Finished Acquisition") + logging.debug("Finished Acquisition") + + # Emit the images and let thread know function is finished + self.bire_image_emitter.emit((birefringence, scale)) + self.phase_image_emitter.emit((phase, scale)) + + def _check_exposure(self) -> None: + """ + Check that all LF channels have the same exposure settings. If not, abort Acquisition. + """ + # parse exposure times + channel_exposures = [] + for channel in self.settings["channels"]: + channel_exposures.append(channel["exposure"]) + logging.debug(f"Verifying exposure times: {channel_exposures}") + channel_exposures = np.array(channel_exposures) + # check if exposure times are equal + if not np.all(channel_exposures == channel_exposures[0]): + error_exposure_msg = ( + f"The MDA exposure times are not equal! Aborting Acquisition.\n" + f"Please manually set the exposure times to the same value from the MDA menu." + ) + + raise ValueError(error_exposure_msg) + + self._check_abort() + + def _acquire(self) -> np.ndarray: + """ + Acquire images. + + Returns + ------- + stack: (nd-array) Dimensions are (C, Z, Y, X). Z=1 for 2D acquisition. 
+ """ + # check if exposure times are the same + self._check_exposure() + + # Acquire from MDA settings uses MM MDA GUI + # Returns (1, 4/5, Z, Y, X) array + stack = acquire_from_settings( + self.calib_window.mm, + self.settings, + grab_images=True, + restore_settings=True, + ) + self._check_abort() + + return stack + + def _reconstruct(self): + """ + Method to reconstruct. First need to initialize the reconstructor given + what type of acquisition it is (birefringence only skips a lot of heavy compute needed for phase). + This function also checks to see if the reconstructor needs to be updated from previous acquisitions + + """ + self._check_abort() + + # Create config and i/o paths + transfer_function_path = Path(self.snap_dir) / "transfer_function.zarr" + reconstruction_path = Path(self.snap_dir) / "reconstruction.zarr" + input_data_path = Path(self.latest_out_path) / "0" / "0" / "0" + + # TODO: skip if config files match + compute_transfer_function_cli( + input_position_dirpath=input_data_path, + config_filepath=self.config_path, + output_dirpath=transfer_function_path, + ) + + apply_inverse_transfer_function_cli( + input_position_dirpaths=[input_data_path], + transfer_function_dirpath=transfer_function_path, + config_filepath=self.config_path, + output_dirpath=reconstruction_path, + unique_id="waveorderAcq", + ) + + # Read reconstruction to pass to emitters + with open_ome_zarr(reconstruction_path, mode="r") as dataset: + czyx_data = dataset["0/0/0/0"][0] + birefringence = czyx_data[0:4] + try: + phase = czyx_data[4] + except: + phase = None + scale = dataset["0/0/0"].scale + + return birefringence, phase, scale + + def _cleanup_acq(self): + # Get display windows + disps = self.dm.getAllDataViewers() + + # loop through display window and find one with matching prefix + for i in range(disps.size()): + disp = disps.get(i) + + # close the datastore and grab the path to where the data is saved + if self.prefix in disp.getName(): + dp = disp.getDataProvider() + 
dir_ = dp.getSummaryMetadata().getDirectory() + prefix = dp.getSummaryMetadata().getPrefix() + closed = False + disp.close() + while not closed: + closed = disp.isClosed() + dp.close() + + # Try to delete the data, sometime it isn't cleaned up quickly enough and will + # return an error. In this case, catch the error and then try to close again (seems to work). + try: + self.latest_out_path = self.snap_dir / "raw_data.zarr" + converter = TIFFConverter( + str(Path(dir_) / prefix), + str(self.latest_out_path), + data_type="ometiff", + grid_layout=False, + ) + converter.run() + shutil.rmtree(Path(dir_) / prefix) + except PermissionError as ex: + dp.close() + break + else: + continue diff --git a/waveorder/calib/Calibration.py b/waveorder/calib/Calibration.py new file mode 100644 index 00000000..ab106580 --- /dev/null +++ b/waveorder/calib/Calibration.py @@ -0,0 +1,1512 @@ +import json +import logging +import os +import time +import warnings +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +from importlib_metadata import version +from iohub import open_ome_zarr +from iohub.ngff_meta import TransformationMeta +from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable +from napari.utils.notifications import show_warning +from scipy.interpolate import interp1d +from scipy.optimize import least_squares +from scipy.stats import linregress + +from waveorder.calib.Optimization import BrentOptimizer, MinScalarOptimizer +from waveorder.io.core_functions import * +from waveorder.io.utils import MockEmitter + +LC_DEVICE_NAME = "MeadowlarkLC" + + +class QLIPP_Calibration: + def __init__( + self, + mmc, + mm, + group="Channel", + lc_control_mode="MM-Retardance", + interp_method="schnoor_fit", + wavelength=532, + optimization="min_scalar", + print_details=True, + ): + """ + + Parameters + ---------- + mmc : object + Micro-Manager core instance + mm : object + Micro-Manager Studio instance + group : str + Name of the Micro-Manager 
channel group used defining LC states [State0, State1, State2, ...] + lc_control_mode : str + Defined the control mode of the liquid crystals. One of the following: + * MM-Retardance: The retardance of the LC is set directly through the Micro-Manager LC device adapter. The + Micro-Manager device adapter determines the corresponding voltage which is sent to the LC. + * MM-Voltage: The CalibrationData class in waveorder uses the LC calibration data to determine the correct + LC voltage for a given retardance. The LC voltage is set through the Micro-Manager LC device adapter. + * DAC: The CalibrationData class in waveorder uses the LC calibration data to determine the correct + LC voltage for a given retardance. The voltage is applied to the IO port of the LC controller through the + TriggerScope DAC outputs. + interp_method : str + Method of interpolating the LC retardance-to-voltage calibration curve. One of the following: + * linear: linear interpolation of retardance as a function of voltage and wavelength + * schnoor_fit: Schnoor fit interpolation as described in https://doi.org/10.1364/AO.408383 + wavelength : float + Measurement wavelength + optimization : str + LC retardance optimization method, 'min_scalar' (default) or 'brent' + print_details : bool + Set verbose option + """ + + # Micro-Manager API + self.mm = mm + self.mmc = mmc + self.snap_manager = mm.getSnapLiveManager() + + # Meadowlark LC Device Adapter Property Names + self.PROPERTIES = { + "LCA": (LC_DEVICE_NAME, "Retardance LC-A [in waves]"), + "LCB": (LC_DEVICE_NAME, "Retardance LC-B [in waves]"), + "LCA-Voltage": (LC_DEVICE_NAME, "Voltage (V) LC-A"), + "LCB-Voltage": (LC_DEVICE_NAME, "Voltage (V) LC-B"), + "LCA-DAC": ("TS_DAC01", "Volts"), + "LCB-DAC": ("TS_DAC02", "Volts"), + "State0": ( + LC_DEVICE_NAME, + "Pal. elem. 00; enter 0 to define; 1 to activate", + ), + "State1": ( + LC_DEVICE_NAME, + "Pal. elem. 01; enter 0 to define; 1 to activate", + ), + "State2": ( + LC_DEVICE_NAME, + "Pal. elem. 
02; enter 0 to define; 1 to activate", + ), + "State3": ( + LC_DEVICE_NAME, + "Pal. elem. 03; enter 0 to define; 1 to activate", + ), + "State4": ( + LC_DEVICE_NAME, + "Pal. elem. 04; enter 0 to define; 1 to activate", + ), + } + self.group = group + + # GUI Emitter + self.intensity_emitter = MockEmitter() + self.plot_sequence_emitter = MockEmitter() + + # Set Mode + # TODO: make sure LC or TriggerScope are loaded in the respective modes + allowed_modes = ["MM-Retardance", "MM-Voltage", "DAC"] + if lc_control_mode not in allowed_modes: + raise ValueError(f"LC control mode must be one of {allowed_modes}") + self.mode = lc_control_mode + self.LC_DAC_conversion = 4 # convert between the input range of LCs (0-20V) and the output range of the DAC (0-5V) + + # Initialize calibration class + allowed_interp_methods = ["schnoor_fit", "linear"] + if interp_method not in allowed_interp_methods: + raise ValueError( + "LC calibration data interpolation method must be one of " + f"{allowed_interp_methods}" + ) + dir_path = mmc.getDeviceAdapterSearchPaths().get( + 0 + ) # MM device adapter directory + self.calib = CalibrationData( + os.path.join(dir_path, "mmgr_dal_MeadowlarkLC.csv"), + interp_method=interp_method, + wavelength=wavelength, + ) + + # Optimizer + if optimization == "min_scalar": + self.optimizer = MinScalarOptimizer(self) + elif optimization == "brent": + self.optimizer = BrentOptimizer(self) + else: + raise ModuleNotFoundError(f"No optimizer named {optimization}") + + # User / Calculated Parameters + self.swing = None + self.wavelength = None + self.lc_bound = None + self.I_Black = None + self.ratio = 1.793 + self.print_details = print_details + self.calib_scheme = "4-State" + + # LC States + self.lca_ext = None + self.lcb_ext = None + self.lca_0 = None + self.lcb_0 = None + self.lca_45 = None + self.lcb_45 = None + self.lca_60 = None + self.lcb_60 = None + self.lca_90 = None + self.lcb_90 = None + self.lca_120 = None + self.lcb_120 = None + self.lca_135 = None + 
self.lcb_135 = None + + # Calibration Outputs + self.I_Ext = None + self.I_Ref = None + self.I_Elliptical = None + self.inten = [] + self.swing0 = None + self.swing45 = None + self.swing60 = None + self.swing90 = None + self.swing120 = None + self.swing135 = None + self.height = None + self.width = None + self.directory = None + self.inst_mat = None + + # Shutter + self.shutter_device = self.mmc.getShutterDevice() + self._auto_shutter_state = None + self._shutter_state = None + + def set_dacs(self, lca_dac, lcb_dac): + self.PROPERTIES["LCA-DAC"] = (f"TS_{lca_dac}", "Volts") + self.PROPERTIES["LCB-DAC"] = (f"TS_{lcb_dac}", "Volts") + + def set_wavelength(self, wavelength): + self.calib.set_wavelength(wavelength) + self.wavelength = self.calib.wavelength + + def set_lc(self, retardance, LC: str): + """ + Set LC state to given retardance in waves + + Parameters + ---------- + retardance : float + Retardance in waves + LC : str + LCA or LCB + + Returns + ------- + + """ + + if self.mode == "MM-Retardance": + set_lc_waves(self.mmc, self.PROPERTIES[f"{LC}"], retardance) + elif self.mode == "MM-Voltage": + volts = self.calib.get_voltage(retardance) + set_lc_voltage(self.mmc, self.PROPERTIES[f"{LC}-Voltage"], volts) + elif self.mode == "DAC": + volts = self.calib.get_voltage(retardance) + dac_volts = volts / self.LC_DAC_conversion + set_lc_daq(self.mmc, self.PROPERTIES[f"{LC}-DAC"], dac_volts) + + def get_lc(self, LC: str): + """ + Get LC retardance in waves + + Parameters + ---------- + LC : str + LCA or LCB + + Returns + ------- + LC retardance in waves + """ + + if self.mode == "MM-Retardance": + retardance = get_lc(self.mmc, self.PROPERTIES[f"{LC}"]) + elif self.mode == "MM-Voltage": + volts = get_lc( + self.mmc, self.PROPERTIES[f"{LC}-Voltage"] + ) # returned value is in volts + retardance = self.calib.get_retardance(volts) + elif self.mode == "DAC": + dac_volts = get_lc(self.mmc, self.PROPERTIES[f"{LC}-DAC"]) + volts = dac_volts * self.LC_DAC_conversion + retardance 
= self.calib.get_retardance(volts) + + return retardance + + def define_lc_state(self, state, lca_retardance, lcb_retardance): + """ + Define of the two LCs after calibration + + Parameters + ---------- + state: str + Polarization stage (e.g. State0) + lca_retardance: float + LCA retardance in waves + lcb_retardance: float + LCB retardance in waves + + Returns + ------- + + """ + + if self.mode == "MM-Retardance": + self.set_lc(lca_retardance, "LCA") + self.set_lc(lcb_retardance, "LCB") + define_meadowlark_state(self.mmc, self.PROPERTIES[state]) + elif self.mode == "DAC": + lca_volts = ( + self.calib.get_voltage(lca_retardance) / self.LC_DAC_conversion + ) + lcb_volts = ( + self.calib.get_voltage(lcb_retardance) / self.LC_DAC_conversion + ) + define_config_state( + self.mmc, + self.group, + state, + [self.PROPERTIES["LCA-DAC"], self.PROPERTIES["LCB-DAC"]], + [lca_volts, lcb_volts], + ) + elif self.mode == "MM-Voltage": + lca_volts = self.calib.get_voltage(lca_retardance) + lcb_volts = self.calib.get_voltage(lcb_retardance) + define_config_state( + self.mmc, + self.group, + state, + [ + self.PROPERTIES["LCA-Voltage"], + self.PROPERTIES["LCB-Voltage"], + ], + [lca_volts, lcb_volts], + ) + + def opt_lc(self, x, device_property, reference, normalize=False): + if isinstance(x, list) or isinstance(x, tuple): + x = x[0] + + self.set_lc(x, device_property) + + mean = snap_and_average(self.snap_manager) + + if normalize: + max_ = 65335 + min_ = self.I_Black + + val = (mean - min_) / (max_ - min_) + ref = (reference - min_) / (max_ - min_) + + logging.debug(f"LC-Value: {x}") + logging.debug(f"F-Value:{val - ref}\n") + return val - ref + + else: + logging.debug(str(mean)) + self.intensity_emitter.emit(mean) + self.inten.append(mean - reference) + + return np.abs(mean - reference) + + def opt_lc_cons(self, x, device_property, reference, mode): + self.set_lc(x, device_property) + swing = (self.lca_ext - x) * self.ratio + + if mode == "60": + self.set_lc(self.lcb_ext + swing, 
"LCB") + + if mode == "120": + self.set_lc(self.lcb_ext - swing, "LCB") + + mean = snap_and_average(self.snap_manager) + logging.debug(str(mean)) + + # append to intensity array for plotting later + self.intensity_emitter.emit(mean) + self.inten.append(mean - reference) + + return np.abs(mean - reference) + + def opt_lc_grid(self, a_min, a_max, b_min, b_max, step): + """ + Exhaustive Search method + + Finds the minimum intensity value for a given + grid of LCA,LCB values + + :param a_min: float + Minimum value of LCA + :param a_max: float + Maximum value of LCA + :param b_min: float + Minimum value of LCB + :param b_max: float + Maximum value of LCB + :param step: float + step size of the grid between max/min values + + + :return best_lca: float + LCA value corresponding to lowest mean Intensity + :return best_lcb: float + LCB value corresponding to lowest mean Intensity + :return min_int: float + Lowest value of mean Intensity + """ + + min_int = 65536 + better_lca = -1 + better_lcb = -1 + + # coarse search + for lca in np.arange(a_min, a_max, step): + for lcb in np.arange(b_min, b_max, step): + self.set_lc(lca, "LCA") + self.set_lc(lcb, "LCB") + + # current_int = np.mean(snap_image(calib.mmc)) + current_int = snap_and_average(self.snap_manager) + self.intensity_emitter.emit(current_int) + + if current_int < min_int: + better_lca = lca + better_lcb = lcb + min_int = current_int + logging.debug( + "update (%f, %f, %f)" + % (min_int, better_lca, better_lcb) + ) + + logging.debug("coarse search done") + logging.debug("better lca = " + str(better_lca)) + logging.debug("better lcb = " + str(better_lcb)) + logging.debug("better int = " + str(min_int)) + + best_lca = better_lca + best_lcb = better_lcb + + return best_lca, best_lcb, min_int + + # ========== Optimization wrappers ============= + # ============================================== + def opt_Iext(self): + self.plot_sequence_emitter.emit("Coarse") + logging.info("Calibrating State0 (Extinction)...") + 
logging.debug("Calibrating State0 (Extinction)...") + + set_lc_state(self.mmc, self.group, "State0") + time.sleep(2) + + # Perform exhaustive search with step 0.1 over range: + # 0.01 < LCA < 0.5 + # 0.25 < LCB < 0.75 + step = 0.1 + logging.debug(f"================================") + logging.debug(f"Starting first grid search, step = {step}") + logging.debug(f"================================") + + best_lca, best_lcb, i_ext_ = self.opt_lc_grid( + 0.01, 0.5, 0.25, 0.75, step + ) + + logging.debug("grid search done") + logging.debug("lca = " + str(best_lca)) + logging.debug("lcb = " + str(best_lcb)) + logging.debug("intensity = " + str(i_ext_)) + + self.set_lc(best_lca, "LCA") + self.set_lc(best_lcb, "LCB") + + logging.debug(f"================================") + logging.debug(f"Starting fine search") + logging.debug(f"================================") + + # Perform brent optimization around results of 2nd grid search + # threshold not very necessary here as intensity value will + # vary between exposure/lamp intensities + self.plot_sequence_emitter.emit("Fine") + lca, lcb, I_ext = self.optimizer.optimize( + state="ext", + lca_bound=0.1, + lcb_bound=0.1, + reference=self.I_Black, + thresh=1, + n_iter=5, + ) + + # Set the Extinction state to values output from optimization + self.define_lc_state("State0", lca, lcb) + + self.lca_ext = lca + self.lcb_ext = lcb + self.I_Ext = I_ext + + logging.debug("fine search done") + logging.info(f"LCA State0 (Extinction) = {lca:.3f}") + logging.debug(f"LCA State0 (Extinction) = {lca:.5f}") + logging.info(f"LCB State0 (Extinction) = {lcb:.3f}") + logging.debug(f"LCB State0 (Extinction) = {lcb:.5f}") + logging.info(f"Intensity (Extinction) = {I_ext:.0f}") + logging.debug(f"Intensity (Extinction) = {I_ext:.3f}") + + logging.debug("--------done--------") + logging.info("--------done--------") + + def opt_I0(self): + """ + no optimization performed for this. Simply apply swing and read intensity + This is the same as "Ielliptical". 
Used for both schemes. + :return: float + mean of image + """ + + logging.info("Calibrating State1 (I0)...") + logging.debug("Calibrating State1 (I0)...") + + self.lca_0 = self.lca_ext - self.swing + self.lcb_0 = self.lcb_ext + self.set_lc(self.lca_0, "LCA") + self.set_lc(self.lcb_0, "LCB") + + self.define_lc_state("State1", self.lca_0, self.lcb_0) + intensity = snap_and_average(self.snap_manager) + self.I_Elliptical = intensity + self.swing0 = np.sqrt( + (self.lcb_0 - self.lcb_ext) ** 2 + (self.lca_0 - self.lca_ext) ** 2 + ) + + logging.info(f"LCA State1 (I0) = {self.lca_0:.3f}") + logging.debug(f"LCA State1 (I0) = {self.lca_0:.5f}") + logging.info(f"LCB State1 (I0) = {self.lcb_0:.3f}") + logging.debug(f"LCB State1 (I0) = {self.lcb_0:.5f}") + logging.info(f"Intensity (I0) = {intensity:.0f}") + logging.debug(f"Intensity (I0) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def opt_I45(self, lca_bound, lcb_bound): + """ + optimized relative to Ielliptical (opt_I90) + Parameters + ---------- + lca_bound + lcb_bound + Returns + ------- + lca, lcb value at optimized state + intensity value at optimized state + """ + self.inten = [] + logging.info("Calibrating State2 (I45)...") + logging.debug("Calibrating State2 (I45)...") + + self.set_lc(self.lca_ext, "LCA") + self.set_lc(self.lcb_ext - self.swing, "LCB") + + self.lca_45, self.lcb_45, intensity = self.optimizer.optimize( + "45", + lca_bound, + lcb_bound, + reference=self.I_Elliptical, + n_iter=5, + thresh=0.01, + ) + + self.define_lc_state("State2", self.lca_45, self.lcb_45) + + self.swing45 = np.sqrt( + (self.lcb_45 - self.lcb_ext) ** 2 + + (self.lca_45 - self.lca_ext) ** 2 + ) + + logging.info(f"LCA State2 (I45) = {self.lca_45:.3f}") + logging.debug(f"LCA State2 (I45) = {self.lca_45:.5f}") + logging.info(f"LCB State2 (I45) = {self.lcb_45:.3f}") + logging.debug(f"LCB State2 (I45) = {self.lcb_45:.5f}") + logging.info(f"Intensity (I45) = {intensity:.0f}") + 
logging.debug(f"Intensity (I45) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def opt_I60(self, lca_bound, lcb_bound): + """ + optimized relative to Ielliptical (opt_I0_4State) + Parameters + ---------- + lca_bound + lcb_bound + Returns + ------- + lca, lcb value at optimized state + intensity value at optimized state + """ + self.inten = [] + + logging.info("Calibrating State2 (I60)...") + logging.debug("Calibrating State2 (I60)...") + + # Calculate Initial Swing for initial guess to optimize around + # Based on ratio calculated from ellpiticity/orientation of LC simulation + swing_ell = np.sqrt( + (self.lca_ext - self.lca_0) ** 2 + (self.lcb_ext - self.lcb_0) ** 2 + ) + lca_swing = np.sqrt(swing_ell**2 / (1 + self.ratio**2)) + lcb_swing = self.ratio * lca_swing + + # Optimization + self.set_lc(self.lca_ext + lca_swing, "LCA") + self.set_lc(self.lcb_ext + lcb_swing, "LCB") + + self.lca_60, self.lcb_60, intensity = self.optimizer.optimize( + "60", + lca_bound, + lcb_bound, + reference=self.I_Elliptical, + n_iter=5, + thresh=0.01, + ) + + self.define_lc_state("State2", self.lca_60, self.lcb_60) + + self.swing60 = np.sqrt( + (self.lcb_60 - self.lcb_ext) ** 2 + + (self.lca_60 - self.lca_ext) ** 2 + ) + + # Print comparison of target swing, target ratio + # Ratio determines the orientation of the elliptical state + # should be close to target. 
Swing will vary to optimize ellipticity + logging.debug( + f"ratio: swing_LCB / swing_LCA = {(self.lcb_ext - self.lcb_60) / (self.lca_ext - self.lca_60):.4f} \ + | target ratio: {-self.ratio}" + ) + logging.debug( + f"total swing = {self.swing60:.4f} | target = {swing_ell}" + ) + + logging.info(f"LCA State2 (I60) = {self.lca_60:.3f}") + logging.debug(f"LCA State2 (I60) = {self.lca_60:.5f}") + logging.info(f"LCB State2 (I60) = {self.lcb_60:.3f}") + logging.debug(f"LCB State2 (I60) = {self.lcb_60:.5f}") + logging.info(f"Intensity (I60) = {intensity:.0f}") + logging.debug(f"Intensity (I60) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def opt_I90(self, lca_bound, lcb_bound): + """ + optimized relative to Ielliptical (opt_I90) + Parameters + ---------- + lca_bound + lcb_bound + Returns + ------- + lca, lcb value at optimized state + intensity value at optimized state + """ + logging.info("Calibrating State3 (I90)...") + logging.debug("Calibrating State3 (I90)...") + + self.inten = [] + + self.set_lc(self.lca_ext + self.swing, "LCA") + self.set_lc(self.lcb_ext, "LCB") + + self.lca_90, self.lcb_90, intensity = self.optimizer.optimize( + "90", + lca_bound, + lcb_bound, + reference=self.I_Elliptical, + n_iter=5, + thresh=0.01, + ) + + self.define_lc_state("State3", self.lca_90, self.lcb_90) + + self.swing90 = np.sqrt( + (self.lcb_90 - self.lcb_ext) ** 2 + + (self.lca_90 - self.lca_ext) ** 2 + ) + + logging.info(f"LCA State3 (I90) = {self.lca_90:.3f}") + logging.debug(f"LCA State3 (I90) = {self.lca_90:.5f}") + logging.info(f"LCB State3 (I90) = {self.lcb_90:.3f}") + logging.debug(f"LCB State3 (I90) = {self.lcb_90:.5f}") + logging.info(f"Intensity (I90) = {intensity:.0f}") + logging.debug(f"Intensity (I90) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def opt_I120(self, lca_bound, lcb_bound): + """ + optimized relative to Ielliptical (opt_I0_4State) + Parameters + 
---------- + lca_bound + lcb_bound + Returns + ------- + lca, lcb value at optimized state + intensity value at optimized state + """ + logging.info("Calibrating State3 (I120)...") + logging.debug("Calibrating State3 (I120)...") + + # Calculate Initial Swing for initial guess to optimize around + # Based on ratio calculated from ellpiticity/orientation of LC simulation + swing_ell = np.sqrt( + (self.lca_ext - self.lca_0) ** 2 + (self.lcb_ext - self.lcb_0) ** 2 + ) + lca_swing = np.sqrt(swing_ell**2 / (1 + self.ratio**2)) + lcb_swing = self.ratio * lca_swing + + # Brent Optimization + self.set_lc(self.lca_ext + lca_swing, "LCA") + self.set_lc(self.lcb_ext - lcb_swing, "LCB") + + self.lca_120, self.lcb_120, intensity = self.optimizer.optimize( + "120", + lca_bound, + lcb_bound, + reference=self.I_Elliptical, + n_iter=5, + thresh=0.01, + ) + + self.define_lc_state("State3", self.lca_120, self.lcb_120) + + self.swing120 = np.sqrt( + (self.lcb_120 - self.lcb_ext) ** 2 + + (self.lca_120 - self.lca_ext) ** 2 + ) + + # Print comparison of target swing, target ratio + # Ratio determines the orientation of the elliptical state + # should be close to target. 
Swing will vary to optimize ellipticity + logging.debug( + f"ratio: swing_LCB / swing_LCA = {(self.lcb_ext - self.lcb_120) / (self.lca_ext - self.lca_120):.4f}\ + | target ratio: {self.ratio}" + ) + logging.debug( + f"total swing = {self.swing120:.4f} | target = {swing_ell}" + ) + logging.info(f"LCA State3 (I120) = {self.lca_120:.3f}") + logging.debug(f"LCA State3 (I120) = {self.lca_120:.5f}") + logging.info(f"LCB State3 (I120) = {self.lcb_120:.3f}") + logging.debug(f"LCB State3 (I120) = {self.lcb_120:.5f}") + logging.info(f"Intensity (I120) = {intensity:.0f}") + logging.debug(f"Intensity (I120) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def opt_I135(self, lca_bound, lcb_bound): + """ + optimized relative to Ielliptical (opt_I0) + Parameters + ---------- + lca_bound + lcb_bound + Returns + ------- + lca, lcb value at optimized state + intensity value at optimized state + """ + + logging.info("Calibrating State4 (I135)...") + logging.debug("Calibrating State4 (I135)...") + self.inten = [] + + self.set_lc(self.lca_ext, "LCA") + self.set_lc(self.lcb_ext + self.swing, "LCB") + + self.lca_135, self.lcb_135, intensity = self.optimizer.optimize( + "135", + lca_bound, + lcb_bound, + reference=self.I_Elliptical, + n_iter=5, + thresh=0.01, + ) + + self.define_lc_state("State4", self.lca_135, self.lcb_135) + + self.swing135 = np.sqrt( + (self.lcb_135 - self.lcb_ext) ** 2 + + (self.lca_135 - self.lca_ext) ** 2 + ) + + logging.info(f"LCA State4 (I135) = {self.lca_135:.3f}") + logging.debug(f"LCA State4 (I135) = {self.lca_135:.5f}") + logging.info(f"LCB State4 (I135) = {self.lcb_135:.3f}") + logging.debug(f"LCB State4 (I135) = {self.lcb_135:.5f}") + logging.info(f"Intensity (I135) = {intensity:.0f}") + logging.debug(f"Intensity (I135) = {intensity:.3f}") + logging.info("--------done--------") + logging.debug("--------done--------") + + def open_shutter(self): + if self.shutter_device == "": # no shutter + input("Please 
manually open the shutter and press ") + else: + self.mmc.setShutterOpen(True) + + def reset_shutter(self): + """ + Return autoshutter to its original state before closing + + Returns + ------- + + """ + if self.shutter_device == "": # no shutter + input( + "Please reset the shutter to its original state and press " + ) + logging.info( + "This is the end of the command-line instructions. You can return to the napari window." + ) + else: + self.mmc.setAutoShutter(self._auto_shutter_state) + self.mmc.setShutterOpen(self._shutter_state) + + def close_shutter_and_calc_blacklevel(self): + self._auto_shutter_state = self.mmc.getAutoShutter() + self._shutter_state = self.mmc.getShutterOpen() + + if self.shutter_device == "": # no shutter + show_warning( + "No shutter found. Please follow the command-line instructions..." + ) + shutter_warning_msg = """ + waveorder could not find an automatic shutter configured through Micro-Manager. + >>> If you would like manually enter the black level, enter an integer or float and press + >>> If you would like to estimate the black level, please close the shutter and press + """ + + in_string = input(shutter_warning_msg) + if in_string.isdigit(): # True if positive integer + self.I_Black = float(in_string) + return + else: + self.mmc.setAutoShutter(False) + self.mmc.setShutterOpen(False) + + n_avg = 20 + avgs = [] + for i in range(n_avg): + mean = snap_and_average(self.snap_manager) + self.intensity_emitter.emit(mean) + avgs.append(mean) + + blacklevel = np.mean(avgs) + self.I_Black = blacklevel + + def calculate_extinction( + self, swing, black_level, intensity_extinction, intensity_elliptical + ): + """ + Returns the extinction ratio, the ratio of the largest and smallest intensities that the imaging system can transmit above background. + See `/docs/calibration-guide.md` for a derivation of this expressions. 
+ """ + return np.round( + (1 / np.sin(np.pi * swing) ** 2) + * (intensity_elliptical - intensity_extinction) + / (intensity_extinction - black_level) + + 1, + 2, + ) + + def calc_inst_matrix(self): + if self.calib_scheme == "4-State": + chi = self.swing + inst_mat = np.array( + [ + [1, 0, 0, -1], + [1, np.sin(2 * np.pi * chi), 0, -np.cos(2 * np.pi * chi)], + [ + 1, + -0.5 * np.sin(2 * np.pi * chi), + np.sqrt(3) * np.cos(np.pi * chi) * np.sin(np.pi * chi), + -np.cos(2 * np.pi * chi), + ], + [ + 1, + -0.5 * np.sin(2 * np.pi * chi), + -np.sqrt(3) / 2 * np.sin(2 * np.pi * chi), + -np.cos(2 * np.pi * chi), + ], + ] + ) + + return inst_mat + + if self.calib_scheme == "5-State": + chi = self.swing * 2 * np.pi + + inst_mat = np.array( + [ + [1, 0, 0, -1], + [1, np.sin(chi), 0, -np.cos(chi)], + [1, 0, np.sin(chi), -np.cos(chi)], + [1, -np.sin(chi), 0, -np.cos(chi)], + [1, 0, -np.sin(chi), -np.cos(chi)], + ] + ) + + return inst_mat + + def write_metadata(self, notes=None): + inst_mat = self.calc_inst_matrix() + inst_mat = np.around(inst_mat, decimals=5).tolist() + + metadata = { + "Summary": { + "Timestamp": str(datetime.now()), + "waveorder version": version("waveorder"), + }, + "Calibration": { + "Calibration scheme": self.calib_scheme, + "Swing (waves)": self.swing, + "Wavelength (nm)": self.wavelength, + "Retardance to voltage interpolation method": self.calib.interp_method, + "LC control mode": self.mode, + "Black level": np.round(self.I_Black, 2), + "Extinction ratio": self.extinction_ratio, + }, + "Notes": notes, + } + + if self.calib_scheme == "4-State": + metadata["Calibration"].update( + { + "Channel names": [f"State{i}" for i in range(4)], + "LC retardance": { + f"LC{i}_{j}": np.around( + getattr(self, f"lc{i.lower()}_{j}"), decimals=6 + ) + for j in ["ext", "0", "60", "120"] + for i in ["A", "B"] + }, + "LC voltage": { + f"LC{i}_{j}": np.around( + self.calib.get_voltage( + getattr(self, f"lc{i.lower()}_{j}") + ), + decimals=4, + ) + for j in ["ext", "0", "60", 
"120"] + for i in ["A", "B"] + }, + "Swing_0": np.around(self.swing0, decimals=3), + "Swing_60": np.around(self.swing60, decimals=3), + "Swing_120": np.around(self.swing120, decimals=3), + "Instrument matrix": inst_mat, + } + ) + + elif self.calib_scheme == "5-State": + metadata["Calibration"].update( + { + "Channel names": [f"State{i}" for i in range(5)], + "LC retardance": { + f"LC{i}_{j}": np.around( + getattr(self, f"lc{i.lower()}_{j}"), decimals=6 + ) + for j in ["ext", "0", "45", "90", "135"] + for i in ["A", "B"] + }, + "LC voltage": { + f"LC{i}_{j}": np.around( + self.calib.get_voltage( + getattr(self, f"lc{i.lower()}_{j}") + ), + decimals=4, + ) + for j in ["ext", "0", "45", "90", "135"] + for i in ["A", "B"] + }, + "Swing_0": np.around(self.swing0, decimals=3), + "Swing_45": np.around(self.swing45, decimals=3), + "Swing_90": np.around(self.swing90, decimals=3), + "Swing_135": np.around(self.swing135, decimals=3), + "Instrument matrix": inst_mat, + } + ) + + with open(self.meta_file, "w") as metafile: + json.dump(metadata, metafile, indent=1) + + def _add_colorbar(self, mappable): + last_axes = plt.gca() + ax = mappable.axes + fig = ax.figure + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + cbar = fig.colorbar(mappable, cax=cax) + plt.sca(last_axes) + return cbar + + def _capture_state(self, state: str, n_avg: int): + """Set the LCs to a certain state, then snap and average over a number of images. + + Parameters + ---------- + state : str + Name of the LC config, e.g. 
`"State0"` + n_avg : int + Number of images to capture and average + + Returns + ------- + ndarray + Average of N images + """ + with suspend_live_sm(self.snap_manager) as sm: + set_lc_state(self.mmc, self.group, state) + imgs = [] + for i in range(n_avg): + imgs.append(snap_and_get_image(sm)) + return np.mean(imgs, axis=0) + + def _plot_bg_images(self, imgs): + img_names = ( + ["Extinction", "0", "60", "120"] + if len(imgs) == 4 + else ["Extinction", "0", "45", "90", 135] + ) + fig, ax = ( + plt.subplots(2, 2, figsize=(20, 20)) + if len(imgs) == 4 + else plt.subplots(3, 2, figsize=(20, 20)) + ) + + img_idx = 0 + for ax1 in range(len(ax[:, 0])): + for ax2 in range(len(ax[0, :])): + if img_idx < len(imgs): + im = ax[ax1, ax2].imshow(imgs[img_idx], "gray") + ax[ax1, ax2].set_title(img_names[img_idx]) + self._add_colorbar(im) + else: + try: + fig.delaxes(ax[2, 1]) + except: + break + plt.show() + + @property + def pol_states(self): + """The polarization states of this calibration. + + Returns + ------- + tuple + Names of all the polarization states. + + Raises + ------ + ValueError + Found illegal calibration state. + """ + if self.calib_scheme == "4-State": + pols = ("ext", "0", "60", "120") + elif self.calib_scheme == "5-State": + pols = ("ext", "0", "45", "90", "135") + else: + raise ValueError( + f"Invalid calibration state: {self.calib_scheme}." + ) + return pols + + @property + def lc_states(self): + """The optimized LC retardance values of this calibration. 
+ + Returns + ------- + dict + `Dict{"LCA": List[ext, ...], "LCB": List[ext, ...]}` + """ + lc_sides = ["A", "B"] + return { + f"LC{lc_side}": [ + self.__getattribute__("lc" + lc_side.lower() + "_" + pol) + for pol in self.pol_states + ] + for lc_side in lc_sides + } + + def capture_bg(self, n_avg, directory): + """ " + This function will capture an image at every state + and save to specified directory + This may throw errors depending on the Micro-Manager config file-- + modify 'State_' to match to the corresponding channel preset in config + :param: n_states (int) + Number of states used for calibration + :param: directory (string) + Directory to save images + """ + + if not os.path.exists(directory): + os.makedirs(directory) + + logging.info("Capturing Background") + self._auto_shutter_state = self.mmc.getAutoShutter() + self._shutter_state = self.mmc.getShutterOpen() + self.mmc.setAutoShutter(False) + self.open_shutter() + + num_states = int(self.calib_scheme[0]) + + # Acquire background data + yx_list = [] + for channel in range(num_states): + logging.debug(f"Capturing Background State{channel}") + yx_list.append(self._capture_state(f"State{channel}", n_avg)) + logging.debug(f"Saving Background State{channel}") + cyx_data = np.array(yx_list) + yx_scale = self.mmc.getPixelSizeUm() + + # Save to zarr + with open_ome_zarr( + os.path.join(directory, "background.zarr"), + layout="hcs", + mode="w", + channel_names=[f"State{i}" for i in range(num_states)], + ) as dataset: + position = dataset.create_position("0", "0", "0") + position.create_zeros( + name="0", + shape=(1, num_states, 1, cyx_data.shape[1], cyx_data.shape[2]), + dtype=np.float32, + chunks=(1, 1, 1, cyx_data.shape[1], cyx_data.shape[2]), + transform=[ + TransformationMeta( + type="scale", scale=[1, 1, 1, yx_scale, yx_scale] + ) + ], + ) + position["0"][0, :, 0] = cyx_data # save to 1C1YX array + + # self._plot_bg_images(np.asarray(imgs)) + self.reset_shutter() + + return cyx_data + + +class 
CalibrationData: + """ + Interpolates LC calibration data between retardance (in waves), voltage (in mV), and wavelength (in nm) + """ + + def __init__(self, path, wavelength=532, interp_method="linear"): + """ + + Parameters + ---------- + path : str + path to .csv calibration data file + wavelength : int + usage wavelength, in nanometers + interp_method : str + interpolation method, either "linear" or "schnoor_fit" (https://doi.org/10.1364/AO.408383) + """ + + header, raw_data = self.read_data(path) + self.calib_wavelengths = np.array( + [i[:3] for i in header[1::3]] + ).astype("double") + + self.wavelength = None + self.V_min = 0.0 + self.V_max = 20.0 + + if interp_method in ["linear", "schnoor_fit"]: + self.interp_method = interp_method + else: + raise ValueError("Unknown interpolation method.") + + self.set_wavelength(wavelength) + if interp_method == "linear": + self.interpolate_data( + raw_data, self.calib_wavelengths + ) # calib_wavelengths is not used, values hardcoded + elif interp_method == "schnoor_fit": + self.fit_params = self.fit_data(raw_data, self.calib_wavelengths) + + self.ret_min = self.get_retardance(self.V_max) + self.ret_max = self.get_retardance(self.V_min) + + @staticmethod + def read_data(path): + """ + Read raw calibration data + + Example calibration data format: + + Voltage(mv),490-A,490-B,Voltage(mv),546-A,546-B,Voltage(mv),630-A,630-B + -,-,-,-,-,-,-,-,- + 0,490,490,0,546,546,0,630,630 + 0,970.6205,924.4288,0,932.2446,891.2008,0,899.6626,857.2885 + 200,970.7488,924.4422,200,932.2028,891.1546,200,899.5908,857.3078 + ... + 20000,40.5954,40.4874,20000,38.6905,39.5402,20000,35.5043,38.1445 + -,-,-,-,-,-,-,-,- + + The first row of the CSV file is a header row, structured as [Voltage (mV), XXX-A, XXX-B, + Voltage (nm), XXX-A, XXX-B, ...] where XXX is the calibration wavelength in nanometers. For example 532-A would + contain measurements of the retardance of LCA as a function of applied voltage at 532 nm. 
The second row + contains dashes in every column. The third row contains "0" in the Voltage column and the calibration wavelength + in the retardance columns, e.g [0, 532, 532]. The following rows contain the LC calibration data. Retardance is + recorded in nanometers and voltage is recorded in millivolts. The last row contains dashes in every column. + + Parameters + ---------- + path : str + path to .csv calibration data file + + Returns + ------- + header : list + Calibration data file header line. Contains information on calibration wavelength + raw_data : ndarray + Calibration data. Voltage is in millivolts and retardance is in nanometers + + """ + with open(path, "r") as f: + header = f.readline().strip().split(",") + + raw_data = np.loadtxt(path, delimiter=",", comments="-", skiprows=3) + return header, raw_data + + @staticmethod + def schnoor_fit(V, a, b1, b2, c, d, e, wavelength): + """ + + Parameters + ---------- + V : float + Voltage in volts + a, b1, b2, c, d, e : float + Fit parameters + wavelength : float + Wavelength in nanometers + + Returns + ------- + retardance : float + Retardance in nanometers + + """ + retardance = a + (b1 + b2 / wavelength**2) / (1 + (V / c) ** d) ** e + + return retardance + + @staticmethod + def schnoor_fit_inv(retardance, a, b1, b2, c, d, e, wavelength): + """ + + Parameters + ---------- + retardance : float + Retardance in nanometers + a, b1, b2, c, d, e : float + Fit parameters + wavelength : float + Wavelength in nanometers + + Returns + ------- + voltage : float + Voltage in volts + + """ + + voltage = c * ( + ((b1 + b2 / wavelength**2) / (retardance - a)) ** (1 / e) - 1 + ) ** (1 / d) + + return voltage + + @staticmethod + def _fun(x, wavelengths, xdata, ydata): + fval = CalibrationData.schnoor_fit(xdata, *x, wavelengths) + res = ydata - fval + return res.flatten() + + def set_wavelength(self, wavelength): + if ( + len(self.calib_wavelengths) == 1 + and wavelength != self.calib_wavelengths + ): + raise ValueError( + 
"Calibration is not provided at this wavelength. " + "Wavelength dependence of LC retardance vs voltage cannot be extrapolated." + ) + + if ( + wavelength < self.calib_wavelengths.min() + or wavelength > self.calib_wavelengths.max() + ): + warnings.warn( + "Specified wavelength is outside of the calibration range. " + "LC retardance vs voltage data will be extrapolated at this wavelength." + ) + + self.wavelength = wavelength + if self.interp_method == "linear": + # Interpolation of calib beyond this range produce strange results. + if self.wavelength < 450: + self.wavelength = 450 + warnings.warn( + "Wavelength is limited to 450-720 nm for this interpolation method." + ) + if self.wavelength > 720: + self.wavelength = 720 + warnings.warn( + "Wavelength is limited to 450-720 nm for this interpolation method." + ) + + def fit_data(self, raw_data, calib_wavelengths): + """ + Perform Schnoor fit on interpolation data + + Parameters + ---------- + raw_data : np.array + LC calibration data in (Voltage, LCA retardance, LCB retardance) format. Only the LCA retardance vs voltage + curve is used. + calib_wavelengths : 1D np.array + Calibration wavelength for each (Voltage, LCA retardance, LCB retardance) set in the calibration data + + Returns + ------- + + """ + xdata = raw_data[:, 0::3] / 1000 # convert to volts + ydata = raw_data[:, 1::3] # in nanometers + + x0 = [10, 1000, 1e7, 1, 10, 0.1] + p = least_squares( + self._fun, + x0, + method="trf", + args=(calib_wavelengths, xdata, ydata), + bounds=((-np.inf, 0, 0, 0, 0, 0), (np.inf,) * 6), + x_scale=[10, 1000, 1e7, 1, 10, 0.1], + ) + + if not p.success: + raise RuntimeError("Schnoor fit to calibration data did not work.") + + y = ydata.flatten() + y_hat = y - p.fun + slope, intercept, r_value, *_ = linregress(y, y_hat) + r_squared = r_value**2 + if r_squared < 0.999: + warnings.warn( + f"Schnoor fit has R2 value of {r_squared:.5f}, fit may not have worked well." 
+ ) + + return p.x + + def interpolate_data(self, raw_data, calib_wavelengths): + """ + Perform linear interpolation of LC calibration data + + Parameters + ---------- + raw_data : np.array + LC calibration data in (Voltage, LCA retardance, LCB retardance) format. Only the LCA retardance vs voltage + curve is used. + calib_wavelengths : 1D np.array + Calibration wavelength for each (Voltage, LCA retardance, LCB retardance) set in the calibration data + These values are not used in this method. Instead, the [490, 546, 630] wavelengths are hardcoded. + + Returns + ------- + + """ + # 0V to 20V step size 1 mV + x_range = np.arange(0, np.max(raw_data[:, ::3]), 1) + + # interpolate calib - only LCA data is used + spline490 = interp1d(raw_data[:, 0], raw_data[:, 1]) + spline546 = interp1d(raw_data[:, 3], raw_data[:, 4]) + spline630 = interp1d(raw_data[:, 6], raw_data[:, 7]) + + if self.wavelength < 490: + new_a1_y = np.interp(x_range, x_range, spline490(x_range)) + new_a2_y = np.interp(x_range, x_range, spline546(x_range)) + + wavelength_new = 490 + (490 - self.wavelength) + fact1 = np.abs(490 - wavelength_new) / (546 - 490) + fact2 = np.abs(546 - wavelength_new) / (546 - 490) + + temp_curve = np.asarray( + [ + [ + i, + 2 * new_a1_y[i] + - (fact1 * new_a1_y[i] + fact2 * new_a2_y[i]), + ] + for i in range(len(new_a1_y)) + ] + ) + self.spline = interp1d(temp_curve[:, 0], temp_curve[:, 1]) + self.curve = self.spline(x_range) + + elif self.wavelength > 630: + new_a1_y = np.interp(x_range, x_range, spline546(x_range)) + new_a2_y = np.interp(x_range, x_range, spline630(x_range)) + + wavelength_new = 630 + (630 - self.wavelength) + fact1 = np.abs(630 - wavelength_new) / (630 - 546) + fact2 = np.abs(546 - wavelength_new) / (630 - 546) + + temp_curve = np.asarray( + [ + [ + i, + 2 * new_a1_y[i] + - (fact1 * new_a1_y[i] + fact2 * new_a2_y[i]), + ] + for i in range(len(new_a1_y)) + ] + ) + self.spline = interp1d(temp_curve[:, 0], temp_curve[:, 1]) + self.curve = 
self.spline(x_range) + + elif 490 < self.wavelength < 546: + new_a1_y = np.interp(x_range, x_range, spline490(x_range)) + new_a2_y = np.interp(x_range, x_range, spline546(x_range)) + + fact1 = np.abs(490 - self.wavelength) / (546 - 490) + fact2 = np.abs(546 - self.wavelength) / (546 - 490) + + temp_curve = np.asarray( + [ + [i, fact1 * new_a1_y[i] + fact2 * new_a2_y[i]] + for i in range(len(new_a1_y)) + ] + ) + self.spline = interp1d(temp_curve[:, 0], temp_curve[:, 1]) + self.curve = self.spline(x_range) + + elif 546 < self.wavelength < 630: + new_a1_y = np.interp(x_range, x_range, spline546(x_range)) + new_a2_y = np.interp(x_range, x_range, spline630(x_range)) + + fact1 = np.abs(546 - self.wavelength) / (630 - 546) + fact2 = np.abs(630 - self.wavelength) / (630 - 546) + + temp_curve = np.asarray( + [ + [i, fact1 * new_a1_y[i] + fact2 * new_a2_y[i]] + for i in range(len(new_a1_y)) + ] + ) + self.spline = interp1d(temp_curve[:, 0], temp_curve[:, 1]) + self.curve = self.spline(x_range) + + elif self.wavelength == 490: + self.curve = spline490(x_range) + self.spline = spline490 + + elif self.wavelength == 546: + self.curve = spline546(x_range) + self.spline = spline546 + + elif self.wavelength == 630: + self.curve = spline630(x_range) + self.spline = spline630 + + else: + raise ValueError(f"Wavelength {self.wavelength} not understood") + + def get_voltage(self, retardance): + """ + + Parameters + ---------- + retardance : float + retardance in waves + + Returns + ------- + voltage + voltage in volts + + """ + + retardance = np.asarray(retardance, dtype="double") + voltage = None + ret_nanometers = retardance * self.wavelength + + if retardance < self.ret_min: + voltage = self.V_max + elif retardance > self.ret_max: + voltage = self.V_min + else: + if self.interp_method == "linear": + voltage = np.abs(self.curve - ret_nanometers).argmin() / 1000 + elif self.interp_method == "schnoor_fit": + voltage = self.schnoor_fit_inv( + ret_nanometers, *self.fit_params, 
self.wavelength + ) + + return voltage + + def get_retardance(self, volts): + """ + + Parameters + ---------- + volts : float + voltage in volts + + Returns + ------- + retardance : float + retardance in waves + + """ + + volts = np.asarray(volts, dtype="double") + ret_nanometers = None + + if volts < self.V_min: + volts = self.V_min + elif volts >= self.V_max: + if self.interp_method == "linear": + volts = ( + self.V_max - 1e-3 + ) # interpolation breaks down at upper boundary + else: + volts = self.V_max + + if self.interp_method == "linear": + ret_nanometers = self.spline(volts * 1000) + elif self.interp_method == "schnoor_fit": + ret_nanometers = self.schnoor_fit( + volts, *self.fit_params, self.wavelength + ) + retardance = ret_nanometers / self.wavelength + + return retardance diff --git a/waveorder/calib/Optimization.py b/waveorder/calib/Optimization.py new file mode 100644 index 00000000..ec636610 --- /dev/null +++ b/waveorder/calib/Optimization.py @@ -0,0 +1,470 @@ +import logging + +import numpy as np +from scipy import optimize + + +class BrentOptimizer: + def __init__(self, calib): + self.calib = calib + + def _check_bounds(self, lca_bound, lcb_bound): + current_lca = self.calib.get_lc("LCA") + current_lcb = self.calib.get_lc("LCB") + + # check that bounds don't exceed range of LC + lca_lower_bound = ( + 0.01 + if (current_lca - lca_bound) <= 0.01 + else current_lca - lca_bound + ) + lca_upper_bound = ( + 1.6 + if (current_lca + lca_bound) >= 1.6 + else current_lca + lca_bound + ) + + lcb_lower_bound = ( + 0.01 + if current_lcb - lcb_bound <= 0.01 + else current_lcb - lcb_bound + ) + lcb_upper_bound = ( + 1.6 if current_lcb + lcb_bound >= 1.6 else current_lcb + lcb_bound + ) + + return ( + lca_lower_bound, + lca_upper_bound, + lcb_lower_bound, + lcb_upper_bound, + ) + + def opt_lca( + self, + cost_function, + lower_bound, + upper_bound, + reference, + cost_function_args, + ): + xopt, fval, ierr, numfunc = optimize.fminbound( + cost_function, + 
x1=lower_bound, + x2=upper_bound, + disp=0, + args=cost_function_args, + full_output=True, + ) + + lca = xopt + lcb = self.calib.get_lc(self.calib.mmc, self.calib.PROPERTIES["LCA"]) + abs_intensity = fval + reference + difference = fval / reference * 100 + + logging.debug("\tOptimize lca ...") + logging.debug(f"\tlca = {lca:.5f}") + logging.debug(f"\tlcb = {lcb:.5f}") + logging.debug(f"\tIntensity = {abs_intensity}") + logging.debug(f"\tIntensity Difference = {difference:.4f}%") + + return [lca, lcb, abs_intensity, difference] + + def opt_lcb( + self, + cost_function, + lower_bound, + upper_bound, + reference, + cost_function_args, + ): + xopt, fval, ierr, numfunc = optimize.fminbound( + cost_function, + x1=lower_bound, + x2=upper_bound, + disp=0, + args=cost_function_args, + full_output=True, + ) + + lca = self.calib.get_lc(self.calib.mmc, self.calib.PROPERTIES["LCA"]) + lcb = xopt + abs_intensity = fval + reference + difference = fval / reference * 100 + + logging.debug("\tOptimize lcb ...") + logging.debug(f"\tlca = {lca:.5f}") + logging.debug(f"\tlcb = {lcb:.5f}") + logging.debug(f"\tIntensity = {abs_intensity}") + logging.debug(f"\tIntensity Difference = {difference:.4f}%") + + return [lca, lcb, abs_intensity, difference] + + def optimize(self, state, lca_bound, lcb_bound, reference, thresh, n_iter): + converged = False + iteration = 1 + self.calib.inten = [] + optimal = [] + + while not converged: + logging.debug(f"iteration: {iteration}") + + ( + lca_lower_bound, + lca_upper_bound, + lcb_lower_bound, + lcb_upper_bound, + ) = self._check_bounds(lca_bound, lcb_bound) + + if state == "ext": + results_lca = self.opt_lca( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + (self.calib.PROPERTIES["LCA"], reference), + ) + + self.calib.set_lc(self.calib.mmc, results_lca[0], "LCA") + + optimal.append(results_lca) + + results_lcb = self.opt_lcb( + self.calib.opt_lc, + lcb_lower_bound, + lcb_upper_bound, + reference, + 
(self.calib.PROPERTIES["LCB"], reference), + ) + + self.calib.set_lc(self.calib.mmc, results_lca[1], "LCB") + + optimal.append(results_lcb) + + results = results_lcb + + if state == "45" or state == "135": + results = self.opt_lcb( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + (self.calib.PROPERTIES["LCB"], reference), + ) + + optimal.append(results) + + if state == "60": + results = self.opt_lca( + self.calib.opt_lc_cons, + lca_lower_bound, + lca_upper_bound, + reference, + (reference, "60"), + ) + + swing = (self.calib.lca_ext - results[0]) * self.calib.ratio + lca = results[0] + lcb = self.calib.lcb_ext + swing + + optimal.append([lca, lcb, results[2], results[3]]) + + if state == "90": + results = self.opt_lca( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + (self.calib.PROPERTIES["LCA"], reference), + ) + + optimal.append(results) + + if state == "120": + results = self.opt_lca( + self.calib.opt_lc_cons, + lca_lower_bound, + lca_upper_bound, + reference, + (reference, "120"), + ) + + swing = (self.calib.lca_ext - results[0]) * self.calib.ratio + lca = results[0] + lcb = self.calib.lcb_ext - swing + + optimal.append([lca, lcb, results[2], results[3]]) + + # if both LCA and LCB meet threshold, stop + if results[3] <= thresh: + converged = True + optimal = np.asarray(optimal) + + return optimal[-1, 0], optimal[-1, 1], optimal[-1, 2] + + # if loop preforms more than n_iter iterations, stop + elif iteration >= n_iter: + logging.debug( + f"Exceeded {n_iter} Iterations: Search discontinuing" + ) + + converged = True + optimal = np.asarray(optimal) + opt = np.where(optimal == np.min(np.abs(optimal[:, 0])))[0] + + logging.debug( + f"Lowest Inten: {optimal[opt, 0]}, lca = {optimal[opt, 1]}, lcb = {optimal[opt, 2]}" + ) + + return optimal[-1, 0], optimal[-1, 1], optimal[-1, 2] + + iteration += 1 + + +class MinScalarOptimizer: + def __init__(self, calib): + self.calib = calib + + def _check_bounds(self, lca_bound, 
lcb_bound): + current_lca = self.calib.get_lc("LCA") + current_lcb = self.calib.get_lc("LCB") + + if self.calib.mode == "voltage": + # check that bounds don't exceed range of LC + lca_lower_bound = ( + 0.01 + if (current_lca - lca_bound) <= 0.01 + else current_lca - lca_bound + ) + lca_upper_bound = ( + 2.2 + if (current_lca + lca_bound) >= 2.2 + else current_lca + lca_bound + ) + + lcb_lower_bound = ( + 0.01 + if current_lcb - lcb_bound <= 0.01 + else current_lcb - lcb_bound + ) + lcb_upper_bound = ( + 2.2 + if current_lcb + lcb_bound >= 2.2 + else current_lcb + lcb_bound + ) + + else: + # check that bounds don't exceed range of LC + lca_lower_bound = ( + 0.01 + if (current_lca - lca_bound) <= 0.01 + else current_lca - lca_bound + ) + lca_upper_bound = ( + 1.6 + if (current_lca + lca_bound) >= 1.6 + else current_lca + lca_bound + ) + + lcb_lower_bound = ( + 0.01 + if current_lcb - lcb_bound <= 0.01 + else current_lcb - lcb_bound + ) + lcb_upper_bound = ( + 1.6 + if current_lcb + lcb_bound >= 1.6 + else current_lcb + lcb_bound + ) + + return ( + lca_lower_bound, + lca_upper_bound, + lcb_lower_bound, + lcb_upper_bound, + ) + + def opt_lca( + self, + cost_function, + lower_bound, + upper_bound, + reference, + cost_function_args, + ): + res = optimize.minimize_scalar( + cost_function, + bounds=(lower_bound, upper_bound), + method="bounded", + args=cost_function_args, + ) + + lca = res.x + lcb = self.calib.get_lc("LCB") + abs_intensity = res.fun + reference + difference = res.fun / reference * 100 + + logging.debug("\tOptimize lca ...") + logging.debug(f"\tlca = {lca:.5f}") + logging.debug(f"\tlcb = {lcb:.5f}") + logging.debug(f"\tIntensity = {abs_intensity}") + logging.debug(f"\tIntensity Difference = {difference:.4f}%") + + return [lca, lcb, abs_intensity, difference] + + def opt_lcb( + self, + cost_function, + lower_bound, + upper_bound, + reference, + cost_function_args, + ): + res = optimize.minimize_scalar( + cost_function, + bounds=(lower_bound, upper_bound), + 
method="bounded", + args=cost_function_args, + ) + + lca = self.calib.get_lc("LCA") + lcb = res.x + abs_intensity = res.fun + reference + difference = res.fun / reference * 100 + + logging.debug("\tOptimize lcb ...") + logging.debug(f"\tlca = {lca:.5f}") + logging.debug(f"\tlcb = {lcb:.5f}") + logging.debug(f"\tIntensity = {abs_intensity}") + logging.debug(f"\tIntensity Difference = {difference:.4f}%") + + return [lca, lcb, abs_intensity, difference] + + def optimize( + self, state, lca_bound, lcb_bound, reference, thresh=None, n_iter=None + ): + ( + lca_lower_bound, + lca_upper_bound, + lcb_lower_bound, + lcb_upper_bound, + ) = self._check_bounds(lca_bound, lcb_bound) + + if state == "ext": + optimal = [] + + results_lca = self.opt_lca( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + ("LCA", reference), + ) + + self.calib.set_lc(results_lca[0], "LCA") + + optimal.append(results_lca) + + results_lcb = self.opt_lcb( + self.calib.opt_lc, + lcb_lower_bound, + lcb_upper_bound, + reference, + ("LCB", reference), + ) + + self.calib.set_lc(results_lcb[1], "LCB") + + optimal.append(results_lcb) + + # ============BEGIN FINE SEARCH================= + + logging.debug(f"\n\tBeginning Finer Search\n") + lca_lower_bound = results_lcb[0] - 0.01 + lca_upper_bound = results_lcb[0] + 0.01 + lcb_lower_bound = results_lcb[1] - 0.01 + lcb_upper_bound = results_lcb[1] + 0.01 + + results_lca = self.opt_lca( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + ("LCA", reference), + ) + + self.calib.set_lc(results_lca[0], "LCA") + + optimal.append(results_lca) + + results_lcb = self.opt_lcb( + self.calib.opt_lc, + lcb_lower_bound, + lcb_upper_bound, + reference, + ("LCB", reference), + ) + + self.calib.set_lc(results_lcb[1], "LCB") + + optimal.append(results_lcb) + + # Sometimes this optimization can drift away from the minimum, + # this makes sure we use the lowest iteration + optimal = np.asarray(optimal) + opt = np.where(optimal == 
np.min(optimal[:][2]))[0] + + lca = float(optimal[opt][0][0]) + lcb = float(optimal[opt][0][1]) + results = optimal[opt][0] + + if state == "45" or state == "135": + results = self.opt_lcb( + self.calib.opt_lc, + lcb_lower_bound, + lcb_upper_bound, + reference, + ("LCB", reference), + ) + + lca = results[0] + lcb = results[1] + + if state == "60": + results = self.opt_lca( + self.calib.opt_lc_cons, + lca_lower_bound, + lca_upper_bound, + reference, + ("LCA", reference, "60"), + ) + + swing = (self.calib.lca_ext - results[0]) * self.calib.ratio + lca = results[0] + lcb = self.calib.lcb_ext + swing + + if state == "90": + results = self.opt_lca( + self.calib.opt_lc, + lca_lower_bound, + lca_upper_bound, + reference, + ("LCA", reference), + ) + lca = results[0] + lcb = results[1] + + if state == "120": + results = self.opt_lca( + self.calib.opt_lc_cons, + lca_lower_bound, + lca_upper_bound, + reference, + ("LCB", reference, "120"), + ) + + swing = (self.calib.lca_ext - results[0]) * self.calib.ratio + lca = results[0] + lcb = self.calib.lcb_ext - swing + + return lca, lcb, results[2] diff --git a/waveorder/calib/__init__.py b/waveorder/calib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/waveorder/calib/calibration_workers.py b/waveorder/calib/calibration_workers.py new file mode 100644 index 00000000..35b148e1 --- /dev/null +++ b/waveorder/calib/calibration_workers.py @@ -0,0 +1,463 @@ +from __future__ import annotations + +import json +import logging +from pathlib import Path + +# type hint/check +from typing import TYPE_CHECKING + +import numpy as np +from iohub import open_ome_zarr +from napari.qt.threading import WorkerBase, WorkerBaseSignals, thread_worker +from qtpy.QtCore import Signal + +from waveorder.calib.Calibration import LC_DEVICE_NAME +from waveorder.cli import settings +from waveorder.cli.apply_inverse_transfer_function import ( + apply_inverse_transfer_function_cli, +) +from waveorder.cli.compute_transfer_function import ( + 
compute_transfer_function_cli, +) +from waveorder.io.core_functions import set_lc_state, snap_and_average +from waveorder.io.metadata_reader import MetadataReader +from waveorder.io.utils import MockEmitter, add_index_to_path, model_to_yaml + +# avoid runtime import error +if TYPE_CHECKING: + pass + + from waveorder.calib.Calibration import QLIPP_Calibration + from waveorder.plugin.main_widget import MainWidget + + +class CalibrationSignals(WorkerBaseSignals): + """ + Custom Signals class that includes napari native signals + """ + + progress_update = Signal(tuple) + extinction_update = Signal(str) + intensity_update = Signal(object) + calib_assessment = Signal(str) + calib_assessment_msg = Signal(str) + calib_file_emit = Signal(Path) + plot_sequence_emit = Signal(str) + lc_states = Signal(tuple) + aborted = Signal() + + +class BackgroundSignals(WorkerBaseSignals): + """ + Custom Signals class that includes napari native signals + """ + + bg_image_emitter = Signal(tuple) + bire_image_emitter = Signal(tuple) + bg_path_update_emitter = Signal(Path) + aborted = Signal() + + +class CalibrationWorkerBase(WorkerBase): + """ + Base class for creating calibration workers. + """ + + def __init_subclass__(cls, signals: WorkerBaseSignals): + """Called when creating calibration worker classes. + + Parameters + ---------- + signals : WorkerBaseSignals + Qt Signals class for the created worker class to send data across threads. + """ + super().__init_subclass__() + cls.signals = signals + + def __init__(self, calib_window: MainWidget, calib: QLIPP_Calibration): + """Initialize the worker object. + + Parameters + ---------- + calib_window : MainWidget + The waveorder plugin's main GUI widget object containing metadata input. + calib : QLIPP_Calibration + waveorder calibration backend object. + """ + super().__init__(SignalsClass=self.signals) + self.calib_window = calib_window + self.calib = calib + + def _check_abort(self): + """ + Called if the user presses the STOP button. 
+ Needs to be checked after every major step to stop the process + """ + if self.abort_requested: + self.aborted.emit() + raise TimeoutError("Stop Requested.") + + def _write_meta_file(self, meta_file: str): + self.calib.meta_file = meta_file + self.calib.write_metadata( + notes=self.calib_window.ui.le_notes_field.text() + ) + + +class CalibrationWorker(CalibrationWorkerBase, signals=CalibrationSignals): + """ + Class to execute calibration + """ + + def __init__(self, calib_window, calib): + super().__init__(calib_window, calib) + + def work(self): + """ + Runs the full calibration algorithm and emits necessary signals. + """ + + self.plot_sequence_emit.emit("Coarse") + self.calib.intensity_emitter = self.intensity_update + self.calib.plot_sequence_emitter = self.plot_sequence_emit + self.progress_update.emit((1, "Calculating Blacklevel...")) + self._check_abort() + + logging.info("Calculating Black Level ...") + logging.debug("Calculating Black Level ...") + self.calib.close_shutter_and_calc_blacklevel() + + # Calculate Blacklevel + logging.info(f"Black Level: {self.calib.I_Black:.0f}\n") + logging.debug(f"Black Level: {self.calib.I_Black:.0f}\n") + + self._check_abort() + self.progress_update.emit((10, "Calibrating Extinction State...")) + + # Open shutter + self.calib.open_shutter() + + # Set LC Wavelength: + self.calib.set_wavelength(int(self.calib_window.wavelength)) + if self.calib_window.calib_mode == "MM-Retardance": + self.calib_window.mmc.setProperty( + LC_DEVICE_NAME, "Wavelength", self.calib_window.wavelength + ) + + self._check_abort() + + # Optimize States + ( + self._calibrate_4state() + if self.calib_window.calib_scheme == "4-State" + else self._calibrate_5state() + ) + + # Reset shutter autoshutter + self.calib.reset_shutter() + + # Calculate Extinction + extinction_ratio = self.calib.calculate_extinction( + self.calib.swing, + self.calib.I_Black, + self.calib.I_Ext, + self.calib.I_Elliptical, + ) + self._check_abort() + + # Update main GUI with 
extinction ratio + self.calib.extinction_ratio = extinction_ratio + self.extinction_update.emit(str(extinction_ratio)) + + # determine metadata filename + meta_file = ( + Path(self.calib_window.directory) / "calibration_metadata.txt" + ) + meta_file = add_index_to_path(meta_file) + + # Write Metadata + self._write_meta_file(meta_file) + self.calib_file_emit.emit(self.calib.meta_file) + self.progress_update.emit((100, "Finished")) + + self._check_abort() + + # Perform calibration assessment based on retardance values + self._assess_calibration() + + self._check_abort() + + # Emit calibrated LC states for plotting + self.lc_states.emit((self.calib.pol_states, self.calib.lc_states)) + + logging.info("\n=======Finished Calibration=======\n") + logging.info(f"EXTINCTION = {extinction_ratio:.2f}") + logging.debug("\n=======Finished Calibration=======\n") + logging.debug(f"EXTINCTION = {extinction_ratio:.2f}") + + def _calibrate_4state(self): + """ + Run through the 4-state calibration algorithm + """ + + search_radius = np.min((self.calib.swing / self.calib.ratio, 0.05)) + + self.calib.calib_scheme = "4-State" + + self._check_abort() + + # Optimize Extinction State + self.calib.opt_Iext() + + self._check_abort() + self.progress_update.emit((60, "Calibrating State 1...")) + + # Optimize first elliptical (reference) state + self.calib.opt_I0() + self.progress_update.emit((65, "Calibrating State 2...")) + + self._check_abort() + + # Optimize 60 deg state + self.calib.opt_I60(search_radius, search_radius) + self.progress_update.emit((75, "Calibrating State 3...")) + + self._check_abort() + + # Optimize 120 deg state + self.calib.opt_I120(search_radius, search_radius) + self.progress_update.emit((85, "Writing Metadata...")) + + self._check_abort() + + def _calibrate_5state(self): + search_radius = np.min((self.calib.swing, 0.05)) + + self.calib.calib_scheme = "5-State" + + # Optimize Extinction State + self.calib.opt_Iext() + self.progress_update.emit((50, "Calibrating State 
1...")) + + self._check_abort() + + # Optimize First elliptical state + self.calib.opt_I0() + self.progress_update.emit((55, "Calibrating State 2...")) + + self._check_abort() + + # Optimize 45 deg state + self.calib.opt_I45(search_radius, search_radius) + self.progress_update.emit((65, "Calibrating State 3...")) + + self._check_abort() + + # Optimize 90 deg state + self.calib.opt_I90(search_radius, search_radius) + self.progress_update.emit((75, "Calibrating State 4...")) + + self._check_abort() + + # Optimize 135 deg state + self.calib.opt_I135(search_radius, search_radius) + self.progress_update.emit((85, "Writing Metadata...")) + + self._check_abort() + + def _assess_calibration(self): + """ + Assesses the quality of calibration based off retardance values. + Attempts to determine whether certain optical components are out of place. + """ + + if self.calib.extinction_ratio >= 100: + self.calib_assessment.emit("good") + self.calib_assessment_msg.emit("Successful Calibration") + elif 80 <= self.calib.extinction_ratio < 100: + self.calib_assessment.emit("okay") + self.calib_assessment_msg.emit( + "Successful Calibration, Okay Extinction Ratio" + ) + else: + self.calib_assessment.emit("bad") + message = ( + "Possibilities are: a) linear polarizer and LC are not oriented properly, " + "b) circular analyzer has wrong handedness, " + "c) the condenser is not setup for Kohler illumination, " + "d) a component, such as autofocus dichroic or sample chamber, distorts the polarization state" + ) + + self.calib_assessment_msg.emit("Poor Extinction. " + message) + + +class BackgroundCaptureWorker( + CalibrationWorkerBase, signals=BackgroundSignals +): + """ + Class to execute background capture. 
+ """ + + def __init__(self, calib_window, calib): + super().__init__(calib_window, calib) + + def work(self): + # Make the background folder + bg_path = ( + Path(self.calib_window.directory) + / self.calib_window.ui.le_bg_folder.text() + ) + bg_path = add_index_to_path(bg_path) + bg_path.mkdir() + + self._check_abort() + + # capture and return background images + imgs = self.calib.capture_bg(self.calib_window.n_avg, bg_path) + + # build background-specific reconstruction settings + reconstruction_settings = settings.ReconstructionSettings( + input_channel_names=[ + f"State{i}" + for i in range(int(self.calib_window.calib_scheme[0])) + ], + reconstruction_dimension=2, + birefringence=settings.BirefringenceSettings( + transfer_function=settings.BirefringenceTransferFunctionSettings( + swing=self.calib_window.swing + ), + apply_inverse=settings.BirefringenceApplyInverseSettings( + wavelength_illumination=self.calib_window.recon_wavelength + / 1000, + background_path="", + remove_estimated_background=False, + flip_orientation=False, + rotate_orientation=False, + ), + ), + ) + + reconstruction_config_path = bg_path / "reconstruction_settings.yml" + model_to_yaml(reconstruction_settings, reconstruction_config_path) + + input_data_path = bg_path / "background.zarr" / "0" / "0" / "0" + transfer_function_path = bg_path / "transfer_function.zarr" + reconstruction_path = bg_path / "reconstruction.zarr" + + compute_transfer_function_cli( + input_position_dirpath=input_data_path, + config_filepath=reconstruction_config_path, + output_dirpath=transfer_function_path, + ) + + apply_inverse_transfer_function_cli( + input_position_dirpaths=[input_data_path], + transfer_function_dirpath=transfer_function_path, + config_filepath=reconstruction_config_path, + output_dirpath=reconstruction_path, + ) + + # Load reconstructions from file for layers + with open_ome_zarr(reconstruction_path, mode="r") as dataset: + self.retardance = dataset["0/0/0/0"][0, 0, 0] + self.birefringence = 
dataset["0/0/0/0"][0, :, 0] + scale = dataset["0/0/0"].scale + + # Save metadata file and emit imgs + meta_file = bg_path / "polarization_calibration.txt" + self._write_meta_file(meta_file) + + # Update last calibration file + note = self.calib_window.ui.le_notes_field.text() + + with open(self.calib_window.last_calib_meta_file, "r") as file: + current_json = json.load(file) + + old_note = current_json["Notes"] + if old_note is None or old_note == "" or old_note == note: + current_json["Notes"] = note + else: + current_json["Notes"] = old_note + ", " + note + + with open(self.calib_window.last_calib_meta_file, "w") as file: + json.dump(current_json, file, indent=1) + + self._check_abort() + + # Emit background images + background birefringence + self.bg_image_emitter.emit((imgs, scale)) + self.bire_image_emitter.emit( + ((self.retardance, self.birefringence[1]), scale) + ) + + # Emit bg path + self.bg_path_update_emitter.emit(bg_path) + + +@thread_worker +def load_calibration(calib, metadata: MetadataReader): + """ + Sets MM properties based upon calibration metadata file + + + Parameters + ---------- + calib: (object) waveorder Calibration Class + metadata: (object) MetadataReader instance + + Returns + ------- + calib (object) updated waveorder Calibration Class + """ + calib.calib_scheme = metadata.Calibration_scheme + + def _set_calib_attrs(calib, metadata): + """Set the retardance attributes in the waveorder Calibration object""" + if calib.calib_scheme == "4-State": + lc_states = ["ext", "0", "60", "120"] + elif calib.calib_scheme == "5-State": + lc_states = ["ext", "0", "45", "90", "135"] + else: + raise ValueError( + "Invalid calibration scheme in metadata: {calib.calib_scheme}" + ) + for side in ("A", "B"): + retardance_values = metadata.__getattribute__( + "LC" + side + "_retardance" + ) + for i, state in enumerate(lc_states): + # set the retardance value attribute (e.g. 
'lca_0') + retardance_name = "lc" + side.lower() + "_" + state + setattr(calib, retardance_name, retardance_values[i]) + # set the swing value attribute (e.g. 'swing0') + if state != "ext": + swing_name = "swing" + state + setattr(calib, swing_name, metadata.Swing_measured[i - 1]) + + _set_calib_attrs(calib, metadata) + + for state, lca, lcb in zip( + [f"State{i}" for i in range(5)], + metadata.LCA_retardance, + metadata.LCB_retardance, + ): + calib.define_lc_state(state, lca, lcb) + + # Calculate black level after loading these properties + calib.intensity_emitter = MockEmitter() + calib.close_shutter_and_calc_blacklevel() + calib.open_shutter() + set_lc_state(calib.mmc, calib.group, "State0") + calib.I_Ext = snap_and_average(calib.snap_manager) + set_lc_state(calib.mmc, calib.group, "State1") + calib.I_Elliptical = snap_and_average(calib.snap_manager) + calib.reset_shutter() + + yield str( + calib.calculate_extinction( + calib.swing, calib.I_Black, calib.I_Ext, calib.I_Elliptical + ) + ) + + return calib diff --git a/waveorder/cli/apply_inverse_models.py b/waveorder/cli/apply_inverse_models.py new file mode 100644 index 00000000..52f72c3d --- /dev/null +++ b/waveorder/cli/apply_inverse_models.py @@ -0,0 +1,257 @@ +""" +This module converts GUI-level reconstruction calls into library calls +""" + +import numpy as np +import torch + +from waveorder.models import ( + inplane_oriented_thick_pol3d, + isotropic_fluorescent_thick_3d, + isotropic_thin_3d, + phase_thick_3d, +) + + +def radians_to_nanometers(retardance_rad, wavelength_illumination_um): + """ + waveorder returns retardance in radians, while waveorder displays and saves + retardance in nanometers. This function converts from radians to nanometers + using the illumination wavelength (which is internally handled in um + in waveorder). 
+ """ + return retardance_rad * wavelength_illumination_um * 1e3 / (2 * np.pi) + + +def birefringence( + czyx_data, + cyx_no_sample_data, + wavelength_illumination, + recon_dim, + biref_inverse_dict, + transfer_function_dataset, +): + # Load transfer function + intensity_to_stokes_matrix = torch.tensor( + transfer_function_dataset["intensity_to_stokes_matrix"][0, 0, 0] + ) + + # Apply reconstruction + # (retardance, orientation, transmittance, depolarization) + reconstructed_parameters = ( + inplane_oriented_thick_pol3d.apply_inverse_transfer_function( + czyx_data, + intensity_to_stokes_matrix, + cyx_no_sample_data=cyx_no_sample_data, + project_stokes_to_2d=(recon_dim == 2), + **biref_inverse_dict, + ) + ) + + # Convert retardance + retardance = radians_to_nanometers( + reconstructed_parameters[0], wavelength_illumination + ) + + return torch.stack((retardance,) + reconstructed_parameters[1:]) + + +def phase( + czyx_data, + recon_dim, + settings_phase, + transfer_function_dataset, +): + # [phase only, 2] + if recon_dim == 2: + # Load transfer functions + absorption_transfer_function = torch.tensor( + transfer_function_dataset["absorption_transfer_function"][0, 0] + ) + phase_transfer_function = torch.tensor( + transfer_function_dataset["phase_transfer_function"][0, 0] + ) + + # Apply + ( + _, + output, + ) = isotropic_thin_3d.apply_inverse_transfer_function( + czyx_data[0], + absorption_transfer_function, + phase_transfer_function, + **settings_phase.apply_inverse.dict(), + ) + + # [phase only, 3] + elif recon_dim == 3: + # Load transfer functions + real_potential_transfer_function = torch.tensor( + transfer_function_dataset["real_potential_transfer_function"][0, 0] + ) + imaginary_potential_transfer_function = torch.tensor( + transfer_function_dataset["imaginary_potential_transfer_function"][ + 0, 0 + ] + ) + + # Apply + output = phase_thick_3d.apply_inverse_transfer_function( + czyx_data[0], + real_potential_transfer_function, + 
imaginary_potential_transfer_function, + z_padding=settings_phase.transfer_function.z_padding, + **settings_phase.apply_inverse.dict(), + ) + + # Pad to CZYX + while output.ndim != 4: + output = torch.unsqueeze(output, 0) + + return output + + +def birefringence_and_phase( + czyx_data, + cyx_no_sample_data, + wavelength_illumination, + recon_dim, + biref_inverse_dict, + settings_phase, + transfer_function_dataset, +): + # Load birefringence transfer function + intensity_to_stokes_matrix = torch.tensor( + transfer_function_dataset["intensity_to_stokes_matrix"][0, 0, 0] + ) + + # [biref and phase, 2] + if recon_dim == 2: + # Load phase transfer functions + absorption_transfer_function = torch.tensor( + transfer_function_dataset["absorption_transfer_function"][0, 0] + ) + phase_transfer_function = torch.tensor( + transfer_function_dataset["phase_transfer_function"][0, 0] + ) + + # Apply + reconstructed_parameters_2d = ( + inplane_oriented_thick_pol3d.apply_inverse_transfer_function( + czyx_data, + intensity_to_stokes_matrix, + cyx_no_sample_data=cyx_no_sample_data, + project_stokes_to_2d=True, + **biref_inverse_dict, + ) + ) + + reconstructed_parameters_3d = ( + inplane_oriented_thick_pol3d.apply_inverse_transfer_function( + czyx_data, + intensity_to_stokes_matrix, + cyx_no_sample_data=cyx_no_sample_data, + project_stokes_to_2d=False, + **biref_inverse_dict, + ) + ) + + brightfield_3d = reconstructed_parameters_3d[2] + + ( + _, + yx_phase, + ) = isotropic_thin_3d.apply_inverse_transfer_function( + brightfield_3d, + absorption_transfer_function, + phase_transfer_function, + **settings_phase.apply_inverse.dict(), + ) + + # Convert retardance + retardance = radians_to_nanometers( + reconstructed_parameters_2d[0], wavelength_illumination + ) + + output = torch.stack( + (retardance,) + + reconstructed_parameters_2d[1:] + + (torch.unsqueeze(yx_phase, 0),) + ) # CZYX + + # [biref and phase, 3] + elif recon_dim == 3: + # Load phase transfer functions + 
intensity_to_stokes_matrix = torch.tensor( + transfer_function_dataset["intensity_to_stokes_matrix"][0, 0, 0] + ) + # Load transfer functions + real_potential_transfer_function = torch.tensor( + transfer_function_dataset["real_potential_transfer_function"][0, 0] + ) + imaginary_potential_transfer_function = torch.tensor( + transfer_function_dataset["imaginary_potential_transfer_function"][ + 0, 0 + ] + ) + + # Apply + reconstructed_parameters_3d = ( + inplane_oriented_thick_pol3d.apply_inverse_transfer_function( + czyx_data, + intensity_to_stokes_matrix, + cyx_no_sample_data=cyx_no_sample_data, + project_stokes_to_2d=False, + **biref_inverse_dict, + ) + ) + + brightfield_3d = reconstructed_parameters_3d[2] + + zyx_phase = phase_thick_3d.apply_inverse_transfer_function( + brightfield_3d, + real_potential_transfer_function, + imaginary_potential_transfer_function, + z_padding=settings_phase.transfer_function.z_padding, + **settings_phase.apply_inverse.dict(), + ) + + # Convert retardance + retardance = radians_to_nanometers( + reconstructed_parameters_3d[0], wavelength_illumination + ) + + # Save + output = torch.stack( + (retardance,) + reconstructed_parameters_3d[1:] + (zyx_phase,) + ) + return output + + +def fluorescence( + czyx_data, recon_dim, settings_fluorescence, transfer_function_dataset +): + # [fluo, 2] + if recon_dim == 2: + raise NotImplementedError + # [fluo, 3] + elif recon_dim == 3: + # Load transfer functions + optical_transfer_function = torch.tensor( + transfer_function_dataset["optical_transfer_function"][0, 0] + ) + + # Apply + output = ( + isotropic_fluorescent_thick_3d.apply_inverse_transfer_function( + czyx_data[0], + optical_transfer_function, + settings_fluorescence.transfer_function.z_padding, + **settings_fluorescence.apply_inverse.dict(), + ) + ) + # Pad to CZYX + while output.ndim != 4: + output = torch.unsqueeze(output, 0) + + return output diff --git a/waveorder/cli/apply_inverse_transfer_function.py 
b/waveorder/cli/apply_inverse_transfer_function.py new file mode 100644 index 00000000..368cf496 --- /dev/null +++ b/waveorder/cli/apply_inverse_transfer_function.py @@ -0,0 +1,437 @@ +import itertools +import os +import warnings +from functools import partial +from pathlib import Path +from typing import Final + +import click +import numpy as np +import submitit +import torch +import torch.multiprocessing as mp +from iohub import open_ome_zarr + +from waveorder.cli import apply_inverse_models, jobs_mgmt +from waveorder.cli.monitor import monitor_jobs +from waveorder.cli.parsing import ( + config_filepath, + input_position_dirpaths, + output_dirpath, + processes_option, + ram_multiplier, + transfer_function_dirpath, +) +from waveorder.cli.printing import echo_headline, echo_settings +from waveorder.cli.settings import ReconstructionSettings +from waveorder.cli.utils import ( + apply_inverse_to_zyx_and_save, + create_empty_hcs_zarr, +) +from waveorder.io import utils + +JM = jobs_mgmt.JobsManagement() + + +def _check_background_consistency( + background_shape, data_shape, input_channel_names +): + data_cyx_shape = (len(input_channel_names),) + data_shape[3:] + if background_shape != data_cyx_shape: + raise ValueError( + f"Background shape {background_shape} does not match data shape {data_cyx_shape}" + ) + + +def get_reconstruction_output_metadata(position_path: Path, config_path: Path): + # Get non-OME-Zarr plate-level metadata if it's available + plate_metadata = {} + try: + input_plate = open_ome_zarr( + position_path.parent.parent.parent, mode="r" + ) + plate_metadata = dict(input_plate.zattrs) + plate_metadata.pop("plate") + except RuntimeError: + warnings.warn( + "Position is not part of a plate...no plate metadata will be copied." 
+ ) + + # Load the first position to infer dataset information + input_dataset = open_ome_zarr(str(position_path), mode="r") + T, _, Z, Y, X = input_dataset.data.shape + + settings = utils.yaml_to_model(config_path, ReconstructionSettings) + + # Simplify important settings names + recon_biref = settings.birefringence is not None + recon_phase = settings.phase is not None + recon_fluo = settings.fluorescence is not None + recon_dim = settings.reconstruction_dimension + + # Prepare output dataset + channel_names = [] + if recon_biref: + channel_names.append("Retardance") + channel_names.append("Orientation") + channel_names.append("BF") + channel_names.append("Pol") + if recon_phase: + if recon_dim == 2: + channel_names.append("Phase2D") + elif recon_dim == 3: + channel_names.append("Phase3D") + if recon_fluo: + fluor_name = settings.input_channel_names[0] + if recon_dim == 2: + channel_names.append(fluor_name + "_Density2D") + elif recon_dim == 3: + channel_names.append(fluor_name + "_Density3D") + + if recon_dim == 2: + output_z_shape = 1 + elif recon_dim == 3: + output_z_shape = input_dataset.data.shape[2] + + return { + "shape": (T, len(channel_names), output_z_shape, Y, X), + "chunks": (1, 1, 1, Y, X), + "scale": input_dataset.scale, + "channel_names": channel_names, + "dtype": np.float32, + "plate_metadata": plate_metadata, + } + + +def apply_inverse_transfer_function_single_position( + input_position_dirpath: Path, + transfer_function_dirpath: Path, + config_filepath: Path, + output_position_dirpath: Path, + num_processes, + output_channel_names: list[str], +) -> None: + echo_headline("\nStarting reconstruction...") + + # Load datasets + transfer_function_dataset = open_ome_zarr(transfer_function_dirpath) + input_dataset = open_ome_zarr(input_position_dirpath) + output_dataset = open_ome_zarr(output_position_dirpath, mode="r+") + + # Load config file + settings = utils.yaml_to_model(config_filepath, ReconstructionSettings) + + # Check input channel names + if 
not set(settings.input_channel_names).issubset( + input_dataset.channel_names + ): + raise ValueError( + f"Each of the input_channel_names = {settings.input_channel_names} in {config_filepath} must appear in the dataset {input_position_dirpath} which currently contains channel_names = {input_dataset.channel_names}." + ) + + # Find input channel indices + input_channel_indices = [] + for input_channel_name in settings.input_channel_names: + input_channel_indices.append( + input_dataset.channel_names.index(input_channel_name) + ) + + # Find output channel indices + output_channel_indices = [] + for output_channel_name in output_channel_names: + output_channel_indices.append( + output_dataset.channel_names.index(output_channel_name) + ) + + # Find time indices + if settings.time_indices == "all": + time_indices = range(input_dataset.data.shape[0]) + elif isinstance(settings.time_indices, list): + time_indices = settings.time_indices + elif isinstance(settings.time_indices, int): + time_indices = [settings.time_indices] + + # Check for invalid times + time_ubound = input_dataset.data.shape[0] - 1 + if np.max(time_indices) > time_ubound: + raise ValueError( + f"time_indices = {time_indices} includes a time index beyond the maximum index of the dataset = {time_ubound}" + ) + + # Simplify important settings names + recon_biref = settings.birefringence is not None + recon_phase = settings.phase is not None + recon_fluo = settings.fluorescence is not None + recon_dim = settings.reconstruction_dimension + + # Prepare birefringence parameters + if settings.birefringence is not None: + # settings.birefringence has more parameters than waveorder needs, + # so this section converts the settings to a dict and separates the + # waveorder parameters (biref_inverse_dict) from the waveorder + # parameters (cyx_no_sample_data, and wavelength_illumination) + biref_inverse_dict = settings.birefringence.apply_inverse.dict() + + # Resolve background path into array + background_path = 
biref_inverse_dict.pop("background_path") + if background_path != "": + cyx_no_sample_data = utils.load_background(background_path) + _check_background_consistency( + cyx_no_sample_data.shape, + input_dataset.data.shape, + settings.input_channel_names, + ) + else: + cyx_no_sample_data = None + + # Get illumination wavelength for retardance radians -> nanometers conversion + biref_wavelength_illumination = biref_inverse_dict.pop( + "wavelength_illumination" + ) + + # Prepare the apply_inverse_model_function and its arguments + + # [biref only] + if recon_biref and (not recon_phase): + echo_headline("Reconstructing birefringence with settings:") + echo_settings(settings.birefringence) + + # Setup parameters for apply_inverse_to_zyx_and_save + apply_inverse_model_function = apply_inverse_models.birefringence + apply_inverse_args = { + "cyx_no_sample_data": cyx_no_sample_data, + "wavelength_illumination": biref_wavelength_illumination, + "recon_dim": recon_dim, + "biref_inverse_dict": biref_inverse_dict, + "transfer_function_dataset": transfer_function_dataset, + } + + # [phase only] + if recon_phase and (not recon_biref): + echo_headline("Reconstructing phase with settings:") + echo_settings(settings.phase.apply_inverse) + + # Setup parameters for apply_inverse_to_zyx_and_save + apply_inverse_model_function = apply_inverse_models.phase + apply_inverse_args = { + "recon_dim": recon_dim, + "settings_phase": settings.phase, + "transfer_function_dataset": transfer_function_dataset, + } + + # [biref and phase] + if recon_biref and recon_phase: + echo_headline("Reconstructing birefringence and phase with settings:") + echo_settings(settings.birefringence.apply_inverse) + echo_settings(settings.phase.apply_inverse) + + # Setup parameters for apply_inverse_to_zyx_and_save + apply_inverse_model_function = ( + apply_inverse_models.birefringence_and_phase + ) + apply_inverse_args = { + "cyx_no_sample_data": cyx_no_sample_data, + "wavelength_illumination": 
biref_wavelength_illumination, + "recon_dim": recon_dim, + "biref_inverse_dict": biref_inverse_dict, + "settings_phase": settings.phase, + "transfer_function_dataset": transfer_function_dataset, + } + + # [fluo] + if recon_fluo: + echo_headline("Reconstructing fluorescence with settings:") + echo_settings(settings.fluorescence.apply_inverse) + + # Setup parameters for apply_inverse_to_zyx_and_save + apply_inverse_model_function = apply_inverse_models.fluorescence + apply_inverse_args = { + "recon_dim": recon_dim, + "settings_fluorescence": settings.fluorescence, + "transfer_function_dataset": transfer_function_dataset, + } + + # Make the partial function for apply inverse + partial_apply_inverse_to_zyx_and_save = partial( + apply_inverse_to_zyx_and_save, + apply_inverse_model_function, + input_dataset, + output_position_dirpath, + input_channel_indices, + output_channel_indices, + **apply_inverse_args, + ) + + # Multiprocessing logic + if num_processes > 1: + # Loop through T, processing and writing as we go + click.echo( + f"\nStarting multiprocess pool with {num_processes} processes" + ) + with mp.Pool(num_processes) as p: + p.starmap( + partial_apply_inverse_to_zyx_and_save, + itertools.product(time_indices), + ) + else: + for t_idx in time_indices: + partial_apply_inverse_to_zyx_and_save(t_idx) + + # Save metadata at position level + output_dataset.zattrs["settings"] = settings.dict() + + echo_headline(f"Closing {output_position_dirpath}\n") + output_dataset.close() + transfer_function_dataset.close() + input_dataset.close() + + echo_headline( + f"Recreate this reconstruction with:\n$ waveorder apply-inv-tf {input_position_dirpath} {transfer_function_dirpath} -c {config_filepath} -o {output_position_dirpath}" + ) + + +def apply_inverse_transfer_function_cli( + input_position_dirpaths: list[Path], + transfer_function_dirpath: Path, + config_filepath: Path, + output_dirpath: Path, + num_processes: int = 1, + ram_multiplier: float = 1.0, + unique_id: str = "", +) 
-> None: + output_metadata = get_reconstruction_output_metadata( + input_position_dirpaths[0], config_filepath + ) + create_empty_hcs_zarr( + store_path=output_dirpath, + position_keys=[p.parts[-3:] for p in input_position_dirpaths], + **output_metadata, + ) + # Initialize torch num of threads and interoeration operations + if num_processes > 1: + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + + # Estimate resources + with open_ome_zarr(input_position_dirpaths[0]) as input_dataset: + T, C, Z, Y, X = input_dataset["0"].shape + + settings = utils.yaml_to_model(config_filepath, ReconstructionSettings) + gb_ram_request = 0 + gb_per_element = 4 / 2**30 # bytes_per_float32 / bytes_per_gb + voxel_resource_multiplier = 4 + fourier_resource_multiplier = 32 + input_memory = Z * Y * X * gb_per_element + if settings.birefringence is not None: + gb_ram_request += input_memory * voxel_resource_multiplier + if settings.phase is not None: + gb_ram_request += input_memory * fourier_resource_multiplier + if settings.fluorescence is not None: + gb_ram_request += input_memory * fourier_resource_multiplier + + gb_ram_request = np.ceil( + np.max([1, ram_multiplier * gb_ram_request]) + ).astype(int) + cpu_request = np.min([32, num_processes]) + num_jobs = len(input_position_dirpaths) + + # Prepare and submit jobs + echo_headline( + f"Preparing {num_jobs} job{'s, each with' if num_jobs > 1 else ' with'} " + f"{cpu_request} CPU{'s' if cpu_request > 1 else ''} and " + f"{gb_ram_request} GB of memory per CPU." 
+ ) + + name_without_ext = os.path.splitext(Path(output_dirpath).name)[0] + executor_folder = os.path.join( + Path(output_dirpath).parent.absolute(), name_without_ext + "_logs" + ) + executor = submitit.AutoExecutor(folder=Path(executor_folder)) + + executor.update_parameters( + slurm_array_parallelism=np.min([50, num_jobs]), + slurm_mem_per_cpu=f"{gb_ram_request}G", + slurm_cpus_per_task=cpu_request, + slurm_time=60, + slurm_partition="cpu", + timeout_min=jobs_mgmt.JOBS_TIMEOUT, + # more slurm_*** resource parameters here + ) + + jobs = [] + with executor.batch(): + for input_position_dirpath in input_position_dirpaths: + job: Final = executor.submit( + apply_inverse_transfer_function_single_position, + input_position_dirpath, + transfer_function_dirpath, + config_filepath, + output_dirpath / Path(*input_position_dirpath.parts[-3:]), + num_processes, + output_metadata["channel_names"], + ) + jobs.append(job) + echo_headline( + f"{num_jobs} job{'s' if num_jobs > 1 else ''} submitted {'locally' if executor.cluster == 'local' else 'via ' + executor.cluster}." 
+ ) + + doPrint = True # CLI prints Job status when used as cmd line + if ( + unique_id != "" + ): # no unique_id means no job submission info being listened to + JM.start_client() + i = 0 + for j in jobs: + job: submitit.Job = j + job_idx: str = job.job_id + position = input_position_dirpaths[i] + JM.put_Job_in_list( + job, + unique_id, + str(job_idx), + position, + str(executor.folder.absolute()), + ) + i += 1 + JM.send_data_thread() + JM.set_shorter_timeout() + doPrint = False # CLI printing disabled when using GUI + + monitor_jobs(jobs, input_position_dirpaths, doPrint) + + +@click.command() +@input_position_dirpaths() +@transfer_function_dirpath() +@config_filepath() +@output_dirpath() +@processes_option(default=1) +@ram_multiplier() +def apply_inv_tf( + input_position_dirpaths: list[Path], + transfer_function_dirpath: Path, + config_filepath: Path, + output_dirpath: Path, + num_processes, + ram_multiplier: float = 1.0, +) -> None: + """ + Apply an inverse transfer function to a dataset using a configuration file. + + Applies a transfer function to all positions in the list `input-position-dirpaths`, + so all positions must have the same TCZYX shape. + + Appends channels to ./output.zarr, so multiple reconstructions can fill a single store. + + See /examples for example configuration files. 
+ + >> waveorder apply-inv-tf -i ./input.zarr/*/*/* -t ./transfer-function.zarr -c /examples/birefringence.yml -o ./output.zarr + """ + apply_inverse_transfer_function_cli( + input_position_dirpaths, + transfer_function_dirpath, + config_filepath, + output_dirpath, + num_processes, + ram_multiplier, + ) diff --git a/waveorder/cli/compute_transfer_function.py b/waveorder/cli/compute_transfer_function.py new file mode 100644 index 00000000..992301b8 --- /dev/null +++ b/waveorder/cli/compute_transfer_function.py @@ -0,0 +1,236 @@ +from pathlib import Path + +import click +import numpy as np +from iohub.ngff import Position, open_ome_zarr + +from waveorder.cli.parsing import ( + config_filepath, + input_position_dirpaths, + output_dirpath, +) +from waveorder.cli.printing import echo_headline, echo_settings +from waveorder.cli.settings import ReconstructionSettings +from waveorder.io import utils +from waveorder.models import ( + inplane_oriented_thick_pol3d, + isotropic_fluorescent_thick_3d, + isotropic_thin_3d, + phase_thick_3d, +) + + +def generate_and_save_birefringence_transfer_function(settings, dataset): + """Generates and saves the birefringence transfer function to the dataset, based on the settings. + + Parameters + ---------- + settings: ReconstructionSettings + dataset: NGFF Node + The dataset that will be updated. + """ + echo_headline("Generating birefringence transfer function with settings:") + echo_settings(settings.birefringence.transfer_function) + + # Calculate transfer functions + intensity_to_stokes_matrix = ( + inplane_oriented_thick_pol3d.calculate_transfer_function( + scheme=str(len(settings.input_channel_names)) + "-State", + **settings.birefringence.transfer_function.dict(), + ) + ) + # Save + dataset["intensity_to_stokes_matrix"] = ( + intensity_to_stokes_matrix.cpu().numpy()[None, None, None, ...] 
+ ) + + +def generate_and_save_phase_transfer_function( + settings: ReconstructionSettings, dataset: Position, zyx_shape: tuple +): + """Generates and saves the phase transfer function to the dataset, based on the settings. + + Parameters + ---------- + settings: ReconstructionSettings + dataset: Position + The dataset that will be updated. + zyx_shape : tuple + A tuple of integers specifying the input data's shape in (Z, Y, X) order + """ + echo_headline("Generating phase transfer function with settings:") + echo_settings(settings.phase.transfer_function) + + if settings.reconstruction_dimension == 2: + # Convert zyx_shape and z_pixel_size into yx_shape and z_position_list + settings_dict = settings.phase.transfer_function.dict() + settings_dict["yx_shape"] = [zyx_shape[1], zyx_shape[2]] + settings_dict["z_position_list"] = list( + -(np.arange(zyx_shape[0]) - zyx_shape[0] // 2) + * settings_dict["z_pixel_size"] + ) + + # Remove unused parameters + settings_dict.pop("z_pixel_size") + settings_dict.pop("z_padding") + + # Calculate transfer functions + ( + absorption_transfer_function, + phase_transfer_function, + ) = isotropic_thin_3d.calculate_transfer_function( + **settings_dict, + ) + + # Save + dataset.create_image( + "absorption_transfer_function", + absorption_transfer_function.cpu().numpy()[None, None, ...], + chunks=(1, 1, 1, zyx_shape[1], zyx_shape[2]), + ) + dataset.create_image( + "phase_transfer_function", + phase_transfer_function.cpu().numpy()[None, None, ...], + chunks=(1, 1, 1, zyx_shape[1], zyx_shape[2]), + ) + + elif settings.reconstruction_dimension == 3: + # Calculate transfer functions + ( + real_potential_transfer_function, + imaginary_potential_transfer_function, + ) = phase_thick_3d.calculate_transfer_function( + zyx_shape=zyx_shape, + **settings.phase.transfer_function.dict(), + ) + # Save + dataset.create_image( + "real_potential_transfer_function", + real_potential_transfer_function.cpu().numpy()[None, None, ...], + chunks=(1, 1, 1, 
zyx_shape[1], zyx_shape[2]), + ) + dataset.create_image( + "imaginary_potential_transfer_function", + imaginary_potential_transfer_function.cpu().numpy()[ + None, None, ... + ], + chunks=(1, 1, 1, zyx_shape[1], zyx_shape[2]), + ) + + +def generate_and_save_fluorescence_transfer_function( + settings: ReconstructionSettings, dataset: Position, zyx_shape: tuple +): + """Generates and saves the fluorescence transfer function to the dataset, based on the settings. + + Parameters + ---------- + settings: ReconstructionSettings + dataset: Position + The dataset that will be updated. + zyx_shape : tuple + A tuple of integers specifying the input data's shape in (Z, Y, X) order + """ + echo_headline("Generating fluorescence transfer function with settings:") + echo_settings(settings.fluorescence.transfer_function) + + if settings.reconstruction_dimension == 2: + raise NotImplementedError + elif settings.reconstruction_dimension == 3: + # Calculate transfer functions + optical_transfer_function = ( + isotropic_fluorescent_thick_3d.calculate_transfer_function( + zyx_shape=zyx_shape, + **settings.fluorescence.transfer_function.dict(), + ) + ) + # Save + dataset.create_image( + "optical_transfer_function", + optical_transfer_function.cpu().numpy()[None, None, ...], + chunks=(1, 1, 1, zyx_shape[1], zyx_shape[2]), + ) + + +def compute_transfer_function_cli( + input_position_dirpath: Path, config_filepath: Path, output_dirpath: Path +) -> None: + """CLI command to compute the transfer function given a configuration file path + and a desired output path. 
+ """ + + # Load config file + settings = utils.yaml_to_model(config_filepath, ReconstructionSettings) + + echo_headline( + f"Generating transfer functions and storing in {output_dirpath}\n" + ) + + # Read shape from input dataset + input_dataset = open_ome_zarr( + input_position_dirpath, layout="fov", mode="r" + ) + zyx_shape = input_dataset.data.shape[ + 2: + ] # only loads a single position "0" + + # Check input channel names + if not set(settings.input_channel_names).issubset( + input_dataset.channel_names + ): + raise ValueError( + f"Each of the input_channel_names = {settings.input_channel_names} in {config_filepath} must appear in the dataset {input_position_dirpath} which currently contains channel_names = {input_dataset.channel_names}." + ) + + # Prepare output dataset + output_dataset = open_ome_zarr( + output_dirpath, layout="fov", mode="w", channel_names=["None"] + ) + + # Pass settings to appropriate calculate_transfer_function and save + if settings.birefringence is not None: + generate_and_save_birefringence_transfer_function( + settings, output_dataset + ) + if settings.phase is not None: + generate_and_save_phase_transfer_function( + settings, output_dataset, zyx_shape + ) + if settings.fluorescence is not None: + generate_and_save_fluorescence_transfer_function( + settings, output_dataset, zyx_shape + ) + + # Write settings to metadata + output_dataset.zattrs["settings"] = settings.dict() + + echo_headline(f"Closing {output_dirpath}\n") + output_dataset.close() + + echo_headline( + f"Recreate this transfer function with:\n$ waveorder compute-tf {input_position_dirpath} -c {config_filepath} -o {output_dirpath}" + ) + + +@click.command() +@input_position_dirpaths() +@config_filepath() +@output_dirpath() +def compute_tf( + input_position_dirpaths: list[Path], + config_filepath: Path, + output_dirpath: Path, +) -> None: + """ + Compute a transfer function using a dataset and configuration file. 
+ + Calculates the transfer function based on the shape of the first position + in the list `input-position-dirpaths`. + + See /examples for example configuration files. + + >> waveorder compute-tf -i ./input.zarr/0/0/0 -c ./examples/birefringence.yml -o ./transfer_function.zarr + """ + compute_transfer_function_cli( + input_position_dirpaths[0], config_filepath, output_dirpath + ) diff --git a/waveorder/cli/gui_widget.py b/waveorder/cli/gui_widget.py new file mode 100644 index 00000000..1ec62040 --- /dev/null +++ b/waveorder/cli/gui_widget.py @@ -0,0 +1,57 @@ +import sys + +import click + +try: + from waveorder.plugin import tab_recon +except: + pass + +try: + from qtpy.QtWidgets import QApplication, QStyle, QVBoxLayout, QWidget +except: + pass + +try: + import qdarktheme +except: + pass + +PLUGIN_NAME = "waveorder: Computational Toolkit for Label-Free Imaging" +PLUGIN_ICON = "🔬" + + +@click.command() +def gui(): + """GUI for waveorder: Computational Toolkit for Label-Free Imaging""" + + app = QApplication(sys.argv) + app.setStyle( + "Fusion" + ) # Other options: "Fusion", "Windows", "macOS", "WindowsVista" + try: + qdarktheme.setup_theme("dark") + except: + pass + window = MainWindow() + window.setWindowTitle(PLUGIN_ICON + " " + PLUGIN_NAME + " " + PLUGIN_ICON) + + pixmapi = getattr(QStyle.StandardPixmap, "SP_TitleBarMenuButton") + icon = app.style().standardIcon(pixmapi) + window.setWindowIcon(icon) + + window.show() + sys.exit(app.exec()) + + +class MainWindow(QWidget): + def __init__(self): + super().__init__() + recon_tab = tab_recon.Ui_ReconTab_Form(stand_alone=True) + layout = QVBoxLayout() + self.setLayout(layout) + layout.addWidget(recon_tab.recon_tab_mainScrollArea) + + +if __name__ == "__main__": + gui() diff --git a/waveorder/cli/jobs_mgmt.py b/waveorder/cli/jobs_mgmt.py new file mode 100644 index 00000000..73368a02 --- /dev/null +++ b/waveorder/cli/jobs_mgmt.py @@ -0,0 +1,206 @@ +import json +import os +import socket +import threading +import time 
+from pathlib import Path + +import submitit + +DIR_PATH = os.path.dirname(os.path.realpath(__file__)) +FILE_PATH = os.path.join(DIR_PATH, "main.py") + +SERVER_PORT = 8089 # Choose an available port +JOBS_TIMEOUT = 5 # 5 mins +SERVER_uIDsjobIDs = {} # uIDsjobIDs[uid][jid] = job + + +class JobsManagement: + + def __init__(self, *args, **kwargs): + self.clientsocket = None + self.uIDsjobIDs = {} # uIDsjobIDs[uid][jid] = job + self.DATA_QUEUE = [] + + def check_for_jobID_File(self, jobID, logs_path, extension="out"): + + if Path(logs_path).exists(): + files = os.listdir(logs_path) + try: + for file in files: + if file.endswith(extension): + if jobID in file: + file_path = os.path.join(logs_path, file) + f = open(file_path, "r") + txt = f.read() + f.close() + return txt + except Exception as exc: + print(exc.args) + return "" + + def set_shorter_timeout(self): + self.clientsocket.settimeout(30) + + def start_client(self): + try: + self.clientsocket = socket.socket( + socket.AF_INET, socket.SOCK_STREAM + ) + self.clientsocket.settimeout(300) + self.clientsocket.connect(("localhost", SERVER_PORT)) + self.clientsocket.settimeout(None) + + thread = threading.Thread(target=self.stop_client) + thread.start() + except Exception as exc: + print(exc.args) + + # The stopClient() is called right with the startClient() but does not stop + # and essentially is a wait thread listening and is triggered by either a + # connection or timeout. Based on condition triggered by user, reconstruction + # completion or errors the end goal is to close the socket connection which + # would let the CLI exit. I could break it down to 2 parts but the idea was to + # keep the clientsocket.close() call within one method to make it easier to follow. 
+ def stop_client(self): + try: + time.sleep(2) + while True: + time.sleep(1) + buf = "" + try: + buf = self.clientsocket.recv(1024) + except: + pass + if len(buf) > 0: + if b"\n" in buf: + dataList = buf.split(b"\n") + for data in dataList: + if len(data) > 0: + decoded_string = data.decode() + json_str = str(decoded_string) + json_obj = json.loads(json_str) + u_idx = json_obj["uID"] + job_idx = str(json_obj["jID"]) + cmd = json_obj["command"] + if cmd == "clientRelease": + if self.has_submitted_job(u_idx, job_idx): + self.clientsocket.close() + break + if cmd == "cancel": + if self.has_submitted_job(u_idx, job_idx): + try: + job = self.uIDsjobIDs[u_idx][ + job_idx + ] + job.cancel() + except Exception as exc: + pass # possibility of throwing an exception based on diff. OS + forDeletions = [] + for uID in self.uIDsjobIDs.keys(): + for jID in self.uIDsjobIDs[uID].keys(): + job = self.uIDsjobIDs[uID][jID] + if job.done(): + forDeletions.append((uID, jID)) + for idx in range(len(forDeletions)): + del self.uIDsjobIDs[forDeletions[idx][0]][ + forDeletions[idx][1] + ] + forDeletions = [] + for uID in self.uIDsjobIDs.keys(): + if len(self.uIDsjobIDs[uID].keys()) == 0: + forDeletions.append(uID) + for idx in range(len(forDeletions)): + del self.uIDsjobIDs[forDeletions[idx]] + if len(self.uIDsjobIDs.keys()) == 0: + self.clientsocket.close() + break + except Exception as exc: + self.clientsocket.close() + print(exc.args) + + def check_all_ExpJobs_completion(self, uID): + if uID in SERVER_uIDsjobIDs.keys(): + for jobEntry in SERVER_uIDsjobIDs[uID].keys(): + job: submitit.Job = SERVER_uIDsjobIDs[uID][jobEntry]["job"] + jobBool = SERVER_uIDsjobIDs[uID][jobEntry]["bool"] + if job is not None and job.done() == False: + return False + if jobBool == False: + return False + return True + + def put_Job_completion_in_list( + self, job_bool, uID: str, jID: str, mode="client" + ): + if uID in SERVER_uIDsjobIDs.keys(): + if jID in SERVER_uIDsjobIDs[uID].keys(): + 
SERVER_uIDsjobIDs[uID][jID]["bool"] = job_bool + + def add_data(self, data): + self.DATA_QUEUE.append(data) + + def send_data_thread(self): + thread = threading.Thread(target=self.send_data) + thread.start() + + def send_data(self): + data = "".join(self.DATA_QUEUE) + self.clientsocket.send(data.encode()) + self.DATA_QUEUE = [] + + def put_Job_in_list( + self, + job, + uID: str, + jID: str, + well: str, + log_folder_path: str = "", + mode="client", + ): + try: + well = str(well) + jID = str(jID) + if ".zarr" in well: + wells = well.split(".zarr") + well = wells[1].replace("\\", "-").replace("/", "-")[1:] + if mode == "client": + if uID not in self.uIDsjobIDs.keys(): + self.uIDsjobIDs[uID] = {} + self.uIDsjobIDs[uID][jID] = job + else: + if jID not in self.uIDsjobIDs[uID].keys(): + self.uIDsjobIDs[uID][jID] = job + json_obj = { + uID: {"jID": str(jID), "pos": well, "log": log_folder_path} + } + json_str = json.dumps(json_obj) + "\n" + self.add_data(json_str) + else: + # from server side jobs object entry is a None object + # this will be later checked as completion boolean for a ExpID which might + # have several Jobs associated with it + if uID not in SERVER_uIDsjobIDs.keys(): + SERVER_uIDsjobIDs[uID] = {} + SERVER_uIDsjobIDs[uID][jID] = {} + SERVER_uIDsjobIDs[uID][jID]["job"] = job + SERVER_uIDsjobIDs[uID][jID]["bool"] = False + else: + SERVER_uIDsjobIDs[uID][jID] = {} + SERVER_uIDsjobIDs[uID][jID]["job"] = job + SERVER_uIDsjobIDs[uID][jID]["bool"] = False + except Exception as exc: + print(exc.args) + + def has_submitted_job(self, uID: str, jID: str, mode="client") -> bool: + jID = str(jID) + if mode == "client": + if uID in self.uIDsjobIDs.keys(): + if jID in self.uIDsjobIDs[uID].keys(): + return True + return False + else: + if uID in SERVER_uIDsjobIDs.keys(): + if jID in SERVER_uIDsjobIDs[uID].keys(): + return True + return False diff --git a/waveorder/cli/main.py b/waveorder/cli/main.py new file mode 100644 index 00000000..81346d51 --- /dev/null +++ 
b/waveorder/cli/main.py @@ -0,0 +1,35 @@ +import click + +from waveorder.cli.apply_inverse_transfer_function import apply_inv_tf +from waveorder.cli.compute_transfer_function import compute_tf +from waveorder.cli.reconstruct import reconstruct + +try: + from waveorder.cli.gui_widget import gui +except: + pass + +CONTEXT = {"help_option_names": ["-h", "--help"]} + + +# `waveorder -h` will show subcommands in the order they are added +class NaturalOrderGroup(click.Group): + def list_commands(self, ctx): + return self.commands.keys() + + +@click.group(context_settings=CONTEXT, cls=NaturalOrderGroup) +def cli(): + """\033[92mwaveorder: Computational Toolkit for Label-Free Imaging\033[0m\n""" + + +cli.add_command(reconstruct) +cli.add_command(compute_tf) +cli.add_command(apply_inv_tf) +try: + cli.add_command(gui) +except: + pass + +if __name__ == "__main__": + cli() diff --git a/waveorder/cli/monitor.py b/waveorder/cli/monitor.py new file mode 100644 index 00000000..30a698e7 --- /dev/null +++ b/waveorder/cli/monitor.py @@ -0,0 +1,163 @@ +import shutil +import sys +import time +from pathlib import Path + +import numpy as np +import submitit + + +def _move_cursor_up(n_lines, do_print=True): + if do_print: + sys.stdout.write("\033[F" * n_lines) + + +def _print_status( + jobs, position_dirpaths, elapsed_list, print_indices=None, do_print=True +): + + columns = [15, 30, 40, 50] + + # header + if do_print: + sys.stdout.write( + "\033[K" # clear line + "\033[96mID" # cyan + f"\033[{columns[0]}G WELL " + f"\033[{columns[1]}G STATUS " + f"\033[{columns[2]}G NODE " + f"\033[{columns[3]}G ELAPSED\n" + ) + + if print_indices is None: + print_indices = range(len(jobs)) + + complete_count = 0 + + for i, (job, position_dirpath) in enumerate(zip(jobs, position_dirpaths)): + try: + node_name = job.get_info()["NodeList"] # slowest, so do this first + except: + node_name = "SUBMITTED" + + if job.state == "COMPLETED": + color = "\033[32m" # green + complete_count += 1 + elif job.state == 
"RUNNING": + color = "\033[93m" # yellow + elapsed_list[i] += 1 # inexact timing + else: + color = "\033[91m" # red + + if i in print_indices: + if do_print: + sys.stdout.write( + f"\033[K" # clear line + f"{color}{job.job_id}" + f"\033[{columns[0]}G {'/'.join(position_dirpath.parts[-3:])}" + f"\033[{columns[1]}G {job.state}" + f"\033[{columns[2]}G {node_name}" + f"\033[{columns[3]}G {elapsed_list[i]} s\n" + ) + sys.stdout.flush() + if do_print: + print( + f"\033[32m{complete_count}/{len(jobs)} jobs complete. " + " to move monitor to background. " + " twice to cancel jobs." + ) + + return elapsed_list + + +def _get_jobs_to_print(jobs, num_to_print): + job_indices_to_print = [] + + # if number of jobs is smaller than termanal size, print all + if len(jobs) <= num_to_print: + return list(range(len(jobs))) + + # prioritize incomplete jobs + for i, job in enumerate(jobs): + if not job.done(): + job_indices_to_print.append(i) + if len(job_indices_to_print) == num_to_print: + return job_indices_to_print + + # fill in the rest with complete jobs + for i, job in enumerate(jobs): + job_indices_to_print.append(i) + if len(job_indices_to_print) == num_to_print: + return job_indices_to_print + + # shouldn't reach here + return job_indices_to_print + + +def monitor_jobs( + jobs: list[submitit.Job], position_dirpaths: list[Path], do_print=True +): + """Displays the status of a list of submitit jobs with corresponding paths. + + Parameters + ---------- + jobs : list[submitit.Job] + List of submitit jobs + position_dirpaths : list[Path] + List of corresponding position paths + """ + NON_JOB_LINES = 3 + + if not len(jobs) == len(position_dirpaths): + raise ValueError( + "The number of jobs and position_dirpaths should be the same." 
+ ) + + elapsed_list = [0] * len(jobs) # timer for each job + + # print all jobs once if terminal is too small + if shutil.get_terminal_size().lines - NON_JOB_LINES < len(jobs): + _print_status(jobs, position_dirpaths, elapsed_list, do_print=do_print) + + # main monitor loop + try: + while not all(job.done() for job in jobs): + terminal_lines = shutil.get_terminal_size().lines + num_jobs_to_print = np.min( + [terminal_lines - NON_JOB_LINES, len(jobs)] + ) + + job_indices_to_print = _get_jobs_to_print(jobs, num_jobs_to_print) + + elapsed_list = _print_status( + jobs, + position_dirpaths, + elapsed_list, + job_indices_to_print, + do_print, + ) + + time.sleep(1) + _move_cursor_up(num_jobs_to_print + 2, do_print) + + # Print final status + time.sleep(1) + _print_status(jobs, position_dirpaths, elapsed_list, do_print=do_print) + + # cancel jobs if ctrl+c + except KeyboardInterrupt: + for job in jobs: + job.cancel() + print("All jobs cancelled.\033[97m") + + # Print STDOUT and STDERR for first incomplete job + incomplete_count = 0 + for job in jobs: + if not job.done(): + if (incomplete_count := incomplete_count + 1) == 1: + print("\033[32mSTDOUT") + print(job.stdout()) + print("\033[91mSTDERR") + print(job.stderr()) + + print("\033[97m") # print white diff --git a/waveorder/cli/option_eat_all.py b/waveorder/cli/option_eat_all.py new file mode 100644 index 00000000..1c1f47db --- /dev/null +++ b/waveorder/cli/option_eat_all.py @@ -0,0 +1,47 @@ +import click + + +# Copied directly from https://stackoverflow.com/a/48394004 +# Enables `-i ./input.zarr/*/*/*` +class OptionEatAll(click.Option): + def __init__(self, *args, **kwargs): + self.save_other_options = kwargs.pop("save_other_options", True) + nargs = kwargs.pop("nargs", -1) + assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs) + super(OptionEatAll, self).__init__(*args, **kwargs) + self._previous_parser_process = None + self._eat_all_parser = None + + def add_to_parser(self, parser, ctx): + def parser_process(value, state): + # method 
to hook to the parser.process + done = False + value = [value] + if self.save_other_options: + # grab everything up to the next option + while state.rargs and not done: + for prefix in self._eat_all_parser.prefixes: + if state.rargs[0].startswith(prefix): + done = True + if not done: + value.append(state.rargs.pop(0)) + else: + # grab everything remaining + value += state.rargs + state.rargs[:] = [] + value = tuple(value) + + # call the actual process + self._previous_parser_process(value, state) + + retval = super(OptionEatAll, self).add_to_parser(parser, ctx) + for name in self.opts: + our_parser = parser._long_opt.get(name) or parser._short_opt.get( + name + ) + if our_parser: + self._eat_all_parser = our_parser + self._previous_parser_process = our_parser.process + our_parser.process = parser_process + break + return retval diff --git a/waveorder/cli/parsing.py b/waveorder/cli/parsing.py new file mode 100644 index 00000000..f283921b --- /dev/null +++ b/waveorder/cli/parsing.py @@ -0,0 +1,135 @@ +from pathlib import Path +from typing import Callable + +import click +import torch.multiprocessing as mp +from iohub.ngff import Plate, open_ome_zarr +from natsort import natsorted + +from waveorder.cli.option_eat_all import OptionEatAll + + +def _validate_and_process_paths( + ctx: click.Context, opt: click.Option, value: str +) -> list[Path]: + # Sort and validate the input paths, expanding plates into lists of positions + input_paths = [Path(path) for path in natsorted(value)] + for path in input_paths: + with open_ome_zarr(path, mode="r") as dataset: + if isinstance(dataset, Plate): + plate_path = input_paths.pop() + for position in dataset.positions(): + input_paths.append(plate_path / position[0]) + + return input_paths + + +def _str_to_path(ctx: click.Context, opt: click.Option, value: str) -> Path: + return Path(value) + + +def input_position_dirpaths() -> Callable: + def decorator(f: Callable) -> Callable: + return click.option( + "--input-position-dirpaths", + 
def config_filepath() -> Callable:
    """Return a decorator adding the required ``-c/--config-filepath`` option.

    The option must point to an existing file and is converted to a
    ``pathlib.Path`` by the ``_str_to_path`` callback.
    """

    def decorator(f: Callable) -> Callable:
        option = click.option(
            "--config-filepath",
            "-c",
            required=True,
            type=click.Path(exists=True, file_okay=True, dir_okay=False),
            callback=_str_to_path,
            help="Path to YAML configuration file.",
        )
        return option(f)

    return decorator
def processes_option(default: int | None = None) -> Callable:
    """Return a decorator adding the ``-j/--num_processes`` option.

    Parameters
    ----------
    default : int | None
        Default number of worker processes. ``None`` falls back to
        ``mp.cpu_count()``. (Note: because of the ``or`` below, an explicit
        default of 0 also falls back to ``cpu_count()``.)

    The option's callback rejects values above the machine's CPU count.
    """
    # Fix: the original annotation was ``default: int = None``, which is an
    # invalid type for a None default; ``int | None`` states the contract.

    def check_processes_option(ctx, param, value):
        # Cap the requested worker count at the number of available CPUs.
        max_processes = mp.cpu_count()
        if value > max_processes:
            raise click.BadParameter(
                f"Maximum number of processes is {max_processes}"
            )
        return value

    def decorator(f: Callable) -> Callable:
        return click.option(
            "--num_processes",
            "-j",
            default=default or mp.cpu_count(),
            type=int,
            help="Number of processes to run in parallel.",
            callback=check_processes_option,
        )(f)

    return decorator
@input_position_dirpaths()
@config_filepath()
@output_dirpath()
@processes_option(default=1)
@ram_multiplier()
@unique_id()
def reconstruct(
    input_position_dirpaths,
    config_filepath,
    output_dirpath,
    num_processes,
    ram_multiplier,
    unique_id,
):
    """
    Reconstruct a dataset using a configuration file. This is a
    convenience function for a `compute-tf` call followed by an `apply-inv-tf`
    call.

    Calculates the transfer function based on the shape of the first position
    in the list `input-position-dirpaths`, then applies that transfer function
    to all positions in the list `input-position-dirpaths`, so all positions
    must have the same TCZYX shape.

    See /examples for example configuration files.

    >> waveorder reconstruct -i ./input.zarr/*/*/* -c ./examples/birefringence.yml -o ./output.zarr
    """

    # The transfer function is written next to the output and named after the
    # configuration file: transfer_function_<config-stem>.zarr
    transfer_function_path = output_dirpath.parent / Path(
        "transfer_function_" + config_filepath.stem + ".zarr"
    )

    # Compute the transfer function from the first position only
    compute_transfer_function_cli(
        input_position_dirpaths[0],
        config_filepath,
        transfer_function_path,
    )

    # Apply the inverse transfer function to every input position
    apply_inverse_transfer_function_cli(
        input_position_dirpaths,
        transfer_function_path,
        config_filepath,
        output_dirpath,
        num_processes,
        ram_multiplier,
        unique_id,
    )
class BirefringenceApplyInverseSettings(WavelengthIllumination):
    """Settings for applying the inverse birefringence transfer function.

    ``background_path`` may be "" (background correction disabled) or a
    path to an existing directory of background measurements.
    """

    background_path: Union[str, Path] = ""
    remove_estimated_background: bool = False
    flip_orientation: bool = False
    rotate_orientation: bool = False

    @validator("background_path")
    def check_background_path(cls, v):
        # Empty string is the sentinel for "no background correction".
        if v == "":
            return v

        # Normalize to str (also handles Path inputs). The previous
        # r"{}".format(v) raw-string idiom was an equivalent no-op:
        # the r-prefix on a format template does nothing.
        raw_dir = str(v)
        if not os.path.isdir(raw_dir):
            # Fix: grammar in the error message ("a existing" -> "an existing")
            raise ValueError(f"{v} is not an existing directory")
        return raw_dir
class PhaseTransferFunctionSettings(
    FourierTransferFunctionSettings,
    WavelengthIllumination,
):
    """Settings for computing the phase transfer function."""

    numerical_aperture_illumination: NonNegativeFloat = 0.5
    invert_phase_contrast: bool = False

    @validator("numerical_aperture_illumination")
    def na_ill(cls, v, values):
        # In pydantic v1, a field that failed its own validation is absent
        # from `values`, so `.get` returns None. Guard against that case
        # instead of raising a confusing TypeError on `v > None`.
        n = values.get("index_of_refraction_media")
        if n is not None and v > n:
            raise ValueError(
                f"numerical_aperture_illumination = {v} must be less than or equal to index_of_refraction_media = {n}"
            )
        return v
# Second level settings
# Each modality pairs its transfer-function settings with the settings used
# when applying the inverse transfer function.
class BirefringenceSettings(MyBaseModel):
    transfer_function: BirefringenceTransferFunctionSettings = (
        BirefringenceTransferFunctionSettings()
    )
    apply_inverse: BirefringenceApplyInverseSettings = (
        BirefringenceApplyInverseSettings()
    )


class PhaseSettings(MyBaseModel):
    transfer_function: PhaseTransferFunctionSettings = (
        PhaseTransferFunctionSettings()
    )
    # Phase shares the generic Fourier inverse settings (Tikhonov/TV).
    apply_inverse: FourierApplyInverseSettings = FourierApplyInverseSettings()


class FluorescenceSettings(MyBaseModel):
    transfer_function: FluorescenceTransferFunctionSettings = (
        FluorescenceTransferFunctionSettings()
    )
    # Fluorescence also shares the generic Fourier inverse settings.
    apply_inverse: FourierApplyInverseSettings = FourierApplyInverseSettings()
Please provide a single channel for fluorescence/phase reconstructions." + ) + + return values diff --git a/waveorder/cli/utils.py b/waveorder/cli/utils.py new file mode 100644 index 00000000..850ed441 --- /dev/null +++ b/waveorder/cli/utils.py @@ -0,0 +1,105 @@ +from pathlib import Path +from typing import Tuple + +import click +import numpy as np +import torch +from iohub.ngff import Position, open_ome_zarr +from iohub.ngff_meta import TransformationMeta +from numpy.typing import DTypeLike + + +def create_empty_hcs_zarr( + store_path: Path, + position_keys: list[Tuple[str]], + shape: Tuple[int], + chunks: Tuple[int], + scale: Tuple[float], + channel_names: list[str], + dtype: DTypeLike, + plate_metadata: dict = {}, +) -> None: + """If the plate does not exist, create an empty zarr plate. + + If the plate exists, append positions and channels if they are not + already in the plate. + + Parameters + ---------- + store_path : Path + hcs plate path + position_keys : list[Tuple[str]] + Position keys, will append if not present in the plate. + e.g. [("A", "1", "0"), ("A", "1", "1")] + shape : Tuple[int] + chunks : Tuple[int] + scale : Tuple[float] + channel_names : list[str] + Channel names, will append if not present in metadata. 
+ dtype : DTypeLike + plate_metadata : dict + """ + + # Create plate + output_plate = open_ome_zarr( + str(store_path), layout="hcs", mode="a", channel_names=channel_names + ) + + # Pass metadata + output_plate.zattrs.update(plate_metadata) + + # Create positions + for position_key in position_keys: + position_key_string = "/".join(position_key) + # Check if position is already in the store, if not create it + if position_key_string not in output_plate.zgroup: + position = output_plate.create_position(*position_key) + + _ = position.create_zeros( + name="0", + shape=shape, + chunks=chunks, + dtype=dtype, + transform=[TransformationMeta(type="scale", scale=scale)], + ) + else: + position = output_plate[position_key_string] + + # Check if channel_names are already in the store, if not append them + for channel_name in channel_names: + # Read channel names directly from metadata to avoid race conditions + metadata_channel_names = [ + channel.label for channel in position.metadata.omero.channels + ] + if channel_name not in metadata_channel_names: + position.append_channel(channel_name, resize_arrays=True) + + +def apply_inverse_to_zyx_and_save( + func, + position: Position, + output_path: Path, + input_channel_indices: list[int], + output_channel_indices: list[int], + t_idx: int = 0, + **kwargs, +) -> None: + """Load a zyx array from a Position object, apply a transformation and save the result to file""" + click.echo(f"Reconstructing t={t_idx}") + + # Load data + czyx_uint16_numpy = position.data.oindex[t_idx, input_channel_indices] + + # convert to np.int32 (torch doesn't accept np.uint16), then convert to tensor float32 + czyx_data = torch.tensor(np.int32(czyx_uint16_numpy), dtype=torch.float32) + + # Apply transformation + reconstruction_czyx = func(czyx_data, **kwargs) + + # Write to file + # for c, recon_zyx in enumerate(reconstruction_zyx): + with open_ome_zarr(output_path, mode="r+") as output_dataset: + output_dataset[0].oindex[ + t_idx, 
def ome_tif_reader(
    path: Union[str, List[str]],
) -> List[Tuple[zarr.Array, Dict]]:
    """Read a Micro-Manager OME-TIFF dataset into napari layer tuples.

    Returns one ``(zarr_array, metadata)`` tuple per position. The layer
    name is ``"Pos000_000"`` for single-position datasets; otherwise it is
    the stage-position label with its first two characters stripped.
    """
    reader = read_micromanager(path)
    n_positions = reader.get_num_positions()

    layers = []
    for position_index in range(n_positions):
        if n_positions == 1:
            layer_name = "Pos000_000"
        else:
            layer_name = reader.stage_positions[position_index]["Label"][2:]
        layers.append(
            (reader.get_zarr(position_index), {"name": layer_name})
        )

    return layers
def snap_and_average(snap_manager, display=True):
    """Snap an image and return its mean pixel value.

    The mean is computed on the Java side via the Snap/Live display's
    ImagePlus statistics, so only a single float crosses the bridge.

    Parameters
    ----------
    snap_manager: (object) MM Snap Live Window object
    display: (bool) Whether to show the snap on the Snap Live Window in MM

    Returns
    -------
    mean: (float) mean of snapped image
    """
    snap_manager.snap(display)
    # Wait for the display to refresh so the statistics reflect the new snap.
    time.sleep(0.3)

    statistics = snap_manager.getDisplay().getImagePlus().getStatistics()
    return statistics.umean
def set_lc_voltage(mmc, device_property: tuple, value: float):
    """Set LC retardance by driving the LC with a voltage.

    Parameters
    ----------
    mmc : object
        MM Core object
    device_property : tuple
        (device_name, property_name) pair identifying the LC property
    value : float
        LC voltage in volts. Applied voltage is limited to 20V

    Raises
    ------
    ValueError
        If the requested voltage lies outside the 0-20 V range.
    """
    device_name, prop_name = device_property

    if value > 20.0 or value < 0.0:
        raise ValueError(
            f"Requested LC voltage is {value} V. "
            f"LC voltage must be greater than 0.0 and less than 20.0 V."
        )

    mmc.setProperty(device_name, prop_name, str(value))
    # Brief pause to let the LC settle after the property change.
    time.sleep(20 / 1000)
def get_lc(mmc, device_property: tuple):
    """Return the current LC state in the device property's native units.

    Parameters
    ----------
    mmc : object
        MM Core object
    device_property : tuple
        (device_name, property_name) pair identifying the LC property

    Returns
    -------
    float
        The property value parsed as a float.
    """
    device_name, prop_name = device_property
    return float(mmc.getProperty(device_name, prop_name))
def set_lc_state(mmc, group: str, config: str):
    """Switch the LCs to a named configuration.

    Parameters
    ----------
    mmc : object
        MM Core object
    group : str
        Name of config group
    config : str
        Name of config, e.g. State0
    """
    mmc.setConfig(group, config)
    # Allow the LC 20 ms to settle after the configuration change.
    time.sleep(20 / 1000)
Helps load metadata from different metadata formats and naming conventions + """ + + def __init__(self, path: str): + """ + + Parameters + ---------- + path: full path to calibration metadata + """ + self.metadata_path = path + self.json_metadata = load_json(self.metadata_path) + + self.Timestamp = self.get_summary_calibration_attr("Timestamp") + self.waveorder_version = self.get_summary_calibration_attr( + "waveorder version" + ) + self.Calibration_scheme = self.get_calibration_scheme() + self.Swing = self.get_swing() + self.Wavelength = self.get_summary_calibration_attr("Wavelength (nm)") + self.Black_level = self.get_black_level() + self.Extinction_ratio = self.get_extinction_ratio() + self.Channel_names = self.get_channel_names() + self.LCA_retardance = self.get_lc_retardance("LCA") + self.LCB_retardance = self.get_lc_retardance("LCB") + self.LCA_voltage = self.get_lc_voltage("LCA") + self.LCB_voltage = self.get_lc_voltage("LCB") + self.Swing_measured = self.get_swing_measured() + self.Notes = self.get_notes() + + def get_summary_calibration_attr(self, attr): + try: + val = self.json_metadata["Summary"][attr] + except KeyError: + try: + val = self.json_metadata["Calibration"][attr] + except KeyError: + val = None + return val + + def get_cal_states(self): + if self.Calibration_scheme == "4-State": + states = ["ext", "0", "60", "120"] + elif self.Calibration_scheme == "5-State": + states = ["ext", "0", "45", "90", "135"] + return states + + def get_lc_retardance(self, lc): + """ + + Parameters + ---------- + lc: 'LCA' or 'LCB' + + Returns + ------- + + """ + states = self.get_cal_states() + + val = None + try: + val = [ + self.json_metadata["Calibration"]["LC retardance"][ + f"{lc}_{state}" + ] + for state in states + ] + except KeyError: + states[0] = "Ext" + if lc == "LCA": + val = [ + self.json_metadata["Summary"][ + f"[LCA_{state}, LCB_{state}]" + ][0] + for state in states + ] + elif lc == "LCB": + val = [ + self.json_metadata["Summary"][ + f"[LCA_{state}, 
LCB_{state}]" + ][1] + for state in states + ] + + return val + + def get_lc_voltage(self, lc): + """ + + Parameters + ---------- + lc: 'LCA' or 'LCB' + + Returns + ------- + + """ + states = self.get_cal_states() + + val = None + if "Calibration" in self.json_metadata: + lc_voltage = self.json_metadata["Calibration"]["LC voltage"] + if lc_voltage: + val = [ + self.json_metadata["Calibration"]["LC voltage"][ + f"{lc}_{state}" + ] + for state in states + ] + + return val + + def get_swing(self): + try: + val = self.json_metadata["Calibration"]["Swing (waves)"] + except KeyError: + val = self.json_metadata["Summary"]["Swing (fraction)"] + return val + + def get_swing_measured(self): + states = self.get_cal_states() + try: + val = [ + self.json_metadata["Calibration"][f"Swing_{state}"] + for state in states[1:] + ] + except KeyError: + val = [ + self.json_metadata["Summary"][f"Swing{state}"] + for state in states[1:] + ] + + return val + + def get_calibration_scheme(self): + try: + val = self.json_metadata["Calibration"]["Calibration scheme"] + except KeyError: + val = self.json_metadata["Summary"]["Acquired Using"] + return val + + def get_black_level(self): + try: + val = self.json_metadata["Calibration"]["Black level"] + except KeyError: + val = self.json_metadata["Summary"]["BlackLevel"] + return val + + def get_extinction_ratio(self): + try: + val = self.json_metadata["Calibration"]["Extinction ratio"] + except KeyError: + val = self.json_metadata["Summary"]["Extinction Ratio"] + return val + + def get_channel_names(self): + try: + val = self.json_metadata["Calibration"]["Channel names"] + except KeyError: + val = self.json_metadata["Summary"]["ChNames"] + return val + + def get_notes(self): + try: + val = self.json_metadata["Notes"] + except KeyError: + val = None + return val diff --git a/waveorder/io/utils.py b/waveorder/io/utils.py new file mode 100644 index 00000000..1ebed3fa --- /dev/null +++ b/waveorder/io/utils.py @@ -0,0 +1,173 @@ +import os +import 
def add_index_to_path(path: Path):
    """Return *path* with the smallest unused numeric suffix appended.

    For example:
    './output.txt' -> './output_0.txt' if no other files named './output*.txt' exist.
    './output.txt' -> './output_2.txt' if './output_0.txt' and './output_1.txt' already exist.

    Parameters
    ----------
    path: Path
        Base path to add index to

    Returns
    -------
    Path
    """
    index = 0
    while True:
        candidate = path.parent / f"{path.stem}_{index}{path.suffix}"
        if not candidate.exists():
            return candidate
        index += 1
def model_to_yaml(model, yaml_path: Path) -> None:
    """
    Save a model's dictionary representation to a YAML file.

    Parameters
    ----------
    model : object
        The model object to convert to YAML; must provide a ``dict()`` method.
    yaml_path : Path
        The path to the output YAML file.

    Raises
    ------
    TypeError
        If the `model` object does not have a `dict()` method.

    Notes
    -----
    Fields whose value is None are dropped before writing, so optional
    sections do not appear in the output file.

    Examples
    --------
    >>> from my_model import MyModel
    >>> model = MyModel()
    >>> model_to_yaml(model, 'model.yaml')

    """
    yaml_path = Path(yaml_path)

    if not hasattr(model, "dict"):
        raise TypeError("The 'model' object does not have a 'dict()' method.")

    # Drop None-valued fields before serializing.
    serializable = {
        key: value
        for key, value in model.dict().items()
        if value is not None
    }

    with open(yaml_path, "w+") as f:
        yaml.dump(
            serializable, f, default_flow_style=False, sort_keys=False
        )
def ret_ori_overlay(
    czyx,
    ret_min: float = 1,
    ret_max: Union[float, Literal["auto"]] = 10,
    cmap: Literal["JCh", "HSV"] = "HSV",
):
    """
    Create an RGB overlay of retardance and orientation.

    "HSV" maps orientation to hue and retardance to value at full
    saturation. "JCh" is a similar colormap that is perceptually uniform.

    Parameters
    ----------
    czyx : nd-array
        czyx[0] is retardance in nanometers, czyx[1] is orientation in
        radians [0, pi]; czyx.shape = (2, ...).
    ret_min : float
        Minimum displayed retardance, typically a noise floor. Only used
        by the "JCh" colormap (pixels below it are desaturated).
    ret_max : float or "auto"
        Maximum displayed retardance, used to adjust contrast limits.
        "auto" uses the 99.99th percentile of the retardance channel.
    cmap : {"JCh", "HSV"}
        Colormap choice.

    Returns
    -------
    nd-array
        RGB image with shape (3, ...).

    Raises
    ------
    ValueError
        If czyx does not have a leading axis of length 2, or if cmap is
        not one of "JCh" / "HSV".
    """
    if czyx.shape[0] != 2:
        raise ValueError(
            f"Input must have shape (2, ...) instead of ({czyx.shape[0]}, ...)"
        )

    retardance = czyx[0]
    orientation = czyx[1]

    if ret_max == "auto":
        ret_max = np.percentile(np.ravel(retardance), 99.99)

    ret_ = np.clip(retardance, 0, ret_max)  # clip and copy
    # Convert the 180 degree orientation range into 360 degrees to match
    # the periodicity of hue.
    ori_ = orientation * 360 / np.pi

    if cmap == "JCh":
        J = ret_
        C = np.ones_like(J) * 60
        C[ret_ < ret_min] = 0  # desaturate pixels below the noise floor
        h = ori_

        JCh = np.stack((J, C, h), axis=-1)
        JCh_rgb = cspace_convert(JCh, "JCh", "sRGB1")

        # Clamp out-of-gamut values produced by the colorspace conversion.
        JCh_rgb[JCh_rgb < 0] = 0
        JCh_rgb[JCh_rgb > 1] = 1

        overlay_final = JCh_rgb
    elif cmap == "HSV":
        # NOTE(review): the value channel is normalized by the per-image
        # maximum, so ret_min is ignored here and an all-zero retardance
        # channel divides by zero — confirm inputs are non-trivial.
        I_hsv = np.moveaxis(
            np.stack(
                [
                    ori_ / 360,
                    np.ones_like(ori_),
                    ret_ / np.max(ret_),
                ]
            ),
            source=0,
            destination=-1,
        )
        overlay_final = hsv_to_rgb(I_hsv)
    else:
        raise ValueError(f"Colormap {cmap} not understood")

    return np.moveaxis(
        overlay_final, source=-1, destination=0
    )  # .shape = (3, ...)


def ret_ori_phase_overlay(
    czyx, max_val_V: float = 1.0, max_val_S: float = 1.0
):
    """
    Create an RGB overlay of retardance, orientation, and phase.

    Maps orientation to hue, retardance to saturation, and phase to value
    in HSV space, then converts to RGB.

    Parameters
    ----------
    czyx : numpy.ndarray
        czyx[0] is the retardance image, czyx[1] is the orientation image
        (range from 0 to pi), and czyx[2] is the phase image.
    max_val_V : float
        Raise the brightness of the phase channel by 1/max_val_V.
    max_val_S : float
        Raise the brightness of the retardance channel by 1/max_val_S.

    Returns
    -------
    nd-array
        RGB image with shape (3, ...).

    Raises
    ------
    ValueError
        If czyx does not have a leading axis of length 3.
    """
    if czyx.shape[0] != 3:
        raise ValueError(
            f"Input must have shape (3, ...) instead of ({czyx.shape[0]}, ...)"
        )

    retardance = czyx[0]
    orientation = czyx[1]
    phase = czyx[2]

    # Build the HSV stack: hue from orientation, saturation from
    # retardance, value from phase.
    ordered_stack = np.stack(
        (
            # Orientation is normalized by pi so hue spans [0, 1].
            orientation / np.pi,
            # NOTE(review): rescale_intensity with a degenerate range
            # (min == max, i.e. a constant image) produces NaNs — confirm
            # inputs have non-zero dynamic range.
            rescale_intensity(
                retardance,
                in_range=(np.min(retardance), np.max(retardance)),
                out_range=(0, 1),
            )
            / max_val_S,
            rescale_intensity(
                phase,
                in_range=(np.min(phase), np.max(phase)),
                out_range=(0, 1),
            )
            / max_val_V,
        ),
        axis=0,
    )
    return hsv2rgb(ordered_stack, channel_axis=0)
# qtpy defaults to PyQt5/PySide2 which can be present in upgraded environments
try:
    import qtpy

    qtpy.API_NAME  # touching the attribute verifies a Qt binding was resolved
except RuntimeError as error:
    # qtpy raises QtBindingsNotFoundError (a RuntimeError subclass) when no
    # Qt binding is installed; compare by name so we don't have to import
    # qtpy's exception type, which may itself be unavailable.
    if type(error).__name__ == "QtBindingsNotFoundError":
        print("WARNING: QtBindings (PyQT or PySide) was not found for GUI")
    else:
        # Any other RuntimeError is unexpected — surface it instead of
        # silently swallowing it.
        raise
+ + +from qtpy import QtCore, QtGui, QtWidgets + +from waveorder.plugin import tab_recon + + +class Ui_Form(object): + def setupUi(self, Form): + Form.setObjectName("Form") + Form.setWindowModality(QtCore.Qt.NonModal) + Form.resize(630, 1165) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth()) + Form.setSizePolicy(sizePolicy) + Form.setMinimumSize(QtCore.QSize(0, 0)) + Form.setLayoutDirection(QtCore.Qt.LeftToRight) + self.gridLayout_7 = QtWidgets.QGridLayout(Form) + self.gridLayout_7.setObjectName("gridLayout_7") + self.label_logo = QtWidgets.QLabel(Form) + self.label_logo.setText("") + self.label_logo.setAlignment(QtCore.Qt.AlignCenter) + self.label_logo.setObjectName("label_logo") + self.gridLayout_7.addWidget(self.label_logo, 0, 0, 1, 1) + self.recon_status = QtWidgets.QGroupBox(Form) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.recon_status.setFont(font) + self.recon_status.setObjectName("recon_status") + self.gridLayout_22 = QtWidgets.QGridLayout(self.recon_status) + self.gridLayout_22.setContentsMargins(-1, 25, -1, -1) + self.gridLayout_22.setObjectName("gridLayout_22") + self.le_mm_status = QtWidgets.QLineEdit(self.recon_status) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_mm_status.sizePolicy().hasHeightForWidth() + ) + self.le_mm_status.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_mm_status.setFont(font) + self.le_mm_status.setAlignment(QtCore.Qt.AlignCenter) + self.le_mm_status.setReadOnly(True) + self.le_mm_status.setObjectName("le_mm_status") + self.gridLayout_22.addWidget(self.le_mm_status, 0, 1, 
1, 1) + self.qbutton_connect_to_mm = QtWidgets.QPushButton(self.recon_status) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.qbutton_connect_to_mm.sizePolicy().hasHeightForWidth() + ) + self.qbutton_connect_to_mm.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_connect_to_mm.setFont(font) + self.qbutton_connect_to_mm.setObjectName("qbutton_connect_to_mm") + self.gridLayout_22.addWidget(self.qbutton_connect_to_mm, 0, 0, 1, 1) + self.gridLayout_7.addWidget( + self.recon_status, 1, 0, 1, 1, QtCore.Qt.AlignTop + ) + self.tabWidget = QtWidgets.QTabWidget(Form) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.tabWidget.sizePolicy().hasHeightForWidth() + ) + self.tabWidget.setSizePolicy(sizePolicy) + self.tabWidget.setMinimumSize(QtCore.QSize(0, 0)) + self.tabWidget.setBaseSize(QtCore.QSize(0, 0)) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.tabWidget.setFont(font) + self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North) + self.tabWidget.setElideMode(QtCore.Qt.ElideMiddle) + self.tabWidget.setUsesScrollButtons(False) + self.tabWidget.setTabsClosable(False) + self.tabWidget.setMovable(False) + self.tabWidget.setObjectName("tabWidget") + self.Calibration = QtWidgets.QWidget() + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.Calibration.sizePolicy().hasHeightForWidth() + ) + self.Calibration.setSizePolicy(sizePolicy) + self.Calibration.setMinimumSize(QtCore.QSize(0, 0)) + 
self.Calibration.setObjectName("Calibration") + self.gridLayout_5 = QtWidgets.QGridLayout(self.Calibration) + self.gridLayout_5.setSizeConstraint( + QtWidgets.QLayout.SetDefaultConstraint + ) + self.gridLayout_5.setContentsMargins(4, 4, 4, 4) + self.gridLayout_5.setObjectName("gridLayout_5") + self.scrollArea = QtWidgets.QScrollArea(self.Calibration) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.scrollArea.setFont(font) + self.scrollArea.setWidgetResizable(True) + self.scrollArea.setObjectName("scrollArea") + self.scrollAreaWidgetContents = QtWidgets.QWidget() + self.scrollAreaWidgetContents.setGeometry( + QtCore.QRect(0, 0, 590, 1032) + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.scrollAreaWidgetContents.sizePolicy().hasHeightForWidth() + ) + self.scrollAreaWidgetContents.setSizePolicy(sizePolicy) + self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents") + self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents) + self.gridLayout.setContentsMargins(4, 4, 4, 4) + self.gridLayout.setVerticalSpacing(20) + self.gridLayout.setObjectName("gridLayout") + self.run_calib = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.run_calib.setFont(font) + self.run_calib.setObjectName("run_calib") + self.gridLayout_12 = QtWidgets.QGridLayout(self.run_calib) + self.gridLayout_12.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_12.setVerticalSpacing(6) + self.gridLayout_12.setObjectName("gridLayout_12") + self.labell_calib_assessment = QtWidgets.QLabel(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.labell_calib_assessment.setFont(font) + self.labell_calib_assessment.setObjectName("labell_calib_assessment") + 
self.gridLayout_12.addWidget(self.labell_calib_assessment, 8, 0, 1, 1) + self.label_progress = QtWidgets.QLabel(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_progress.setFont(font) + self.label_progress.setObjectName("label_progress") + self.gridLayout_12.addWidget(self.label_progress, 4, 0, 1, 1) + self.label_extinction = QtWidgets.QLabel(self.run_calib) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_extinction.sizePolicy().hasHeightForWidth() + ) + self.label_extinction.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_extinction.setFont(font) + self.label_extinction.setObjectName("label_extinction") + self.gridLayout_12.addWidget(self.label_extinction, 6, 0, 1, 1) + self.qbutton_calibrate = QtWidgets.QPushButton(self.run_calib) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.qbutton_calibrate.sizePolicy().hasHeightForWidth() + ) + self.qbutton_calibrate.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_calibrate.setFont(font) + self.qbutton_calibrate.setAutoDefault(False) + self.qbutton_calibrate.setObjectName("qbutton_calibrate") + self.gridLayout_12.addWidget(self.qbutton_calibrate, 0, 0, 1, 2) + self.progress_bar = QtWidgets.QProgressBar(self.run_calib) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.progress_bar.setFont(font) + self.progress_bar.setProperty("value", 0) + self.progress_bar.setObjectName("progress_bar") + self.gridLayout_12.addWidget(self.progress_bar, 5, 0, 1, 2) + self.qbutton_calc_extinction = 
QtWidgets.QPushButton(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_calc_extinction.setFont(font) + self.qbutton_calc_extinction.setObjectName("qbutton_calc_extinction") + self.gridLayout_12.addWidget(self.qbutton_calc_extinction, 2, 0, 1, 2) + self.le_extinction = QtWidgets.QLineEdit(self.run_calib) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_extinction.sizePolicy().hasHeightForWidth() + ) + self.le_extinction.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setPointSize(13) + font.setBold(True) + font.setItalic(False) + self.le_extinction.setFont(font) + self.le_extinction.setReadOnly(True) + self.le_extinction.setObjectName("le_extinction") + self.gridLayout_12.addWidget(self.le_extinction, 6, 1, 1, 1) + self.qbutton_stop_calib = QtWidgets.QPushButton(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_stop_calib.setFont(font) + self.qbutton_stop_calib.setObjectName("qbutton_stop_calib") + self.gridLayout_12.addWidget(self.qbutton_stop_calib, 3, 0, 1, 2) + self.qbutton_load_calib = QtWidgets.QPushButton(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_load_calib.setFont(font) + self.qbutton_load_calib.setObjectName("qbutton_load_calib") + self.gridLayout_12.addWidget(self.qbutton_load_calib, 1, 0, 1, 2) + self.tb_calib_assessment = QtWidgets.QTextBrowser(self.run_calib) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.tb_calib_assessment.setFont(font) + self.tb_calib_assessment.setObjectName("tb_calib_assessment") + self.gridLayout_12.addWidget(self.tb_calib_assessment, 8, 1, 1, 1) + self.gridLayout.addWidget(self.run_calib, 1, 0, 1, 1) + self.calib_params = QtWidgets.QGroupBox(self.scrollAreaWidgetContents) 
+ font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.calib_params.setFont(font) + self.calib_params.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu) + self.calib_params.setAlignment( + QtCore.Qt.AlignLeading + | QtCore.Qt.AlignLeft + | QtCore.Qt.AlignVCenter + ) + self.calib_params.setCheckable(False) + self.calib_params.setObjectName("calib_params") + self.gridLayout_2 = QtWidgets.QGridLayout(self.calib_params) + self.gridLayout_2.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_2.setVerticalSpacing(6) + self.gridLayout_2.setObjectName("gridLayout_2") + self.label_wavelength = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_wavelength.setFont(font) + self.label_wavelength.setObjectName("label_wavelength") + self.gridLayout_2.addWidget(self.label_wavelength, 3, 0, 1, 1) + self.label_dir = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_dir.setFont(font) + self.label_dir.setObjectName("label_dir") + self.gridLayout_2.addWidget(self.label_dir, 0, 0, 1, 1) + self.cb_lca = QtWidgets.QComboBox(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_lca.setFont(font) + self.cb_lca.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_lca.setObjectName("cb_lca") + self.cb_lca.addItem("") + self.gridLayout_2.addWidget(self.cb_lca, 5, 2, 1, 1) + self.cb_calib_scheme = QtWidgets.QComboBox(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_calib_scheme.setFont(font) + self.cb_calib_scheme.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_calib_scheme.setObjectName("cb_calib_scheme") + self.cb_calib_scheme.addItem("") + self.cb_calib_scheme.addItem("") + self.gridLayout_2.addWidget(self.cb_calib_scheme, 4, 1, 1, 1) + self.cb_lcb = QtWidgets.QComboBox(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + 
font.setItalic(False) + self.cb_lcb.setFont(font) + self.cb_lcb.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_lcb.setObjectName("cb_lcb") + self.cb_lcb.addItem("") + self.gridLayout_2.addWidget(self.cb_lcb, 5, 3, 1, 1) + self.label_lca = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_lca.setFont(font) + self.label_lca.setAlignment(QtCore.Qt.AlignCenter) + self.label_lca.setObjectName("label_lca") + self.gridLayout_2.addWidget(self.label_lca, 4, 2, 1, 1) + self.le_swing = QtWidgets.QLineEdit(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_swing.setFont(font) + self.le_swing.setObjectName("le_swing") + self.gridLayout_2.addWidget(self.le_swing, 2, 1, 1, 1) + self.le_directory = QtWidgets.QLineEdit(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_directory.setFont(font) + self.le_directory.setText("") + self.le_directory.setObjectName("le_directory") + self.gridLayout_2.addWidget(self.le_directory, 0, 1, 1, 1) + self.le_wavelength = QtWidgets.QLineEdit(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_wavelength.setFont(font) + self.le_wavelength.setObjectName("le_wavelength") + self.gridLayout_2.addWidget(self.le_wavelength, 3, 1, 1, 1) + self.label_lcb = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_lcb.setFont(font) + self.label_lcb.setAlignment(QtCore.Qt.AlignCenter) + self.label_lcb.setObjectName("label_lcb") + self.gridLayout_2.addWidget(self.label_lcb, 4, 3, 1, 1) + self.label_swing = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_swing.setFont(font) + self.label_swing.setObjectName("label_swing") + self.gridLayout_2.addWidget(self.label_swing, 2, 0, 1, 1) + self.label_calib_mode = 
QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_calib_mode.setFont(font) + self.label_calib_mode.setObjectName("label_calib_mode") + self.gridLayout_2.addWidget(self.label_calib_mode, 5, 0, 1, 1) + self.label_cfg_group = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_cfg_group.setFont(font) + self.label_cfg_group.setObjectName("label_cfg_group") + self.gridLayout_2.addWidget(self.label_cfg_group, 6, 0, 1, 1) + self.label_scheme = QtWidgets.QLabel(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_scheme.setFont(font) + self.label_scheme.setObjectName("label_scheme") + self.gridLayout_2.addWidget(self.label_scheme, 4, 0, 1, 1) + self.cb_config_group = QtWidgets.QComboBox(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_config_group.setFont(font) + self.cb_config_group.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_config_group.setObjectName("cb_config_group") + self.gridLayout_2.addWidget(self.cb_config_group, 6, 1, 1, 1) + self.cb_calib_mode = QtWidgets.QComboBox(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + font.setKerning(True) + self.cb_calib_mode.setFont(font) + self.cb_calib_mode.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_calib_mode.setObjectName("cb_calib_mode") + self.gridLayout_2.addWidget(self.cb_calib_mode, 5, 1, 1, 1) + self.qbutton_browse = QtWidgets.QPushButton(self.calib_params) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_browse.setFont(font) + self.qbutton_browse.setAutoFillBackground(False) + self.qbutton_browse.setCheckable(False) + self.qbutton_browse.setDefault(False) + self.qbutton_browse.setObjectName("qbutton_browse") + self.gridLayout_2.addWidget(self.qbutton_browse, 0, 2, 1, 1) + 
self.gridLayout.addWidget(self.calib_params, 0, 0, 1, 1) + self.tabWidget_2 = QtWidgets.QTabWidget(self.scrollAreaWidgetContents) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.tabWidget_2.sizePolicy().hasHeightForWidth() + ) + self.tabWidget_2.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.tabWidget_2.setFont(font) + self.tabWidget_2.setFocusPolicy(QtCore.Qt.ClickFocus) + self.tabWidget_2.setObjectName("tabWidget_2") + self.tab_plot = QtWidgets.QWidget() + self.tab_plot.setObjectName("tab_plot") + self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.tab_plot) + self.verticalLayout_4.setObjectName("verticalLayout_4") + self.scrollArea_3 = QtWidgets.QScrollArea(self.tab_plot) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.scrollArea_3.setFont(font) + self.scrollArea_3.setWidgetResizable(True) + self.scrollArea_3.setObjectName("scrollArea_3") + self.scrollAreaWidgetContents_3 = QtWidgets.QWidget() + self.scrollAreaWidgetContents_3.setGeometry( + QtCore.QRect(0, 0, 550, 302) + ) + self.scrollAreaWidgetContents_3.setObjectName( + "scrollAreaWidgetContents_3" + ) + self.verticalLayout_6 = QtWidgets.QVBoxLayout( + self.scrollAreaWidgetContents_3 + ) + self.verticalLayout_6.setObjectName("verticalLayout_6") + self.label_inten_plot = QtWidgets.QLabel( + self.scrollAreaWidgetContents_3 + ) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_inten_plot.setFont(font) + self.label_inten_plot.setObjectName("label_inten_plot") + self.verticalLayout_6.addWidget(self.label_inten_plot) + self.plot_widget = PlotWidget(self.scrollAreaWidgetContents_3) + self.plot_widget.setMinimumSize(QtCore.QSize(0, 250)) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.plot_widget.setFont(font) + 
self.plot_widget.setObjectName("plot_widget") + self.verticalLayout_6.addWidget(self.plot_widget) + self.scrollArea_3.setWidget(self.scrollAreaWidgetContents_3) + self.verticalLayout_4.addWidget(self.scrollArea_3) + self.tabWidget_2.addTab(self.tab_plot, "") + self.tab_advanced = QtWidgets.QWidget() + self.tab_advanced.setObjectName("tab_advanced") + self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab_advanced) + self.verticalLayout_3.setObjectName("verticalLayout_3") + self.scrollArea_2 = QtWidgets.QScrollArea(self.tab_advanced) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.scrollArea_2.sizePolicy().hasHeightForWidth() + ) + self.scrollArea_2.setSizePolicy(sizePolicy) + self.scrollArea_2.setWidgetResizable(True) + self.scrollArea_2.setObjectName("scrollArea_2") + self.scrollAreaWidgetContents_2 = QtWidgets.QWidget() + self.scrollAreaWidgetContents_2.setGeometry( + QtCore.QRect(0, 0, 550, 302) + ) + self.scrollAreaWidgetContents_2.setObjectName( + "scrollAreaWidgetContents_2" + ) + self.verticalLayout_5 = QtWidgets.QVBoxLayout( + self.scrollAreaWidgetContents_2 + ) + self.verticalLayout_5.setObjectName("verticalLayout_5") + self.label_loglevel = QtWidgets.QLabel(self.scrollAreaWidgetContents_2) + self.label_loglevel.setObjectName("label_loglevel") + self.verticalLayout_5.addWidget(self.label_loglevel) + self.cb_loglevel = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2) + self.cb_loglevel.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_loglevel.setMaxVisibleItems(2) + self.cb_loglevel.setObjectName("cb_loglevel") + self.cb_loglevel.addItem("") + self.cb_loglevel.addItem("") + self.verticalLayout_5.addWidget(self.cb_loglevel) + self.label_log = QtWidgets.QLabel(self.scrollAreaWidgetContents_2) + self.label_log.setObjectName("label_log") + self.verticalLayout_5.addWidget(self.label_log) + 
self.te_log = QtWidgets.QPlainTextEdit(self.scrollAreaWidgetContents_2) + self.te_log.setMinimumSize(QtCore.QSize(0, 150)) + self.te_log.setLineWidth(1) + self.te_log.setReadOnly(True) + self.te_log.setBackgroundVisible(True) + self.te_log.setObjectName("te_log") + self.verticalLayout_5.addWidget(self.te_log) + self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2) + self.verticalLayout_3.addWidget(self.scrollArea_2) + self.tabWidget_2.addTab(self.tab_advanced, "") + self.tab_notes = QtWidgets.QWidget() + self.tab_notes.setObjectName("tab_notes") + self.formLayout = QtWidgets.QFormLayout(self.tab_notes) + self.formLayout.setObjectName("formLayout") + self.le_notes_field = QtWidgets.QLineEdit(self.tab_notes) + self.le_notes_field.setObjectName("le_notes_field") + self.formLayout.setWidget( + 1, QtWidgets.QFormLayout.SpanningRole, self.le_notes_field + ) + self.qbutton_push_note = QtWidgets.QPushButton(self.tab_notes) + self.qbutton_push_note.setObjectName("qbutton_push_note") + self.formLayout.setWidget( + 2, QtWidgets.QFormLayout.SpanningRole, self.qbutton_push_note + ) + self.label_note = QtWidgets.QLabel(self.tab_notes) + self.label_note.setObjectName("label_note") + self.formLayout.setWidget( + 0, QtWidgets.QFormLayout.LabelRole, self.label_note + ) + self.tabWidget_2.addTab(self.tab_notes, "") + self.gridLayout.addWidget( + self.tabWidget_2, 3, 0, 1, 1, QtCore.Qt.AlignTop + ) + self.capture_background = QtWidgets.QGroupBox( + self.scrollAreaWidgetContents + ) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.capture_background.setFont(font) + self.capture_background.setObjectName("capture_background") + self.gridLayout_3 = QtWidgets.QGridLayout(self.capture_background) + self.gridLayout_3.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_3.setVerticalSpacing(6) + self.gridLayout_3.setObjectName("gridLayout_3") + self.label_bg_folder = QtWidgets.QLabel(self.capture_background) + font = QtGui.QFont() + font.setBold(False) + 
font.setItalic(False) + self.label_bg_folder.setFont(font) + self.label_bg_folder.setObjectName("label_bg_folder") + self.gridLayout_3.addWidget(self.label_bg_folder, 0, 0, 1, 1) + self.qbutton_capture_bg = QtWidgets.QPushButton( + self.capture_background + ) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_capture_bg.setFont(font) + self.qbutton_capture_bg.setObjectName("qbutton_capture_bg") + self.gridLayout_3.addWidget(self.qbutton_capture_bg, 2, 0, 1, 2) + self.le_bg_folder = QtWidgets.QLineEdit(self.capture_background) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_bg_folder.setFont(font) + self.le_bg_folder.setObjectName("le_bg_folder") + self.gridLayout_3.addWidget(self.le_bg_folder, 0, 1, 1, 1) + self.le_n_avg = QtWidgets.QLineEdit(self.capture_background) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_n_avg.setFont(font) + self.le_n_avg.setObjectName("le_n_avg") + self.gridLayout_3.addWidget(self.le_n_avg, 1, 1, 1, 1) + self.label_n_avg = QtWidgets.QLabel(self.capture_background) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_n_avg.setFont(font) + self.label_n_avg.setObjectName("label_n_avg") + self.gridLayout_3.addWidget(self.label_n_avg, 1, 0, 1, 1) + self.gridLayout.addWidget(self.capture_background, 2, 0, 1, 1) + self.scrollArea.setWidget(self.scrollAreaWidgetContents) + self.gridLayout_5.addWidget(self.scrollArea, 0, 0, 1, 1) + self.tabWidget.addTab(self.Calibration, "") + self.Acquisition = QtWidgets.QWidget() + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.Acquisition.sizePolicy().hasHeightForWidth() + ) + self.Acquisition.setSizePolicy(sizePolicy) + self.Acquisition.setMinimumSize(QtCore.QSize(0, 0)) + 
self.Acquisition.setMaximumSize(QtCore.QSize(16777215, 16777215)) + self.Acquisition.setObjectName("Acquisition") + self.gridLayout_6 = QtWidgets.QGridLayout(self.Acquisition) + self.gridLayout_6.setSizeConstraint( + QtWidgets.QLayout.SetDefaultConstraint + ) + self.gridLayout_6.setContentsMargins(4, 4, 4, 4) + self.gridLayout_6.setSpacing(0) + self.gridLayout_6.setObjectName("gridLayout_6") + self.scrollArea_4 = QtWidgets.QScrollArea(self.Acquisition) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.scrollArea_4.sizePolicy().hasHeightForWidth() + ) + self.scrollArea_4.setSizePolicy(sizePolicy) + self.scrollArea_4.setWidgetResizable(True) + self.scrollArea_4.setObjectName("scrollArea_4") + self.scrollAreaWidgetContents_4 = QtWidgets.QWidget() + self.scrollAreaWidgetContents_4.setGeometry( + QtCore.QRect(0, 0, 614, 990) + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.scrollAreaWidgetContents_4.sizePolicy().hasHeightForWidth() + ) + self.scrollAreaWidgetContents_4.setSizePolicy(sizePolicy) + self.scrollAreaWidgetContents_4.setObjectName( + "scrollAreaWidgetContents_4" + ) + self.gridLayout_15 = QtWidgets.QGridLayout( + self.scrollAreaWidgetContents_4 + ) + self.gridLayout_15.setVerticalSpacing(20) + self.gridLayout_15.setObjectName("gridLayout_15") + self.phase = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_4) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.phase.sizePolicy().hasHeightForWidth() + ) + self.phase.setSizePolicy(sizePolicy) + font = 
QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.phase.setFont(font) + self.phase.setObjectName("phase") + self.gridLayout_23 = QtWidgets.QGridLayout(self.phase) + self.gridLayout_23.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_23.setVerticalSpacing(6) + self.gridLayout_23.setObjectName("gridLayout_23") + self.label_phase_regularizer = QtWidgets.QLabel(self.phase) + self.label_phase_regularizer.setObjectName("label_phase_regularizer") + self.gridLayout_23.addWidget(self.label_phase_regularizer, 1, 0, 1, 1) + self.label_phase_rho = QtWidgets.QLabel(self.phase) + self.label_phase_rho.setObjectName("label_phase_rho") + self.gridLayout_23.addWidget(self.label_phase_rho, 3, 0, 1, 1) + self.le_phase_strength = QtWidgets.QLineEdit(self.phase) + self.le_phase_strength.setObjectName("le_phase_strength") + self.gridLayout_23.addWidget(self.le_phase_strength, 2, 1, 1, 1) + self.le_itr = QtWidgets.QLineEdit(self.phase) + self.le_itr.setObjectName("le_itr") + self.gridLayout_23.addWidget(self.le_itr, 4, 1, 1, 1) + self.cb_phase_denoiser = QtWidgets.QComboBox(self.phase) + self.cb_phase_denoiser.setObjectName("cb_phase_denoiser") + self.cb_phase_denoiser.addItem("") + self.cb_phase_denoiser.addItem("") + self.gridLayout_23.addWidget(self.cb_phase_denoiser, 1, 1, 1, 1) + self.le_rho = QtWidgets.QLineEdit(self.phase) + self.le_rho.setObjectName("le_rho") + self.gridLayout_23.addWidget(self.le_rho, 3, 1, 1, 1) + self.label_itr = QtWidgets.QLabel(self.phase) + self.label_itr.setObjectName("label_itr") + self.gridLayout_23.addWidget(self.label_itr, 4, 0, 1, 1) + self.label_phase_strength = QtWidgets.QLabel(self.phase) + self.label_phase_strength.setObjectName("label_phase_strength") + self.gridLayout_23.addWidget(self.label_phase_strength, 2, 0, 1, 1) + self.label_pad_z = QtWidgets.QLabel(self.phase) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + 
sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_pad_z.sizePolicy().hasHeightForWidth() + ) + self.label_pad_z.setSizePolicy(sizePolicy) + self.label_pad_z.setObjectName("label_pad_z") + self.gridLayout_23.addWidget(self.label_pad_z, 0, 0, 1, 1) + self.le_pad_z = QtWidgets.QLineEdit(self.phase) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_pad_z.sizePolicy().hasHeightForWidth() + ) + self.le_pad_z.setSizePolicy(sizePolicy) + self.le_pad_z.setObjectName("le_pad_z") + self.gridLayout_23.addWidget(self.le_pad_z, 0, 1, 1, 1) + self.gridLayout_15.addWidget(self.phase, 3, 0, 1, 1) + self.acq_settings = QtWidgets.QGroupBox( + self.scrollAreaWidgetContents_4 + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.acq_settings.sizePolicy().hasHeightForWidth() + ) + self.acq_settings.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.acq_settings.setFont(font) + self.acq_settings.setFocusPolicy(QtCore.Qt.TabFocus) + self.acq_settings.setFlat(False) + self.acq_settings.setObjectName("acq_settings") + self.gridLayout_8 = QtWidgets.QGridLayout(self.acq_settings) + self.gridLayout_8.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_8.setVerticalSpacing(6) + self.gridLayout_8.setObjectName("gridLayout_8") + self.cb_acq_mode = QtWidgets.QComboBox(self.acq_settings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_acq_mode.setFont(font) + self.cb_acq_mode.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_acq_mode.setObjectName("cb_acq_mode") + self.cb_acq_mode.addItem("") + self.cb_acq_mode.addItem("") + 
self.gridLayout_8.addWidget(self.cb_acq_mode, 3, 1, 1, 1) + self.label_zstart = QtWidgets.QLabel(self.acq_settings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_zstart.setFont(font) + self.label_zstart.setObjectName("label_zstart") + self.gridLayout_8.addWidget(self.label_zstart, 1, 0, 1, 1) + self.label_acq_mode = QtWidgets.QLabel(self.acq_settings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_acq_mode.setFont(font) + self.label_acq_mode.setObjectName("label_acq_mode") + self.gridLayout_8.addWidget(self.label_acq_mode, 3, 0, 1, 1) + self.le_zstep = QtWidgets.QLineEdit(self.acq_settings) + self.le_zstep.setObjectName("le_zstep") + self.gridLayout_8.addWidget(self.le_zstep, 2, 2, 1, 1) + self.le_zend = QtWidgets.QLineEdit(self.acq_settings) + self.le_zend.setObjectName("le_zend") + self.gridLayout_8.addWidget(self.le_zend, 2, 1, 1, 1) + self.label_zstep = QtWidgets.QLabel(self.acq_settings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_zstep.setFont(font) + self.label_zstep.setObjectName("label_zstep") + self.gridLayout_8.addWidget(self.label_zstep, 1, 2, 1, 1) + self.label_zend = QtWidgets.QLabel(self.acq_settings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_zend.setFont(font) + self.label_zend.setObjectName("label_zend") + self.gridLayout_8.addWidget(self.label_zend, 1, 1, 1, 1) + self.cb_acq_channel = QtWidgets.QComboBox(self.acq_settings) + self.cb_acq_channel.setCurrentText("") + self.cb_acq_channel.setObjectName("cb_acq_channel") + self.gridLayout_8.addWidget(self.cb_acq_channel, 4, 1, 1, 1) + self.labe_acq_channel = QtWidgets.QLabel(self.acq_settings) + self.labe_acq_channel.setObjectName("labe_acq_channel") + self.gridLayout_8.addWidget(self.labe_acq_channel, 4, 0, 1, 1) + self.le_zstart = QtWidgets.QLineEdit(self.acq_settings) + self.le_zstart.setObjectName("le_zstart") + 
self.gridLayout_8.addWidget(self.le_zstart, 2, 0, 1, 1) + self.gridLayout_15.addWidget(self.acq_settings, 1, 0, 1, 1) + self.ReconSettings = QtWidgets.QGroupBox( + self.scrollAreaWidgetContents_4 + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.ReconSettings.sizePolicy().hasHeightForWidth() + ) + self.ReconSettings.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.ReconSettings.setFont(font) + self.ReconSettings.setAlignment( + QtCore.Qt.AlignLeading + | QtCore.Qt.AlignLeft + | QtCore.Qt.AlignVCenter + ) + self.ReconSettings.setObjectName("ReconSettings") + self.gridLayout_9 = QtWidgets.QGridLayout(self.ReconSettings) + self.gridLayout_9.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_9.setVerticalSpacing(6) + self.gridLayout_9.setObjectName("gridLayout_9") + self.label_mag = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_mag.sizePolicy().hasHeightForWidth() + ) + self.label_mag.setSizePolicy(sizePolicy) + self.label_mag.setObjectName("label_mag") + self.gridLayout_9.addWidget(self.label_mag, 10, 0, 1, 1) + self.le_data_save_name = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_data_save_name.sizePolicy().hasHeightForWidth() + ) + self.le_data_save_name.setSizePolicy(sizePolicy) + self.le_data_save_name.setText("") + self.le_data_save_name.setObjectName("le_data_save_name") + self.gridLayout_9.addWidget(self.le_data_save_name, 
1, 1, 1, 1) + self.cb_bg_method = QtWidgets.QComboBox(self.ReconSettings) + self.cb_bg_method.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_bg_method.setObjectName("cb_bg_method") + self.cb_bg_method.addItem("") + self.cb_bg_method.addItem("") + self.cb_bg_method.addItem("") + self.gridLayout_9.addWidget(self.cb_bg_method, 2, 1, 1, 1) + self.label_flip_orientation = QtWidgets.QLabel(self.ReconSettings) + self.label_flip_orientation.setObjectName("label_flip_orientation") + self.gridLayout_9.addWidget(self.label_flip_orientation, 12, 0, 1, 1) + self.le_mag = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_mag.sizePolicy().hasHeightForWidth() + ) + self.le_mag.setSizePolicy(sizePolicy) + self.le_mag.setObjectName("le_mag") + self.gridLayout_9.addWidget(self.le_mag, 10, 1, 1, 1) + self.le_cond_na = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_cond_na.sizePolicy().hasHeightForWidth() + ) + self.le_cond_na.setSizePolicy(sizePolicy) + self.le_cond_na.setObjectName("le_cond_na") + self.gridLayout_9.addWidget(self.le_cond_na, 7, 1, 1, 1) + self.label_data_save_name = QtWidgets.QLabel(self.ReconSettings) + self.label_data_save_name.setObjectName("label_data_save_name") + self.gridLayout_9.addWidget(self.label_data_save_name, 1, 0, 1, 1) + self.label_gpu_id = QtWidgets.QLabel(self.ReconSettings) + self.label_gpu_id.setAlignment( + QtCore.Qt.AlignLeading + | QtCore.Qt.AlignLeft + | QtCore.Qt.AlignVCenter + ) + self.label_gpu_id.setObjectName("label_gpu_id") + self.gridLayout_9.addWidget(self.label_gpu_id, 4, 0, 1, 1) + self.label_bg_method = 
QtWidgets.QLabel(self.ReconSettings) + self.label_bg_method.setObjectName("label_bg_method") + self.gridLayout_9.addWidget(self.label_bg_method, 2, 0, 1, 1) + self.label_recon_wavelength = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_recon_wavelength.sizePolicy().hasHeightForWidth() + ) + self.label_recon_wavelength.setSizePolicy(sizePolicy) + self.label_recon_wavelength.setObjectName("label_recon_wavelength") + self.gridLayout_9.addWidget(self.label_recon_wavelength, 5, 0, 1, 1) + self.le_save_dir = QtWidgets.QLineEdit(self.ReconSettings) + self.le_save_dir.setPlaceholderText("") + self.le_save_dir.setObjectName("le_save_dir") + self.gridLayout_9.addWidget(self.le_save_dir, 0, 1, 1, 1) + self.le_gpu_id = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_gpu_id.sizePolicy().hasHeightForWidth() + ) + self.le_gpu_id.setSizePolicy(sizePolicy) + self.le_gpu_id.setObjectName("le_gpu_id") + self.gridLayout_9.addWidget(self.le_gpu_id, 4, 1, 1, 1) + self.label_cond_na = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_cond_na.sizePolicy().hasHeightForWidth() + ) + self.label_cond_na.setSizePolicy(sizePolicy) + self.label_cond_na.setObjectName("label_cond_na") + self.gridLayout_9.addWidget(self.label_cond_na, 7, 0, 1, 1) + self.cb_rotate_orientation = QtWidgets.QCheckBox(self.ReconSettings) + self.cb_rotate_orientation.setText("") + 
self.cb_rotate_orientation.setObjectName("cb_rotate_orientation") + self.gridLayout_9.addWidget(self.cb_rotate_orientation, 11, 1, 1, 1) + self.qbutton_browse_bg_path = QtWidgets.QPushButton(self.ReconSettings) + self.qbutton_browse_bg_path.setObjectName("qbutton_browse_bg_path") + self.gridLayout_9.addWidget(self.qbutton_browse_bg_path, 3, 2, 1, 1) + self.chb_use_gpu = QtWidgets.QCheckBox(self.ReconSettings) + self.chb_use_gpu.setObjectName("chb_use_gpu") + self.gridLayout_9.addWidget(self.chb_use_gpu, 4, 2, 1, 1) + self.label_bg_path = QtWidgets.QLabel(self.ReconSettings) + self.label_bg_path.setObjectName("label_bg_path") + self.gridLayout_9.addWidget(self.label_bg_path, 3, 0, 1, 1) + self.le_recon_wavelength = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_recon_wavelength.sizePolicy().hasHeightForWidth() + ) + self.le_recon_wavelength.setSizePolicy(sizePolicy) + self.le_recon_wavelength.setObjectName("le_recon_wavelength") + self.gridLayout_9.addWidget(self.le_recon_wavelength, 5, 1, 1, 1) + self.le_n_media = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_n_media.sizePolicy().hasHeightForWidth() + ) + self.le_n_media.setSizePolicy(sizePolicy) + self.le_n_media.setObjectName("le_n_media") + self.gridLayout_9.addWidget(self.le_n_media, 9, 1, 1, 1) + self.label_orientation_offset = QtWidgets.QLabel(self.ReconSettings) + self.label_orientation_offset.setObjectName("label_orientation_offset") + self.gridLayout_9.addWidget(self.label_orientation_offset, 11, 0, 1, 1) + self.le_obj_na = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = 
QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_obj_na.sizePolicy().hasHeightForWidth() + ) + self.le_obj_na.setSizePolicy(sizePolicy) + self.le_obj_na.setObjectName("le_obj_na") + self.gridLayout_9.addWidget(self.le_obj_na, 6, 1, 1, 1) + self.label_obj_na = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_obj_na.sizePolicy().hasHeightForWidth() + ) + self.label_obj_na.setSizePolicy(sizePolicy) + self.label_obj_na.setObjectName("label_obj_na") + self.gridLayout_9.addWidget(self.label_obj_na, 6, 0, 1, 1) + self.le_ps = QtWidgets.QLineEdit(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_ps.sizePolicy().hasHeightForWidth() + ) + self.le_ps.setSizePolicy(sizePolicy) + self.le_ps.setText("") + self.le_ps.setObjectName("le_ps") + self.gridLayout_9.addWidget(self.le_ps, 8, 1, 1, 1) + self.label_n_media = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_n_media.sizePolicy().hasHeightForWidth() + ) + self.label_n_media.setSizePolicy(sizePolicy) + self.label_n_media.setObjectName("label_n_media") + self.gridLayout_9.addWidget(self.label_n_media, 9, 0, 1, 1) + self.label_save_dir = QtWidgets.QLabel(self.ReconSettings) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_save_dir.setFont(font) + 
self.label_save_dir.setObjectName("label_save_dir") + self.gridLayout_9.addWidget(self.label_save_dir, 0, 0, 1, 1) + self.label_ps = QtWidgets.QLabel(self.ReconSettings) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_ps.sizePolicy().hasHeightForWidth() + ) + self.label_ps.setSizePolicy(sizePolicy) + self.label_ps.setObjectName("label_ps") + self.gridLayout_9.addWidget(self.label_ps, 8, 0, 1, 1) + self.qbutton_browse_save_dir = QtWidgets.QPushButton( + self.ReconSettings + ) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_browse_save_dir.setFont(font) + self.qbutton_browse_save_dir.setObjectName("qbutton_browse_save_dir") + self.gridLayout_9.addWidget(self.qbutton_browse_save_dir, 0, 2, 1, 1) + self.le_bg_path = QtWidgets.QLineEdit(self.ReconSettings) + self.le_bg_path.setPlaceholderText("") + self.le_bg_path.setObjectName("le_bg_path") + self.gridLayout_9.addWidget(self.le_bg_path, 3, 1, 1, 1) + self.label_invert_phase_contrast = QtWidgets.QLabel(self.ReconSettings) + self.label_invert_phase_contrast.setObjectName( + "label_invert_phase_contrast" + ) + self.gridLayout_9.addWidget( + self.label_invert_phase_contrast, 13, 0, 1, 1 + ) + self.cb_flip_orientation = QtWidgets.QCheckBox(self.ReconSettings) + self.cb_flip_orientation.setText("") + self.cb_flip_orientation.setObjectName("cb_flip_orientation") + self.gridLayout_9.addWidget(self.cb_flip_orientation, 12, 1, 1, 1) + self.cb_invert_phase_contrast = QtWidgets.QCheckBox(self.ReconSettings) + self.cb_invert_phase_contrast.setText("") + self.cb_invert_phase_contrast.setObjectName("cb_invert_phase_contrast") + self.gridLayout_9.addWidget(self.cb_invert_phase_contrast, 13, 1, 1, 1) + self.gridLayout_15.addWidget(self.ReconSettings, 2, 0, 1, 1) + self.acquire = 
QtWidgets.QGroupBox(self.scrollAreaWidgetContents_4) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.acquire.sizePolicy().hasHeightForWidth() + ) + self.acquire.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.acquire.setFont(font) + self.acquire.setObjectName("acquire") + self.gridLayout_13 = QtWidgets.QGridLayout(self.acquire) + self.gridLayout_13.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_13.setVerticalSpacing(6) + self.gridLayout_13.setObjectName("gridLayout_13") + self.qbutton_acq_ret_ori = QtWidgets.QPushButton(self.acquire) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_acq_ret_ori.setFont(font) + self.qbutton_acq_ret_ori.setObjectName("qbutton_acq_ret_ori") + self.gridLayout_13.addWidget(self.qbutton_acq_ret_ori, 2, 0, 1, 1) + self.qbutton_acq_phase_from_bf = QtWidgets.QPushButton(self.acquire) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_acq_phase_from_bf.setFont(font) + self.qbutton_acq_phase_from_bf.setObjectName( + "qbutton_acq_phase_from_bf" + ) + self.gridLayout_13.addWidget( + self.qbutton_acq_phase_from_bf, 2, 1, 1, 1 + ) + self.qbutton_acq_ret_ori_phase = QtWidgets.QPushButton(self.acquire) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_acq_ret_ori_phase.setFont(font) + self.qbutton_acq_ret_ori_phase.setObjectName( + "qbutton_acq_ret_ori_phase" + ) + self.gridLayout_13.addWidget( + self.qbutton_acq_ret_ori_phase, 2, 2, 1, 1 + ) + self.qbutton_stop_acq = QtWidgets.QPushButton(self.acquire) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_stop_acq.setFont(font) + self.qbutton_stop_acq.setObjectName("qbutton_stop_acq") + self.gridLayout_13.addWidget(self.qbutton_stop_acq, 3, 0, 1, 
3) + self.gridLayout_15.addWidget(self.acquire, 0, 0, 1, 2) + self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_4) + self.gridLayout_6.addWidget(self.scrollArea_4, 4, 0, 1, 1) + self.tabWidget.addTab(self.Acquisition, "") + + self.tab_reconstruction = tab_recon.Ui_ReconTab_Form(Form) + self.tabWidget.addTab( + self.tab_reconstruction.recon_tab_mainScrollArea, "Reconstruction" + ) + + self.Display = QtWidgets.QWidget() + self.Display.setObjectName("Display") + self.gridLayout_18 = QtWidgets.QGridLayout(self.Display) + self.gridLayout_18.setObjectName("gridLayout_18") + self.scrollArea_5 = QtWidgets.QScrollArea(self.Display) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.scrollArea_5.sizePolicy().hasHeightForWidth() + ) + self.scrollArea_5.setSizePolicy(sizePolicy) + self.scrollArea_5.setWidgetResizable(True) + self.scrollArea_5.setObjectName("scrollArea_5") + self.scrollAreaWidgetContents_5 = QtWidgets.QWidget() + self.scrollAreaWidgetContents_5.setGeometry( + QtCore.QRect(0, 0, 574, 974) + ) + self.scrollAreaWidgetContents_5.setObjectName( + "scrollAreaWidgetContents_5" + ) + self.gridLayout_4 = QtWidgets.QGridLayout( + self.scrollAreaWidgetContents_5 + ) + self.gridLayout_4.setObjectName("gridLayout_4") + self.label_orientation_image = QtWidgets.QLabel( + self.scrollAreaWidgetContents_5 + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_orientation_image.sizePolicy().hasHeightForWidth() + ) + self.label_orientation_image.setSizePolicy(sizePolicy) + self.label_orientation_image.setText("") + self.label_orientation_image.setAlignment(QtCore.Qt.AlignCenter) + 
self.label_orientation_image.setObjectName("label_orientation_image") + self.gridLayout_4.addWidget(self.label_orientation_image, 6, 0, 1, 1) + self.DisplayOptions = QtWidgets.QGroupBox( + self.scrollAreaWidgetContents_5 + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.DisplayOptions.sizePolicy().hasHeightForWidth() + ) + self.DisplayOptions.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(True) + font.setItalic(False) + self.DisplayOptions.setFont(font) + self.DisplayOptions.setObjectName("DisplayOptions") + self.gridLayout_17 = QtWidgets.QGridLayout(self.DisplayOptions) + self.gridLayout_17.setContentsMargins(-1, 12, -1, -1) + self.gridLayout_17.setVerticalSpacing(6) + self.gridLayout_17.setObjectName("gridLayout_17") + self.cb_hue = QtWidgets.QComboBox(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_hue.setFont(font) + self.cb_hue.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_hue.setObjectName("cb_hue") + self.gridLayout_17.addWidget(self.cb_hue, 8, 1, 1, 1) + self.slider_saturation = QtWidgets.QSlider(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.slider_saturation.setFont(font) + self.slider_saturation.setOrientation(QtCore.Qt.Horizontal) + self.slider_saturation.setObjectName("slider_saturation") + self.gridLayout_17.addWidget(self.slider_saturation, 10, 2, 1, 2) + self.line = QtWidgets.QFrame(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.line.setFont(font) + self.line.setLineWidth(3) + self.line.setFrameShape(QtWidgets.QFrame.HLine) + self.line.setFrameShadow(QtWidgets.QFrame.Sunken) + self.line.setObjectName("line") + self.gridLayout_17.addWidget(self.line, 6, 0, 1, 4) + self.label_saturation = 
QtWidgets.QLabel(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_saturation.setFont(font) + self.label_saturation.setObjectName("label_saturation") + self.gridLayout_17.addWidget(self.label_saturation, 10, 0, 1, 1) + self.le_overlay_slice = QtWidgets.QLineEdit(self.DisplayOptions) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.le_overlay_slice.sizePolicy().hasHeightForWidth() + ) + self.le_overlay_slice.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_overlay_slice.setFont(font) + self.le_overlay_slice.setObjectName("le_overlay_slice") + self.gridLayout_17.addWidget(self.le_overlay_slice, 15, 0, 1, 1) + self.le_sat_min = QtWidgets.QLineEdit(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_sat_min.setFont(font) + self.le_sat_min.setInputMask("") + self.le_sat_min.setFrame(False) + self.le_sat_min.setObjectName("le_sat_min") + self.gridLayout_17.addWidget(self.le_sat_min, 9, 2, 1, 1) + self.cb_colormap = QtWidgets.QComboBox(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_colormap.setFont(font) + self.cb_colormap.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_colormap.setObjectName("cb_colormap") + self.cb_colormap.addItem("") + self.cb_colormap.addItem("") + self.gridLayout_17.addWidget(self.cb_colormap, 3, 1, 1, 2) + self.cb_value = QtWidgets.QComboBox(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_value.setFont(font) + self.cb_value.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_value.setObjectName("cb_value") + self.gridLayout_17.addWidget(self.cb_value, 12, 1, 1, 1) + self.le_val_min = QtWidgets.QLineEdit(self.DisplayOptions) + font 
= QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_val_min.setFont(font) + self.le_val_min.setFrame(False) + self.le_val_min.setObjectName("le_val_min") + self.gridLayout_17.addWidget(self.le_val_min, 11, 2, 1, 1) + self.cb_saturation = QtWidgets.QComboBox(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.cb_saturation.setFont(font) + self.cb_saturation.setFocusPolicy(QtCore.Qt.StrongFocus) + self.cb_saturation.setObjectName("cb_saturation") + self.gridLayout_17.addWidget(self.cb_saturation, 10, 1, 1, 1) + self.le_sat_max = QtWidgets.QLineEdit(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_sat_max.setFont(font) + self.le_sat_max.setFrame(False) + self.le_sat_max.setAlignment( + QtCore.Qt.AlignRight + | QtCore.Qt.AlignTrailing + | QtCore.Qt.AlignVCenter + ) + self.le_sat_max.setObjectName("le_sat_max") + self.gridLayout_17.addWidget(self.le_sat_max, 9, 3, 1, 1) + self.slider_value = QtWidgets.QSlider(self.DisplayOptions) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.slider_value.sizePolicy().hasHeightForWidth() + ) + self.slider_value.setSizePolicy(sizePolicy) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.slider_value.setFont(font) + self.slider_value.setOrientation(QtCore.Qt.Horizontal) + self.slider_value.setObjectName("slider_value") + self.gridLayout_17.addWidget(self.slider_value, 12, 2, 1, 2) + self.label_value = QtWidgets.QLabel(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_value.setFont(font) + self.label_value.setObjectName("label_value") + self.gridLayout_17.addWidget(self.label_value, 12, 0, 1, 1) + self.chb_display_volume = QtWidgets.QCheckBox(self.DisplayOptions) + font = 
QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.chb_display_volume.setFont(font) + self.chb_display_volume.setObjectName("chb_display_volume") + self.gridLayout_17.addWidget(self.chb_display_volume, 15, 1, 1, 1) + self.label_colormap = QtWidgets.QLabel(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_colormap.setFont(font) + self.label_colormap.setObjectName("label_colormap") + self.gridLayout_17.addWidget(self.label_colormap, 3, 0, 1, 1) + self.le_val_max = QtWidgets.QLineEdit(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.le_val_max.setFont(font) + self.le_val_max.setFrame(False) + self.le_val_max.setAlignment( + QtCore.Qt.AlignRight + | QtCore.Qt.AlignTrailing + | QtCore.Qt.AlignVCenter + ) + self.le_val_max.setObjectName("le_val_max") + self.gridLayout_17.addWidget(self.le_val_max, 11, 3, 1, 1) + self.label_hue = QtWidgets.QLabel(self.DisplayOptions) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.label_hue.setFont(font) + self.label_hue.setObjectName("label_hue") + self.gridLayout_17.addWidget(self.label_hue, 8, 0, 1, 1) + self.qbutton_create_overlay = QtWidgets.QPushButton( + self.DisplayOptions + ) + font = QtGui.QFont() + font.setBold(False) + font.setItalic(False) + self.qbutton_create_overlay.setFont(font) + self.qbutton_create_overlay.setObjectName("qbutton_create_overlay") + self.gridLayout_17.addWidget(self.qbutton_create_overlay, 16, 0, 1, 4) + self.gridLayout_4.addWidget(self.DisplayOptions, 8, 0, 1, 1) + self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents_5) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label.sizePolicy().hasHeightForWidth() + ) + self.label.setSizePolicy(sizePolicy) + self.label.setObjectName("label") + 
self.gridLayout_4.addWidget(self.label, 1, 0, 1, 1) + self.retMaxSlider = QtWidgets.QSlider(self.scrollAreaWidgetContents_5) + self.retMaxSlider.setMaximum(50) + self.retMaxSlider.setSliderPosition(25) + self.retMaxSlider.setOrientation(QtCore.Qt.Horizontal) + self.retMaxSlider.setObjectName("retMaxSlider") + self.gridLayout_4.addWidget(self.retMaxSlider, 2, 0, 1, 1) + self.label_orientation_legend = QtWidgets.QLabel( + self.scrollAreaWidgetContents_5 + ) + sizePolicy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed + ) + sizePolicy.setHorizontalStretch(0) + sizePolicy.setVerticalStretch(0) + sizePolicy.setHeightForWidth( + self.label_orientation_legend.sizePolicy().hasHeightForWidth() + ) + self.label_orientation_legend.setSizePolicy(sizePolicy) + self.label_orientation_legend.setAlignment(QtCore.Qt.AlignCenter) + self.label_orientation_legend.setObjectName("label_orientation_legend") + self.gridLayout_4.addWidget(self.label_orientation_legend, 3, 0, 1, 1) + spacerItem = QtWidgets.QSpacerItem( + 20, + 40, + QtWidgets.QSizePolicy.Minimum, + QtWidgets.QSizePolicy.Expanding, + ) + self.gridLayout_4.addItem(spacerItem, 7, 0, 1, 1) + self.scrollArea_5.setWidget(self.scrollAreaWidgetContents_5) + self.gridLayout_18.addWidget(self.scrollArea_5, 0, 0, 2, 2) + self.tabWidget.addTab(self.Display, "") + self.gridLayout_7.addWidget(self.tabWidget, 2, 0, 1, 1) + + self.retranslateUi(Form) + self.tabWidget.setCurrentIndex(2) + self.tabWidget_2.setCurrentIndex(2) + self.cb_loglevel.setCurrentIndex(0) + QtCore.QMetaObject.connectSlotsByName(Form) + Form.setTabOrder(self.qbutton_connect_to_mm, self.le_mm_status) + Form.setTabOrder(self.le_mm_status, self.scrollArea) + Form.setTabOrder(self.scrollArea, self.le_directory) + Form.setTabOrder(self.le_directory, self.qbutton_browse) + Form.setTabOrder(self.qbutton_browse, self.le_swing) + Form.setTabOrder(self.le_swing, self.le_wavelength) + Form.setTabOrder(self.le_wavelength, 
self.cb_calib_scheme) + Form.setTabOrder(self.cb_calib_scheme, self.cb_calib_mode) + Form.setTabOrder(self.cb_calib_mode, self.cb_lca) + Form.setTabOrder(self.cb_lca, self.cb_lcb) + Form.setTabOrder(self.cb_lcb, self.cb_config_group) + Form.setTabOrder(self.cb_config_group, self.qbutton_calibrate) + Form.setTabOrder(self.qbutton_calibrate, self.qbutton_load_calib) + Form.setTabOrder(self.qbutton_load_calib, self.qbutton_calc_extinction) + Form.setTabOrder(self.qbutton_calc_extinction, self.qbutton_stop_calib) + Form.setTabOrder(self.qbutton_stop_calib, self.le_extinction) + Form.setTabOrder(self.le_extinction, self.tb_calib_assessment) + Form.setTabOrder(self.tb_calib_assessment, self.le_bg_folder) + Form.setTabOrder(self.le_bg_folder, self.le_n_avg) + Form.setTabOrder(self.le_n_avg, self.qbutton_capture_bg) + Form.setTabOrder(self.qbutton_capture_bg, self.scrollArea_3) + Form.setTabOrder(self.scrollArea_3, self.scrollArea_2) + Form.setTabOrder(self.scrollArea_2, self.cb_loglevel) + Form.setTabOrder(self.cb_loglevel, self.te_log) + Form.setTabOrder(self.te_log, self.le_notes_field) + Form.setTabOrder(self.le_notes_field, self.qbutton_push_note) + Form.setTabOrder(self.qbutton_push_note, self.scrollArea_4) + Form.setTabOrder(self.scrollArea_4, self.qbutton_acq_ret_ori) + Form.setTabOrder(self.qbutton_acq_ret_ori, self.qbutton_stop_acq) + Form.setTabOrder(self.qbutton_stop_acq, self.acq_settings) + Form.setTabOrder(self.acq_settings, self.le_zstart) + Form.setTabOrder(self.le_zstart, self.le_zend) + Form.setTabOrder(self.le_zend, self.le_zstep) + Form.setTabOrder(self.le_zstep, self.cb_acq_mode) + Form.setTabOrder(self.cb_acq_mode, self.cb_acq_channel) + Form.setTabOrder(self.cb_acq_channel, self.le_save_dir) + Form.setTabOrder(self.le_save_dir, self.qbutton_browse_save_dir) + Form.setTabOrder(self.qbutton_browse_save_dir, self.le_data_save_name) + Form.setTabOrder(self.le_data_save_name, self.cb_bg_method) + Form.setTabOrder(self.cb_bg_method, self.le_bg_path) + 
Form.setTabOrder(self.le_bg_path, self.qbutton_browse_bg_path) + Form.setTabOrder(self.qbutton_browse_bg_path, self.le_gpu_id) + Form.setTabOrder(self.le_gpu_id, self.chb_use_gpu) + Form.setTabOrder(self.chb_use_gpu, self.le_recon_wavelength) + Form.setTabOrder(self.le_recon_wavelength, self.le_obj_na) + Form.setTabOrder(self.le_obj_na, self.le_cond_na) + Form.setTabOrder(self.le_cond_na, self.le_ps) + Form.setTabOrder(self.le_ps, self.le_n_media) + Form.setTabOrder(self.le_n_media, self.le_mag) + Form.setTabOrder(self.le_mag, self.le_pad_z) + Form.setTabOrder(self.le_pad_z, self.cb_phase_denoiser) + Form.setTabOrder(self.cb_phase_denoiser, self.le_phase_strength) + Form.setTabOrder(self.le_phase_strength, self.le_rho) + Form.setTabOrder(self.le_rho, self.le_itr) + Form.setTabOrder(self.le_itr, self.cb_colormap) + Form.setTabOrder(self.cb_colormap, self.cb_hue) + Form.setTabOrder(self.cb_hue, self.le_sat_min) + Form.setTabOrder(self.le_sat_min, self.le_sat_max) + Form.setTabOrder(self.le_sat_max, self.cb_saturation) + Form.setTabOrder(self.cb_saturation, self.slider_saturation) + Form.setTabOrder(self.slider_saturation, self.le_val_min) + Form.setTabOrder(self.le_val_min, self.le_val_max) + Form.setTabOrder(self.le_val_max, self.cb_value) + Form.setTabOrder(self.cb_value, self.slider_value) + Form.setTabOrder(self.slider_value, self.le_overlay_slice) + Form.setTabOrder(self.le_overlay_slice, self.chb_display_volume) + Form.setTabOrder(self.chb_display_volume, self.qbutton_create_overlay) + Form.setTabOrder(self.qbutton_create_overlay, self.tabWidget) + Form.setTabOrder(self.tabWidget, self.scrollArea_5) + + def retranslateUi(self, Form): + _translate = QtCore.QCoreApplication.translate + Form.setWindowTitle(_translate("Form", "Form")) + self.recon_status.setTitle(_translate("Form", "MM Connection Status")) + self.le_mm_status.setText(_translate("Form", "Disconnected")) + self.qbutton_connect_to_mm.setText(_translate("Form", "Connect to MM")) + 
self.run_calib.setTitle(_translate("Form", "Run Calibration")) + self.labell_calib_assessment.setText( + _translate("Form", "Calibration Assessment") + ) + self.label_progress.setText(_translate("Form", "Progress")) + self.label_extinction.setText(_translate("Form", "Extinction")) + self.qbutton_calibrate.setText(_translate("Form", "Run Calibration")) + self.qbutton_calc_extinction.setText( + _translate("Form", "Calculate Extinction") + ) + self.qbutton_stop_calib.setText(_translate("Form", "STOP")) + self.qbutton_load_calib.setText(_translate("Form", "Load Calibration")) + self.calib_params.setTitle( + _translate("Form", "Calibration Parameters") + ) + self.label_wavelength.setText(_translate("Form", "Wavelength (nm)")) + self.label_dir.setText(_translate("Form", "Directory")) + self.cb_lca.setItemText(0, _translate("Form", "-")) + self.cb_calib_scheme.setItemText( + 0, _translate("Form", "4-State (Ext, 0, 60, 120)") + ) + self.cb_calib_scheme.setItemText( + 1, _translate("Form", "5-State (Ext, 0, 45, 90, 135)") + ) + self.cb_lcb.setItemText(0, _translate("Form", "-")) + self.label_lca.setText(_translate("Form", "LC-A")) + self.le_swing.setText(_translate("Form", "0.1")) + self.le_directory.setPlaceholderText( + _translate("Form", "/path/to/directory") + ) + self.le_wavelength.setText(_translate("Form", "532")) + self.label_lcb.setText(_translate("Form", "LC-B")) + self.label_swing.setText(_translate("Form", "Swing")) + self.label_calib_mode.setText(_translate("Form", "Calibration Mode")) + self.label_cfg_group.setText(_translate("Form", "Config Group")) + self.label_scheme.setText(_translate("Form", "Illumination Scheme")) + self.qbutton_browse.setText(_translate("Form", "Browse")) + self.label_inten_plot.setText(_translate("Form", "Intensity Plot")) + self.tabWidget_2.setTabText( + self.tabWidget_2.indexOf(self.tab_plot), _translate("Form", "Plot") + ) + self.label_loglevel.setText(_translate("Form", "Log Level")) + self.cb_loglevel.setItemText(0, 
_translate("Form", "Basic")) + self.cb_loglevel.setItemText(1, _translate("Form", "Debug")) + self.label_log.setText(_translate("Form", "Log")) + self.tabWidget_2.setTabText( + self.tabWidget_2.indexOf(self.tab_advanced), + _translate("Form", "Advanced"), + ) + self.qbutton_push_note.setText( + _translate("Form", "Push note to last metadata file") + ) + self.label_note.setText(_translate("Form", "Note")) + self.tabWidget_2.setTabText( + self.tabWidget_2.indexOf(self.tab_notes), + _translate("Form", "Notes"), + ) + self.capture_background.setTitle( + _translate("Form", "Capture Background") + ) + self.label_bg_folder.setText( + _translate("Form", "Background Folder Name") + ) + self.qbutton_capture_bg.setText( + _translate("Form", "Capture Background") + ) + self.le_bg_folder.setText(_translate("Form", "bg")) + self.le_n_avg.setText(_translate("Form", "5")) + self.label_n_avg.setText( + _translate("Form", "Number of Images to Average") + ) + self.tabWidget.setTabText( + self.tabWidget.indexOf(self.Calibration), + _translate("Form", "LC Calibration"), + ) + self.phase.setTitle( + _translate("Form", "Phase Reconstruction Settings") + ) + self.label_phase_regularizer.setText(_translate("Form", "Regularizer")) + self.label_phase_rho.setText(_translate("Form", "Rho")) + self.le_phase_strength.setText(_translate("Form", ".0001")) + self.le_itr.setText(_translate("Form", "50")) + self.le_itr.setPlaceholderText(_translate("Form", "TV Only")) + self.cb_phase_denoiser.setItemText(0, _translate("Form", "Tikhonov")) + self.cb_phase_denoiser.setItemText(1, _translate("Form", "TV")) + self.le_rho.setText(_translate("Form", "0.001")) + self.le_rho.setPlaceholderText(_translate("Form", "TV Only")) + self.label_itr.setText(_translate("Form", "Iterations")) + self.label_phase_strength.setText(_translate("Form", "Strength")) + self.label_pad_z.setText(_translate("Form", "Z Padding")) + self.le_pad_z.setText(_translate("Form", "0")) + self.acq_settings.setTitle(_translate("Form", 
"Acquisition Settings")) + self.cb_acq_mode.setItemText(0, _translate("Form", "2D")) + self.cb_acq_mode.setItemText(1, _translate("Form", "3D")) + self.label_zstart.setText(_translate("Form", "Z Start (um)")) + self.label_acq_mode.setText(_translate("Form", "Acquisition Mode")) + self.label_zstep.setText(_translate("Form", "Z Step (um)")) + self.label_zend.setText(_translate("Form", "Z End (um)")) + self.labe_acq_channel.setText(_translate("Form", "BF Channel")) + self.ReconSettings.setTitle( + _translate("Form", "General Reconstruction Settings") + ) + self.label_mag.setText(_translate("Form", "Magnification")) + self.le_data_save_name.setPlaceholderText( + _translate("Form", "Optional") + ) + self.cb_bg_method.setItemText(0, _translate("Form", "None")) + self.cb_bg_method.setItemText(1, _translate("Form", "Global")) + self.cb_bg_method.setItemText(2, _translate("Form", "Local Fit")) + self.label_flip_orientation.setText( + _translate("Form", "Flip Orientation") + ) + self.label_data_save_name.setText(_translate("Form", "Save Name")) + self.label_gpu_id.setText(_translate("Form", "GPU ID")) + self.label_bg_method.setText(_translate("Form", "BG Correction")) + self.label_recon_wavelength.setText( + _translate("Form", "Wavelength (nm)") + ) + self.le_gpu_id.setText(_translate("Form", "0")) + self.le_gpu_id.setPlaceholderText(_translate("Form", "Optional")) + self.label_cond_na.setText(_translate("Form", "Condenser NA")) + self.qbutton_browse_bg_path.setText(_translate("Form", "Browse")) + self.chb_use_gpu.setText(_translate("Form", "Use GPU")) + self.label_bg_path.setText(_translate("Form", "Background Path")) + self.le_recon_wavelength.setPlaceholderText( + _translate("Form", "list: 530, 645 or single value: 532") + ) + self.le_n_media.setText(_translate("Form", "1.003")) + self.label_orientation_offset.setText( + _translate("Form", "Rotate Orientation (90 deg)") + ) + self.label_obj_na.setText(_translate("Form", "Objective NA")) + 
self.label_n_media.setText(_translate("Form", "RI of Obj. Media")) + self.label_save_dir.setText(_translate("Form", "Save Directory")) + self.label_ps.setText(_translate("Form", "Camera Pixel Size (um)")) + self.qbutton_browse_save_dir.setText(_translate("Form", "Browse")) + self.label_invert_phase_contrast.setText( + _translate("Form", "Invert Phase Contrast") + ) + self.acquire.setTitle(_translate("Form", "Acquire")) + self.qbutton_acq_ret_ori.setText( + _translate("Form", "Retardance + Orientation") + ) + self.qbutton_acq_phase_from_bf.setText( + _translate("Form", "Phase From BF") + ) + self.qbutton_acq_ret_ori_phase.setText( + _translate("Form", "Retardance + Orientation + Phase") + ) + self.qbutton_stop_acq.setText(_translate("Form", "STOP")) + self.tabWidget.setTabText( + self.tabWidget.indexOf(self.Acquisition), + _translate("Form", "Acquisition / Reconstruction"), + ) + self.DisplayOptions.setTitle(_translate("Form", "Display Options")) + self.label_saturation.setText(_translate("Form", "Saturation")) + self.le_overlay_slice.setPlaceholderText(_translate("Form", "Slice")) + self.le_sat_min.setText(_translate("Form", "20")) + self.cb_colormap.setItemText(0, _translate("Form", "HSV")) + self.cb_colormap.setItemText( + 1, _translate("Form", "JCh (Perceptually Uniform)") + ) + self.le_val_min.setText(_translate("Form", "20")) + self.le_sat_max.setText(_translate("Form", "80")) + self.label_value.setText(_translate("Form", "Value")) + self.chb_display_volume.setText(_translate("Form", "Use Full Volume")) + self.label_colormap.setText( + _translate("Form", "BirefringenceOverlay Colormap") + ) + self.le_val_max.setText(_translate("Form", "80")) + self.label_hue.setText(_translate("Form", "Hue")) + self.qbutton_create_overlay.setText( + _translate("Form", "Create Overlay") + ) + self.label.setText(_translate("Form", "Overlay Retardance Maximum ")) + self.label_orientation_legend.setText( + _translate("Form", "Retardance Orientation Overlay Legend") + ) + 
self.tabWidget.setTabText( + self.tabWidget.indexOf(self.Display), _translate("Form", "Display") + ) + + +from pyqtgraph import PlotWidget + +if __name__ == "__main__": + import sys + + app = QtWidgets.QApplication(sys.argv) + Form = QtWidgets.QWidget() + ui = Ui_Form() + ui.setupUi(Form) + Form.show() + sys.exit(app.exec_()) diff --git a/waveorder/plugin/gui.ui b/waveorder/plugin/gui.ui new file mode 100644 index 00000000..dde9cae4 --- /dev/null +++ b/waveorder/plugin/gui.ui @@ -0,0 +1,2235 @@ + + + Form + + + Qt::NonModal + + + + 0 + 0 + 630 + 1165 + + + + + 0 + 0 + + + + + 0 + 0 + + + + Form + + + Qt::LeftToRight + + + + + + + + + Qt::AlignCenter + + + + + + + + false + true + + + + MM Connection Status + + + + 25 + + + + + + 0 + 0 + + + + + false + false + + + + Disconnected + + + Qt::AlignCenter + + + true + + + + + + + + 0 + 0 + + + + + false + false + + + + Connect to MM + + + + + + + + + + + 0 + 0 + + + + + 0 + 0 + + + + + 0 + 0 + + + + + false + false + + + + QTabWidget::North + + + 2 + + + Qt::ElideMiddle + + + false + + + false + + + false + + + + + 0 + 0 + + + + + 0 + 0 + + + + LC Calibration + + + + QLayout::SetDefaultConstraint + + + 4 + + + 4 + + + 4 + + + 4 + + + + + + false + false + + + + true + + + + + 0 + 0 + 590 + 1032 + + + + + 0 + 0 + + + + + 4 + + + 4 + + + 4 + + + 4 + + + 20 + + + + + + false + true + + + + Run Calibration + + + + 12 + + + 6 + + + + + + false + false + + + + Calibration Assessment + + + + + + + + false + false + + + + Progress + + + + + + + + 0 + 0 + + + + + false + false + + + + Extinction + + + + + + + + 0 + 0 + + + + + false + false + + + + Run Calibration + + + false + + + + + + + + false + true + + + + 0 + + + + + + + + false + false + + + + Calculate Extinction + + + + + + + + 0 + 0 + + + + + 13 + false + true + + + + true + + + + + + + + false + false + + + + STOP + + + + + + + + false + false + + + + Load Calibration + + + + + + + + false + false + + + + + + + + + + + + false + true + + + + Qt::DefaultContextMenu + 
+ + Calibration Parameters + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + false + + + + 12 + + + 6 + + + + + + false + false + + + + Wavelength (nm) + + + + + + + + false + false + + + + Directory + + + + + + + + false + false + + + + Qt::StrongFocus + + + + - + + + + + + + + + false + false + + + + Qt::StrongFocus + + + + 4-State (Ext, 0, 60, 120) + + + + + 5-State (Ext, 0, 45, 90, 135) + + + + + + + + + false + false + + + + Qt::StrongFocus + + + + - + + + + + + + + + false + false + + + + LC-A + + + Qt::AlignCenter + + + + + + + + false + false + + + + 0.1 + + + + + + + + false + false + + + + + + + /path/to/directory + + + + + + + + false + false + + + + 532 + + + + + + + + false + false + + + + LC-B + + + Qt::AlignCenter + + + + + + + + false + false + + + + Swing + + + + + + + + false + false + + + + Calibration Mode + + + + + + + + false + false + + + + Config Group + + + + + + + + false + false + + + + Illumination Scheme + + + + + + + + false + false + + + + Qt::StrongFocus + + + + + + + + false + false + true + + + + Qt::StrongFocus + + + + + + + + false + false + + + + false + + + Browse + + + false + + + false + + + + + + + + + + + 0 + 0 + + + + + false + false + + + + Qt::ClickFocus + + + 2 + + + + Plot + + + + + + + false + false + + + + true + + + + + 0 + 0 + 550 + 302 + + + + + + + + false + false + + + + Intensity Plot + + + + + + + + 0 + 250 + + + + + false + false + + + + + + + + + + + + + Advanced + + + + + + + 0 + 0 + + + + true + + + + + 0 + 0 + 550 + 302 + + + + + + + Log Level + + + + + + + Qt::StrongFocus + + + 0 + + + 2 + + + + Basic + + + + + Debug + + + + + + + + Log + + + + + + + + 0 + 150 + + + + 1 + + + true + + + true + + + + + + + + + + + + Notes + + + + + + + + + Push note to last metadata file + + + + + + + Note + + + + + + + + + + + + false + true + + + + Capture Background + + + + 12 + + + 6 + + + + + + false + false + + + + Background Folder Name + + + + + + + + false + false + + + + Capture Background + + + + + + 
+ + false + false + + + + bg + + + + + + + + false + false + + + + 5 + + + + + + + + false + false + + + + Number of Images to Average + + + + + + + + + + + + + + + + 0 + 0 + + + + + 0 + 0 + + + + + 16777215 + 16777215 + + + + Acquisition / Reconstruction + + + + QLayout::SetDefaultConstraint + + + 4 + + + 4 + + + 4 + + + 4 + + + 0 + + + + + + 0 + 0 + + + + true + + + + + 0 + 0 + 614 + 990 + + + + + 0 + 0 + + + + + 20 + + + + + + 0 + 0 + + + + + false + true + + + + Phase Reconstruction Settings + + + + 12 + + + 6 + + + + + Regularizer + + + + + + + Rho + + + + + + + .0001 + + + + + + + 50 + + + TV Only + + + + + + + + Tikhonov + + + + + TV + + + + + + + + 0.001 + + + TV Only + + + + + + + Iterations + + + + + + + Strength + + + + + + + + 0 + 0 + + + + Z Padding + + + + + + + + 0 + 0 + + + + 0 + + + + + + + + + + + 0 + 0 + + + + + false + true + + + + Qt::TabFocus + + + Acquisition Settings + + + false + + + + 12 + + + 6 + + + + + + false + false + + + + Qt::StrongFocus + + + + 2D + + + + + 3D + + + + + + + + + false + false + + + + Z Start (um) + + + + + + + + false + false + + + + Acquisition Mode + + + + + + + + + + + + + + false + false + + + + Z Step (um) + + + + + + + + false + false + + + + Z End (um) + + + + + + + + + + + + + + BF Channel + + + + + + + + + + + + + + 0 + 0 + + + + + false + true + + + + General Reconstruction Settings + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + + 12 + + + 6 + + + + + + 0 + 0 + + + + Magnification + + + + + + + + 0 + 0 + + + + + + + Optional + + + + + + + Qt::StrongFocus + + + + None + + + + + Global + + + + + Local Fit + + + + + + + + Flip Orientation + + + + + + + + 0 + 0 + + + + + + + + + 0 + 0 + + + + + + + + Save Name + + + + + + + GPU ID + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + + + + + BG Correction + + + + + + + + 0 + 0 + + + + Wavelength (nm) + + + + + + + + + + + + + + + 0 + 0 + + + + 0 + + + Optional + + + + + + + + 0 + 0 + + + + Condenser NA + + + + + + + + + + + + + + Browse + + 
+ + + + + Use GPU + + + + + + + Background Path + + + + + + + + 0 + 0 + + + + list: 530, 645 or single value: 532 + + + + + + + + 0 + 0 + + + + 1.003 + + + + + + + Rotate Orientation (90 deg) + + + + + + + + 0 + 0 + + + + + + + + + 0 + 0 + + + + Objective NA + + + + + + + + 0 + 0 + + + + + + + + + + + + 0 + 0 + + + + RI of Obj. Media + + + + + + + + false + false + + + + Save Directory + + + + + + + + 0 + 0 + + + + Camera Pixel Size (um) + + + + + + + + false + false + + + + Browse + + + + + + + + + + + + + + Invert Phase Contrast + + + + + + + + + + + + + + + + + + + + + + + + + 0 + 0 + + + + + false + true + + + + Acquire + + + + 12 + + + 6 + + + + + + false + false + + + + Retardance + Orientation + + + + + + + + false + false + + + + Phase From BF + + + + + + + + false + false + + + + Retardance + Orientation + Phase + + + + + + + + false + false + + + + STOP + + + + + + + + + + + + + + + Display + + + + + + + 0 + 0 + + + + true + + + + + 0 + 0 + 574 + 974 + + + + + + + + 0 + 0 + + + + + + + Qt::AlignCenter + + + + + + + + 0 + 0 + + + + + false + true + + + + Display Options + + + + 12 + + + 6 + + + + + + false + false + + + + Qt::StrongFocus + + + + + + + + false + false + + + + Qt::Horizontal + + + + + + + + false + false + + + + 3 + + + Qt::Horizontal + + + + + + + + false + false + + + + Saturation + + + + + + + + 0 + 0 + + + + + false + false + + + + Slice + + + + + + + + false + false + + + + + + + 20 + + + false + + + + + + + + false + false + + + + Qt::StrongFocus + + + + HSV + + + + + JCh (Perceptually Uniform) + + + + + + + + + false + false + + + + Qt::StrongFocus + + + + + + + + false + false + + + + 20 + + + false + + + + + + + + false + false + + + + Qt::StrongFocus + + + + + + + + false + false + + + + 80 + + + false + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + 0 + 0 + + + + + false + false + + + + Qt::Horizontal + + + + + + + + false + false + + + + Value + + + + + + + + false + false + + + + Use Full Volume + + + + + 
+ + + false + false + + + + BirefringenceOverlay Colormap + + + + + + + + false + false + + + + 80 + + + false + + + Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter + + + + + + + + false + false + + + + Hue + + + + + + + + false + false + + + + Create Overlay + + + + + + + + + + + 0 + 0 + + + + Overlay Retardance Maximum + + + + + + + 50 + + + 25 + + + Qt::Horizontal + + + + + + + + 0 + 0 + + + + Retardance Orientation Overlay Legend + + + Qt::AlignCenter + + + + + + + Qt::Vertical + + + + 20 + 40 + + + + + + + + + + + + + + + + + PlotWidget + QWidget +
pyqtgraph
+ 1 +
+
+ + qbutton_connect_to_mm + le_mm_status + scrollArea + le_directory + qbutton_browse + le_swing + le_wavelength + cb_calib_scheme + cb_calib_mode + cb_lca + cb_lcb + cb_config_group + qbutton_calibrate + qbutton_load_calib + qbutton_calc_extinction + qbutton_stop_calib + le_extinction + tb_calib_assessment + le_bg_folder + le_n_avg + qbutton_capture_bg + scrollArea_3 + scrollArea_2 + cb_loglevel + te_log + le_notes_field + qbutton_push_note + scrollArea_4 + qbutton_acq_ret_ori + qbutton_stop_acq + acq_settings + le_zstart + le_zend + le_zstep + cb_acq_mode + cb_acq_channel + le_save_dir + qbutton_browse_save_dir + le_data_save_name + cb_bg_method + le_bg_path + qbutton_browse_bg_path + le_gpu_id + chb_use_gpu + le_recon_wavelength + le_obj_na + le_cond_na + le_ps + le_n_media + le_mag + le_pad_z + cb_phase_denoiser + le_phase_strength + le_rho + le_itr + cb_colormap + cb_hue + le_sat_min + le_sat_max + cb_saturation + slider_saturation + le_val_min + le_val_max + cb_value + slider_value + le_overlay_slice + chb_display_volume + qbutton_create_overlay + tabWidget + scrollArea_5 + + + +
diff --git a/waveorder/plugin/main_widget.py b/waveorder/plugin/main_widget.py new file mode 100644 index 00000000..c8a663cb --- /dev/null +++ b/waveorder/plugin/main_widget.py @@ -0,0 +1,2173 @@ +from __future__ import annotations + +import json +import logging +import os +import textwrap +from os.path import dirname +from pathlib import Path, PurePath + +# type hint/check +from typing import TYPE_CHECKING + +import dask.array as da +import numpy as np +from numpy.typing import NDArray +from numpydoc.docscrape import NumpyDocString +from packaging import version +from qtpy.QtCore import Qt, Signal, Slot +from qtpy.QtGui import QColor, QPixmap +from qtpy.QtWidgets import QFileDialog, QSizePolicy, QSlider, QWidget +from superqt import QDoubleRangeSlider, QRangeSlider + +from waveorder.waveorder_reconstructor import waveorder_microscopy + +try: + from pycromanager import Core, Studio, zmq_bridge +except: + pass + +try: + from napari import Viewer + from napari.components import LayerList + from napari.utils.events import Event + from napari.utils.notifications import show_warning +except: + pass + +try: + from waveorder.acq.acquisition_workers import ( + BFAcquisitionWorker, + PolarizationAcquisitionWorker, + ) +except: + pass +from waveorder.calib import Calibration +from waveorder.calib.Calibration import LC_DEVICE_NAME, QLIPP_Calibration +from waveorder.calib.calibration_workers import ( + BackgroundCaptureWorker, + CalibrationWorker, + load_calibration, +) +from waveorder.io.core_functions import set_lc_state, snap_and_average +from waveorder.io.metadata_reader import MetadataReader +from waveorder.io.visualization import ret_ori_overlay +from waveorder.plugin import gui + +# avoid runtime import error +if TYPE_CHECKING: + pass + + +class MainWidget(QWidget): + """ + This is the main waveorder widget that houses all of the GUI components of waveorder. 
+ The GUI is designed in QT Designer in /waveorder/plugin/gui.ui and converted to a python file + with the pyuic5 command. + """ + + # Initialize Custom Signals + log_changed = Signal(str) + + # Initialize class attributes + disabled_button_style = "border: 1px solid rgb(65,72,81);" + bf_keywords = [ + "bf", + "brightfield", + "bright", + "labelfree", + "label-free", + "lf", + "label", + "phase", + "ph", + ] + no_bf_msg = "\n".join( + textwrap.wrap( + f"No brightfield channel found. If you would like to acquire phase from brightfield," + " please restart waveorder after adding a new channel to Micro-Manager with one of the" + " following case-insensitive keywords: " + ", ".join(bf_keywords), + width=70, + ) + ) + + def __init__(self, napari_viewer: Viewer): + super().__init__() + self.viewer = napari_viewer + + # Setup GUI elements + self.ui = gui.Ui_Form() + self.ui.setupUi(self) + self.ui.tab_reconstruction.set_viewer(napari_viewer) + + # Override initial tab focus + self.ui.tabWidget.setCurrentIndex(0) + + # Set attributes need for enabling/disabling buttons + self.bf_channel_found = False + + # Disable buttons until connected to MM + self._set_buttons_enabled(False) + + # Set up overlay sliders (Commenting for 0.3.0. Consider debugging or deleting for 1.0.0.) 
+ # self._promote_slider_init() + + ## Connect GUI elements to functions + # Top bar + self.ui.qbutton_connect_to_mm.clicked[bool].connect( + self.toggle_mm_connection + ) + + # Calibration tab + self.ui.qbutton_browse.clicked[bool].connect(self.browse_dir_path) + self.ui.le_directory.editingFinished.connect(self.enter_dir_path) + self.ui.le_directory.setText(str(Path.cwd())) + + self.ui.le_swing.editingFinished.connect(self.enter_swing) + self.ui.le_swing.setText("0.1") + self.enter_swing() + + self.ui.le_wavelength.editingFinished.connect(self.enter_wavelength) + self.ui.le_wavelength.setText("532") + self.enter_wavelength() + + self.ui.cb_calib_scheme.currentIndexChanged[int].connect( + self.enter_calib_scheme + ) + self.ui.cb_calib_mode.currentIndexChanged[int].connect( + self.enter_calib_mode + ) + self.ui.cb_lca.currentIndexChanged[int].connect(self.enter_dac_lca) + self.ui.cb_lcb.currentIndexChanged[int].connect(self.enter_dac_lcb) + self.ui.qbutton_calibrate.clicked[bool].connect(self.run_calibration) + self.ui.qbutton_load_calib.clicked[bool].connect(self.load_calibration) + self.ui.qbutton_calc_extinction.clicked[bool].connect( + self.calc_extinction + ) + self.ui.cb_config_group.currentIndexChanged[int].connect( + self.enter_config_group + ) + + self.ui.le_bg_folder.editingFinished.connect(self.enter_bg_folder_name) + self.ui.le_n_avg.editingFinished.connect(self.enter_n_avg) + self.ui.qbutton_capture_bg.clicked[bool].connect(self.capture_bg) + + # Advanced tab + self.ui.cb_loglevel.currentIndexChanged[int].connect( + self.enter_log_level + ) + self.ui.qbutton_push_note.clicked[bool].connect(self.push_note) + + # Acquisition tab + self.ui.qbutton_browse_save_dir.clicked[bool].connect( + self.browse_save_path + ) + self.ui.le_save_dir.editingFinished.connect(self.enter_save_path) + self.ui.le_save_dir.setText(str(Path.cwd())) + self.ui.le_data_save_name.editingFinished.connect(self.enter_save_name) + + 
self.ui.le_zstart.editingFinished.connect(self.enter_zstart) + self.ui.le_zstart.setText("-10") + self.enter_zstart() + + self.ui.le_zend.editingFinished.connect(self.enter_zend) + self.ui.le_zend.setText("10") + self.enter_zend() + + self.ui.le_zstep.editingFinished.connect(self.enter_zstep) + self.ui.le_zstep.setText("1") + self.enter_zstep() + + self.ui.chb_use_gpu.stateChanged[int].connect(self.enter_use_gpu) + self.ui.le_gpu_id.editingFinished.connect(self.enter_gpu_id) + + self.ui.cb_rotate_orientation.stateChanged[int].connect( + self.enter_rotate_orientation + ) + self.ui.cb_flip_orientation.stateChanged[int].connect( + self.enter_flip_orientation + ) + self.ui.cb_invert_phase_contrast.stateChanged[int].connect( + self.enter_invert_phase_contrast + ) + + # This parameter seems to be wired differently than others...investigate later + self.ui.le_recon_wavelength.editingFinished.connect( + self.enter_recon_wavelength + ) + self.ui.le_recon_wavelength.setText("532") + self.enter_recon_wavelength() + + self.ui.le_obj_na.editingFinished.connect(self.enter_obj_na) + self.ui.le_obj_na.setText("1.3") + self.enter_obj_na() + + self.ui.le_cond_na.editingFinished.connect(self.enter_cond_na) + self.ui.le_cond_na.setText("0.5") + self.enter_cond_na() + + self.ui.le_mag.editingFinished.connect(self.enter_mag) + self.ui.le_mag.setText("60") + self.enter_mag() + + self.ui.le_ps.editingFinished.connect(self.enter_ps) + self.ui.le_ps.setText("6.9") + self.enter_ps() + + self.ui.le_n_media.editingFinished.connect(self.enter_n_media) + self.ui.le_n_media.setText("1.3") + self.enter_n_media() + + self.ui.le_pad_z.editingFinished.connect(self.enter_pad_z) + self.ui.cb_acq_mode.currentIndexChanged[int].connect( + self.enter_acq_mode + ) + + self.ui.cb_bg_method.currentIndexChanged[int].connect( + self.enter_bg_correction + ) + + self.ui.le_bg_path.editingFinished.connect(self.enter_acq_bg_path) + self.ui.qbutton_browse_bg_path.clicked[bool].connect( + self.browse_acq_bg_path + ) 
+ self.ui.qbutton_acq_ret_ori.clicked[bool].connect(self.acq_ret_ori) + self.ui.qbutton_acq_phase_from_bf.clicked[bool].connect( + self.acq_phase_from_bf + ) + + self.ui.qbutton_acq_ret_ori_phase.clicked[bool].connect( + self.acq_ret_ori_phase + ) + + # hook to render overlay + # acquistion updates existing layers and moves them to the top which triggers this event + self.viewer.layers.events.moved.connect(self.handle_layers_updated) + self.viewer.layers.events.inserted.connect(self.handle_layers_updated) + + # Birefringence overlay controls + self.ui.retMaxSlider.sliderMoved[int].connect( + self.handle_ret_max_slider_move + ) + + # Reconstruction tab + self.ui.cb_phase_denoiser.currentIndexChanged[int].connect( + self.enter_phase_denoiser + ) + self.enter_phase_denoiser() + + ## Initialize logging + log_box = QtLogger(self.ui.te_log) + log_box.setFormatter(logging.Formatter("%(levelname)s - %(message)s")) + logging.getLogger().addHandler(log_box) + logging.getLogger().setLevel(logging.INFO) + + ## Initialize attributes + self.connected_to_mm = False + self.bridge = None + self.mm = None + self.mmc = None + self.calib = None + self.current_dir_path = str(Path.cwd()) + self.current_save_path = str(Path.cwd()) + self.current_bg_path = str(Path.cwd()) + self.directory = str(Path.cwd()) + self.calib_scheme = "4-State" + self.calib_mode = "MM-Retardance" + self.interp_method = "schnoor_fit" + self.config_group = "Channel" + self.calib_channels = [ + "State0", + "State1", + "State2", + "State3", + "State4", + ] + self.last_calib_meta_file = None + self.use_cropped_roi = False + self.bg_folder_name = "bg" + self.n_avg = 5 + self.intensity_monitor = [] + self.save_directory = str(Path.cwd()) + self.save_name = None + self.bg_option = "None" + self.acq_mode = "2D" + self.gpu_id = 0 + self.use_gpu = False + self.rotate_orientation = False + self.flip_orientation = False + self.invert_phase_contrast = False + self.pad_z = 0 + self.phase_reconstructor = None + 
self.acq_bg_directory = "" + self.auto_shutter = True + self.lca_dac = None + self.lcb_dac = None + self.pause_updates = False + self.method = "QLIPP" + self.mode = "3D" + self.calib_path = str(Path.cwd()) + self.data_dir = str(Path.cwd()) + self.config_path = str(Path.cwd()) + self.save_config_path = str(Path.cwd()) + self.colormap = "HSV" + self.use_full_volume = False + self.display_slice = 0 + self.last_p = 0 + self.reconstruction_data_path = None + self.reconstruction_data = None + self.calib_assessment_level = None + self.ret_max = 25 + waveorder_dir = dirname(dirname(dirname(os.path.abspath(__file__)))) + self.worker = None + + ## Initialize calibration plot + self.plot_item = self.ui.plot_widget.getPlotItem() + self.plot_item.enableAutoRange() + self.plot_item.setLabel("left", "Intensity") + self.ui.plot_widget.setBackground((32, 34, 40)) + self.plot_sequence = "Coarse" + + ## Initialize visuals + # Initialiaze GUI Images (plotting legends, waveorder logo) + jch_legend_path = os.path.join( + waveorder_dir, "docs/images/JCh_legend.png" + ) + hsv_legend_path = os.path.join( + waveorder_dir, "docs/images/HSV_legend.png" + ) + self.jch_pixmap = QPixmap(jch_legend_path) + self.hsv_pixmap = QPixmap(hsv_legend_path) + self.ui.label_orientation_image.setPixmap(self.hsv_pixmap) + logo_path = os.path.join( + waveorder_dir, "docs/images/waveorder_plugin_logo.png" + ) + logo_pixmap = QPixmap(logo_path) + self.ui.label_logo.setPixmap(logo_pixmap) + + # Hide UI elements for popups + # DAC mode popups + self.ui.label_lca.hide() + self.ui.label_lcb.hide() + self.ui.cb_lca.hide() + self.ui.cb_lcb.hide() + + # Background correction popups + self.ui.label_bg_path.setHidden(True) + self.ui.le_bg_path.setHidden(True) + self.ui.qbutton_browse_bg_path.setHidden(True) + + # Reconstruction parameter popups + self.ui.le_rho.setHidden(True) + self.ui.label_phase_rho.setHidden(True) + self.ui.le_itr.setHidden(True) + self.ui.label_itr.setHidden(True) + + # Hide temporarily unsupported 
"Overlay" functions + self.ui.tabWidget.setTabText( + self.ui.tabWidget.indexOf(self.ui.Display), "Visualization" + ) + self.ui.label_orientation_legend.setHidden(True) + self.ui.DisplayOptions.setHidden(True) + + # Set initial UI Properties + self.ui.label_extinction.setText("Extinction Ratio") + self.ui.le_mm_status.setStyleSheet( + "border: 1px solid rgb(200,0,0); color: rgb(200,0,0);" + ) + self.ui.te_log.setStyleSheet("background-color: rgb(32,34,40);") + self.ui.le_sat_min.setStyleSheet("background-color: rgba(0, 0, 0, 0);") + self.ui.le_sat_max.setStyleSheet("background-color: rgba(0, 0, 0, 0);") + self.ui.le_val_min.setStyleSheet("background-color: rgba(0, 0, 0, 0);") + self.ui.le_val_max.setStyleSheet("background-color: rgba(0, 0, 0, 0);") + self.setStyleSheet("QTabWidget::tab-bar {alignment: center;}") + self.red_text = QColor(200, 0, 0, 255) + + # Populate background correction GUI element + for i in range(3): + self.ui.cb_bg_method.removeItem(0) + bg_options = ["None", "Measured", "Estimated", "Measured + Estimated"] + tooltips = [ + "No background correction.", + 'Correct sample images with a background image acquired at an empty field of view, loaded from "Background Path".', + "Estimate sample background by fitting a 2D surface to the sample images. Works well when structures are spatially distributed across the field of view and a clear background is unavailable.", + 'Apply "Measured" background correction and then "Estimated" background correction. 
Use to remove residual background after the sample retardance is corrected with measured background.', + ] + for i, bg_option in enumerate(bg_options): + wrapped_tooltip = "\n".join(textwrap.wrap(tooltips[i], width=70)) + self.ui.cb_bg_method.addItem(bg_option) + self.ui.cb_bg_method.setItemData( + i, wrapped_tooltip, Qt.ToolTipRole + ) + + # Populate calibration modes from docstring + cal_docs = NumpyDocString( + Calibration.QLIPP_Calibration.__init__.__doc__ + ) + mode_docs = " ".join(cal_docs["Parameters"][3].desc).split("* ")[1:] + for i, mode_doc in enumerate(mode_docs): + mode_name, mode_tooltip = mode_doc.split(": ") + wrapped_tooltip = "\n".join(textwrap.wrap(mode_tooltip, width=70)) + self.ui.cb_calib_mode.addItem(mode_name) + self.ui.cb_calib_mode.setItemData( + i, wrapped_tooltip, Qt.ToolTipRole + ) + + # Populate acquisition mode tooltips + acq_tooltips = [ + "Acquires data to estimate parameters in a 2D plane. For birefringence acquisitions, this mode will acquire 2D data. For phase acquisitions, this mode will acquire 3D data.", + "Acquires 3D data to estimate parameters in a 3D volume.", + ] + for i, tooltip in enumerate(acq_tooltips): + wrapped_tooltip = "\n".join(textwrap.wrap(tooltip, width=70)) + self.ui.cb_acq_mode.setItemData(i, wrapped_tooltip, Qt.ToolTipRole) + + # make sure the top says waveorder and not 'Form' + self.ui.tabWidget.parent().setObjectName("waveorder") + + ## Set GUI behaviors + # set focus to "Plot" tab by default + self.ui.tabWidget_2.setCurrentIndex(0) + + # disable wheel events for combo boxes + for attr_name in dir(self.ui): + if "cb_" in attr_name: + attr = getattr(self.ui, attr_name) + attr.wheelEvent = lambda event: None + + # Display GUI using maximum resolution + self.showMaximized() + + def _demote_slider_offline(self, ui_slider, range_): + """ + This function converts a promoted superqt.QRangeSlider to a QSlider element + + Parameters + ---------- + ui_slider (superqt.QRangeSlider) QSlider UI element to demote + 
def _promote_slider_offline(self, ui_slider, range_):
    """
    Replace a plain QSlider in gridLayout_26 with a promoted superqt
    QRangeSlider that keeps the original widget's parent, object name,
    size policy, and grid position.

    Parameters
    ----------
    ui_slider (QT.Slider) QSlider UI element to replace
    range_ (tuple) initial range to set for the slider

    Returns
    -------

    """
    size_policy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
    size_policy.setHorizontalStretch(0)
    size_policy.setVerticalStretch(0)

    # Record where the plain slider lives so the replacement can take its place.
    layout = self.ui.gridLayout_26
    grid_position = layout.getItemPosition(layout.indexOf(ui_slider))
    parent_name = ui_slider.parent().objectName()
    widget_name = ui_slider.objectName()

    # Remove the plain slider from the UI.
    layout.removeWidget(ui_slider)

    # Add back a range slider with the same properties.
    ui_slider = QRangeSlider(getattr(self.ui, parent_name))
    size_policy.setHeightForWidth(
        ui_slider.sizePolicy().hasHeightForWidth()
    )
    ui_slider.setSizePolicy(size_policy)
    ui_slider.setOrientation(Qt.Horizontal)
    ui_slider.setObjectName(widget_name)
    layout.addWidget(ui_slider, *grid_position)
    ui_slider.setRange(range_[0], range_[1])
saturation_slider_position[2], + saturation_slider_position[3], + ) + self.ui.slider_saturation.setRange(0, 100) + + self.ui.slider_value = QDoubleRangeSlider( + getattr(self.ui, value_slider_parent) + ) + sizePolicy.setHeightForWidth( + self.ui.slider_value.sizePolicy().hasHeightForWidth() + ) + self.ui.slider_value.setSizePolicy(sizePolicy) + self.ui.slider_value.setOrientation(Qt.Horizontal) + self.ui.slider_value.setObjectName("slider_value") + self.ui.gridLayout_17.addWidget( + self.ui.slider_value, + value_slider_position[0], + value_slider_position[1], + value_slider_position[2], + value_slider_position[3], + ) + self.ui.slider_value.setRange(0, 100) + + def _set_buttons_enabled(self, val): + """ + enables/disables buttons that require a connection to MM + """ + action_buttons = [ + self.ui.qbutton_calibrate, + self.ui.qbutton_capture_bg, + self.ui.qbutton_calc_extinction, + self.ui.qbutton_acq_ret_ori, + self.ui.qbutton_acq_phase_from_bf, + self.ui.qbutton_acq_ret_ori_phase, + self.ui.qbutton_load_calib, + self.ui.qbutton_create_overlay, + ] + + for action_button in action_buttons: + action_button.setEnabled(val) + if val: + action_button.setToolTip("") + action_button.setStyleSheet(self.disabled_button_style) + else: + action_button.setToolTip( + "Action temporarily disabled. Connect to MM or wait for acquisition to finish." 
def _enable_buttons(self):
    """Enable every action button that requires a Micro-Manager connection."""
    self._set_buttons_enabled(True)

def _disable_buttons(self):
    """Disable every action button that requires a Micro-Manager connection."""
    self._set_buttons_enabled(False)

def _handle_error(self, exc):
    """
    Report a calibration error in the GUI, restore Micro-Manager to its
    state prior to the start of calibration, and re-raise the error.

    Parameters
    ----------
    exc: (Error) Propagated error to display

    Returns
    -------

    """
    # Surface the failure in the assessment box.
    self.ui.tb_calib_assessment.setText(f"Error: {str(exc)}")
    self.ui.tb_calib_assessment.setStyleSheet(
        "border: 1px solid rgb(200,0,0);"
    )

    # Reset ROI if it was cropped down during reconstruction
    if self.use_cropped_roi:
        self.mmc.clearROI()

    # Reset the autoshutter setting if errored during blacklevel calculation
    self.mmc.setAutoShutter(self.auto_shutter)

    # Reset the progress bar to 0
    self.ui.progress_bar.setValue(0)

    # Propagate so callers still see the original failure.
    raise exc

def _handle_calib_abort(self):
    """Restore ROI, autoshutter, and progress bar after a user abort."""
    if self.use_cropped_roi:
        self.mmc.clearROI()
    self.mmc.setAutoShutter(self.auto_shutter)
    self.ui.progress_bar.setValue(0)

def _handle_acq_error(self, exc):
    """Re-raise an acquisition error propagated from a worker."""
    raise exc

def _handle_load_finished(self):
    """
    Update the calibration assessment box after the user successfully loads
    a previous calibration metadata file.

    Returns
    -------

    """
    self.ui.tb_calib_assessment.setText(
        "Previous calibration successfully loaded"
    )
    self.ui.tb_calib_assessment.setStyleSheet("border: 1px solid green;")
    self.ui.progress_bar.setValue(100)

def _update_calib(self, val):
    """Store the calibration object emitted by the calibration worker."""
    self.calib = val
Will place a red border + around the line edit if it is empty, otherwise it will remove the red border. + + Parameters + ---------- + name: (str) name of the LineEdit element as specified in QT Designer file. + + Returns + ------- + + """ + le = getattr(self.ui, f"le_{name}") + text = le.text() + + if text == "": + le.setStyleSheet("border: 1px solid rgb(200,0,0);") + return False + else: + le.setStyleSheet("") + return True + + def _check_requirements_for_acq(self, mode): + """ + This function will loop through the parameters from a specific acquisition and make sure the user has + specified the necessary parameters. If it finds an empty or missing parameters, it will set missing fields red + and stop the acquisition process. + + Parameters + ---------- + mode: (str) 'birefringence' or 'phase' which denotes the type of acquisition + + Returns + ------- + + """ + # check if a QLIPP_Calibration object has been initialized + if mode != "phase" and not self.calib: + raise RuntimeError("Please run or load calibration first.") + + # initialize the variable to keep track of the success of the requirement check + raise_error = False + + # define the fields required for the specific acquisition modes. Matches LineEdit object names + phase_required = { + "recon_wavelength", + "wavelength", + "mag", + "cond_na", + "obj_na", + "n_media", + "phase_strength", + "ps", + "zstep", + } + + # Initalize all fields in their default style (not red). 
@Slot(bool)
def toggle_mm_connection(self):
    """
    Toggles the Micro-Manager connection and updates the corresponding GUI
    elements (status line, connect button, config-group list, action buttons).
    """
    if self.connected_to_mm:
        self.ui.qbutton_connect_to_mm.setText("Connect to MM")
        self.ui.le_mm_status.setText("Disconnected")
        self.ui.le_mm_status.setStyleSheet(
            "border: 1px solid rgb(200,0,0); color: rgb(200,0,0);"
        )
        self.connected_to_mm = False
        self._set_buttons_enabled(False)
        self.ui.cb_config_group.clear()

    else:
        try:
            self.connect_to_mm()
            self.ui.qbutton_connect_to_mm.setText("Disconnect from MM")
            self.ui.le_mm_status.setText("Connected")
            self.ui.le_mm_status.setStyleSheet(
                "border: 1px solid green; color: green;"
            )
            self.connected_to_mm = True
            self._set_buttons_enabled(True)
        # FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        except Exception:
            self.ui.le_mm_status.setText("Failed")
            self.ui.le_mm_status.setStyleSheet(
                "border: 1px solid yellow; color: yellow;"
            )

@Slot(bool)
def connect_to_mm(self):
    """
    Establishes the python/java bridge to Micro-Manager. Micro-Manager must be open with a config loaded
    in order for the connection to be successful. On connection, it will populate all of the available config
    groups. Config group choice is used to establish which config group the Polarization states live in.

    Returns
    -------

    """
    RECOMMENDED_MM = "20230426"
    ZMQ_TARGET_VERSION = "4.2.0"
    try:
        self.mmc = Core(convert_camel_case=False)
        # Check it works
        self.mmc.getAvailableConfigGroups()
        self.mm = Studio(convert_camel_case=False)
        # Order is important: If the bridge is created before Core, Core will not work
        self.bridge = zmq_bridge._bridge._Bridge()
        logging.debug("Established ZMQ Bridge and found Core and Studio")
    except NameError:
        print("Is pycromanager package installed?")
    except Exception as ex:
        print(
            "Could not establish pycromanager bridge.\n"
            "Is Micro-Manager open?\n"
            "Is Tools > Options > Run server on port 4827 checked?\n"
            f"Are you using nightly build {RECOMMENDED_MM}?\n"
        )
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ", ".join(ex.args))
        print(message)
        raise EnvironmentError(
            "Could not establish pycromanager bridge.\n"
            "Is Micro-Manager open?\n"
            "Is Tools > Options > Run server on port 4827 checked?\n"
            f"Are you using nightly build {RECOMMENDED_MM}?"
        )

    # Warn the user if there is a Micro-Manager/ZMQ version mismatch
    # NS: Not quite sure what this is good for, we already know the Core works
    # This code uses undocumented PycroManager features, so may well break in the future
    self.bridge._main_socket.send({"command": "connect", "debug": False})
    reply_json = self.bridge._main_socket.receive(timeout=500)
    zmq_mm_version = reply_json["version"]
    if zmq_mm_version != ZMQ_TARGET_VERSION:
        upgrade_str = (
            "upgrade"
            if version.parse(zmq_mm_version)
            < version.parse(ZMQ_TARGET_VERSION)
            else "downgrade"
        )
        logging.warning(
            (
                "This version of Micro-Manager has not been tested with waveorder.\n"
                f"Please {upgrade_str} to Micro-Manager nightly build {RECOMMENDED_MM}."
            )
        )

    logging.debug("Confirmed correct ZMQ bridge----")

    # Find config group containing calibration channels
    # calib_channels is typically ['State0', 'State1', 'State2', ...]
    # config_list may be something like ['GFP', 'RFP', 'State0', 'State1', 'State2', ...]
    # config_list may also be of the form ['GFP', 'RFP', 'LF-State0', 'LF-State1', 'LF-State2', ...]
    # in this version of the code we correctly parse 'LF-State0', but these channels cannot be used
    # by the Calibration class.
    # A valid config group contains all channels in calib_channels
    groups = self.mmc.getAvailableConfigGroups()
    config_group_found = False
    logging.debug("Checking MM config group")
    for i in range(groups.size()):
        group = groups.get(i)
        configs = self.mmc.getAvailableConfigs(group)
        config_list = []
        for j in range(configs.size()):
            config_list.append(configs.get(j))
        if np.all(
            [
                np.any([ch in config for config in config_list])
                for ch in self.calib_channels
            ]
        ):
            if not config_group_found:
                self.config_group = (
                    group  # set to first config group found
                )
                config_group_found = True
            self.ui.cb_config_group.addItem(group)

        # Populate the acquisition "BF channel" list with presets that contain any of these keywords
        for ch in config_list:
            if any(
                [
                    keyword.lower() in ch.lower()
                    for keyword in self.bf_keywords
                ]
            ):
                self.ui.cb_acq_channel.addItem(ch)
                self.bf_channel_found = True

    logging.debug("Checked configs.")
    if not config_group_found:
        msg = (
            f"No config group contains channels {self.calib_channels}. "
            "Please refer to the waveorder docs on how to set up the config properly."
        )
        self.ui.cb_config_group.setStyleSheet(
            "border: 1px solid rgb(200,0,0);"
        )
        raise KeyError(msg)

    if not self.bf_channel_found:
        # Without a brightfield preset, phase-from-BF acquisition is unavailable.
        try:
            self.ui.qbutton_acq_phase_from_bf.disconnect()
        except Exception as exc:
            print(exc.args)
            logging.debug(exc.args)
        self.ui.qbutton_acq_phase_from_bf.setStyleSheet(
            self.disabled_button_style
        )
        self.ui.qbutton_acq_phase_from_bf.setToolTip(self.no_bf_msg)
        self.ui.cb_acq_channel.setToolTip(self.no_bf_msg)

    # set startup LC control mode
    logging.debug("Setting startup LC control mode...")
    _devices = self.mmc.getLoadedDevices()
    loaded_devices = [_devices.get(i) for i in range(_devices.size())]
    if LC_DEVICE_NAME in loaded_devices:
        config_desc = self.mmc.getConfigData(
            "Channel", "State0"
        ).getVerbose()
        if "String send to" in config_desc:
            self.calib_mode = "MM-Retardance"
            self.ui.cb_calib_mode.setCurrentIndex(0)
        # FIX: this was a second independent `if`, so a "String send to"
        # config fell through to the `else` and got clobbered to "DAC".
        elif "Voltage (V)" in config_desc:
            self.calib_mode = "MM-Voltage"
            self.ui.cb_calib_mode.setCurrentIndex(1)
        else:
            self.calib_mode = "DAC"
            self.ui.cb_calib_mode.setCurrentIndex(2)

    logging.debug("Finished connecting to MM.")

@Slot(tuple)
def handle_progress_update(self, value):
    """Update the progress bar and its label from a (percent, text) tuple."""
    self.ui.progress_bar.setValue(value[0])
    self.ui.label_progress.setText("Progress: " + value[1])

@Slot(str)
def handle_extinction_update(self, value):
    """Display the extinction value computed by the calibration worker."""
    self.ui.le_extinction.setText(value)
@Slot(tuple)
def handle_lc_states_emit(self, value: tuple[tuple, dict[str, list]]):
    """Receive and plot polarization state and calibrated LC retardance values from the calibration worker.

    Parameters
    ----------
    value : tuple[tuple, dict[str, list]]
        2-tuple consisting of a tuple of polarization state names and a dictionary of LC retardance values.
    """
    pol_states, lc_values = value

    # Calculate circle of radius `swing` centered on the extinction state.
    theta = np.linspace(0, 2 * np.pi, 100)
    x_circ = self.swing * np.cos(theta) + lc_values["LCA"][0]
    y_circ = self.swing * np.sin(theta) + lc_values["LCB"][0]

    import matplotlib.pyplot as plt

    plt.close("all")
    # FIX: the original `with plt.rc_context({...}) and plt.ion():` entered
    # only ONE context manager (`A and B` evaluates to a single operand), so
    # the spine rc settings were never applied. Enter both managers.
    with plt.rc_context(
        {
            "axes.spines.right": False,
            "axes.spines.top": False,
        }
    ), plt.ion():
        plt.figure("Calibrated LC States")
        plt.scatter(lc_values["LCA"], lc_values["LCB"], c="r")
        plt.plot(x_circ, y_circ, "k--", alpha=0.25)
        plt.axis("equal")
        plt.xlabel("LCA retardance")
        plt.ylabel("LCB retardance")
        # Label each point with its polarization state name.
        for i, pol in enumerate(pol_states):
            plt.annotate(
                pol,
                xy=(lc_values["LCA"][i], lc_values["LCB"][i]),
                xycoords="data",
                xytext=(10, 10),  # annotation offset
                textcoords="offset points",
            )
@Slot(tuple)
def handle_bg_image_update(self, value):
    """Display the raw background stack captured by the background worker."""
    image, scale = value
    self._add_or_update_image_layer(image, "Raw Background", scale=scale)

@Slot(tuple)
def handle_bg_bire_image_update(self, value):
    """Display background retardance and orientation images from the worker."""
    images, scale = value
    for image, layer_name, cmap in (
        (images[0], "Retardance Background", "gray"),
        (images[1], "Orientation Background", "hsv"),
    ):
        self._add_or_update_image_layer(
            image, layer_name, cmap=cmap, scale=scale
        )
+ """ + + layers: LayerList = event.source + # if the first channel starts with "Orientation" + if layers[-1].name.startswith("Orientation"): + orientation_name = layers[-1].name + suffix = orientation_name.replace("Orientation", "") + retardance_name = "Retardance" + suffix + overlay_name = "Birefringence Overlay" + suffix + # if the matching retardance layer is present, generate an overlay + if retardance_name in layers: + logging.info( + "Detected updated birefringence layers: " + f"'{retardance_name}', '{orientation_name}'" + ) + self._draw_bire_overlay( + retardance_name, + orientation_name, + overlay_name, + scale=layers[-1].scale, + ) + + # always display layers that start with "Orientation" in hsv + logging.info( + "Detected orientation layer in updated layer list." + "Setting its colormap to HSV." + ) + self.viewer.layers[orientation_name].colormap = "hsv" + + def _draw_bire_overlay( + self, + retardance_name: str, + orientation_name: str, + overlay_name: str, + scale: tuple, + ): + def _layer_data(name: str): + data = self.viewer.layers[name].data + if isinstance(data, da.Array): + # the ome-zarr reader will read HCS plates/wells as nested dask graph + # which will contain 'get_tile' or 'get_field' in its graph + # this object will remain a dask `Array` after calling `compute()` + if any([("get_" in k) for k in data.dask.keys()]): + data: da.Array = data.compute() + else: + chunks = (data.ndim - 2) * (1,) + data.shape[ + -2: + ] # needs to match + data = da.from_array(data, chunks=chunks) + return data + + self.overlay_scale = scale + self.overlay_name = overlay_name + self.overlay_retardance = _layer_data(retardance_name) + self.overlay_orientation = _layer_data(orientation_name) + self.update_overlay_dask_array() + + def update_overlay_dask_array(self): + self.rgb_chunks = ( + (3,) + + (self.overlay_retardance.ndim - 2) * (1,) + + self.overlay_retardance.shape[-2:] + ) + overlay = da.map_blocks( + ret_ori_overlay, + np.stack((self.overlay_retardance, 
@Slot(tuple)
def handle_phase_image_update(self, value):
    """
    Display a reconstructed phase image and register "Phase" as a choice in
    the overlay saturation/value channel combo boxes.

    Parameters
    ----------
    value : tuple
        (phase image array, layer scale tuple) emitted by the worker.
    """
    phase, scale = value
    name = "Phase2D" if self.acq_mode == "2D" else "Phase3D"

    # Add new layer if none exists, otherwise update layer data
    self._add_or_update_image_layer(phase, name, scale=scale)

    # FIX: these guards checked for "Phase" but added "Retardance" — a
    # copy-paste bug that re-added "Retardance" on every phase update and
    # never offered "Phase". Add the item the guard checks for.
    if "Phase" not in [
        self.ui.cb_saturation.itemText(i)
        for i in range(self.ui.cb_saturation.count())
    ]:
        self.ui.cb_saturation.addItem("Phase")
    if "Phase" not in [
        self.ui.cb_value.itemText(i)
        for i in range(self.ui.cb_value.count())
    ]:
        self.ui.cb_value.addItem("Phase")
@Slot(bool)
def browse_dir_path(self):
    """Pick a data directory and mirror it into the save-directory field."""
    chosen = self._open_file_dialog(self.current_dir_path, "dir")
    self.directory = chosen
    self.current_dir_path = chosen
    self.ui.le_directory.setText(chosen)
    self.ui.le_save_dir.setText(chosen)
    self.save_directory = chosen

@Slot(bool)
def browse_save_path(self):
    """Pick the directory that reconstructions are saved to."""
    chosen = self._open_file_dialog(self.current_save_path, "dir")
    self.save_directory = chosen
    self.current_save_path = chosen
    self.ui.le_save_dir.setText(chosen)

@Slot(bool)
def browse_data_dir(self):
    """Pick the input data directory for offline reconstruction."""
    chosen = self._open_file_dialog(self.data_dir, "dir")
    self.data_dir = chosen
    self.ui.le_data_dir.setText(self.data_dir)
@Slot()
def enter_calib_mode(self):
    """
    Sync the LC control mode with the calibration-mode combo box. DAC mode
    additionally scans the State0 config for TS_DAC devices and populates
    the LCA/LCB DAC selector combo boxes.
    """
    lc_widgets = (
        self.ui.label_lca,
        self.ui.label_lcb,
        self.ui.cb_lca,
        self.ui.cb_lcb,
    )
    index = self.ui.cb_calib_mode.currentIndex()
    if index in (0, 1):
        # MM-driven modes need no per-LC DAC selection.
        self.calib_mode = "MM-Retardance" if index == 0 else "MM-Voltage"
        for widget in lc_widgets:
            widget.hide()
    elif index == 2:
        self.calib_mode = "DAC"
        self.ui.cb_lca.clear()
        self.ui.cb_lcb.clear()
        self.ui.cb_lca.show()
        self.ui.cb_lcb.show()
        self.ui.label_lca.show()
        self.ui.label_lcb.show()

        cfg = self.mmc.getConfigData(self.config_group, "State0")

        # Offer each available TS_DAC output once so the user can specify
        # which DAC output corresponds to which LC for voltage-space
        # calibration.
        seen = set()
        for i in range(cfg.size()):
            device_label = cfg.getSetting(i).getDeviceLabel()
            if "TS_DAC" not in device_label:
                continue
            dac = device_label[-2:]
            if dac in seen:
                continue
            self.ui.cb_lca.addItem("DAC" + dac)
            self.ui.cb_lcb.addItem("DAC" + dac)
            seen.add(dac)
        self.ui.cb_lca.setCurrentIndex(0)
        self.ui.cb_lcb.setCurrentIndex(1)
@Slot()
def enter_phase_denoiser(self):
    """Set the phase regularizer and show TV-only widgets when TV is chosen."""
    tv_widgets = (
        self.ui.label_itr,
        self.ui.label_phase_rho,
        self.ui.le_rho,
        self.ui.le_itr,
    )
    state = self.ui.cb_phase_denoiser.currentIndex()
    if state == 0:
        self.phase_regularizer = "Tikhonov"
        for widget in tv_widgets:
            widget.setHidden(True)
    elif state == 1:
        self.phase_regularizer = "TV"
        for widget in tv_widgets:
            widget.setHidden(False)
Does Not Exist") + + @Slot(Path) + def handle_bg_path_update(self, value: Path): + """ + Handles the update of the most recent background folderpath from + BackgroundWorker to display in the reconstruction texbox. + + Parameters + ---------- + value : str + most recent captured background folderpath + """ + path = value + if path.exists(): + self.acq_bg_directory = path + self.current_bg_path = path + self.ui.le_bg_path.setText(str(path)) + else: + msg = """ + Background acquisition was not successful. + Check latest background capture saving directory! + """ + raise RuntimeError(msg) + + @Slot(bool) + def browse_acq_bg_path(self): + result = self._open_file_dialog(self.current_bg_path, "dir") + self.acq_bg_directory = result + self.current_bg_path = result + self.ui.le_bg_path.setText(result) + + @Slot() + def enter_bg_correction(self): + state = self.ui.cb_bg_method.currentIndex() + if state == 0: + self.ui.label_bg_path.setHidden(True) + self.ui.le_bg_path.setHidden(True) + self.ui.qbutton_browse_bg_path.setHidden(True) + self.bg_option = "None" + elif state == 1: + self.ui.label_bg_path.setHidden(False) + self.ui.le_bg_path.setHidden(False) + self.ui.qbutton_browse_bg_path.setHidden(False) + self.bg_option = "Measured" + elif state == 2: + self.ui.label_bg_path.setHidden(True) + self.ui.le_bg_path.setHidden(True) + self.ui.qbutton_browse_bg_path.setHidden(True) + self.bg_option = "Estimated" + elif state == 3: + self.ui.label_bg_path.setHidden(False) + self.ui.le_bg_path.setHidden(False) + self.ui.qbutton_browse_bg_path.setHidden(False) + self.bg_option = "Measured + Estimated" + + @Slot() + def enter_gpu_id(self): + self.gpu_id = int(self.ui.le_gpu_id.text()) + + @Slot() + def enter_use_gpu(self): + state = self.ui.chb_use_gpu.checkState().value + if state == 2: + self.use_gpu = True + elif state == 0: + self.use_gpu = False + + @Slot() + def enter_rotate_orientation(self): + state = self.ui.cb_rotate_orientation.checkState().value + if state == 2: + 
@Slot()
def enter_pause_updates(self):
    """
    pauses the updating of the dimension slider for offline reconstruction
    or live listening mode.

    Returns
    -------

    """
    # FIX: sibling checkbox handlers read `.checkState().value`; without
    # `.value` the Qt6 CheckState enum never compares equal to the raw
    # ints 2/0, so the pause flag could never change.
    state = self.ui.chb_pause_updates.checkState().value
    if state == 2:
        self.pause_updates = True
    elif state == 0:
        self.pause_updates = False
+ + Returns + ------- + + """ + + idx = self.ui.cb_method.currentIndex() + + if idx == 0: + self.method = "QLIPP" + self.ui.label_bf_chan.hide() + self.ui.le_bf_chan.hide() + self.ui.label_chan_desc.setText( + "Retardance, Orientation, BF, Phase3D, Phase2D, S0, S1, S2, S3" + ) + + elif idx == 1: + self.method = "PhaseFromBF" + self.ui.label_bf_chan.show() + self.ui.le_bf_chan.show() + self.ui.label_bf_chan.setText("Brightfield Channel Index") + self.ui.le_bf_chan.setPlaceholderText("int") + self.ui.label_chan_desc.setText("Phase3D, Phase2D") + + @Slot(int) + def enter_mode(self): + idx = self.ui.cb_mode.currentIndex() + + if idx == 0: + self.mode = "3D" + self.ui.label_focus_zidx.hide() + self.ui.le_focus_zidx.hide() + else: + self.mode = "2D" + self.ui.label_focus_zidx.show() + self.ui.le_focus_zidx.show() + + @Slot() + def enter_data_dir(self): + entry = self.ui.le_data_dir.text() + if not os.path.exists(entry): + self.ui.le_data_dir.setStyleSheet( + "border: 1px solid rgb(200,0,0);" + ) + self.ui.le_data_dir.setText("Path Does Not Exist") + else: + self.ui.le_data_dir.setStyleSheet("") + self.data_dir = entry + + @Slot() + def enter_calib_meta(self): + entry = self.ui.le_calibration_metadata.text() + if not os.path.exists(entry): + self.ui.le_calibration_metadata.setStyleSheet( + "border: 1px solid rgb(200,0,0);" + ) + self.ui.le_calibration_metadata.setText("Path Does Not Exist") + else: + self.ui.le_calibration_metadata.setStyleSheet("") + self.calib_path = entry + + @Slot(bool) + def push_note(self): + """ + Pushes a note to the last calibration metadata file. 
+ + Returns + ------- + + """ + + # make sure the user has performed a calibration in this session (or loaded a previous one) + if not self.last_calib_meta_file: + raise ValueError( + "No calibration has been performed yet so there is no previous metadata file" + ) + else: + note = self.ui.le_notes_field.text() + + # Open the existing calibration metadata file and append the notes + with open(self.last_calib_meta_file, "r") as file: + current_json = json.load(file) + + # Append note to the end of the old note (so we don't overwrite previous notes) or write a new + # note in the blank notes field + old_note = current_json["Notes"] + if old_note is None or old_note == "" or old_note == note: + current_json["Notes"] = note + else: + current_json["Notes"] = old_note + ", " + note + + # dump the contents into the metadata file + with open(self.last_calib_meta_file, "w") as file: + json.dump(current_json, file, indent=1) + + @Slot(bool) + def calc_extinction(self): + """ + Calculates the extinction when the user uses the Load Calibration functionality. This if performed + because the calibration file could be loaded in a different FOV which may require recalibration + depending on the extinction quality. 
+ + Returns + ------- + + """ + + # Snap images from the extinction state and first elliptical state + set_lc_state(self.mmc, self.config_group, "State0") + extinction = snap_and_average(self.calib.snap_manager) + set_lc_state(self.mmc, self.config_group, "State1") + state1 = snap_and_average(self.calib.snap_manager) + + # Calculate extinction based off captured intensities + extinction = self.calib.calculate_extinction( + self.swing, self.calib.I_Black, extinction, state1 + ) + self.ui.le_extinction.setText(str(extinction)) + + @Slot(bool) + def load_calibration(self): + """ + Uses previous JSON calibration metadata to load previous calibration + """ + + metadata_path = self._open_file_dialog(self.current_dir_path, "file") + metadata = MetadataReader(metadata_path) + + # Update Properties + self.wavelength = metadata.Wavelength + self.swing = metadata.Swing + + # Initialize calibration class + self.calib = QLIPP_Calibration( + self.mmc, + self.mm, + group=self.config_group, + lc_control_mode=self.calib_mode, + interp_method=self.interp_method, + wavelength=self.wavelength, + ) + self.calib.swing = self.swing + self.ui.le_swing.setText(str(self.swing)) + self.calib.wavelength = self.wavelength + self.ui.le_wavelength.setText(str(self.wavelength)) + + # Update Calibration Scheme Combo Box + if metadata.Calibration_scheme == "4-State": + self.ui.cb_calib_scheme.setCurrentIndex(0) + else: + self.ui.cb_calib_scheme.setCurrentIndex(1) + + self.last_calib_meta_file = metadata_path + + # Move the load calibration function to a separate thread + self.worker = load_calibration(self.calib, metadata) + + def update_extinction(extinction): + self.calib.extinction_ratio = float(extinction) + + # FIXME: for 1.0.0 we'd like to avoid MM call in the main thread + # Make sure Live Mode is off + if self.calib.snap_manager.getIsLiveModeOn(): + self.calib.snap_manager.setLiveModeOn(False) + + # initialize worker properties for multi-threading + 
self.ui.qbutton_stop_calib.clicked.connect(self.worker.quit) + self.worker.yielded.connect(self.ui.le_extinction.setText) + self.worker.yielded.connect(update_extinction) + self.worker.returned.connect(self._update_calib) + self.worker.errored.connect(self._handle_error) + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.finished.connect(self._handle_load_finished) + self.worker.start() + + @Slot(bool) + def run_calibration(self): + """ + Wrapper function to create calibration worker and move that worker to a thread. + Calibration is then executed by the calibration worker + """ + + self._check_MM_config_setup() + + self.calib = QLIPP_Calibration( + self.mmc, + self.mm, + group=self.config_group, + lc_control_mode=self.calib_mode, + interp_method=self.interp_method, + wavelength=self.wavelength, + ) + + if self.calib_mode == "DAC": + self.calib.set_dacs(self.lca_dac, self.lcb_dac) + + # Reset Styling + self.ui.tb_calib_assessment.setText("") + self.ui.tb_calib_assessment.setStyleSheet("") + + # Save initial autoshutter state for when we set it back later + self.auto_shutter = self.mmc.getAutoShutter() + + logging.info("Starting Calibration") + + # Initialize displays + parameters for calibration + self.ui.progress_bar.setValue(0) + self.plot_item.clear() + self.intensity_monitor = [] + self.calib.swing = self.swing + self.calib.wavelength = self.wavelength + self.calib.meta_file = os.path.join( + self.directory, "polarization_calibration.txt" + ) + + # FIXME: for 1.0.0 we'd like to avoid MM call in the main thread + # Make sure Live Mode is off + if self.calib.snap_manager.getIsLiveModeOn(): + self.calib.snap_manager.setLiveModeOn(False) + + # Init Worker and Thread + self.worker = CalibrationWorker(self, self.calib) + + # Connect Handlers + self.worker.progress_update.connect(self.handle_progress_update) + self.worker.extinction_update.connect(self.handle_extinction_update) + 
self.worker.intensity_update.connect(self.handle_plot_update) + self.worker.calib_assessment.connect( + self.handle_calibration_assessment_update + ) + self.worker.calib_assessment_msg.connect( + self.handle_calibration_assessment_msg_update + ) + self.worker.calib_file_emit.connect(self.handle_calib_file_update) + self.worker.plot_sequence_emit.connect( + self.handle_plot_sequence_update + ) + self.worker.lc_states.connect(self.handle_lc_states_emit) + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.errored.connect(self._handle_error) + self.ui.qbutton_stop_calib.clicked.connect(self.worker.quit) + + self.worker.start() + + @property + def _channel_descriptions(self): + return [ + self.mmc.getConfigData( + self.config_group, calib_channel + ).getVerbose() + for calib_channel in self.calib_channels + ] + + def _check_MM_config_setup(self): + # Warns the user if the MM configuration is not correctly set up. + desc = self._channel_descriptions + if self.calib_mode == "MM-Retardance": + if all("String send to" in s for s in desc) and not any( + "Voltage (V)" in s for s in desc + ): + return + else: + msg = " \n".join( + textwrap.wrap( + "In 'MM-Retardance' mode each preset must include the " + "'String send to' property, and no 'Voltage' properties.", + width=40, + ) + ) + show_warning(msg) + + elif self.calib_mode == "MM-Voltage": + if ( + all("Voltage (V) LC-A" in s for s in desc) + and all("Voltage (V) LC-B" in s for s in desc) + and not any("String send to" in s for s in desc) + ): + return + else: + msg = " \n".join( + textwrap.wrap( + "In 'MM-Voltage' mode each preset must include the 'Voltage (V) LC-A' " + "property, the 'Voltage (V) LC-B' property, and no 'String send to' properties.", + width=40, + ) + ) + show_warning(msg) + + elif self.calib_mode == "DAC": + _devices = self.mmc.getLoadedDevices() + loaded_devices = [_devices.get(i) for i in range(_devices.size())] + if LC_DEVICE_NAME in 
loaded_devices: + show_warning( + "In 'DAC' mode the MeadowLarkLC device adapter must not be loaded in MM." + ) + + else: + raise ValueError( + f"self.calib_mode = {self.calib_mode} is an unrecognized state." + ) + + @Slot(bool) + def capture_bg(self): + """ + Wrapper function to capture a set of background images. Will snap images and display reconstructed + birefringence. Check connected handlers for napari display. + + Returns + ------- + + """ + + if self.calib is None: + no_calibration_message = """Capturing a background requires calibrated liquid crystals. \ + Please either run a calibration or load a calibration from file.""" + raise RuntimeError(no_calibration_message) + + # Init worker and thread + self.worker = BackgroundCaptureWorker(self, self.calib) + + # Connect Handlers + self.worker.bg_image_emitter.connect(self.handle_bg_image_update) + self.worker.bire_image_emitter.connect( + self.handle_bg_bire_image_update + ) + + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.errored.connect(self._handle_error) + self.ui.qbutton_stop_calib.clicked.connect(self.worker.quit) + self.worker.aborted.connect(self._handle_calib_abort) + + # Connect to BG Correction Path + self.worker.bg_path_update_emitter.connect(self.handle_bg_path_update) + + # Start Capture Background Thread + self.worker.start() + + @Slot(bool) + def acq_ret_ori(self): + """ + Wrapper function to acquire birefringence stack/image and plot in napari + Returns + ------- + + """ + + self._check_requirements_for_acq("birefringence") + + # Init Worker and thread + self.worker = PolarizationAcquisitionWorker( + self, self.calib, "birefringence" + ) + + # Connect Handlers + self.worker.bire_image_emitter.connect(self.handle_bire_image_update) + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.errored.connect(self._handle_acq_error) + + # Start Thread + 
self.worker.start() + + @Slot(bool) + def acq_phase_from_bf(self): + """ + Wrapper function to acquire phase stack and plot in napari + """ + + self._check_requirements_for_acq("phase") + + # Init worker and thread + self.worker = BFAcquisitionWorker(self) + + # Connect Handlers + self.worker.phase_image_emitter.connect(self.handle_phase_image_update) + self.worker.phase_reconstructor_emitter.connect( + self.handle_qlipp_reconstructor_update + ) + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.errored.connect(self._handle_acq_error) + + # Start thread + self.worker.start() + + @Slot(bool) + def acq_ret_ori_phase(self): + """ + Wrapper function to acquire both birefringence and phase stack and plot in napari + """ + + self._check_requirements_for_acq("phase") + + # Init worker and thread + self.worker = PolarizationAcquisitionWorker(self, self.calib, "all") + + # connect handlers + self.worker.phase_image_emitter.connect(self.handle_phase_image_update) + self.worker.phase_reconstructor_emitter.connect( + self.handle_qlipp_reconstructor_update + ) + self.worker.bire_image_emitter.connect(self.handle_bire_image_update) + self.worker.started.connect(self._disable_buttons) + self.worker.finished.connect(self._enable_buttons) + self.worker.errored.connect(self._handle_acq_error) + self.ui.qbutton_stop_acq.clicked.connect(self.worker.quit) + + # Start Thread + self.worker.start() + + @Slot(bool) + def save_config(self): + path = self._open_file_dialog(self.save_config_path, "save") + self.save_config_path = path + name = PurePath(self.save_config_path).name + dir_ = self.save_config_path.strip(name) + self._populate_config_from_app() + + if isinstance(self.config_reader.positions, tuple): + pos = self.config_reader.positions + self.config_reader.positions = ( + f"[!!python/tuple [{pos[0]},{pos[1]}]]" + ) + if isinstance(self.config_reader.timepoints, tuple): + t = self.config_reader.timepoints + 
self.config_reader.timepoints = f"[!!python/tuple [{t[0]},{t[1]}]]" + + self.config_reader.save_yaml(dir_=dir_, name=name) + + @Slot(int) + def handle_ret_max_slider_move(self, value): + self.ret_max = value + self.update_overlay_dask_array() + + @Slot(tuple) + def update_dims(self, dims): + if not self.pause_updates: + self.viewer.dims.set_current_step(0, dims[0]) + self.viewer.dims.set_current_step(1, dims[1]) + self.viewer.dims.set_current_step(3, dims[2]) + else: + pass + + def _open_file_dialog(self, default_path, type): + return self._open_dialog("select a directory", str(default_path), type) + + def _open_dialog(self, title, ref, type): + """ + opens pop-up dialogue for the user to choose a specific file or directory. + + Parameters + ---------- + title: (str) message to display at the top of the pop up + ref: (str) reference path to start the search at + type: (str) type of file the user is choosing (dir, file, or save) + + Returns + ------- + + """ + + options = QFileDialog.DontUseNativeDialog + if type == "dir": + path = QFileDialog.getExistingDirectory( + None, title, ref, options=options + ) + elif type == "file": + path = QFileDialog.getOpenFileName( + None, title, ref, options=options + )[0] + elif type == "save": + path = QFileDialog.getSaveFileName( + None, "Choose a save name", ref, options=options + )[0] + else: + raise ValueError("Did not understand file dialogue type") + + return path + + +class QtLogger(logging.Handler): + """ + Class to changing logging handler to the napari log output display + """ + + def __init__(self, widget): + super().__init__() + self.widget = widget + + # emit function necessary to be considered a logging handler + def emit(self, record): + msg = self.format(record) + self.widget.appendPlainText(msg) diff --git a/waveorder/plugin/tab_recon.py b/waveorder/plugin/tab_recon.py new file mode 100644 index 00000000..08d87745 --- /dev/null +++ b/waveorder/plugin/tab_recon.py @@ -0,0 +1,3935 @@ +import datetime +import json 
+import os +import socket +import subprocess +import threading +import time +import uuid +import warnings +from pathlib import Path +from typing import Annotated, Final, List, Literal, Union + +from iohub.ngff import open_ome_zarr +from magicgui import widgets +from magicgui.type_map import get_widget_class +from magicgui.widgets import * +from qtpy import QtCore +from qtpy.QtCore import QEvent, Qt, QThread, Signal +from qtpy.QtWidgets import * + +try: + from napari import Viewer + from napari.utils import notifications +except: + pass + +import concurrent.futures +import importlib.metadata + +from pydantic.v1 import BaseModel, NonNegativeInt + +from waveorder.cli import jobs_mgmt, settings +from waveorder.io import utils + +try: + # Use version specific pydantic import for ModelMetaclass + # prefer to pin to 1.10.19 + version = importlib.metadata.version("pydantic") + # print("Your Pydantic library ver:{v}.".format(v=version)) + if version >= "2.0.0": + print( + "Your Pydantic library ver:{v}. Recommended ver is: 1.10.19".format( + v=version + ) + ) + from pydantic.main import BaseModel, ModelMetaclass, ValidationError + elif version >= "1.10.19": + from pydantic.main import BaseModel, ModelMetaclass, ValidationError + else: + print( + "Your Pydantic library ver:{v}. Recommended ver is: 1.10.19".format( + v=version + ) + ) + from pydantic.main import BaseModel, ModelMetaclass, ValidationError +except: + print("Pydantic library was not found. 
Ver 1.10.19 is recommended.") + +STATUS_submitted_pool = "Submitted_Pool" +STATUS_submitted_job = "Submitted_Job" +STATUS_running_pool = "Running_Pool" +STATUS_running_job = "Running_Job" +STATUS_finished_pool = "Finished_Pool" +STATUS_finished_job = "Finished_Job" +STATUS_errored_pool = "Errored_Pool" +STATUS_errored_job = "Errored_Job" +STATUS_user_cleared_job = "User_Cleared_Job" +STATUS_user_cancelled_job = "User_Cancelled_Job" + +MSG_SUCCESS = {"msg": "success"} +JOB_COMPLETION_STR = "Job completed successfully" +JOB_RUNNING_STR = "Starting with JobEnvironment" +JOB_TRIGGERED_EXC = "Submitted job triggered an exception" +JOB_OOM_EVENT = "oom_kill event" + +_validate_alert = "⚠" +_validate_ok = "✔️" +_green_dot = "🟢" +_red_dot = "🔴" +_info_icon = "ⓘ" + +# For now replicate CLI processing modes - these could reside in the CLI settings file as well +# for consistency +OPTION_TO_MODEL_DICT = { + "birefringence": {"enabled": False, "setting": None}, + "phase": {"enabled": False, "setting": None}, + "fluorescence": {"enabled": False, "setting": None}, +} + +CONTAINERS_INFO = {} + +# This keeps an instance of the MyWorker server that is listening +# napari will not stop processes and the Hide event is not reliable +HAS_INSTANCE = {"val": False, "instance": None} + +# Components Queue list for new Jobs spanned from single processing +NEW_WIDGETS_QUEUE = [] +NEW_WIDGETS_QUEUE_THREADS = [] +MULTI_JOBS_REFS = {} +ROW_POP_QUEUE = [] + + +# Main class for the Reconstruction tab +# Not efficient since instantiated from GUI +# Does not have access to common functions in main_widget +# ToDo : From main_widget and pass self reference +class Ui_ReconTab_Form(QWidget): + + def __init__(self, parent=None, stand_alone=False): + super().__init__(parent) + self._ui = parent + self.stand_alone = stand_alone + self.viewer: Viewer = None + if HAS_INSTANCE["val"]: + self.current_dir_path = str(Path.cwd()) + self.directory = str(Path.cwd()) + self.input_directory = 
HAS_INSTANCE["input_directory"] + self.output_directory = HAS_INSTANCE["output_directory"] + self.model_directory = HAS_INSTANCE["model_directory"] + self.yaml_model_file = HAS_INSTANCE["yaml_model_file"] + else: + self.directory = str(Path.cwd()) + self.current_dir_path = str(Path.cwd()) + self.input_directory = str(Path.cwd()) + self.output_directory = str(Path.cwd()) + self.model_directory = str(Path.cwd()) + self.yaml_model_file = str(Path.cwd()) + + self.input_directory_dataset = None + self.input_directory_datasetMeta = None + self.input_channel_names = [] + + # Parent (Widget) which holds the GUI ############################## + self.recon_tab_mainScrollArea = QScrollArea() + self.recon_tab_mainScrollArea.setWidgetResizable(True) + + self.recon_tab_widget = QWidget() + self.recon_tab_widget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding + ) + self.recon_tab_layout = QVBoxLayout() + self.recon_tab_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + self.recon_tab_layout.setContentsMargins(0, 0, 0, 0) + self.recon_tab_layout.setSpacing(0) + self.recon_tab_widget.setLayout(self.recon_tab_layout) + self.recon_tab_mainScrollArea.setWidget(self.recon_tab_widget) + + # Top Section Group - Data ############################## + group_box_Data_groupBox_widget = QGroupBox("Data") + group_box_Data_layout = QVBoxLayout() + group_box_Data_layout.setContentsMargins(0, 5, 0, 0) + group_box_Data_layout.setSpacing(0) + group_box_Data_groupBox_widget.setLayout(group_box_Data_layout) + + # Input Data ############################## + self.data_input_widget = QWidget() + self.data_input_widget_layout = QHBoxLayout() + self.data_input_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.data_input_widget.setLayout(self.data_input_widget_layout) + + self.data_input_Label = widgets.Label(value="Input Store") + # self.data_input_Label.native.setMinimumWidth(97) + self.data_input_LineEdit = 
widgets.LineEdit(value=self.input_directory) + self.data_input_PushButton = widgets.PushButton(label="Browse") + # self.data_input_PushButton.native.setMinimumWidth(75) + self.data_input_PushButton.clicked.connect(self.browse_dir_path_input) + self.data_input_LineEdit.changed.connect( + self.read_and_set_input_path_on_validation + ) + + self.data_input_widget_layout.addWidget(self.data_input_Label.native) + self.data_input_widget_layout.addWidget( + self.data_input_LineEdit.native + ) + self.data_input_widget_layout.addWidget( + self.data_input_PushButton.native + ) + + # Output Data ############################## + self.data_output_widget = QWidget() + self.data_output_widget_layout = QHBoxLayout() + self.data_output_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.data_output_widget.setLayout(self.data_output_widget_layout) + + self.data_output_Label = widgets.Label(value="Output Directory") + self.data_output_LineEdit = widgets.LineEdit( + value=self.output_directory + ) + self.data_output_PushButton = widgets.PushButton(label="Browse") + # self.data_output_PushButton.native.setMinimumWidth(75) + self.data_output_PushButton.clicked.connect( + self.browse_dir_path_output + ) + self.data_output_LineEdit.changed.connect( + self.read_and_set_out_path_on_validation + ) + + self.data_output_widget_layout.addWidget(self.data_output_Label.native) + self.data_output_widget_layout.addWidget( + self.data_output_LineEdit.native + ) + self.data_output_widget_layout.addWidget( + self.data_output_PushButton.native + ) + + self.data_input_Label.native.setMinimumWidth(115) + self.data_output_Label.native.setMinimumWidth(115) + + group_box_Data_layout.addWidget(self.data_input_widget) + group_box_Data_layout.addWidget(self.data_output_widget) + self.recon_tab_layout.addWidget(group_box_Data_groupBox_widget) + + ################################## + + # Middle Section - Models ############################## + # Selection modes, New, Load, Clear + # Pydantic 
Models ScrollArea + + group_box_Models_groupBox_widget = QGroupBox("Models") + group_box_Models_layout = QVBoxLayout() + group_box_Models_layout.setContentsMargins(0, 5, 0, 0) + group_box_Models_layout.setSpacing(0) + group_box_Models_groupBox_widget.setLayout(group_box_Models_layout) + + self.models_widget = QWidget() + self.models_widget_layout = QHBoxLayout() + self.models_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.models_widget.setLayout(self.models_widget_layout) + + self.modes_selected = OPTION_TO_MODEL_DICT.copy() + + # Make a copy of the Reconstruction settings mode, these will be used as template + for mode in self.modes_selected.keys(): + self.modes_selected[mode]["setting"] = None + + # Checkboxes for the modes to select single or combination of modes + for mode in self.modes_selected.keys(): + self.modes_selected[mode]["Checkbox"] = widgets.Checkbox( + name=mode, label=mode + ) + self.models_widget_layout.addWidget( + self.modes_selected[mode]["Checkbox"].native + ) + + # PushButton to create a copy of the model - UI + self.models_new_PushButton = widgets.PushButton(label="New") + # self.models_new_PushButton.native.setMinimumWidth(100) + self.models_new_PushButton.clicked.connect(self.build_acq_contols) + + self.models_load_PushButton = DropButton(text="Load", recon_tab=self) + # self.models_load_PushButton.setMinimumWidth(90) + + # Passing model location label to model location selector + self.models_load_PushButton.clicked.connect( + lambda: self.browse_dir_path_model() + ) + + # PushButton to clear all copies of models that are create for UI + self.models_clear_PushButton = widgets.PushButton(label="Clear") + # self.models_clear_PushButton.native.setMinimumWidth(110) + self.models_clear_PushButton.clicked.connect(self.clear_all_models) + + self.models_widget_layout.addWidget(self.models_new_PushButton.native) + self.models_widget_layout.addWidget(self.models_load_PushButton) + self.models_widget_layout.addWidget( + 
self.models_clear_PushButton.native + ) + + # Middle scrollable component which will hold Editable/(vertical) Expanding UI + self.models_scrollArea = QScrollArea() + self.models_scrollArea.setWidgetResizable(True) + self.models_container_widget = DropWidget(self) + self.models_container_widget_layout = QVBoxLayout() + self.models_container_widget_layout.setContentsMargins(0, 0, 0, 0) + self.models_container_widget_layout.setSpacing(2) + self.models_container_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.models_container_widget.setLayout( + self.models_container_widget_layout + ) + self.models_scrollArea.setWidget(self.models_container_widget) + + group_box_Models_layout.addWidget(self.models_widget) + group_box_Models_layout.addWidget(self.models_scrollArea) + + ################################## + + # Create the splitter to resize Middle and Bottom Sections if required ################################## + splitter = QSplitter() + splitter.setOrientation(Qt.Orientation.Vertical) + splitter.setSizes([600, 200]) + + self.recon_tab_layout.addWidget(splitter) + + # Reconstruction ################################## + # Run, Processing, On-The-Fly + group_box_Reconstruction_groupBox_widget = QGroupBox( + "Reconstruction Queue" + ) + group_box_Reconstruction_layout = QVBoxLayout() + group_box_Reconstruction_layout.setContentsMargins(5, 10, 5, 5) + group_box_Reconstruction_layout.setSpacing(2) + group_box_Reconstruction_groupBox_widget.setLayout( + group_box_Reconstruction_layout + ) + + splitter.addWidget(group_box_Models_groupBox_widget) + splitter.addWidget(group_box_Reconstruction_groupBox_widget) + + my_splitter_handle = splitter.handle(1) + my_splitter_handle.setStyleSheet("background: 1px rgb(128,128,128);") + splitter.setStyleSheet( + """QSplitter::handle:pressed {background-color: #ca5;}""" + ) + + # PushButton to validate and Run the yaml file(s) based on selection against the Input store + self.reconstruction_run_PushButton = 
widgets.PushButton( + name="RUN Model" + ) + self.reconstruction_run_PushButton.native.setMinimumWidth(100) + self.reconstruction_run_PushButton.clicked.connect( + self.build_model_and_run + ) + + group_box_Reconstruction_layout.addWidget( + self.reconstruction_run_PushButton.native + ) + + # Tabs - Processing & On-The-Fly + tabs_Reconstruction = QTabWidget() + group_box_Reconstruction_layout.addWidget(tabs_Reconstruction) + + # Table for Jobs processing entries + tab1_processing_widget = QWidget() + tab1_processing_widget_layout = QVBoxLayout() + tab1_processing_widget_layout.setContentsMargins(5, 5, 5, 5) + tab1_processing_widget_layout.setSpacing(2) + tab1_processing_widget.setLayout(tab1_processing_widget_layout) + self.proc_table_QFormLayout = QFormLayout() + self.proc_table_QFormLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + tab1_processing_form_widget = QWidget() + tab1_processing_form_widget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding + ) + tab1_processing_form_widget.setLayout(self.proc_table_QFormLayout) + tab1_processing_widget_layout.addWidget(tab1_processing_form_widget) + + _clear_results_btn = widgets.PushButton(label="Clear Results") + _clear_results_btn.clicked.connect(self.clear_results_table) + tab1_processing_widget_layout.addWidget(_clear_results_btn.native) + + # Table for On-The-Fly processing entries + tab2_processing_widget = QWidget() + tab2_processing_widget_layout = QVBoxLayout() + tab2_processing_widget_layout.setContentsMargins(0, 0, 0, 0) + tab2_processing_widget_layout.setSpacing(0) + tab2_processing_widget.setLayout(tab2_processing_widget_layout) + self.proc_OTF_table_QFormLayout = QFormLayout() + self.proc_OTF_table_QFormLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _proc_OTF_table_widget = QWidget() + _proc_OTF_table_widget.setSizePolicy( + QSizePolicy.Expanding, QSizePolicy.Expanding + ) + _proc_OTF_table_widget.setLayout(self.proc_OTF_table_QFormLayout) + 
tab2_processing_widget_layout.addWidget(_proc_OTF_table_widget) + tab2_processing_widget.setMaximumHeight(100) + + tabs_Reconstruction.addTab(tab1_processing_widget, "Processing") + tabs_Reconstruction.addTab(tab2_processing_widget, "On-The-Fly") + + # Editable List holding pydantic class(es) as per user selection + self.pydantic_classes = list() + self.prev_model_settings = {} + self.index = 0 + self.pollData = False + + # Stores Model & Components values which cause validation failure - can be highlighted on the model field as Red + self.modelHighlighterVals = {} + + # handle napari's close widget and avoid starting a second server + if HAS_INSTANCE["val"]: + self.worker: MyWorker = HAS_INSTANCE["MyWorker"] + self.worker.set_new_instances( + self.proc_table_QFormLayout, self, self._ui + ) + else: + self.worker = MyWorker(self.proc_table_QFormLayout, self, self._ui) + HAS_INSTANCE["val"] = True + HAS_INSTANCE["MyWorker"] = self.worker + + self.app = QApplication.instance() + self.app.lastWindowClosed.connect( + self.myCloseEvent + ) # this line is connection to signal close + + ###################################################### + + # our defined close event since napari doesnt do + def myCloseEvent(self): + event = QEvent(QEvent.Type.Close) + self.closeEvent(event) + # self.app.exit() + + # on napari close - cleanup + def closeEvent(self, event): + if event.type() == QEvent.Type.Close: + self.worker.stop_server() + + def hideEvent(self, event): + if event.type() == QEvent.Type.Hide and ( + self._ui is not None and self._ui.isVisible() + ): + pass + + def showEvent(self, event): + if event.type() == QEvent.Type.Show: + pass + + def set_viewer(self, viewer): + self.viewer = viewer + + def show_dataset(self, data_path): + # Show reconstruction data + try: + if self.viewer is not None: + self.viewer.open(data_path, plugin="napari-ome-zarr") + except Exception as exc: + self.message_box(exc.args) + + def confirm_dialog(self, msg="Confirm your selection ?"): + qm = 
QMessageBox + ret = qm.question( + self.recon_tab_widget, + "Confirm", + msg, + qm.Yes | qm.No, + ) + if ret == qm.Yes: + return True + else: + return False + + # Copied from main_widget + # ToDo: utilize common functions + # Input data selector + def browse_dir_path_input(self): + if len(self.pydantic_classes) > 0 and not self.confirm_dialog( + "Changing Input Data will reset your models. Continue ?" + ): + return + else: + self.clear_all_models(silent=True) + try: + result = self.open_file_dialog( + self.input_directory, "dir", filter="ZARR Storage (*.zarr)" + ) + # .zarr is a folder but we could implement a filter to scan for "ending with" and present those if required + except Exception as exc: + self.message_box(exc.args) + return + + if result == "": + return + + self.data_input_LineEdit.value = result + + def browse_dir_path_output(self): + try: + result = self.open_file_dialog(self.output_directory, "dir") + except Exception as exc: + self.message_box(exc.args) + return + + if result == "": + return + + if not Path(result).exists(): + self.message_box("Output Directory path must exist !") + return + + self.data_output_LineEdit.value = result + + def browse_dir_path_inputBG(self, elem): + result = self.open_file_dialog(self.directory, "dir") + if result == "": + return + + ret, ret_msg = self.validate_input_data(result, BG=True) + if not ret: + self.message_box(ret_msg) + return + + elem.value = result + + def validate_input_data( + self, input_data_folder: str, metadata=False, BG=False + ) -> bool: + try: + self.input_channel_names = [] + self.data_input_Label.value = "Input Store" + input_paths = Path(input_data_folder) + with open_ome_zarr(input_paths, mode="r") as dataset: + try: + self.input_channel_names = dataset.channel_names + self.data_input_Label.value = ( + "Input Store" + " " + _info_icon + ) + self.data_input_Label.tooltip = ( + "Channel Names:\n- " + + "\n- ".join(self.input_channel_names) + ) + except Exception as exc: + print(exc.args) + + 
try: + string_pos = [] + i = 0 + for pos_paths, pos in dataset.positions(): + string_pos.append(pos_paths) + if i == 0: + axes = pos.zgroup.attrs["multiscales"][0]["axes"] + string_array_n = [str(x["name"]) for x in axes] + string_array = [ + str(x) + for x in pos.zgroup.attrs["multiscales"][0][ + "datasets" + ][0]["coordinateTransformations"][0]["scale"] + ] + string_scale = [] + for i in range(len(string_array_n)): + string_scale.append( + "{n}={d}".format( + n=string_array_n[i], d=string_array[i] + ) + ) + txt = "\n\nScale: " + ", ".join(string_scale) + self.data_input_Label.tooltip += txt + i += 1 + txt = "\n\nFOV: " + ", ".join(string_pos) + self.data_input_Label.tooltip += txt + except Exception as exc: + print(exc.args) + + if not BG and metadata: + self.input_directory_dataset = dataset + + if not BG: + self.pollData = False + zattrs = dataset.zattrs + if self.is_dataset_acq_running(zattrs): + if self.confirm_dialog( + msg="This seems like an in-process Acquisition. Would you like to process data on-the-fly ?" 
+ ): + self.pollData = True + + return True, MSG_SUCCESS + raise Exception( + "Dataset does not appear to be a valid ome-zarr storage" + ) + except Exception as exc: + return False, exc.args + + # call back for input LineEdit path changed manually + # include data validation + def read_and_set_input_path_on_validation(self): + if ( + self.data_input_LineEdit.value is None + or len(self.data_input_LineEdit.value) == 0 + ): + self.data_input_LineEdit.value = self.input_directory + self.message_box("Input data path cannot be empty") + return + if not Path(self.data_input_LineEdit.value).exists(): + self.data_input_LineEdit.value = self.input_directory + self.message_box("Input data path must point to a valid location") + return + + result = self.data_input_LineEdit.value + valid, ret_msg = self.validate_input_data(result) + + if valid: + self.directory = Path(result).parent.absolute() + self.current_dir_path = result + self.input_directory = result + + self.prev_model_settings = {} + + self.save_last_paths() + else: + self.data_input_LineEdit.value = self.input_directory + self.message_box(ret_msg) + + self.data_output_LineEdit.value = Path( + self.input_directory + ).parent.absolute() + + def read_and_set_out_path_on_validation(self): + if ( + self.data_output_LineEdit.value is None + or len(self.data_output_LineEdit.value) == 0 + ): + self.data_output_LineEdit.value = self.output_directory + self.message_box("Output data path cannot be empty") + return + if not Path(self.data_output_LineEdit.value).exists(): + self.data_output_LineEdit.value = self.output_directory + self.message_box("Output data path must point to a valid location") + return + + self.output_directory = self.data_output_LineEdit.value + + self.validate_model_output_paths() + + def validate_model_output_paths(self): + if len(self.pydantic_classes) > 0: + for model_item in self.pydantic_classes: + output_LineEdit = model_item["output_LineEdit"] + output_Button = model_item["output_Button"] + 
model_item["output_parent_dir"] = self.output_directory + + full_out_path = os.path.join( + Path(self.output_directory).absolute(), + output_LineEdit.value, + ) + model_item["output"] = full_out_path + + save_path_exists = ( + True if Path(full_out_path).exists() else False + ) + output_LineEdit.label = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + output_LineEdit.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + output_Button.text = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + output_Button.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + + def is_dataset_acq_running(self, zattrs: dict) -> bool: + """ + Checks the zattrs for CurrentDimensions & FinalDimensions key and tries to figure if + data acquisition is running + """ + + required_order = ["time", "position", "z", "channel"] + if "CurrentDimensions" in zattrs.keys(): + my_dict = zattrs["CurrentDimensions"] + sorted_dict_acq = { + k: my_dict[k] + for k in sorted(my_dict, key=lambda x: required_order.index(x)) + } + if "FinalDimensions" in zattrs.keys(): + my_dict = zattrs["FinalDimensions"] + sorted_dict_final = { + k: my_dict[k] + for k in sorted(my_dict, key=lambda x: required_order.index(x)) + } + if sorted_dict_acq != sorted_dict_final: + return True + return False + + # Output data selector + def browse_model_dir_path_output(self, elem): + result = self.open_file_dialog(self.output_directory, "save") + if result == "": + return + + save_path_exists = True if Path(result).exists() else False + elem.label = "Output Data:" + ( + "" if not save_path_exists else (" " + _validate_alert) + ) + elem.tooltip = "" if not save_path_exists else "Output file exists" + + elem.value = Path(result).name + + self.save_last_paths() + + # call back for output LineEdit path changed manually + def read_and_set_output_path_on_validation(self, elem1, elem2, 
save_path): + if elem1.value is None or len(elem1.value) == 0: + elem1.value = Path(save_path).name + + save_path = os.path.join( + Path(self.output_directory).absolute(), elem1.value + ) + + save_path_exists = True if Path(save_path).exists() else False + elem1.label = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + elem1.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + elem2.text = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + elem2.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + + self.save_last_paths() + + # Copied from main_widget + # ToDo: utilize common functions + # Output data selector + def browse_dir_path_model(self): + results = self.open_file_dialog( + self.directory, "files", filter="YAML Files (*.yml)" + ) # returns list + if len(results) == 0 or results == "": + return + + self.model_directory = str(Path(results[0]).parent.absolute()) + self.directory = self.model_directory + self.current_dir_path = self.model_directory + + self.save_last_paths() + self.open_model_files(results) + + def open_model_files(self, results: List): + pydantic_models = list() + for result in results: + self.yaml_model_file = result + + with open(result, "r") as yaml_in: + yaml_object = utils.yaml.safe_load( + yaml_in + ) # yaml_object will be a list or a dict + jsonString = json.dumps(self.convert(yaml_object)) + json_out = json.loads(jsonString) + json_dict = dict(json_out) + + selected_modes = list(OPTION_TO_MODEL_DICT.copy().keys()) + exclude_modes = list(OPTION_TO_MODEL_DICT.copy().keys()) + + for k in range(len(selected_modes) - 1, -1, -1): + if selected_modes[k] in json_dict.keys(): + exclude_modes.pop(k) + else: + selected_modes.pop(k) + + pruned_pydantic_class, ret_msg = self.build_model(selected_modes) + if pruned_pydantic_class is None: + self.message_box(ret_msg) + return + + pydantic_model, 
ret_msg = self.get_model_from_file( + self.yaml_model_file + ) + if pydantic_model is None: + if ( + isinstance(ret_msg, List) + and len(ret_msg) == 2 + and len(ret_msg[0]["loc"]) == 3 + and ret_msg[0]["loc"][2] == "background_path" + ): + pydantic_model = pruned_pydantic_class # if only background_path fails validation + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = "" + self.message_box( + "background_path:\nPath was invalid and will be reset" + ) + else: + self.message_box(ret_msg) + return + else: + # make sure "background_path" is valid + bg_loc = json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] + if bg_loc != "": + extension = os.path.splitext(bg_loc)[1] + if len(extension) > 0: + bg_loc = Path( + os.path.join( + str(Path(bg_loc).parent.absolute()), + "background.zarr", + ) + ) + else: + bg_loc = Path(os.path.join(bg_loc, "background.zarr")) + if not bg_loc.exists() or not self.validate_input_data( + str(bg_loc) + ): + self.message_box( + "background_path:\nPwas invalid and will be reset" + ) + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = "" + else: + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = str(bg_loc.parent.absolute()) + + pydantic_model = self.create_acq_contols2( + selected_modes, exclude_modes, pydantic_model, json_dict + ) + if pydantic_model is None: + self.message_box("Error - pydantic model returned None") + return + + pydantic_models.append(pydantic_model) + + return pydantic_models + + # useful when using close widget and not napari close and we might need them again + def save_last_paths(self): + HAS_INSTANCE["current_dir_path"] = self.current_dir_path + HAS_INSTANCE["input_directory"] = self.input_directory + HAS_INSTANCE["output_directory"] = self.output_directory + HAS_INSTANCE["model_directory"] = self.model_directory + HAS_INSTANCE["yaml_model_file"] = self.yaml_model_file + + # clears the results table + def clear_results_table(self): + index 
= self.proc_table_QFormLayout.rowCount() + if index < 1: + self.message_box("There are no processing results to clear !") + return + if self.confirm_dialog(): + for i in range(self.proc_table_QFormLayout.rowCount()): + self.proc_table_QFormLayout.removeRow(0) + + def remove_row(self, row, expID): + try: + if row < self.proc_table_QFormLayout.rowCount(): + widgetItem = self.proc_table_QFormLayout.itemAt(row) + if widgetItem is not None: + name_widget = widgetItem.widget() + toolTip_string = str(name_widget.toolTip) + if expID in toolTip_string: + self.proc_table_QFormLayout.removeRow( + row + ) # removeRow vs takeRow for threads ? + except Exception as exc: + print(exc.args) + + # marks fields on the Model that cause a validation error + def model_highlighter(self, errs): + try: + for uid in errs.keys(): + self.modelHighlighterVals[uid] = {} + container = errs[uid]["cls"] + self.modelHighlighterVals[uid]["errs"] = errs[uid]["errs"] + self.modelHighlighterVals[uid]["items"] = [] + self.modelHighlighterVals[uid]["tooltip"] = [] + if len(errs[uid]["errs"]) > 0: + self.model_highlighter_setter( + errs[uid]["errs"], container, uid + ) + except Exception as exc: + print(exc.args) + # more of a test feature - no need to show up + + # format all model errors into a display format for napari error message box + def format_string_for_error_display(self, errs): + try: + ret_str = "" + for uid in errs.keys(): + if len(errs[uid]["errs"]) > 0: + ret_str += errs[uid]["collapsibleBox"] + "\n" + for idx in range(len(errs[uid]["errs"])): + ret_str += f"{'>'.join(errs[uid]['errs'][idx]['loc'])}:\n{errs[uid]['errs'][idx]['msg']} \n" + ret_str += "\n" + return ret_str + except Exception as exc: + return ret_str + + # recursively fix the container for highlighting + def model_highlighter_setter( + self, errs, container: Container, containerID, lev=0 + ): + try: + layout = container.native.layout() + for i in range(layout.count()): + item = layout.itemAt(i) + if item.widget(): + widget = 
layout.itemAt(i).widget() + if ( + ( + not isinstance(widget._magic_widget, CheckBox) + and not isinstance( + widget._magic_widget, PushButton + ) + ) + and not isinstance(widget._magic_widget, LineEdit) + and isinstance( + widget._magic_widget._inner_widget, Container + ) + and not (widget._magic_widget._inner_widget is None) + ): + self.model_highlighter_setter( + errs, + widget._magic_widget._inner_widget, + containerID, + lev + 1, + ) + else: + for idx in range(len(errs)): + if len(errs[idx]["loc"]) - 1 < lev: + pass + elif ( + isinstance(widget._magic_widget, CheckBox) + or isinstance(widget._magic_widget, LineEdit) + or isinstance(widget._magic_widget, PushButton) + ): + if widget._magic_widget.label == errs[idx][ + "loc" + ][lev].replace("_", " "): + if widget._magic_widget.tooltip is None: + widget._magic_widget.tooltip = "-\n" + self.modelHighlighterVals[containerID][ + "items" + ].append(widget._magic_widget) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append(widget._magic_widget.tooltip) + widget._magic_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + elif ( + widget._magic_widget._label_widget.value + == errs[idx]["loc"][lev].replace("_", " ") + ): + if ( + widget._magic_widget._label_widget.tooltip + is None + ): + widget._magic_widget._label_widget.tooltip = ( + "-\n" + ) + self.modelHighlighterVals[containerID][ + "items" + ].append( + widget._magic_widget._label_widget + ) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append( + widget._magic_widget._label_widget.tooltip + ) + widget._magic_widget._label_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget._label_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + if ( + widget._magic_widget._inner_widget.tooltip + is None + ): + widget._magic_widget._inner_widget.tooltip = ( + "-\n" + ) + 
self.modelHighlighterVals[containerID][ + "items" + ].append( + widget._magic_widget._inner_widget + ) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append( + widget._magic_widget._inner_widget.tooltip + ) + widget._magic_widget._inner_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget._inner_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + except Exception as exc: + print(exc.args) + + # recursively fix the container for highlighting + def model_reset_highlighter_setter(self): + try: + for containerID in self.modelHighlighterVals.keys(): + items = self.modelHighlighterVals[containerID]["items"] + tooltip = self.modelHighlighterVals[containerID]["tooltip"] + i = 0 + for widItem in items: + widItem.native.setStyleSheet( + "border:1px solid rgb(0, 0, 0); border-width: 0px;" + ) + widItem.tooltip = tooltip[i] + i += 1 + + except Exception as exc: + print(exc.args) + + except Exception as exc: + print(exc.args) + + # passes msg to napari notifications + def message_box(self, msg, type="exc"): + if len(msg) > 0: + try: + json_object = msg + json_txt = "" + for err in json_object: + json_txt = ( + json_txt + + "Loc: {loc}\nMsg:{msg}\nType:{type}\n\n".format( + loc=err["loc"], msg=err["msg"], type=err["type"] + ) + ) + json_txt = str(json_txt) + # ToDo: format it better + # formatted txt does not show up properly in msg-box ?? 
+ except: + json_txt = str(msg) + + # show is a message box + if self.stand_alone: + self.message_box_stand_alone(json_txt) + else: + if type == "exc": + notifications.show_error(json_txt) + else: + notifications.show_info(json_txt) + + def message_box_stand_alone(self, msg): + q = QMessageBox( + QMessageBox.Warning, + "Message", + str(msg), + parent=self.recon_tab_widget, + ) + q.setStandardButtons(QMessageBox.StandardButton.Ok) + q.setIcon(QMessageBox.Icon.Warning) + q.exec_() + + def cancel_job(self, btn: PushButton): + if self.confirm_dialog(): + btn.enabled = False + btn.text = btn.text + " (cancel called)" + + def add_widget( + self, parentLayout: QVBoxLayout, expID, jID, table_entry_ID="", pos="" + ): + + jID = str(jID) + _cancelJobBtntext = "Cancel Job {jID} ({posName})".format( + jID=jID, posName=pos + ) + _cancelJobButton = widgets.PushButton( + name="JobID", label=_cancelJobBtntext, enabled=True, value=False + ) + _cancelJobButton.clicked.connect( + lambda: self.cancel_job(_cancelJobButton) + ) + _txtForInfoBox = "Updating {id}-{pos}: Please wait... 
\nJobID assigned: {jID} ".format( + id=table_entry_ID, jID=jID, pos=pos + ) + _scrollAreaCollapsibleBoxDisplayWidget = ScrollableLabel( + text=_txtForInfoBox + ) + + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + _cancelJobButton.native + ) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + _scrollAreaCollapsibleBoxDisplayWidget + ) + + _scrollAreaCollapsibleBoxWidget = QWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setMinimumHeight(300) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox( + table_entry_ID + " - " + pos + ) # tableEntryID, tableEntryShortDesc - should update with processing status + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + parentLayout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + parentLayout.addWidget(_collapsibleBoxWidget) + + MULTI_JOBS_REFS[expID + jID] = {} + MULTI_JOBS_REFS[expID + jID]["cancelBtn"] = _cancelJobButton + MULTI_JOBS_REFS[expID + jID][ + "infobox" + ] = _scrollAreaCollapsibleBoxDisplayWidget + NEW_WIDGETS_QUEUE.remove(expID + jID) + + def add_table_entry_job(self, proc_params): + + tableEntryID = proc_params["tableEntryID"] + parentLayout: QVBoxLayout = proc_params["parent_layout"] + + _cancelJobButton = widgets.PushButton( + name="JobID", label="Cancel Job", value=False, enabled=False + ) + _cancelJobButton.clicked.connect( + lambda: self.cancel_job(_cancelJobButton) + ) + _txtForInfoBox = 
"Updating {id}: Please wait...".format( + id=tableEntryID + ) + _scrollAreaCollapsibleBoxDisplayWidget = ScrollableLabel( + text=_txtForInfoBox + ) + _scrollAreaCollapsibleBoxDisplayWidget.setFixedHeight(300) + + proc_params["table_entry_infoBox"] = ( + _scrollAreaCollapsibleBoxDisplayWidget + ) + proc_params["cancelJobButton"] = _cancelJobButton + parentLayout.addWidget(_cancelJobButton.native) + parentLayout.addWidget(_scrollAreaCollapsibleBoxDisplayWidget) + + return proc_params + + def add_remove_check_OTF_table_entry( + self, OTF_dir_path, bool_msg, do_check=False + ): + if do_check: + try: + for row in range(self.proc_OTF_table_QFormLayout.rowCount()): + widgetItem = self.proc_OTF_table_QFormLayout.itemAt(row) + if widgetItem is not None: + name_widget: QWidget = widgetItem.widget() + name_string = str(name_widget.objectName()) + if OTF_dir_path in name_string: + for item in name_widget.findChildren(QPushButton): + _poll_Stop_PushButton: QPushButton = item + return _poll_Stop_PushButton.isChecked() + return False + except Exception as exc: + print(exc.args) + return False + else: + if bool_msg: + _poll_otf_label = ScrollableLabel( + text=OTF_dir_path + " " + _green_dot + ) + _poll_Stop_PushButton = QPushButton("Stop") + _poll_Stop_PushButton.setCheckable( + True + ) # Make the button checkable + _poll_Stop_PushButton.clicked.connect( + lambda: self.stop_OTF_push_button_call( + _poll_otf_label, OTF_dir_path + " " + _red_dot + ) + ) + + _poll_data_widget = QWidget() + _poll_data_widget.setObjectName(OTF_dir_path) + _poll_data_widget_layout = QHBoxLayout() + _poll_data_widget.setLayout(_poll_data_widget_layout) + _poll_data_widget_layout.addWidget(_poll_otf_label) + _poll_data_widget_layout.addWidget(_poll_Stop_PushButton) + + self.proc_OTF_table_QFormLayout.insertRow(0, _poll_data_widget) + else: + try: + for row in range( + self.proc_OTF_table_QFormLayout.rowCount() + ): + widgetItem = self.proc_OTF_table_QFormLayout.itemAt( + row + ) + if widgetItem is not 
None: + name_widget: QWidget = widgetItem.widget() + name_string = str(name_widget.objectName()) + if OTF_dir_path in name_string: + self.proc_OTF_table_QFormLayout.removeRow(row) + except Exception as exc: + print(exc.args) + + def stop_OTF_push_button_call(self, label, txt): + _poll_otf_label: QLabel = label + _poll_otf_label.setText(txt) + self.setDisabled(True) + + # adds processing entry to _qwidgetTabEntry_layout as row item + # row item will be purged from table as processing finishes + # there could be 3 tabs for this processing table status + # Running, Finished, Errored + def addTableEntry( + self, table_entry_ID, table_entry_short_desc, proc_params + ): + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidget = QWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + _scrollAreaCollapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + _scrollAreaCollapsibleBox.setMinimumHeight(300) + _scrollAreaCollapsibleBox.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox(table_entry_ID) + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + _expandingTabEntryWidgetLayout = QVBoxLayout() + _expandingTabEntryWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _expandingTabEntryWidgetLayout.addWidget(_collapsibleBoxWidget) + + _expandingTabEntryWidget = QWidget() + 
_expandingTabEntryWidget.toolTip = table_entry_short_desc + _expandingTabEntryWidget.setLayout(_expandingTabEntryWidgetLayout) + _expandingTabEntryWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + proc_params["tableEntryID"] = table_entry_ID + proc_params["parent_layout"] = _scrollAreaCollapsibleBoxWidgetLayout + proc_params = self.add_table_entry_job(proc_params) + + # instead of adding, insert at 0 to keep latest entry on top + # self.proc_table_QFormLayout.addRow(_expandingTabEntryWidget) + self.proc_table_QFormLayout.insertRow(0, _expandingTabEntryWidget) + + proc_params["table_layout"] = self.proc_table_QFormLayout + proc_params["table_entry"] = _expandingTabEntryWidget + + self.worker.run_in_pool(proc_params) + # result = self.worker.getResult(proc_params["exp_id"]) + # print(result) + + # Builds the model as required + def build_model(self, selected_modes): + try: + birefringence = None + phase = None + fluorescence = None + chNames = ["State0"] + exclude_modes = ["birefringence", "phase", "fluorescence"] + if "birefringence" in selected_modes and "phase" in selected_modes: + birefringence = settings.BirefringenceSettings() + phase = settings.PhaseSettings() + chNames = ["State0", "State1", "State2", "State3"] + exclude_modes = ["fluorescence"] + elif "birefringence" in selected_modes: + birefringence = settings.BirefringenceSettings() + chNames = ["State0", "State1", "State2", "State3"] + exclude_modes = ["fluorescence", "phase"] + elif "phase" in selected_modes: + phase = settings.PhaseSettings() + chNames = ["BF"] + exclude_modes = ["birefringence", "fluorescence"] + elif "fluorescence" in selected_modes: + fluorescence = settings.FluorescenceSettings() + chNames = ["FL"] + exclude_modes = ["birefringence", "phase"] + + model = None + try: + model = settings.ReconstructionSettings( + input_channel_names=chNames, + birefringence=birefringence, + phase=phase, + fluorescence=fluorescence, + ) + except ValidationError as 
exc: + # use v1 and v2 differ for ValidationError - newer one is not caught properly + return None, exc.errors() + + model = self.fix_model( + model, exclude_modes, "input_channel_names", chNames + ) + return model, "+".join(selected_modes) + ": MSG_SUCCESS" + + except Exception as exc: + return None, exc.args + + # ToDo: Temporary fix to over ride the 'input_channel_names' default value + # Needs revisitation + def fix_model(self, model, exclude_modes, attr_key, attr_val): + try: + for mode in exclude_modes: + model = settings.ReconstructionSettings.copy( + model, + exclude={mode}, + deep=True, + update={attr_key: attr_val}, + ) + settings.ReconstructionSettings.__setattr__( + model, attr_key, attr_val + ) + if hasattr(model, attr_key): + model.__fields__[attr_key].default = attr_val + model.__fields__[attr_key].field_info.default = attr_val + except Exception as exc: + return print(exc.args) + return model + + # Creates UI controls from model based on selections + def build_acq_contols(self): + + # Make a copy of selections and unsed for deletion + selected_modes = [] + exclude_modes = [] + + for mode in self.modes_selected.keys(): + enabled = self.modes_selected[mode]["Checkbox"].value + if not enabled: + exclude_modes.append(mode) + else: + selected_modes.append(mode) + + self.create_acq_contols2(selected_modes, exclude_modes) + + def create_acq_contols2( + self, + selected_modes, + exclude_modes, + my_loaded_model=None, + json_dict=None, + ): + # duplicate settings from the prev model on new model creation + if json_dict is None and len(self.pydantic_classes) > 0: + ret = self.build_model_and_run( + validate_return_prev_model_json_txt=True + ) + if ret is None: + return + key, json_txt = ret + self.prev_model_settings[key] = json.loads(json_txt) + if json_dict is None: + key = "-".join(selected_modes) + if key in self.prev_model_settings.keys(): + json_dict = self.prev_model_settings[key] + + # initialize the top container and specify what pydantic class to 
map from + if my_loaded_model is not None: + pydantic_class = my_loaded_model + else: + pydantic_class, ret_msg = self.build_model(selected_modes) + if pydantic_class is None: + self.message_box(ret_msg) + return + + # Final constant UI val and identifier + _idx: Final[int] = self.index + _str: Final[str] = str(uuid.uuid4()) + + # Container holding the pydantic UI components + # Multiple instances/copies since more than 1 might be created + recon_pydantic_container = widgets.Container( + name=_str, scrollable=False + ) + + self.add_pydantic_to_container( + pydantic_class, recon_pydantic_container, exclude_modes, json_dict + ) + + # Run a validation check to see if the selected options are permitted + # before we create the GUI + # get the kwargs from the container/class + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + recon_pydantic_container, + pydantic_class, + pydantic_kwargs, + exclude_modes, + ) + if pydantic_kwargs is None: + self.message_box(ret_msg) + return + + # For list element, this needs to be cleaned and parsed back as an array + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return 
errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + pydantic_class, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None: + self.message_box(ret_msg) + return + + # PushButton to delete a UI container + # Use case when a wrong selection of input modes get selected eg Bire+Fl + # Preferably this root level validation should occur before values arevalidated + # in order to display and avoid this to occur + _del_button = widgets.PushButton(name="Delete Model") + + c_mode = "-and-".join(selected_modes) + c_mode_short = "".join( + item[:3].capitalize() for item in selected_modes + ) + if c_mode in CONTAINERS_INFO.keys(): + CONTAINERS_INFO[c_mode] += 1 + else: + CONTAINERS_INFO[c_mode] = 1 + num_str = "{:02d}".format(CONTAINERS_INFO[c_mode]) + c_mode_str = f"{c_mode} - {num_str}" + + # Output Data location + # These could be multiple based on user selection for each model + # Inherits from Input by default at creation time + name_without_ext = os.path.splitext(Path(self.input_directory).name)[0] + save_path = os.path.join( + Path(self.output_directory).absolute(), + ( + name_without_ext + + ("_" + c_mode_short + "_" + num_str) + + ".zarr" + ), + ) + save_path_exists = True if Path(save_path).exists() else False + _output_data_loc = widgets.LineEdit( + value=Path(save_path).name, + tooltip=( + "" + if not save_path_exists + else (_validate_alert + " Output file exists") + ), + ) + _output_data_btn = widgets.PushButton( + text=("" if not save_path_exists else (_validate_alert + " ")) + + "Output Data:", + tooltip=( + "" + if not save_path_exists + else (_validate_alert + " Output file exists") + ), + ) + + # Passing location label to output location selector + _output_data_btn.clicked.connect( 
+ lambda: self.browse_model_dir_path_output(_output_data_loc) + ) + _output_data_loc.changed.connect( + lambda: self.read_and_set_output_path_on_validation( + _output_data_loc, _output_data_btn, save_path + ) + ) + + _show_CheckBox = widgets.CheckBox( + name="Show after Reconstruction", value=True + ) + _show_CheckBox.max_width = 200 + _rx_Label = widgets.Label(value="rx") + _rx_LineEdit = widgets.LineEdit(name="rx", value=1) + _rx_LineEdit.max_width = 50 + _validate_button = widgets.PushButton(name="Validate") + + # Passing all UI components that would be deleted + _expandingTabEntryWidget = QWidget() + _del_button.clicked.connect( + lambda: self.delete_model( + _expandingTabEntryWidget, + recon_pydantic_container.native, + _output_data_loc.native, + _output_data_btn.native, + _show_CheckBox.native, + _validate_button.native, + _del_button.native, + _str, + ) + ) + + # HBox for Output Data + _hBox_widget = QWidget() + _hBox_layout = QHBoxLayout() + _hBox_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + _hBox_widget.setLayout(_hBox_layout) + _hBox_layout.addWidget(_output_data_btn.native) + _hBox_layout.addWidget(_output_data_loc.native) + + # Add this container to the main scrollable widget + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidget = MyWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.setAlignment(Qt.AlignmentFlag.AlignTop) + + scrollbar = _scrollAreaCollapsibleBox.horizontalScrollBar() + _scrollAreaCollapsibleBoxWidget.resized.connect( + lambda: self.check_scrollbar_visibility(scrollbar) + ) + + 
_scrollAreaCollapsibleBoxWidgetLayout.addWidget( + scrollbar, alignment=Qt.AlignmentFlag.AlignTop + ) # Place at the top + + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox( + c_mode_str + ) # tableEntryID, tableEntryShortDesc - should update with processing status + + _validate_button.clicked.connect( + lambda: self.validate_model(_str, _collapsibleBoxWidget) + ) + + _hBox_widget2 = QWidget() + _hBox_layout2 = QHBoxLayout() + _hBox_layout2.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + _hBox_widget2.setLayout(_hBox_layout2) + _hBox_layout2.addWidget(_show_CheckBox.native) + _hBox_layout2.addWidget(_validate_button.native) + _hBox_layout2.addWidget(_del_button.native) + _hBox_layout2.addWidget(_rx_Label.native) + _hBox_layout2.addWidget(_rx_LineEdit.native) + + _expandingTabEntryWidgetLayout = QVBoxLayout() + _expandingTabEntryWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _expandingTabEntryWidgetLayout.addWidget(_collapsibleBoxWidget) + + _expandingTabEntryWidget.toolTip = c_mode_str + _expandingTabEntryWidget.setLayout(_expandingTabEntryWidgetLayout) + _expandingTabEntryWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _expandingTabEntryWidget.layout().setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + recon_pydantic_container.native + ) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget(_hBox_widget) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget(_hBox_widget2) + + _scrollAreaCollapsibleBox.setMinimumHeight( + _scrollAreaCollapsibleBoxWidgetLayout.sizeHint().height() + 20 + ) + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + self.models_container_widget_layout.addWidget(_expandingTabEntryWidget) + + # Store a copy of the pydantic container along with all its 
associated components and properties + # We dont needs a copy of the class but storing for now + # This will be used for making deletion edits and looping to create our final run output + # uuid - used for identiying in editable list + self.pydantic_classes.append( + { + "uuid": _str, + "c_mode_str": c_mode_str, + "collapsibleBoxWidget": _collapsibleBoxWidget, + "class": pydantic_class, + "input": self.data_input_LineEdit, + "output": os.path.join( + Path(self.output_directory).absolute(), + _output_data_loc.value, + ), + "output_parent_dir": str( + Path(self.output_directory).absolute() + ), + "output_LineEdit": _output_data_loc, + "output_Button": _output_data_btn, + "container": recon_pydantic_container, + "selected_modes": selected_modes.copy(), + "exclude_modes": exclude_modes.copy(), + "poll_data": self.pollData, + "show": _show_CheckBox, + "rx": _rx_LineEdit, + } + ) + self.index += 1 + + if self.index > 1: + self.reconstruction_run_PushButton.text = "RUN {n} Models".format( + n=self.index + ) + else: + self.reconstruction_run_PushButton.text = "RUN Model" + + return pydantic_model + + def check_scrollbar_visibility(self, scrollbar): + h_scrollbar = scrollbar + + # Hide scrollbar if not needed + h_scrollbar.setVisible(h_scrollbar.maximum() > h_scrollbar.minimum()) + + def validate_model(self, _str, _collapsibleBoxWidget): + i = 0 + model_entry_item = None + for item in self.pydantic_classes: + if item["uuid"] == _str: + model_entry_item = item + break + i += 1 + if model_entry_item is not None: + cls = item["class"] + cls_container = item["container"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box("No model defined !") + return + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None: + 
self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["time_indices"] = time_indices + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + if ret_msg == MSG_SUCCESS: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_ok}" + ) + else: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + + # UI components deletion - 
maybe just needs the parent container instead of individual components + def delete_model(self, wid0, wid1, wid2, wid3, wid4, wid5, wid6, _str): + + if not self.confirm_dialog(): + return False + + # if wid5 is not None: + # wid5.setParent(None) + # if wid4 is not None: + # wid4.setParent(None) + # if wid3 is not None: + # wid3.setParent(None) + # if wid2 is not None: + # wid2.setParent(None) + # if wid1 is not None: + # wid1.setParent(None) + if wid0 is not None: + wid0.setParent(None) + + # Find and remove the class from our pydantic model list using uuid + i = 0 + for item in self.pydantic_classes: + if item["uuid"] == _str: + self.pydantic_classes.pop(i) + break + i += 1 + self.index = len(self.pydantic_classes) + if self.index > 1: + self.reconstruction_run_PushButton.text = "RUN {n} Models".format( + n=self.index + ) + else: + self.reconstruction_run_PushButton.text = "RUN Model" + + # Clear all the generated pydantic models and clears the pydantic model list + def clear_all_models(self, silent=False): + + if silent or self.confirm_dialog(): + index = self.models_container_widget_layout.count() - 1 + while index >= 0: + myWidget = self.models_container_widget_layout.itemAt( + index + ).widget() + if myWidget is not None: + myWidget.setParent(None) + index -= 1 + self.pydantic_classes.clear() + CONTAINERS_INFO.clear() + self.index = 0 + self.reconstruction_run_PushButton.text = "RUN Model" + self.prev_model_settings = {} + + # Displays the json output from the pydantic model UI selections by user + # Loops through all our stored pydantic classes + def build_model_and_run(self, validate_return_prev_model_json_txt=False): + # we dont want to have a partial run if there are N models + # so we will validate them all first and then run in a second loop + # first pass for validating + # second pass for creating yaml and processing + + if len(self.pydantic_classes) == 0: + self.message_box("Please create a processing model first !") + return + + 
self.model_reset_highlighter_setter() # reset the container elements that might be highlighted for errors + _collectAllErrors = {} + _collectAllErrorsBool = True + for item in self.pydantic_classes: + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + uuid_str = item["uuid"] + _collapsibleBoxWidget = item["collapsibleBoxWidget"] + c_mode_str = item["c_mode_str"] + + _collectAllErrors[uuid_str] = {} + _collectAllErrors[uuid_str]["cls"] = cls_container + _collectAllErrors[uuid_str]["errs"] = [] + _collectAllErrors[uuid_str]["collapsibleBox"] = c_mode_str + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box(ret_msg) + return + + # get the kwargs from the container/class + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # For list element, this needs to be cleaned and parsed back as an array + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None and not _collectAllErrorsBool: + 
self.message_box(ret_msg) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if ret_msg == MSG_SUCCESS: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_ok}" + ) + else: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + _collectAllErrors[uuid_str]["errs"] = ret_msg + if pydantic_model is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # check if we collected any validation errors before continuing + for uu_key in _collectAllErrors.keys(): + if len(_collectAllErrors[uu_key]["errs"]) > 0: + self.model_highlighter(_collectAllErrors) + fmt_str = self.format_string_for_error_display( + _collectAllErrors + ) + self.message_box(fmt_str) + return + + if validate_return_prev_model_json_txt: + return "-".join(selected_modes), json_txt + + # generate a time-stamp for our yaml files to avoid overwriting + # files generated at the same time will have an index suffix + now = datetime.datetime.now() + ms = now.strftime("%f")[:3] + unique_id = now.strftime("%Y_%m_%d_%H_%M_%S_") + ms + + if self.pollData: + data = open_ome_zarr(self.input_directory, mode="r") + if "CurrentDimensions" in data.zattrs.keys(): + my_dict_time_indices = data.zattrs["CurrentDimensions"]["time"] + # get the prev time_index, since this is current acq + if my_dict_time_indices - 1 > 1: + time_indices = list(range(0, my_dict_time_indices)) + else: + time_indices = 0 + + pollDataThread = threading.Thread( + target=self.add_poll_loop, + args=(self.input_directory, 
my_dict_time_indices - 1), + ) + pollDataThread.start() + + i = 0 + for item in self.pydantic_classes: + i += 1 + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + output_LineEdit = item["output_LineEdit"] + output_parent_dir = item["output_parent_dir"] + + full_out_path = os.path.join( + output_parent_dir, output_LineEdit.value + ) + + # gather input/out locations + input_dir = f"{item['input'].value}" + output_dir = full_out_path + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box("No model defined !") + return + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None: + self.message_box(ret_msg) + return + + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + if not self.pollData: + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + return + 
pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None: + self.message_box(ret_msg) + return + + # save the yaml files + # path is next to saved data location + save_config_path = str(Path(output_dir).parent.absolute()) + yml_file_name = "-and-".join(selected_modes) + yml_file = ( + yml_file_name + "-" + unique_id + "-{:02d}".format(i) + ".yml" + ) + config_path = os.path.join(save_config_path, yml_file) + utils.model_to_yaml(pydantic_model, config_path) + + # Input params for table entry + # Once ALL entries are entered we can deleted ALL model containers + # Table will need a low priority update thread to refresh status queried from CLI + # Table entries will be purged on completion when Result is returned OK + # Table entries will show an error msg when processing finishes but Result not OK + # Table fields ID / DateTime, Reconstruction type, Input Location, Output Location, Progress indicator, Stop button + + # addl_txt = "ID:" + unique_id + "-"+ str(i) + "\nInput:" + input_dir + "\nOutput:" + output_dir + # self.json_display.value = self.json_display.value + addl_txt + "\n" + json_txt+ "\n\n" + expID = "{tID}-{idx}".format(tID=unique_id, idx=i) + tableID = "{tName}: ({tID}-{idx})".format( + tName=c_mode_str, tID=unique_id, idx=i + ) + tableDescToolTip = "{tName}: ({tID}-{idx})".format( + tName=yml_file_name, tID=unique_id, idx=i + ) + + proc_params = {} + proc_params["exp_id"] = expID + proc_params["desc"] = tableDescToolTip + proc_params["config_path"] = str(Path(config_path).absolute()) + 
proc_params["input_path"] = str(Path(input_dir).absolute()) + proc_params["output_path"] = str(Path(output_dir).absolute()) + proc_params["output_path_parent"] = str( + Path(output_dir).parent.absolute() + ) + proc_params["show"] = item["show"].value + proc_params["rx"] = item["rx"].value + + self.addTableEntry(tableID, tableDescToolTip, proc_params) + + def add_poll_loop(self, input_data_path, last_time_index): + _pydantic_classes = self.pydantic_classes.copy() + required_order = ["time", "position", "z", "channel"] + _pollData = True + + tableEntryWorker = AddOTFTableEntryWorkerThread( + input_data_path, True, False + ) + tableEntryWorker.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker.start() + _breakFlag = False + while True: + time.sleep(10) + zattrs_data = None + try: + _stopCalled = self.add_remove_check_OTF_table_entry( + input_data_path, True, do_check=True + ) + if _stopCalled: + tableEntryWorker2 = AddOTFTableEntryWorkerThread( + input_data_path, False, False + ) + tableEntryWorker2.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker2.start() + + # let child threads finish their work before exiting the parent thread + while tableEntryWorker2.isRunning(): + time.sleep(1) + time.sleep(5) + break + try: + data = open_ome_zarr(input_data_path, mode="r") + zattrs_data = data.zattrs + except PermissionError: + pass # On-The-Fly dataset will throw Permission Denied when being written + # Maybe we can read the zaatrs directly in that case + # If this write/read is a constant issue then the zattrs 'CurrentDimensions' key + # should be updated less frequently, instead of current design of updating with + # each image + + if zattrs_data is None: + zattrs_data = self.load_zattrs_directly_as_dict( + input_data_path + ) + + if zattrs_data is not None: + if "CurrentDimensions" in zattrs_data.keys(): + my_dict1 = zattrs_data["CurrentDimensions"] + sorted_dict_acq = { + k: 
my_dict1[k] + for k in sorted( + my_dict1, key=lambda x: required_order.index(x) + ) + } + my_dict_time_indices_curr = zattrs_data[ + "CurrentDimensions" + ]["time"] + # print(sorted_dict_acq) + + if "FinalDimensions" in zattrs_data.keys(): + my_dict2 = zattrs_data["FinalDimensions"] + sorted_dict_final = { + k: my_dict2[k] + for k in sorted( + my_dict2, key=lambda x: required_order.index(x) + ) + } + # print(sorted_dict_final) + + # use the prev time_index, since this is current acq and we need for other dims to finish acq for this t + # or when all dims match - signifying acq finished + if ( + my_dict_time_indices_curr - 2 > last_time_index + or json.dumps(sorted_dict_acq) + == json.dumps(sorted_dict_final) + ): + + now = datetime.datetime.now() + ms = now.strftime("%f")[:3] + unique_id = now.strftime("%Y_%m_%d_%H_%M_%S_") + ms + + i = 0 + for item in _pydantic_classes: + i += 1 + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + output_LineEdit = item["output_LineEdit"] + output_parent_dir = item["output_parent_dir"] + + full_out_path = os.path.join( + output_parent_dir, output_LineEdit.value + ) + # gather input/out locations + input_dir = f"{item['input'].value}" + output_dir = full_out_path + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = ( + self.get_and_validate_pydantic_args( + cls_container, + cls, + pydantic_kwargs, + exclude_modes, + ) + ) + + input_channel_names, ret_msg = ( + self.clean_string_for_list( + "input_channel_names", + pydantic_kwargs["input_channel_names"], + ) + ) + pydantic_kwargs["input_channel_names"] = ( + input_channel_names + ) + + if _pollData: + if json.dumps(sorted_dict_acq) == json.dumps( + sorted_dict_final + ): + time_indices = list( + range( + last_time_index, + my_dict_time_indices_curr, + ) + ) + _breakFlag = True + else: + time_indices = list( + range( + last_time_index, + 
my_dict_time_indices_curr - 2, + ) + ) + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = ( + self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"][ + "apply_inverse" + ]["background_path"], + ) + ) + + pydantic_kwargs["birefringence"][ + "apply_inverse" + ]["background_path"] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = ( + self.validate_pydantic_model( + cls, pydantic_kwargs + ) + ) + + # save the yaml files + # path is next to saved data location + save_config_path = str( + Path(output_dir).parent.absolute() + ) + yml_file_name = "-and-".join(selected_modes) + yml_file = ( + yml_file_name + + "-" + + unique_id + + "-{:02d}".format(i) + + ".yml" + ) + config_path = os.path.join( + save_config_path, yml_file + ) + utils.model_to_yaml(pydantic_model, config_path) + + expID = "{tID}-{idx}".format(tID=unique_id, idx=i) + tableID = "{tName}: ({tID}-{idx})".format( + tName=c_mode_str, tID=unique_id, idx=i + ) + tableDescToolTip = "{tName}: ({tID}-{idx})".format( + tName=yml_file_name, tID=unique_id, idx=i + ) + + proc_params = {} + proc_params["exp_id"] = expID + proc_params["desc"] = tableDescToolTip + proc_params["config_path"] = str( + Path(config_path).absolute() + ) + proc_params["input_path"] = str( + Path(input_dir).absolute() + ) + proc_params["output_path"] = str( + Path(output_dir).absolute() + ) + proc_params["output_path_parent"] = str( + Path(output_dir).parent.absolute() + ) + proc_params["show"] = False + proc_params["rx"] = 1 + + tableEntryWorker1 = AddTableEntryWorkerThread( + tableID, tableDescToolTip, proc_params + ) + tableEntryWorker1.add_tableentry_signal.connect( + self.addTableEntry + ) + tableEntryWorker1.start() + + if ( + json.dumps(sorted_dict_acq) + == json.dumps(sorted_dict_final) + and _breakFlag + ): + + tableEntryWorker2 = AddOTFTableEntryWorkerThread( + input_data_path, False, False 
+ ) + tableEntryWorker2.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker2.start() + + # let child threads finish their work before exiting the parent thread + while ( + tableEntryWorker1.isRunning() + or tableEntryWorker2.isRunning() + ): + time.sleep(1) + time.sleep(5) + break + + last_time_index = my_dict_time_indices_curr - 2 + except Exception as exc: + print(exc.args) + print( + "Exiting polling for dataset: {data_path}".format( + data_path=input_data_path + ) + ) + break + + def load_zattrs_directly_as_dict(self, zattrsFilePathDir): + try: + file_path = os.path.join(zattrsFilePathDir, ".zattrs") + f = open(file_path, "r") + txt = f.read() + f.close() + return json.loads(txt) + except Exception as exc: + print(exc.args) + return None + + # ======= These function do not implement validation + # They simply make the data from GUI translate to input types + # that the model expects: for eg. GUI txt field will output only str + # when the model needs integers + + # util function to parse list elements displayed as string + def remove_chars(self, string, chars_to_remove): + for char in chars_to_remove: + string = string.replace(char, "") + return string + + # util function to parse list elements displayed as string + def clean_string_for_list(self, field, string): + chars_to_remove = ["[", "]", "'", '"', " "] + if isinstance(string, str): + string = self.remove_chars(string, chars_to_remove) + if len(string) == 0: + return None, {"msg": field + " is invalid"} + if "," in string: + string = string.split(",") + return string, MSG_SUCCESS + if isinstance(string, str): + string = [string] + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # util function to parse list elements displayed as string, int, int as list of strings, int range + # [1,2,3], 4,5,6 , 5-95 + def clean_string_int_for_list(self, field, string): + chars_to_remove = ["[", "]", "'", '"', " "] + if Literal[string] == Literal["all"]: + return 
string, MSG_SUCCESS + if Literal[string] == Literal[""]: + return string, MSG_SUCCESS + if isinstance(string, str): + string = self.remove_chars(string, chars_to_remove) + if len(string) == 0: + return None, {"msg": field + " is invalid"} + if "-" in string: + string = string.split("-") + if len(string) == 2: + try: + x = int(string[0]) + if not isinstance(x, int): + raise + except Exception as exc: + return None, { + "msg": field + " first range element is not an integer" + } + try: + y = int(string[1]) + if not isinstance(y, int): + raise + except Exception as exc: + return None, { + "msg": field + + " second range element is not an integer" + } + if y > x: + return list(range(x, y + 1)), MSG_SUCCESS + else: + return None, { + "msg": field + + " second integer cannot be smaller than first" + } + else: + return None, {"msg": field + " is invalid"} + if "," in string: + string = string.split(",") + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # util function to set path to empty - by default empty path has a "." 
+ def clean_path_string_when_empty(self, field, string): + if isinstance(string, Path) and string == Path(""): + string = "" + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # get the pydantic_kwargs and catches any errors in doing so + def get_and_validate_pydantic_args( + self, cls_container, cls, pydantic_kwargs, exclude_modes + ): + try: + try: + self.get_pydantic_kwargs( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + return pydantic_kwargs, MSG_SUCCESS + except ValidationError as exc: + return None, exc.errors() + except Exception as exc: + return None, exc.args + + # validate the model and return errors for user actioning + def validate_pydantic_model(self, cls, pydantic_kwargs): + # instantiate the pydantic model form the kwargs we just pulled + try: + try: + pydantic_model = settings.ReconstructionSettings.parse_obj( + pydantic_kwargs + ) + return pydantic_model, MSG_SUCCESS + except ValidationError as exc: + return None, exc.errors() + except Exception as exc: + return None, exc.args + + # test to make sure model coverts to json which should ensure compatibility with yaml export + def validate_and_return_json(self, pydantic_model): + try: + json_format = pydantic_model.json(indent=4) + return json_format, MSG_SUCCESS + except Exception as exc: + return None, exc.args + + # gets a copy of the model from a yaml file + # will get all fields (even those that are optional and not in yaml) and default values + # model needs further parsing against yaml file for fields + def get_model_from_file(self, model_file_path): + pydantic_model = None + try: + try: + pydantic_model = utils.yaml_to_model( + model_file_path, settings.ReconstructionSettings + ) + except ValidationError as exc: + return pydantic_model, exc.errors() + if pydantic_model is None: + raise Exception("utils.yaml_to_model - returned a None model") + return pydantic_model, MSG_SUCCESS + except Exception as exc: + return None, exc.args + + # handles json with boolean properly 
and converts to lowercase string + # as required + def convert(self, obj): + if isinstance(obj, bool): + return str(obj).lower() + if isinstance(obj, (list, tuple)): + return [self.convert(item) for item in obj] + if isinstance(obj, dict): + return { + self.convert(key): self.convert(value) + for key, value in obj.items() + } + return obj + + # Main function to add pydantic model to container + # https://github.com/chrishavlin/miscellaneous_python/blob/main/src/pydantic_magicgui_roundtrip.py + # Has limitation and can cause breakages for unhandled or incorrectly handled types + # Cannot handle Union types/typing - for now being handled explicitly + # Ignoring NoneType since those should be Optional but maybe needs displaying ?? + # ToDo: Needs revisitation, Union check + # Displaying Union field "time_indices" as LineEdit component + # excludes handles fields that are not supposed to show up from __fields__ + # json_dict adds ability to provide new set of default values at time of container creation + + def add_pydantic_to_container( + self, + py_model: Union[BaseModel, ModelMetaclass], + container: widgets.Container, + excludes=[], + json_dict=None, + ): + # recursively traverse a pydantic model adding widgets to a container. 
When a nested + # pydantic model is encountered, add a new nested container + + for field, field_def in py_model.__fields__.items(): + if field_def is not None and field not in excludes: + def_val = field_def.default + ftype = field_def.type_ + toolTip = "" + try: + for f_val in field_def.class_validators.keys(): + toolTip = f"{toolTip}{f_val} " + except Exception as e: + pass + if isinstance(ftype, BaseModel) or isinstance( + ftype, ModelMetaclass + ): + json_val = None + if json_dict is not None: + json_val = json_dict[field] + # the field is a pydantic class, add a container for it and fill it + new_widget_cls = widgets.Container + new_widget = new_widget_cls(name=field_def.name) + new_widget.tooltip = toolTip + self.add_pydantic_to_container( + ftype, new_widget, excludes, json_val + ) + # ToDo: Implement Union check, tried: + # pydantic.typing.is_union(ftype) + # isinstance(ftype, types.UnionType) + # https://stackoverflow.com/questions/45957615/how-to-check-a-variable-against-union-type-during-runtime + elif isinstance(ftype, type(Union[NonNegativeInt, List, str])): + if ( + field == "background_path" + ): # field == "background_path": + new_widget_cls, ops = get_widget_class( + def_val, + Annotated[Path, {"mode": "d"}], + dict(name=field, value=def_val), + ) + new_widget = new_widget_cls(**ops) + toolTip = ( + "Select the folder containing background.zarr" + ) + elif field == "time_indices": # field == "time_indices": + new_widget_cls, ops = get_widget_class( + def_val, str, dict(name=field, value=def_val) + ) + new_widget = new_widget_cls(**ops) + else: # other Union cases + new_widget_cls, ops = get_widget_class( + def_val, str, dict(name=field, value=def_val) + ) + new_widget = new_widget_cls(**ops) + new_widget.tooltip = toolTip + if isinstance(new_widget, widgets.EmptyWidget): + warnings.warn( + message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}" + ) + elif isinstance(def_val, float): + # parse the field, add 
appropriate widget + def_step_size = 0.001 + if field_def.name == "regularization_strength": + def_step_size = 0.00001 + if def_val > -1 and def_val < 1: + new_widget_cls, ops = get_widget_class( + None, + ftype, + dict( + name=field_def.name, + value=def_val, + step=float(def_step_size), + ), + ) + new_widget = new_widget_cls(**ops) + new_widget.tooltip = toolTip + else: + new_widget_cls, ops = get_widget_class( + None, + ftype, + dict(name=field_def.name, value=def_val), + ) + new_widget = new_widget_cls(**ops) + new_widget.tooltip = toolTip + if isinstance(new_widget, widgets.EmptyWidget): + warnings.warn( + message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}" + ) + else: + # parse the field, add appropriate widget + new_widget_cls, ops = get_widget_class( + None, ftype, dict(name=field_def.name, value=def_val) + ) + new_widget = new_widget_cls(**ops) + if isinstance(new_widget, widgets.EmptyWidget): + warnings.warn( + message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}" + ) + else: + new_widget.tooltip = toolTip + if json_dict is not None and ( + not isinstance(new_widget, widgets.Container) + or (isinstance(new_widget, widgets.FileEdit)) + ): + if field in json_dict.keys(): + if isinstance(new_widget, widgets.CheckBox): + new_widget.value = ( + True if json_dict[field] == "true" else False + ) + elif isinstance(new_widget, widgets.FileEdit): + if len(json_dict[field]) > 0: + extension = os.path.splitext(json_dict[field])[ + 1 + ] + if len(extension) > 0: + new_widget.value = Path( + json_dict[field] + ).parent.absolute() # CLI accepts BG folder not .zarr + else: + new_widget.value = Path(json_dict[field]) + else: + new_widget.value = json_dict[field] + container.append(new_widget) + + # refer - add_pydantic_to_container() for comments + def get_pydantic_kwargs( + self, + container: widgets.Container, + pydantic_model, + pydantic_kwargs: dict, + excludes=[], + json_dict=None, + 
): + # given a container that was instantiated from a pydantic model, get the arguments + # needed to instantiate that pydantic model from the container. + + # traverse model fields, pull out values from container + for field, field_def in pydantic_model.__fields__.items(): + if field_def is not None and field not in excludes: + ftype = field_def.type_ + if isinstance(ftype, BaseModel) or isinstance( + ftype, ModelMetaclass + ): + # go deeper + pydantic_kwargs[field] = ( + {} + ) # new dictionary for the new nest level + # any pydantic class will be a container, so pull that out to pass + # to the recursive call + sub_container = getattr(container, field_def.name) + self.get_pydantic_kwargs( + sub_container, + ftype, + pydantic_kwargs[field], + excludes, + json_dict, + ) + else: + # not a pydantic class, just pull the field value from the container + if hasattr(container, field_def.name): + value = getattr(container, field_def.name).value + pydantic_kwargs[field] = value + + # copied from main_widget + # file open/select dialog + def open_file_dialog(self, default_path, type, filter="All Files (*)"): + if type == "dir": + return self.open_dialog( + "select a directory", str(default_path), type, filter + ) + elif type == "file": + return self.open_dialog( + "select a file", str(default_path), type, filter + ) + elif type == "files": + return self.open_dialog( + "select file(s)", str(default_path), type, filter + ) + elif type == "save": + return self.open_dialog( + "save a file", str(default_path), type, filter + ) + else: + return self.open_dialog( + "select a directory", str(default_path), type, filter + ) + + def open_dialog(self, title, ref, type, filter="All Files (*)"): + """ + opens pop-up dialogue for the user to choose a specific file or directory. 
+ + Parameters + ---------- + title: (str) message to display at the top of the pop up + ref: (str) reference path to start the search at + type: (str) type of file the user is choosing (dir, file, or save) + + Returns + ------- + + """ + + options = QFileDialog.DontUseNativeDialog + if type == "dir": + path = QFileDialog.getExistingDirectory( + None, title, ref, options=options + ) + elif type == "file": + path = QFileDialog.getOpenFileName( + None, title, ref, filter=filter, options=options + )[0] + elif type == "files": + path = QFileDialog.getOpenFileNames( + None, title, ref, filter=filter, options=options + )[0] + elif type == "save": + path = QFileDialog.getSaveFileName( + None, "Choose a save name", ref, filter=filter, options=options + )[0] + else: + raise ValueError("Did not understand file dialogue type") + + return path + + +class MyWorker: + + def __init__(self, formLayout, tab_recon: Ui_ReconTab_Form, parentForm): + super().__init__() + self.formLayout: QFormLayout = formLayout + self.tab_recon: Ui_ReconTab_Form = tab_recon + self.ui: QWidget = parentForm + self.max_cores = os.cpu_count() + # In the case of CLI, we just need to submit requests in a non-blocking way + self.threadPool = int(self.max_cores / 2) + self.results = {} + self.pool = None + self.futures = [] + # https://click.palletsprojects.com/en/stable/testing/ + # self.runner = CliRunner() + # jobs_mgmt.shared_var_jobs = self.JobsManager.shared_var_jobs + self.JobsMgmt = jobs_mgmt.JobsManagement() + self.useServer = True + self.serverRunning = True + self.server_socket = None + self.isInitialized = False + + def initialize(self): + if not self.isInitialized: + thread = threading.Thread(target=self.start_server) + thread.start() + self.workerThreadRowDeletion = RowDeletionWorkerThread( + self.formLayout + ) + self.workerThreadRowDeletion.removeRowSignal.connect( + self.tab_recon.remove_row + ) + self.workerThreadRowDeletion.start() + self.isInitialized = True + + def set_new_instances(self, 
    def set_new_instances(self, formLayout, tab_recon, parentForm):
        # Re-bind GUI references (e.g. after the plugin widget is re-created).
        self.formLayout: QFormLayout = formLayout
        self.tab_recon: Ui_ReconTab_Form = tab_recon
        self.ui: QWidget = parentForm
        self.workerThreadRowDeletion.set_new_instances(formLayout)

    def find_widget_row_in_layout(self, strID):
        # Locate the table row whose tooltip contains strID; detaches the
        # widget as a side effect and returns its row index, or -1 if absent.
        # NOTE(review): unlike RowDeletionWorkerThread.find_widget_row_in_layout
        # this does not guard against layout.itemAt(idx) returning None for a
        # shrinking table — confirm rows cannot be removed concurrently.
        layout: QFormLayout = self.formLayout
        for idx in range(0, layout.rowCount()):
            widgetItem = layout.itemAt(idx)
            name_widget = widgetItem.widget()
            toolTip_string = str(name_widget.toolTip)
            if strID in toolTip_string:
                name_widget.setParent(None)
                return idx
        return -1

    def start_server(self):
        # Blocking accept-loop; run on a background thread (see initialize()).
        # Each accepted client is handled on its own thread so the accept
        # loop is never blocked by a slow client.
        try:
            if not self.useServer:
                return

            self.server_socket = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM
            )
            self.server_socket.bind(("localhost", jobs_mgmt.SERVER_PORT))
            self.server_socket.listen(
                50
            )  # become a server socket, maximum 50 connections

            while self.serverRunning:
                client_socket, address = self.server_socket.accept()
                if self.ui is not None and not self.ui.isVisible():
                    break
                try:
                    # dont block the server thread
                    thread = threading.Thread(
                        target=self.decode_client_data,
                        args=("", "", "", "", client_socket),
                    )
                    thread.start()
                except Exception as exc:
                    print(exc.args)
                time.sleep(1)

            self.server_socket.close()
        except Exception as exc:
            if not self.serverRunning:
                self.serverRunning = True
                return  # ignore - will cause an exception on napari close but that is fine and does the job
            print(exc.args)

    def stop_server(self):
        # Closing the socket unblocks accept() and ends the server loop.
        try:
            if self.server_socket is not None:
                self.serverRunning = False
                self.server_socket.close()
        except Exception as exc:
            print(exc.args)

    def get_max_CPU_cores(self):
        # Number of logical cores reported by os.cpu_count().
        return self.max_cores

    def set_pool_threads(self, t):
        # Clamp the requested pool size to (0, max_cores) exclusive.
        if t > 0 and t < self.max_cores:
            self.threadPool = t

    def start_pool(self):
        # Create the executor on first use only; reused across submissions.
        if self.pool is None:
            self.pool = concurrent.futures.ThreadPoolExecutor(
                max_workers=self.threadPool
            )

    def shut_down_pool(self):
        # Blocks until all submitted work has finished.
        self.pool.shutdown(wait=True)
each client response thread. It parses the information received from the client + # and is responsible for parsing each well/pos Job if the case may be and starting individual update threads + # using the tableUpdateAndCleaupThread() method + # This is also handling an unused "CoNvErTeR" functioning that can be implemented on 3rd party apps + def decode_client_data( + self, + expIdx="", + jobIdx="", + wellName="", + logs_folder_path="", + client_socket=None, + ): + + if client_socket is not None and expIdx == "" and jobIdx == "": + try: + buf = client_socket.recv(10240) + if len(buf) > 0: + if b"\n" in buf: + dataList = buf.split(b"\n") + else: + dataList = [buf] + for data in dataList: + if len(data) > 0: + decoded_string = data.decode() + if ( + "CoNvErTeR" in decoded_string + ): # this request came from an agnostic route - requires processing + json_str = str(decoded_string) + json_obj = json.loads(json_str) + converter_params = json_obj["CoNvErTeR"] + input_data = converter_params["input"] + output_data = converter_params["output"] + recon_params = converter_params["params"] + expID = recon_params["expID"] + mode = recon_params["mode"] + if "config_path" in recon_params.keys(): + config_path = recon_params["config_path"] + else: + config_path = "" + + proc_params = {} + proc_params["exp_id"] = expID + proc_params["desc"] = expID + proc_params["input_path"] = str(input_data) + proc_params["output_path"] = str(output_data) + proc_params["output_path_parent"] = str( + Path(output_data).parent.absolute() + ) + proc_params["show"] = False + proc_params["rx"] = 1 + + if config_path == "": + model = None + if ( + len(self.tab_recon.pydantic_classes) + > 0 + ): + for ( + item + ) in self.tab_recon.pydantic_classes: + if mode == item["selected_modes"]: + cls = item["class"] + cls_container = item[ + "container" + ] + exclude_modes = item[ + "exclude_modes" + ] + output_LineEdit = item[ + "output_LineEdit" + ] + output_parent_dir = item[ + "output_parent_dir" + ] + 
full_out_path = os.path.join( + output_parent_dir, + output_LineEdit.value, + ) + + # gather input/out locations + output_dir = full_out_path + if output_data == "": + output_data = output_dir + proc_params[ + "output_path" + ] = str(output_data) + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.tab_recon.message_box( + "No model defined !" + ) + return + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = ( + self.tab_recon.get_and_validate_pydantic_args( + cls_container, + cls, + pydantic_kwargs, + exclude_modes, + ) + ) + if pydantic_kwargs is None: + self.tab_recon.message_box( + ret_msg + ) + return + + ( + input_channel_names, + ret_msg, + ) = self.tab_recon.clean_string_for_list( + "input_channel_names", + pydantic_kwargs[ + "input_channel_names" + ], + ) + if input_channel_names is None: + self.tab_recon.message_box( + ret_msg + ) + return + pydantic_kwargs[ + "input_channel_names" + ] = input_channel_names + + time_indices, ret_msg = ( + self.tab_recon.clean_string_int_for_list( + "time_indices", + pydantic_kwargs[ + "time_indices" + ], + ) + ) + if time_indices is None: + self.tab_recon.message_box( + ret_msg + ) + return + pydantic_kwargs[ + "time_indices" + ] = time_indices + + time_indices, ret_msg = ( + self.tab_recon.clean_string_int_for_list( + "time_indices", + pydantic_kwargs[ + "time_indices" + ], + ) + ) + if time_indices is None: + self.tab_recon.message_box( + ret_msg + ) + return + pydantic_kwargs[ + "time_indices" + ] = time_indices + + if ( + "birefringence" + in pydantic_kwargs.keys() + ): + ( + background_path, + ret_msg, + ) = self.tab_recon.clean_path_string_when_empty( + "background_path", + pydantic_kwargs[ + "birefringence" + ]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.tab_recon.message_box( + ret_msg + ) + return + pydantic_kwargs[ + "birefringence" + ]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return 
errors if None + pydantic_model, ret_msg = ( + self.tab_recon.validate_pydantic_model( + cls, pydantic_kwargs + ) + ) + if pydantic_model is None: + self.tab_recon.message_box( + ret_msg + ) + return + model = pydantic_model + break + if model is None: + model, msg = ( + self.tab_recon.build_model(mode) + ) + yaml_path = os.path.join( + str( + Path(output_data).parent.absolute() + ), + expID + ".yml", + ) + utils.model_to_yaml(model, yaml_path) + proc_params["config_path"] = str(yaml_path) + + tableEntryWorker = AddTableEntryWorkerThread( + expID, expID, proc_params + ) + tableEntryWorker.add_tableentry_signal.connect( + self.tab_recon.addTableEntry + ) + tableEntryWorker.start() + time.sleep(10) + return + else: + json_str = str(decoded_string) + json_obj = json.loads(json_str) + for k in json_obj: + expIdx = k + jobIdx = json_obj[k]["jID"] + wellName = json_obj[k]["pos"] + logs_folder_path = json_obj[k]["log"] + if ( + expIdx not in self.results.keys() + ): # this job came from agnostic CLI route - no processing + now = datetime.datetime.now() + ms = now.strftime("%f")[:3] + unique_id = ( + now.strftime("%Y_%m_%d_%H_%M_%S_") + ms + ) + expIdx = expIdx + "-" + unique_id + self.JobsMgmt.put_Job_in_list( + None, + expIdx, + str(jobIdx), + wellName, + mode="server", + ) + # print("Submitting Job: {job} expIdx: {expIdx}".format(job=jobIdx, expIdx=expIdx)) + thread = threading.Thread( + target=self.table_update_and_cleaup_thread, + args=( + expIdx, + jobIdx, + wellName, + logs_folder_path, + client_socket, + ), + ) + thread.start() + return + except Exception as exc: + print(exc.args) + + # the table update thread can be called from multiple points/threads + # on errors - table row item is updated but there is no row deletion + # on successful processing - the row item is expected to be deleted + # row is being deleted from a seperate thread for which we need to connect using signal + + # This is handling essentially each job thread. 
    # the table update thread can be called from multiple points/threads
    # on errors - table row item is updated but there is no row deletion
    # on successful processing - the row item is expected to be deleted
    # row is being deleted from a seperate thread for which we need to connect using signal

    # This is handling essentially each job thread. Points of entry are on a failed job submission
    # which then calls this to update based on the expID (used for .yml naming). On successful job
    # submissions jobID, the point of entry is via the socket connection the GUI is listening and
    # then spawns a new thread to avoid blocking of other connections.
    # If a job submission spawns more jobs then this also calls other methods via signal to create
    # the required GUI components in the main thread.
    # Once we have expID and jobID this thread periodically loops and updates each job status and/or
    # the job error by reading the log files. Using certain keywords
    # eg JOB_COMPLETION_STR = "Job completed successfully" we determine the progress. We also create
    # a map for expID which might have multiple jobs to determine when a reconstruction is
    # finished vs a single job finishing.
    # The loop ends based on user, time-out, job(s) completion and errors and handles removal of
    # processing GUI table items (on main thread).
    # Based on the conditions the loop will end calling clientRelease()
    def table_update_and_cleaup_thread(
        self,
        expIdx="",
        jobIdx="",
        wellName="",
        logs_folder_path="",
        client_socket=None,
    ):
        jobIdx = str(jobIdx)

        # ToDo: Another approach to this could be to implement a status thread on the client side
        # Since the client is already running till the job is completed, the client could ping status
        # at regular intervals and also provide results and exceptions we currently read from the file
        # Currently we only send JobID/UniqueID pair from Client to Server. This would reduce multiple threads
        # server side.

        if expIdx != "" and jobIdx != "":
            # this request came from server listening so we wait for the Job to finish and update progress
            if expIdx not in self.results.keys():
                # unknown experiment: create a placeholder table entry and wait
                # for the main thread to register it in self.results
                proc_params = {}
                tableID = "{exp} - {job} ({pos})".format(
                    exp=expIdx, job=jobIdx, pos=wellName
                )
                proc_params["exp_id"] = expIdx
                proc_params["desc"] = tableID
                proc_params["config_path"] = ""
                proc_params["input_path"] = ""
                proc_params["output_path"] = ""
                proc_params["output_path_parent"] = ""
                proc_params["show"] = False
                proc_params["rx"] = 1

                tableEntryWorker = AddTableEntryWorkerThread(
                    tableID, tableID, proc_params
                )
                tableEntryWorker.add_tableentry_signal.connect(
                    self.tab_recon.addTableEntry
                )
                tableEntryWorker.start()

                while expIdx not in self.results.keys():
                    time.sleep(1)

                params = self.results[expIdx]["JobUNK"].copy()
                params["status"] = STATUS_running_job
            else:
                params = self.results[expIdx]["JobUNK"].copy()

            if (
                jobIdx not in self.results[expIdx].keys()
                and len(self.results[expIdx].keys()) == 1
            ):
                # this is the first job
                params["primary"] = True
                self.results[expIdx][jobIdx] = params
            elif (
                jobIdx not in self.results[expIdx].keys()
                and len(self.results[expIdx].keys()) > 1
            ):
                # this is a new job
                # we need to create cancel and job status windows and add to parent container
                params["primary"] = False
                NEW_WIDGETS_QUEUE.append(expIdx + jobIdx)
                parentLayout: QVBoxLayout = params["parent_layout"]
                worker_thread = AddWidgetWorkerThread(
                    parentLayout, expIdx, jobIdx, params["desc"], wellName
                )
                worker_thread.add_widget_signal.connect(
                    self.tab_recon.add_widget
                )
                NEW_WIDGETS_QUEUE_THREADS.append(worker_thread)

                while len(NEW_WIDGETS_QUEUE_THREADS) > 0:
                    s_worker_thread = NEW_WIDGETS_QUEUE_THREADS.pop(0)
                    s_worker_thread.start()
                    time.sleep(1)

                # wait for new components reference
                while expIdx + jobIdx in NEW_WIDGETS_QUEUE:
                    time.sleep(1)

                _cancelJobBtn = MULTI_JOBS_REFS[expIdx + jobIdx]["cancelBtn"]
                _infoBox = MULTI_JOBS_REFS[expIdx + jobIdx]["infobox"]
                params["table_entry_infoBox"] = _infoBox
                params["cancelJobButton"] = _cancelJobBtn

                self.results[expIdx][jobIdx] = params

            _infoBox: ScrollableLabel = params["table_entry_infoBox"]
            _cancelJobBtn: PushButton = params["cancelJobButton"]

            _txtForInfoBox = "Updating {id}-{pos}: Please wait... \nJobID assigned: {jID} ".format(
                id=params["desc"], pos=wellName, jID=jobIdx
            )
            try:
                _cancelJobBtn.text = "Cancel Job {jID} ({posName})".format(
                    jID=jobIdx, posName=wellName
                )
                _cancelJobBtn.enabled = True
                _infoBox.setText(_txtForInfoBox)
            except:
                # deleted by user - no longer needs updating
                params["status"] = STATUS_user_cleared_job
                return
            _tUpdateCount = 0
            _tUpdateCountTimeout = (
                jobs_mgmt.JOBS_TIMEOUT * 60
            )  # 5 mins - match executor time-out
            _lastUpdate_jobTXT = ""
            jobTXT = ""
            while True:
                time.sleep(1)  # update every sec and exit on break
                try:
                    if "cancel called" in _cancelJobBtn.text:
                        json_obj = {
                            "uID": expIdx,
                            "jID": jobIdx,
                            "command": "cancel",
                        }
                        json_str = json.dumps(json_obj) + "\n"
                        client_socket.send(json_str.encode())
                        params["status"] = STATUS_user_cancelled_job
                        _infoBox.setText(
                            "User called for Cancel Job Request\n"
                            + "Please check terminal output for Job status..\n\n"
                            + jobTXT
                        )
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=1
                        )
                        break  # cancel called by user
                    if _infoBox == None:
                        params["status"] = STATUS_user_cleared_job
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=2
                        )
                        break  # deleted by user - no longer needs updating
                    if _infoBox:
                        pass
                except Exception as exc:
                    print(exc.args)
                    params["status"] = STATUS_user_cleared_job
                    self.client_release(
                        expIdx, jobIdx, client_socket, params, reason=3
                    )
                    break  # deleted by user - no longer needs updating
                if self.JobsMgmt.has_submitted_job(
                    expIdx, jobIdx, mode="server"
                ):
                    if params["status"] in [STATUS_finished_job]:
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=4
                        )
                        break
                    elif params["status"] in [STATUS_errored_job]:
                        jobERR = self.JobsMgmt.check_for_jobID_File(
                            jobIdx, logs_folder_path, extension="err"
                        )
                        _infoBox.setText(
                            jobIdx + "\n" + params["desc"] + "\n\n" + jobERR
                        )
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=5
                        )
                        break
                    else:
                        # read the job's .out log and derive its status from
                        # well-known marker strings
                        jobTXT = self.JobsMgmt.check_for_jobID_File(
                            jobIdx, logs_folder_path, extension="out"
                        )
                        try:
                            if jobTXT == "":  # job file not created yet
                                # print(jobIdx + " not started yet")
                                time.sleep(2)
                                _tUpdateCount += 2
                                if (
                                    _tUpdateCount > 10
                                ):  # if out file is empty for 10s, check the err file to update user
                                    jobERR = self.JobsMgmt.check_for_jobID_File(
                                        jobIdx,
                                        logs_folder_path,
                                        extension="err",
                                    )
                                    if JOB_OOM_EVENT in jobERR:
                                        params["status"] = STATUS_errored_job
                                        _infoBox.setText(
                                            jobERR + "\n\n" + jobTXT
                                        )
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                                    _infoBox.setText(
                                        jobIdx
                                        + "\n"
                                        + params["desc"]
                                        + "\n\n"
                                        + jobERR
                                    )
                                    if _tUpdateCount > _tUpdateCountTimeout:
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                            elif params["status"] == STATUS_finished_job:
                                rowIdx = self.find_widget_row_in_layout(expIdx)
                                # check to ensure row deletion due to shrinking table
                                # if not deleted try to delete again
                                if rowIdx < 0:
                                    self.client_release(
                                        expIdx,
                                        jobIdx,
                                        client_socket,
                                        params,
                                        reason=6,
                                    )
                                    break
                                else:
                                    break
                            elif JOB_COMPLETION_STR in jobTXT:
                                params["status"] = STATUS_finished_job
                                _infoBox.setText(jobTXT)
                                # this is the only case where row deleting occurs
                                # we cant delete the row directly from this thread
                                # we will use the exp_id to identify and delete the row
                                # using Signal
                                # break - based on status
                            elif JOB_TRIGGERED_EXC in jobTXT:
                                params["status"] = STATUS_errored_job
                                jobERR = self.JobsMgmt.check_for_jobID_File(
                                    jobIdx, logs_folder_path, extension="err"
                                )
                                _infoBox.setText(
                                    jobIdx
                                    + "\n"
                                    + params["desc"]
                                    + "\n\n"
                                    + jobTXT
                                    + "\n\n"
                                    + jobERR
                                )
                                self.client_release(
                                    expIdx,
                                    jobIdx,
                                    client_socket,
                                    params,
                                    reason=0,
                                )
                                break
                            elif JOB_RUNNING_STR in jobTXT:
                                params["status"] = STATUS_running_job
                                _infoBox.setText(jobTXT)
                                _tUpdateCount += 1
                                if _tUpdateCount > 60:
                                    # no log growth for 60 polls: check .err,
                                    # then reset or warn depending on change
                                    jobERR = self.JobsMgmt.check_for_jobID_File(
                                        jobIdx,
                                        logs_folder_path,
                                        extension="err",
                                    )
                                    if JOB_OOM_EVENT in jobERR:
                                        params["status"] = STATUS_errored_job
                                        _infoBox.setText(
                                            jobERR + "\n\n" + jobTXT
                                        )
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                                    elif _lastUpdate_jobTXT != jobTXT:
                                        # if there is an update reset counter
                                        _tUpdateCount = 0
                                        _lastUpdate_jobTXT = jobTXT
                                    else:
                                        _infoBox.setText(
                                            "Please check terminal output for Job status..\n\n"
                                            + jobTXT
                                        )
                                    if _tUpdateCount > _tUpdateCountTimeout:
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                            else:
                                jobERR = self.JobsMgmt.check_for_jobID_File(
                                    jobIdx, logs_folder_path, extension="err"
                                )
                                _infoBox.setText(
                                    jobIdx
                                    + "\n"
                                    + params["desc"]
                                    + "\n\n"
                                    + jobERR
                                )
                                self.client_release(
                                    expIdx,
                                    jobIdx,
                                    client_socket,
                                    params,
                                    reason=0,
                                )
                                break
                        except Exception as exc:
                            print(exc.args)
                else:
                    self.client_release(
                        expIdx, jobIdx, client_socket, params, reason=0
                    )
                    break
        else:
            # this would occur when an exception happens on the pool side before or during job submission
            # we dont have a job ID and will update based on exp_ID/uID
            # if job submission was not successful we can assume the client is not listening
            # and does not require a clientRelease cmd
            for uID in self.results.keys():
                params = self.results[uID]["JobUNK"]
                if params["status"] in [STATUS_errored_pool]:
                    _infoBox = params["table_entry_infoBox"]
                    poolERR = params["error"]
                    _infoBox.setText(poolERR)

    def client_release(self, expIdx, jobIdx, client_socket, params, reason=0):
        # Mark the job complete, optionally show the reconstruction, then send
        # "clientRelease" so the waiting CLI client can exit.
        # only need to release client from primary job
        self.JobsMgmt.put_Job_completion_in_list(True, expIdx, jobIdx)
        showData_thread = None
        if params["primary"]:
            if "show" in params:
                if params["show"]:
                    # Read reconstruction data
                    showData_thread = ShowDataWorkerThread(
                        params["output_path"]
                    )
                    showData_thread.show_data_signal.connect(
                        self.tab_recon.show_dataset
                    )
                    showData_thread.start()

            # for multi-job expID we need to check completion for all of them
            while not self.JobsMgmt.check_all_ExpJobs_completion(expIdx):
                time.sleep(1)

            json_obj = {
                "uID": expIdx,
                "jID": jobIdx,
                "command": "clientRelease",
            }
            json_str = json.dumps(json_obj) + "\n"
            client_socket.send(json_str.encode())

        if (
            reason != 0
        ):  # remove processing entry when exiting without error
            ROW_POP_QUEUE.append(expIdx)
        # print("FINISHED")

        # NOTE(review): relies on the executor's private _work_queue — confirm
        # against the concurrent.futures version in use.
        if self.pool is not None:
            if self.pool._work_queue.qsize() == 0:
                self.pool.shutdown()
                self.pool = None

        if showData_thread is not None:
            while showData_thread.isRunning():
                time.sleep(3)
params["input_path"] != "": + f = self.pool.submit(self.run, params) + self.futures.append(f) + except Exception as exc: + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_errored_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = str( + "\n".join(exc.args) + ) + self.table_update_and_cleaup_thread() + + def run_multi_in_pool(self, multi_params_as_list): + self.start_pool() + for params in multi_params_as_list: + self.results[params["exp_id"]] = {} + self.results[params["exp_id"]]["JobUNK"] = params + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_submitted_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = "" + try: + self.pool.map(self.run, multi_params_as_list) + except Exception as exc: + for params in multi_params_as_list: + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_errored_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = str( + "\n".join(exc.args) + ) + self.table_update_and_cleaup_thread() + + def get_results(self): + return self.results + + def get_result(self, exp_id): + return self.results[exp_id] + + def run(self, params): + # thread where work is passed to CLI which will handle the + # multi-processing aspects as Jobs + if params["exp_id"] not in self.results.keys(): + self.results[params["exp_id"]] = {} + self.results[params["exp_id"]]["JobUNK"] = params + self.results[params["exp_id"]]["JobUNK"]["error"] = "" + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_running_pool + + try: + # does need further threading ? probably not ! 
    def run(self, params):
        # thread where work is passed to CLI which will handle the
        # multi-processing aspects as Jobs
        if params["exp_id"] not in self.results.keys():
            self.results[params["exp_id"]] = {}
            self.results[params["exp_id"]]["JobUNK"] = params
            self.results[params["exp_id"]]["JobUNK"]["error"] = ""
            self.results[params["exp_id"]]["JobUNK"][
                "status"
            ] = STATUS_running_pool

        try:
            # does need further threading ? probably not !
            thread = threading.Thread(
                target=self.run_in_subprocess, args=(params,)
            )
            thread.start()

        except Exception as exc:
            self.results[params["exp_id"]]["JobUNK"][
                "status"
            ] = STATUS_errored_pool
            self.results[params["exp_id"]]["JobUNK"]["error"] = str(
                "\n".join(exc.args)
            )
            self.table_update_and_cleaup_thread()

    def run_in_subprocess(self, params):
        """function that initiates the processing on the CLI"""
        # Blocks until the spawned "reconstruct" CLI process exits; a non-zero
        # return code is converted into an errored-pool status.
        try:
            input_path = str(params["input_path"])
            config_path = str(params["config_path"])
            output_path = str(params["output_path"])
            uid = str(params["exp_id"])
            rx = str(params["rx"])
            mainfp = str(jobs_mgmt.FILE_PATH)

            self.results[params["exp_id"]]["JobUNK"][
                "status"
            ] = STATUS_submitted_job

            proc = subprocess.run(
                [
                    "python",
                    mainfp,
                    "reconstruct",
                    "-i",
                    input_path,
                    "-c",
                    config_path,
                    "-o",
                    output_path,
                    "-rx",
                    str(rx),
                    "-uid",
                    uid,
                ]
            )
            self.results[params["exp_id"]]["JobUNK"]["proc"] = proc
            if proc.returncode != 0:
                raise Exception(
                    "An error occurred in processing ! Check terminal output."
                )

        except Exception as exc:
            self.results[params["exp_id"]]["JobUNK"][
                "status"
            ] = STATUS_errored_pool
            self.results[params["exp_id"]]["JobUNK"]["error"] = str(
                "\n".join(exc.args)
            )
            self.table_update_and_cleaup_thread()
class ShowDataWorkerThread(QThread):
    """Relays a reconstruction output path to the GUI thread.

    Emitting from ``run`` lets the connected slot execute on the main
    thread, where napari/Qt widget updates must happen.
    """

    show_data_signal = Signal(str)

    def __init__(self, path):
        super().__init__()
        self.path = path

    def run(self):
        self.show_data_signal.emit(self.path)


class AddOTFTableEntryWorkerThread(QThread):
    """Relays an OTF table-entry request to the GUI thread via a signal."""

    add_tableOTFentry_signal = Signal(str, bool, bool)

    def __init__(self, OTF_dir_path, bool_msg, doCheck=False):
        super().__init__()
        self.OTF_dir_path, self.bool_msg, self.doCheck = (
            OTF_dir_path,
            bool_msg,
            doCheck,
        )

    def run(self):
        payload = (self.OTF_dir_path, self.bool_msg, self.doCheck)
        self.add_tableOTFentry_signal.emit(*payload)


class AddTableEntryWorkerThread(QThread):
    """Relays a processing table-entry request to the GUI thread."""

    add_tableentry_signal = Signal(str, str, dict)

    def __init__(self, expID, desc, params):
        super().__init__()
        self.expID, self.desc, self.params = expID, desc, params

    def run(self):
        self.add_tableentry_signal.emit(self.expID, self.desc, self.params)


class AddWidgetWorkerThread(QThread):
    """Relays a per-job widget creation request to the GUI thread."""

    add_widget_signal = Signal(QVBoxLayout, str, str, str, str)

    def __init__(self, layout, expID, jID, desc, wellName):
        super().__init__()
        self.layout = layout
        self.expID, self.jID = expID, jID
        self.desc, self.wellName = desc, wellName

    def run(self):
        self.add_widget_signal.emit(
            self.layout, self.expID, self.jID, self.desc, self.wellName
        )
class RowDeletionWorkerThread(QThread):
    """Searches for a row based on its ID and then
    emits a signal to QFormLayout on the main thread for deletion"""

    removeRowSignal = Signal(int, str)

    def __init__(self, formLayout):
        super().__init__()
        self.formLayout = formLayout

    def set_new_instances(self, formLayout):
        # Re-bind the layout (e.g. after the plugin widget is re-created).
        self.formLayout: QFormLayout = formLayout

    # we might deal with race conditions with a shrinking table
    # find out widget and return its index
    def find_widget_row_in_layout(self, strID):
        # Returns the row index whose tooltip contains strID (detaching the
        # widget as a side effect), or -1 when not found.
        layout: QFormLayout = self.formLayout
        for idx in range(0, layout.rowCount()):
            widgetItem = layout.itemAt(idx)
            if widgetItem is not None:
                name_widget = widgetItem.widget()
                toolTip_string = str(name_widget.toolTip)
                if strID in toolTip_string:
                    name_widget.setParent(None)
                    return idx
        return -1

    def run(self):
        # Poll ROW_POP_QUEUE for the lifetime of the thread: short sleep after
        # processing an entry, longer sleep when the queue is idle.
        while True:
            if len(ROW_POP_QUEUE) > 0:
                stringID = ROW_POP_QUEUE.pop(0)
                # Emit the signal to remove the row
                deleteRow = self.find_widget_row_in_layout(stringID)
                if deleteRow > -1:
                    self.removeRowSignal.emit(int(deleteRow), str(stringID))
                time.sleep(1)
            else:
                time.sleep(5)


class DropButton(QPushButton):
    """A drag & drop PushButton to load model file(s)"""

    def __init__(self, text, parent=None, recon_tab: Ui_ReconTab_Form = None):
        super().__init__(text, parent)
        self.setAcceptDrops(True)
        self.recon_tab = recon_tab

    def dragEnterEvent(self, event):
        # Accept only drags that carry file URLs.
        if event.mimeData().hasUrls():
            event.acceptProposedAction()

    def dropEvent(self, event):
        # Forward all dropped local file paths to the recon tab loader.
        files = []
        for url in event.mimeData().urls():
            filepath = url.toLocalFile()
            files.append(filepath)
        self.recon_tab.open_model_files(files)


class DropWidget(QWidget):
    """A drag & drop widget container to load model file(s)"""

    def __init__(self, recon_tab: Ui_ReconTab_Form = None):
        super().__init__()
        self.setAcceptDrops(True)
        self.recon_tab = recon_tab

    def dragEnterEvent(self, event):
        # Accept only drags that carry file URLs.
        if event.mimeData().hasUrls():
            event.acceptProposedAction()

    def dropEvent(self, event):
        # Forward all dropped local file paths to the recon tab loader.
        files = []
        for url in event.mimeData().urls():
            filepath = url.toLocalFile()
            files.append(filepath)
        self.recon_tab.open_model_files(files)
class ScrollableLabel(QScrollArea):
    """A scrollable label widget used for Job entry"""

    def __init__(self, text, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # word-wrapped label inside an expanding, top-aligned scroll area
        self.label = QLabel()
        self.label.setWordWrap(True)
        self.label.setText(text)

        layout = QVBoxLayout()
        layout.setAlignment(Qt.AlignmentFlag.AlignTop)
        layout.addWidget(self.label)
        self.label.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )

        container = QWidget()
        container.setLayout(layout)
        container.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )

        self.setWidget(container)
        self.setWidgetResizable(True)
        self.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )
        self.setAlignment(Qt.AlignmentFlag.AlignTop)

    def setText(self, text):
        # Route text updates to the inner QLabel.
        self.label.setText(text)


class MyWidget(QWidget):
    # Emitted on every resize so listeners can re-layout children.
    resized = Signal()

    def __init__(self):
        super().__init__()

    def resizeEvent(self, event):
        self.resized.emit()
        super().resizeEvent(event)
class CollapsibleBox(QWidget):
    """A collapsible widget"""

    def __init__(self, title="", parent=None, hasPydanticModel=False):
        super(CollapsibleBox, self).__init__(parent)

        self.hasPydanticModel = hasPydanticModel
        # arrow-styled toggle button acts as the header
        self.toggle_button = QToolButton(
            text=title, checkable=True, checked=False
        )
        self.toggle_button.setStyleSheet("QToolButton { border: none; }")
        self.toggle_button.setToolButtonStyle(
            QtCore.Qt.ToolButtonStyle.ToolButtonTextBesideIcon
        )
        self.toggle_button.setArrowType(QtCore.Qt.ArrowType.RightArrow)
        self.toggle_button.pressed.connect(self.on_pressed)

        self.toggle_animation = QtCore.QParallelAnimationGroup(self)

        # content starts fully collapsed (height 0)
        self.content_area = QScrollArea(maximumHeight=0, minimumHeight=0)
        self.content_area.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed
        )
        self.content_area.setFrameShape(QFrame.Shape.NoFrame)

        lay = QVBoxLayout(self)
        lay.setSpacing(0)
        lay.setContentsMargins(0, 0, 0, 0)
        lay.addWidget(self.toggle_button)
        lay.addWidget(self.content_area)

        # animate own min/max height plus the content area's max height
        self.toggle_animation.addAnimation(
            QtCore.QPropertyAnimation(self, b"minimumHeight")
        )
        self.toggle_animation.addAnimation(
            QtCore.QPropertyAnimation(self, b"maximumHeight")
        )
        self.toggle_animation.addAnimation(
            QtCore.QPropertyAnimation(self.content_area, b"maximumHeight")
        )

    def setNewName(self, name):
        # Update the header text.
        self.toggle_button.setText(name)

    # @QtCore.pyqtSlot()
    def on_pressed(self):
        # Flip the arrow and play the expand/collapse animation.
        checked = self.toggle_button.isChecked()
        self.toggle_button.setArrowType(
            QtCore.Qt.ArrowType.DownArrow
            if not checked
            else QtCore.Qt.ArrowType.RightArrow
        )
        self.toggle_animation.setDirection(
            QtCore.QAbstractAnimation.Direction.Forward
            if not checked
            else QtCore.QAbstractAnimation.Direction.Backward
        )
        self.toggle_animation.start()
        if checked and self.hasPydanticModel:
            # do model verification on close
            pass

    def setContentLayout(self, layout):
        # Install the body layout and recompute animation start/end heights.
        lay = self.content_area.layout()
        del lay
        self.content_area.setLayout(layout)
        collapsed_height = (
            self.sizeHint().height() - self.content_area.maximumHeight()
        )
        content_height = layout.sizeHint().height()
        for i in range(self.toggle_animation.animationCount()):
            animation = self.toggle_animation.animationAt(i)
            animation.setDuration(500)
            animation.setStartValue(collapsed_height)
            animation.setEndValue(collapsed_height + content_height)

        # last animation targets the content area itself (0 -> content height)
        content_animation = self.toggle_animation.animationAt(
            self.toggle_animation.animationCount() - 1
        )
        content_animation.setDuration(500)
        content_animation.setStartValue(0)
        content_animation.setEndValue(content_height)
# VScode debugging
if __name__ == "__main__":
    import napari

    napari.Viewer()
    napari.run()


# --- waveorder/scripts/launch_napari.py ---
import napari

from waveorder.plugin.main_widget import MainWidget


def main():
    # Launch napari with the waveorder main widget docked.
    viewer = napari.Viewer()
    viewer.window.add_dock_widget(MainWidget(viewer))
    napari.run()


if __name__ == "__main__":
    main()


# --- waveorder/scripts/repeat-cal-acq-rec.py ---
# This script can be modified to debug and test calibrations

import random
import time
from contextlib import contextmanager

import napari
from pycromanager import Core

from waveorder.plugin.main_widget import MainWidget

SAVE_DIR = "."
SWING = 0.05
CAL_REPEATS = 3
BKG_REPEATS = 3


@contextmanager
def stage_detour(app: MainWidget, dx: float, dy: float, wait=5):
    """Context manager to temporarily move the stage to a new XY-position.

    Parameters
    ----------
    app : MainWidget
        waveorder main widget instance
    dx : float
        relative x to translate
    dy : float
        relative y to translate
    wait : int, optional
        time to wait for the stage to complete movement, by default 5

    Yields
    ------
    MainWidget
        waveorder main widget instance

    Usage
    -----
    ```py
    with stage_detour(app) as app:
        pass  # do something at the new location
    ```
    """
    xy_stage = app.mmc.getXYStageDevice()
    # get the original position
    ox = app.mmc.getXPosition(xy_stage)
    oy = app.mmc.getYPosition(xy_stage)
    # go to a translated position
    # TODO: args are floored due to a pycromanager bug: https://github.com/micro-manager/pycro-manager/issues/67
    app.mmc.setRelativeXYPosition(int(dx), int(dy))
    time.sleep(wait)
    try:
        yield app
    finally:
        # go back to the original position
        # TODO: args are floored due to a pycromanager bug: https://github.com/micro-manager/pycro-manager/issues/67
        app.mmc.setXYPosition(int(ox), int(oy))
        time.sleep(wait)
def measure_fov(mmc: "Core"):
    """Calculate the MM FOV in micrometers.

    Parameters
    ----------
    mmc : Core
        MMCore object via pycromanager (with CamelCase set to `True`)

    Returns
    -------
    tuple[float, float]
        FOV size (x, y)
    """
    pixel_size = float(mmc.getPixelSizeUm())
    if pixel_size == 0:
        # BUG FIX: the user-supplied estimate was previously discarded
        # (the float(input(...)) result was never assigned), so an
        # uncalibrated pixel size produced a (0, 0) FOV.
        pixel_size = float(
            input(
                "Pixel size is not calibrated. Please provide an estimate (in micrometers):"
            )
        )
    fov_x = pixel_size * float(mmc.getImageWidth())
    fov_y = pixel_size * float(mmc.getImageHeight())
    return fov_x, fov_y
+ + Parameters + ---------- + length : float + absolote length in micrometers + + Returns + ------- + float + +length or -length + """ + sign = random.randint(0, 1) * 2 - 1 + return sign * length + + +def main(): + viewer = napari.Viewer() + app = MainWidget(viewer) + viewer.window.add_dock_widget(app) + app.ui.qbutton_gui_mode.click() + app.calib_scheme = "5-State" + app.directory = SAVE_DIR + app.save_directory = SAVE_DIR + + fov_x, fov_y = measure_fov(app.mmc) + + input("Please center the target in the FOV and hit ") + + for cal_repeat in range(CAL_REPEATS): + dx = rand_shift(fov_x) + dy = rand_shift(fov_y) + # run calibration + with stage_detour(app, dx, dy) as app: + print(f"Calibration repeat # {cal_repeat}") + app.swing = SWING + + print(f"Calibrating with swing = {SWING}") + app.run_calibration() + time.sleep(90) + + for bkg_repeat in range(BKG_REPEATS): + # capture background + with stage_detour(app, dx, dy) as app: + print(f">>> Background repeat # {bkg_repeat}") + app.last_calib_meta_file = app.calib.meta_file + app.capture_bg() + time.sleep(20) + app.ui.cb_bg_method.setCurrentIndex( + 1 + ) # Set to "Measured" bg correction + app.enter_bg_correction() + app.save_name = f"cal-{cal_repeat}-bkg-{bkg_repeat}" + app.enter_acq_bg_path() + app.acq_ret_ori() + time.sleep(15) + + +if __name__ == "__main__": + main() diff --git a/waveorder/scripts/repeat-calibration.py b/waveorder/scripts/repeat-calibration.py new file mode 100644 index 00000000..6b946973 --- /dev/null +++ b/waveorder/scripts/repeat-calibration.py @@ -0,0 +1,31 @@ +# This script can be modified to debug and test calibrations + +import time + +import napari + +from waveorder.plugin.main_widget import MainWidget + +SAVE_DIR = "./" +SWINGS = [0.1, 0.03, 0.01, 0.005] +REPEATS = 5 + + +def main(): + viewer = napari.Viewer() + waveorder = MainWidget(viewer) + viewer.window.add_dock_widget(waveorder) + waveorder.ui.qbutton_connect_to_mm.click() + waveorder.calib_scheme = "5-State" + + for repeat in 
range(REPEATS): + for swing in SWINGS: + print("Calibrating with swing = " + str(swing)) + waveorder.swing = swing + waveorder.directory = SAVE_DIR + waveorder.run_calibration() + time.sleep(100) + + +if __name__ == "__main__": + main() diff --git a/waveorder/scripts/samples.py b/waveorder/scripts/samples.py new file mode 100644 index 00000000..a1fb5c48 --- /dev/null +++ b/waveorder/scripts/samples.py @@ -0,0 +1,85 @@ +import shutil +from pathlib import Path +from typing import Literal + +from iohub import open_ome_zarr +from iohub.ngff import Plate +from napari.utils.notifications import show_warning +from platformdirs import user_data_dir +from wget import download + + +def _build_layer_list(dataset: Plate, layer_names: list[str]): + layer_list = [] + for channel_name in layer_names: + channel_index = dataset.channel_names.index(channel_name) + position = dataset["0/0/0"] + data = (position["0"][:, channel_index],) + layer_dict = {"name": channel_name, "scale": position.scale[3:]} + layer_list.append((data, layer_dict)) + + return layer_list + + +def download_and_unzip(data_type: Literal["target", "embryo"]) -> tuple[Path]: + """Downloads sample data .zip from zenodo, unzips, and returns Paths to the .zarr datasets. + + Skips the download if the files already exist. + + Uses platformdirs.user_data_dir to store data. 
+ """ + + # Delete old data + old_data_dirs = ["waveorder-sample-v1.4"] + for old_data_dir in old_data_dirs: + old_data_path = Path(user_data_dir(old_data_dir)) + if old_data_path.exists(): + shutil.rmtree(str(old_data_path)) + + temp_dirpath = Path(user_data_dir("waveorder-sample-v1.5")) + temp_dirpath.mkdir(exist_ok=True, parents=True) + + if data_type == "target": + data_dirpath = temp_dirpath / "sample_contribution" + data_size = "10 MB" + data_url = "https://zenodo.org/record/8386856/files/sample_contribution.zip?download=1" + elif data_type == "embryo": + data_dirpath = temp_dirpath / "sample_contribution_embryo" + data_size = "92 MB" + data_url = "https://zenodo.org/record/8386856/files/sample_contribution_embryo.zip?download=1" + + if not data_dirpath.with_suffix(".zip").exists(): + show_warning( + f"Downloading {data_size} sample contribution. This might take a moment..." + ) + download(data_url, out=str(temp_dirpath)) + + if not data_dirpath.exists(): + shutil.unpack_archive( + data_dirpath.with_suffix(".zip"), extract_dir=temp_dirpath + ) + + data_path = data_dirpath / "raw_data.zarr" + recon_path = data_dirpath / "reconstruction.zarr" + return data_path, recon_path + + +def read_polarization_target_data(): + """Returns the polarization data sample contribution""" + data_path, _ = download_and_unzip("target") + dataset = open_ome_zarr(data_path) + return _build_layer_list(dataset, dataset.channel_names) + + +def read_polarization_target_reconstruction(): + """Returns the polarization target reconstruction sample contribution""" + _, recon_path = download_and_unzip("target") + dataset = open_ome_zarr(recon_path) + return _build_layer_list(dataset, ["Phase3D", "Retardance", "Orientation"]) + + +def read_zebrafish_embryo_reconstruction(): + """Returns the embryo reconstruction sample contribution""" + _, recon_path = download_and_unzip("embryo") + dataset = open_ome_zarr(recon_path) + return _build_layer_list(dataset, ["Retardance", "Orientation"]) diff 
# =========================================================================
# waveorder/scripts/simulate_zarr_acq.py
# (reconstructed from a whitespace-mangled diff)
# =========================================================================
import os
import shutil
import subprocess
import threading
import time
from pathlib import Path

from iohub.convert import TIFFConverter
from iohub.ngff import open_ome_zarr

from waveorder.cli import jobs_mgmt
from waveorder.cli.utils import create_empty_hcs_zarr

# This script is a demo .zarr acquisition simulation from an acquired .zarr store
# The script copies and writes additional metadata to .zattrs inserting two keys
# The two keys are "FinalDimensions" and "CurrentDimensions".
# The "FinalDimensions" key with (t,p,z,c) needs to be inserted when the dataset is created
# and then should be updated at close to ensure aborted acquisitions represent correct dimensions.
# The "CurrentDimensions" key should have the same (t,p,z,c) information and should be written out
# either with every new image, end of dimension OR at frequent intervals.
# Refer further notes below in the example regarding encountered issues.
#
# Refer to steps at the end of the file on steps to run this file


# %% #############################################
def convert_data(
    tif_path, latest_out_path, prefix="", data_type_str="ometiff"
):
    """Convert an ome-tiff acquisition at tif_path/prefix into a zarr store."""
    converter = TIFFConverter(
        os.path.join(tif_path, prefix),
        latest_out_path,
        data_type=data_type_str,
        grid_layout=False,
    )
    converter.run()


def run_convert(ome_tif_path):
    """Convert *ome_tif_path* into a sibling ``raw_<name>.zarr`` store."""
    out_path = os.path.join(
        Path(ome_tif_path).parent.absolute(),
        ("raw_" + Path(ome_tif_path).name + ".zarr"),
    )
    convert_data(ome_tif_path, out_path)


# %% #############################################


def run_acq(input_path="", waitBetweenT=30):
    """Simulate an acquisition by copying *input_path* image-by-image into a
    new ``acq_sim_<name>`` store, updating the "CurrentDimensions" zattrs key
    as each image lands and sleeping *waitBetweenT* seconds per timepoint.
    """
    output_store_path = os.path.join(
        Path(input_path).parent.absolute(),
        ("acq_sim_" + Path(input_path).name),
    )

    # start from a clean output store
    if Path(output_store_path).exists():
        shutil.rmtree(output_store_path)
        time.sleep(1)

    input_data = open_ome_zarr(input_path, mode="r")
    channel_names = input_data.channel_names

    position_keys: list[tuple[str]] = []

    # NOTE(review): shape/dtype/chunks keep the values of the LAST position
    # iterated; this assumes all positions share them — confirm. Also raises
    # NameError below if the store has no positions.
    for path, pos in input_data.positions():
        shape = pos["0"].shape
        dtype = pos["0"].dtype
        chunks = pos["0"].chunks
        scale = (1, 1, 1, 1, 1)
        position_keys.append(path.split("/"))

    create_empty_hcs_zarr(
        output_store_path,
        position_keys,
        shape,
        chunks,
        scale,
        channel_names,
        dtype,
        {},
    )
    output_dataset = open_ome_zarr(output_store_path, mode="r+")

    if "Summary" in input_data.zattrs.keys():
        output_dataset.zattrs["Summary"] = input_data.zattrs["Summary"]

    # "FinalDimensions" is written once at creation (see notes at top of file)
    output_dataset.zattrs.update(
        {
            "FinalDimensions": {
                "channel": shape[1],
                "position": len(position_keys),
                "time": shape[0],
                "z": shape[2],
            }
        }
    )

    total_time = shape[0]
    total_pos = len(position_keys)
    total_z = shape[2]
    total_c = shape[1]
    for t in range(total_time):
        for p in range(total_pos):
            for z in range(total_z):
                for c in range(total_c):
                    position_key_string = "/".join(position_keys[p])
                    img_src = input_data[position_key_string][0][t, c, z]

                    img_data = output_dataset[position_key_string][0]
                    img_data[t, c, z] = img_src

                    # Note: On-The-Fly dataset reconstruction will throw Permission Denied when being written
                    # Maybe we can read the zattrs directly in that case as a file which is less blocking
                    # If this write/read is a constant issue then the zattrs 'CurrentDimensions' key
                    # should be updated less frequently, instead of current design of updating with
                    # each image
                    output_dataset.zattrs.update(
                        {
                            "CurrentDimensions": {
                                "channel": total_c,
                                "position": p + 1,
                                "time": t + 1,
                                "z": z + 1,
                            }
                        }
                    )

                    required_order = ["time", "position", "z", "channel"]
                    my_dict = output_dataset.zattrs["CurrentDimensions"]
                    sorted_dict_acq = {
                        k: my_dict[k]
                        for k in sorted(
                            my_dict, key=lambda x: required_order.index(x)
                        )
                    }
                    print("Writer thread - Acquisition Dim:", sorted_dict_acq)

        # reconThread = threading.Thread(target=do_reconstruct, args=(output_store_path, t))
        # reconThread.start()

        time.sleep(waitBetweenT)  # sleep after every t

    # BUGFIX: `output_dataset.close` was a bare attribute access with no call
    # parentheses, so the output store was never actually closed.
    output_dataset.close()


def do_reconstruct(input_path, time_point):
    """Launch a waveorder CLI reconstruction of *input_path* at *time_point*
    as a subprocess, using the matching ``Bire-<t>.yml`` config next to it.
    """
    config_path = os.path.join(
        Path(input_path).parent.absolute(), "Bire-" + str(time_point) + ".yml"
    )
    output_path = os.path.join(
        Path(input_path).parent.absolute(), "Recon_" + Path(input_path).name
    )
    mainfp = str(jobs_mgmt.FILE_PATH)

    print(
        "Processing {input} time_point={tp}".format(
            input=input_path, tp=time_point
        )
    )

    # NOTE(review): the raise below is caught by this function's own except
    # and only printed — callers never see a failure. Intentional best-effort?
    try:
        proc = subprocess.run(
            [
                "python",
                mainfp,
                "reconstruct",
                "-i",
                input_path,
                "-c",
                config_path,
                "-o",
                output_path,
                "-rx",
                str(20),
            ]
        )
        if proc.returncode != 0:
            raise Exception(
                "An error occurred in processing ! Check terminal output."
            )
    except Exception as exc:
        print(exc.args)


# %% #############################################
def run_acquire(input_path, waitBetweenT):
    """Start the acquisition simulation on a background thread."""
    runThread1Acq = threading.Thread(
        target=run_acq, args=(input_path, waitBetweenT)
    )
    runThread1Acq.start()


# %% #############################################
# Step 1:
# Convert an existing ome-tif waveorder acquisition, preferably with all dims (t, p, z, c)
# This will convert an existing ome-tif to a .zarr storage

# ome_tif_path = "/ome-zarr_data/waveorderAcq/test/snap_6D_ometiff_1"
# run_convert(ome_tif_path)

# %% #############################################
# Step 2:
# run the test to simulate Acquiring a waveorder .zarr store

if __name__ == "__main__":
    # BUGFIX: guarded so that importing this module no longer starts the
    # acquisition-simulation thread as a side effect.
    input_path = "/ome-zarr_data/waveorderAcq/test/raw_snap_6D_ometiff_1.zarr"
    waitBetweenT = 60
    run_acquire(input_path, waitBetweenT)