From 3db6dc8ee11b1f270e45a4c99bb1ffbf102f3f9f Mon Sep 17 00:00:00 2001 From: Wim Pomp Date: Mon, 3 Feb 2025 15:33:32 +0100 Subject: [PATCH] - start rust rewrite --- .github/workflows/pytest.yml | 22 - .gitignore | 84 +- Cargo.lock | 816 +++++++++++++++ Cargo.toml | 33 + README.md | 94 +- build.rs | 28 + ndbioimage/__init__.py | 1370 -------------------------- ndbioimage/jvm.py | 77 -- ndbioimage/readers/__init__.py | 1 - ndbioimage/readers/bfread.py | 208 ---- ndbioimage/readers/cziread.py | 606 ------------ ndbioimage/readers/fijiread.py | 59 -- ndbioimage/readers/metaseriesread.py | 80 -- ndbioimage/readers/ndread.py | 53 - ndbioimage/readers/seqread.py | 146 --- ndbioimage/readers/tifread.py | 85 -- ndbioimage/transform.txt | 7 - ndbioimage/transforms.py | 458 --------- pyproject.toml | 67 +- src/bioformats.rs | 142 +++ src/lib.rs | 366 +++++++ src/py.rs | 14 + 22 files changed, 1487 insertions(+), 3329 deletions(-) delete mode 100644 .github/workflows/pytest.yml create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 build.rs delete mode 100755 ndbioimage/__init__.py delete mode 100644 ndbioimage/jvm.py delete mode 100644 ndbioimage/readers/__init__.py delete mode 100644 ndbioimage/readers/bfread.py delete mode 100644 ndbioimage/readers/cziread.py delete mode 100644 ndbioimage/readers/fijiread.py delete mode 100644 ndbioimage/readers/metaseriesread.py delete mode 100644 ndbioimage/readers/ndread.py delete mode 100644 ndbioimage/readers/seqread.py delete mode 100644 ndbioimage/readers/tifread.py delete mode 100644 ndbioimage/transform.txt delete mode 100644 ndbioimage/transforms.py create mode 100644 src/bioformats.rs create mode 100644 src/lib.rs create mode 100644 src/py.rs diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml deleted file mode 100644 index a0500dc..0000000 --- a/.github/workflows/pytest.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: PyTest - -on: [workflow_call, push, pull_request] - -jobs: - pytest: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.10", "3.11", "3.12"] - - steps: - - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - pip install .[test] - - name: Test with pytest - run: pytest \ No newline at end of file diff --git a/.gitignore b/.gitignore index 54248cb..c8f0442 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,72 @@ -._* -*.pyc -/build/ -*.egg-info -/venv/ -.idea -/.pytest_cache/ -/ndbioimage/_version.py -/ndbioimage/jars -/tests/files/* -/poetry.lock -/dist/ +/target + +# Byte-compiled / optimized / DLL files +__pycache__/ +.pytest_cache/ +*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +.venv/ +env/ +bin/ +build/ +develop-eggs/ +dist/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +include/ +man/ +venv/ +*.egg-info/ +.installed.cfg +*.egg + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt +pip-selfcheck.json + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Rope +.ropeproject + +# Django stuff: +*.log +*.pot + +.DS_Store + +# Sphinx documentation +docs/_build/ + +# PyCharm +.idea/ + +# VSCode +.vscode/ + +# Pyenv +.python-version diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..a4f8a09 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,816 @@ +# 
This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "anyhow" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "cc" +version = "1.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf" +dependencies = [ + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "indoc" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + +[[package]] +name = "itoa" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" + +[[package]] +name = "j4rs" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dacf87fdd07b36f3124894a5bf20c8d418aaa28d4974d717672507f6d05cd31c" +dependencies = [ + "cesu8", + "dunce", + "fs_extra", + "futures", + "java-locator", + "jni-sys", + "lazy_static", + "libc", + "libloading", + "log", + "serde", + "serde_json", +] + +[[package]] +name = "java-locator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c46c1fe465c59b1474e665e85e1256c3893dd00927b8d55f63b09044c1e64f" +dependencies = [ + "glob", +] + +[[package]] +name = "jni-sys" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c30a312d782b8d56a1e0897d45c1af33f31f9b4a4d13d31207a8675e0223b818" +dependencies = [ + "jni-sys-macros", +] + +[[package]] +name = "jni-sys-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e6c199962dfd5610ced8eca382606e349f7940a4ac7d867b58a046123411cbb4" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "log" +version = "0.4.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" + +[[package]] +name = "matrixmultiply" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" +dependencies = [ + "autocfg", + "rawpointer", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "ndarray" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" +dependencies = [ + "matrixmultiply", + "num-complex", + "num-integer", + "num-traits", + "portable-atomic", + "portable-atomic-util", + "rawpointer", +] + +[[package]] +name = "ndbioimage_rs" +version = "2025.1.0" +dependencies = [ + "anyhow", + "j4rs", + "ndarray", + "num", + "numpy", + "pyo3", + "rayon", + "retry", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "numpy" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b94caae805f998a07d33af06e6a3891e38556051b8045c615470a71590e13e78" +dependencies = [ + "libc", + "ndarray", + "num-complex", + "num-integer", + "num-traits", + "pyo3", + "rustc-hash", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "portable-atomic" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pyo3" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc" +dependencies = [ + "anyhow", + "cfg-if", + "indoc", + "libc", + "memoffset", + "once_cell", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", + "unindent", +] + +[[package]] +name = "pyo3-build-config" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7" +dependencies = [ + "once_cell", + "python3-dll-a", + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-macros" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "pyo3-macros-backend" 
+version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4" +dependencies = [ + "heck", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "python3-dll-a" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b66f9171950e674e64bad3456e11bb3cca108e5c34844383cfe277f45c8a7a8" +dependencies = [ + "cc", +] + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "retry" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4" +dependencies = [ + "rand", +] + +[[package]] +name = "rustc-hash" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" + +[[package]] +name = "ryu" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "serde_json" +version = "1.0.138" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +dependencies = [ + "itoa", + "memchr", + 
"ryu", + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + +[[package]] +name = "unicode-ident" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" + +[[package]] +name = "unindent" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..71257e5 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "ndbioimage_rs" +version = "2025.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +name = "ndbioimage_rs" +crate-type = ["cdylib"] + +[dependencies] +anyhow = "1.0.95" +j4rs = "0.22.0" +ndarray = "0.16.1" +num = "0.4.3" +numpy = { version = "0.23.0", optional = true } + +[dependencies.pyo3] +version = "0.23.4" +features = ["extension-module", "abi3-py310", "generate-import-lib", "anyhow"] +optional = true + +[dev-dependencies] +rayon = "1.10.0" + +[build-dependencies] +j4rs = { version = "0.22", features = [] } +retry = { version = "2.0.0"} +anyhow = { version = "1.0.95"} + +[features] +python = ["dep:pyo3", "dep:numpy"] +gpl-formats = [] \ No newline at end of file diff --git a/README.md b/README.md index b037443..48d8296 100644 --- a/README.md +++ b/README.md @@ -1,94 +1,4 @@ [![Pytest](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml/badge.svg)](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml) -# ndbioimage - -Exposes (bio) images as a numpy ndarray-like object, but without loading the whole -image into memory, reading from the file only when needed. Some metadata is read -and stored in an [ome](https://genomebiology.biomedcentral.com/articles/10.1186/gb-2005-6-5-r47) structure. -Additionally, it can automatically calculate an affine transform that corrects for chromatic aberrations etc. and apply -it on the fly to the image. - -Currently, it supports imagej tif files, czi files, micromanager tif sequences and anything -[bioformats](https://www.openmicroscopy.org/bio-formats/) can handle. - -## Installation - -``` -pip install ndbioimage -``` - -### Installation with option to write mp4 or mkv: -Work in progress! Make sure ffmpeg is installed. 
-
-```
-pip install ndbioimage[write]
-```
-
-## Usage
-### Python
-
-- Reading an image file and plotting the frame at channel=2, time=1
-
-```
-import matplotlib.pyplot as plt
-from ndbioimage import Imread
-with Imread('image_file.tif', axes='ctyx', dtype=int) as im:
-    plt.imshow(im[2, 1])
-```
-
-- Showing some image metadata
-
-```
-from ndbioimage import Imread
-from pprint import pprint
-with Imread('image_file.tif') as im:
-    pprint(im)
-```
-
-- Slicing the image without loading the image into memory
-
-```
-from ndbioimage import Imread
-with Imread('image_file.tif', axes='cztyx') as im:
-    sliced_im = im[1, :, :, 100:200, 100:200]
-```
-
-sliced_im is an instance of Imread which will load any image data from file only when needed
-
-
-- Converting (part) of the image to a numpy ndarray
-
-```
-from ndbioimage import Imread
-import numpy as np
-with Imread('image_file.tif', axes='cztyx') as im:
-    array = np.asarray(im[0, 0])
-```
-
-### Command line
-```ndbioimage --help```: show help
-```ndbioimage image```: show metadata about image
-```ndbioimage image -w {name}.tif -r```: copy image into image.tif (replacing {name} with image), while registering channels
-```ndbioimage image -w image.mp4 -C cyan lime red``` copy image into image.mp4 (z will be max projected), make channel colors cyan lime and red
-
-## Adding more formats
-Readers for image formats subclass AbstractReader. When an image reader is imported, Imread will
-automatically recognize it and use it to open the appropriate file format. Image readers
-are required to implement the following methods:
-
-- staticmethod _can_open(path): return True if path can be opened by this reader
-- \_\_frame__(self, c, z, t): return the frame at channel=c, z-slice=z, time=t from the file
-
-Optional methods:
-- get_ome: reads metadata from file and adds them to an OME object imported
-from the ome-types library
-- open(self): maybe open some file handle
-- close(self): close any file handles
-
-Optional fields:
-- priority (int): Imread will try readers with a lower number first, default: 99
-- do_not_pickle (strings): any attributes that should not be included when the object is pickled,
-for example: any file handles
-
-# TODO
-- more image formats
+# Work in progress
+Rust rewrite of the Python version
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..0f67075
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,28 @@
+// copied from https://github.com/AzHicham/bioformats-rs
+
+use j4rs::{errors::J4RsError, JvmBuilder, MavenArtifact, MavenArtifactRepo, MavenSettings};
+use retry::{delay, delay::Exponential, retry};
+
+fn main() -> anyhow::Result<()> {
+    println!("cargo:rerun-if-changed=build.rs");
+
+    Ok(retry(
+        Exponential::from_millis(1000).map(delay::jitter).take(4),
+        deploy_java_artifacts,
+    )?)
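+    // deploy_java_artifacts downloads the Bio-Formats jars from Maven, so it
+    // can fail transiently (network, artifactory outages); retry() re-runs it
+    // with exponential backoff (1 s base) plus jitter, and gives up once the
+    // four delays yielded by take(4) are exhausted.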
+} + +fn deploy_java_artifacts() -> Result<(), J4RsError> { + let jvm = JvmBuilder::new() + .with_maven_settings(MavenSettings::new(vec![MavenArtifactRepo::from( + "openmicroscopy::https://artifacts.openmicroscopy.org/artifactory/ome.releases", + )])) + .build()?; + + jvm.deploy_artifact(&MavenArtifact::from("ome:bioformats_package:8.0.1"))?; + + #[cfg(feature = "gpl-formats")] + jvm.deploy_artifact(&MavenArtifact::from("ome:formats-gpl:8.0.1"))?; + + Ok(()) +} diff --git a/ndbioimage/__init__.py b/ndbioimage/__init__.py deleted file mode 100755 index 272268c..0000000 --- a/ndbioimage/__init__.py +++ /dev/null @@ -1,1370 +0,0 @@ -from __future__ import annotations - -import multiprocessing -import os -import re -import warnings -from abc import ABC, ABCMeta, abstractmethod -from argparse import ArgumentParser -from collections import OrderedDict -from datetime import datetime -from functools import cached_property, wraps -from importlib.metadata import version -from itertools import product -from operator import truediv -from pathlib import Path -from traceback import print_exc -from typing import Any, Callable, Generator, Iterable, Optional, Sequence, TypeVar - -import numpy as np -import yaml -from numpy.typing import ArrayLike, DTypeLike -from ome_types import OME, from_xml, model, ureg -from pint import set_application_registry -from tiffwrite import FrameInfo, IJTiffParallel -from tqdm.auto import tqdm - -from .jvm import JVM, JVMException -from .transforms import Transform, Transforms - -try: - __version__ = version(Path(__file__).parent.name) -except Exception: # noqa - __version__ = 'unknown' - -try: - with open(Path(__file__).parent.parent / '.git' / 'HEAD') as g: - head = g.read().split(':')[1].strip() - with open(Path(__file__).parent.parent / '.git' / head) as h: - __git_commit_hash__ = h.read().rstrip('\n') -except Exception: # noqa - __git_commit_hash__ = 'unknown' - -ureg.default_format = '~P' -set_application_registry(ureg) -warnings.filterwarnings('ignore', 'Reference to unknown ID') -Number = int | float | np.integer | np.floating - - -class ReaderNotFoundError(Exception): - pass - - -class TransformTiff(IJTiffParallel): - """ transform frames in a parallel process to speed up saving """ - def __init__(self, image: Imread, *args: Any, **kwargs: Any) -> None: - self.image = image - super().__init__(*args, **kwargs) - - def parallel(self, frame: tuple[int, int, int]) -> tuple[FrameInfo]: - return (np.asarray(self.image(*frame)), 0, 0, 0), - - -class DequeDict(OrderedDict): - def __init__(self, maxlen: int = None, *args: Any, **kwargs: Any) -> None: - self.maxlen = maxlen - super().__init__(*args, **kwargs) - - def __setitem__(self, *args: Any, **kwargs: Any) -> None: - super().__setitem__(*args, **kwargs) - self.truncate() - - def truncate(self) -> None: - if self.maxlen is not None: - while len(self) > self.maxlen: - self.popitem(False) - - def update(self, *args: Any, **kwargs: Any) -> None: - super().update(*args, **kwargs) # type: ignore - self.truncate() - - -def find(obj: Sequence[Any], **kwargs: Any) -> Any: - for item in obj: - try: - if all([getattr(item, key) == value for key, value in kwargs.items()]): - return item - except AttributeError: - pass - - -R = TypeVar('R') - - -def try_default(fun: Callable[..., R], default: Any, *args: Any, **kwargs: Any) -> R: - try: - return fun(*args, **kwargs) - except Exception: # noqa - return default - - -def bioformats_ome(path: str | Path) -> OME: - from .readers.bfread import jars - try: - jvm = JVM(jars) # noqa - ome_meta 
= jvm.metadata_tools.createOMEXMLMetadata() - reader = jvm.image_reader() - reader.setMetadataStore(ome_meta) - reader.setId(str(path)) - ome = from_xml(str(ome_meta.dumpXML()), parser='lxml') - except Exception: # noqa - print_exc() - ome = model.OME() - finally: - jvm.kill_vm() # noqa - return ome - - -class Shape(tuple): - def __new__(cls, shape: Sequence[int] | Shape, axes: str = 'yxczt') -> Shape: - if isinstance(shape, Shape): - axes = shape.axes # type: ignore - new = super().__new__(cls, shape) - new.axes = axes.lower() - return new # type: ignore - - def __getitem__(self, n: int | str) -> int | tuple[int]: - if isinstance(n, str): - if len(n) == 1: - return self[self.axes.find(n.lower())] if n.lower() in self.axes else 1 - else: - return tuple(self[i] for i in n) # type: ignore - return super().__getitem__(n) - - @cached_property - def yxczt(self) -> tuple[int, int, int, int, int]: - return tuple(self[i] for i in 'yxczt') # type: ignore - - -class OmeCache(DequeDict): - """ prevent (potentially expensive) rereading of ome data by caching """ - - instance = None - - def __new__(cls) -> OmeCache: - if cls.instance is None: - cls.instance = super().__new__(cls) - return cls.instance - - def __init__(self) -> None: - super().__init__(64) - - def __reduce__(self) -> tuple[type, tuple]: - return self.__class__, () - - def __getitem__(self, path: Path | str | tuple) -> OME: - if isinstance(path, tuple): - return super().__getitem__(path) - else: - return super().__getitem__(self.path_and_lstat(path)) - - def __setitem__(self, path: Path | str | tuple, value: OME) -> None: - if isinstance(path, tuple): - super().__setitem__(path, value) - else: - super().__setitem__(self.path_and_lstat(path), value) - - def __contains__(self, path: Path | str | tuple) -> bool: - if isinstance(path, tuple): - return super().__contains__(path) - else: - return super().__contains__(self.path_and_lstat(path)) - - @staticmethod - def path_and_lstat(path: str | Path) -> tuple[Path, Optional[os.stat_result], Optional[os.stat_result]]: - path = Path(path) - return (path, (path.lstat() if path.exists() else None), - (path.with_suffix('.ome.xml').lstat() if path.with_suffix('.ome.xml').exists() else None)) - - -def get_positions(path: str | Path) -> Optional[list[int]]: - subclass = AbstractReader.get_subclass(path) - return subclass.get_positions(AbstractReader.split_path_series(path)[0]) - - -class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): - """ class to read image files, while taking good care of important metadata, - currently optimized for .czi files, but can open anything that bioformats can handle - path: path to the image file - optional: - axes: order of axes, default: cztyx, but omitting any axes with lenght 1 - dtype: datatype to be used when returning frames - - modify images on the fly with a decorator function: - define a function which takes an instance of this object, one image frame, - and the coordinates c, z, t as arguments, and one image frame as return - >> imread.frame_decorator = fun - then use imread as usually - - Examples: - >> im = Imread('/path/to/file.image', axes='czt) - >> im - << shows summary - >> im.shape - << (15, 26, 1000, 1000) - >> im.axes - << 'ztyx' - >> plt.imshow(im[1, 0]) - << plots frame at position z=1, t=0 (python type indexing) - >> plt.imshow(im[:, 0].max('z')) - << plots max-z projection at t=0 - >> im.pxsize_um - << 0.09708737864077668 image-plane pixel size in um - >> im.laserwavelengths - << [642, 488] - >> im.laserpowers - << [0.02, 0.0005] in % - - See 
__init__ and other functions for more ideas. - - Subclassing: - Subclass AbstractReader to add more file types. A subclass should always have at least the following - methods: - staticmethod _can_open(path): returns True when the subclass can open the image in path - __frame__(self, c, z, t): this should return a single frame at channel c, slice z and time t - optional open(self): code to be run during initialization, e.g. to open a file handle - optional close(self): close the file in a proper way - optional class field priority: subclasses with lower priority will be tried first, default = 99 - optional get_ome(self) -> OME: return an OME structure with metadata, - if not present bioformats will be used to generate an OME - Any other method can be overridden as needed - """ - - isclosed: Optional[bool] - channel_names: Optional[list[str]] - series: Optional[int] - pxsize_um: Optional[float] - deltaz_um: Optional[float] - exposuretime_s: Optional[list[float]] - timeinterval: Optional[float] - binning: Optional[list[int]] - laserwavelengths: Optional[list[tuple[float]]] - laserpowers: Optional[list[tuple[float]]] - objective: Optional[model.Objective] - magnification: Optional[float] - tubelens: Optional[model.Objective] - filter: Optional[str] - powermode: Optional[str] - collimator: Optional[str] - tirfangle: Optional[list[float]] - gain: Optional[list[float]] - pcf: Optional[list[float]] - path: Path - __frame__: Callable[[int, int, int], np.ndarray] - - @staticmethod - def get_subclass(path: Path | str | Any): - if len(AbstractReader.__subclasses__()) == 0: - raise Exception('Restart python kernel please!') - path, _ = AbstractReader.split_path_series(path) - for subclass in sorted(AbstractReader.__subclasses__(), key=lambda subclass_: subclass_.priority): - if subclass._can_open(path): # noqa - do_not_pickle = (AbstractReader.do_not_pickle,) if isinstance(AbstractReader.do_not_pickle, str) \ - else AbstractReader.do_not_pickle - subclass_do_not_pickle = (subclass.do_not_pickle,) if isinstance(subclass.do_not_pickle, str) \ - else subclass.do_not_pickle if hasattr(subclass, 'do_not_pickle') else () - subclass.do_not_pickle = set(do_not_pickle).union(set(subclass_do_not_pickle)) - return subclass - raise ReaderNotFoundError(f'No reader found for {path}.') - - - def __new__(cls, path: Path | str | Imread | Any = None, dtype: DTypeLike = None, axes: str = None) -> Imread: - if cls is not Imread: - return super().__new__(cls) - if isinstance(path, Imread): - return path - subclass = cls.get_subclass(path) - do_not_pickle = (AbstractReader.do_not_pickle,) if isinstance(AbstractReader.do_not_pickle, str) \ - else AbstractReader.do_not_pickle - subclass_do_not_pickle = (subclass.do_not_pickle,) if isinstance(subclass.do_not_pickle, str) \ - else subclass.do_not_pickle if hasattr(subclass, 'do_not_pickle') else () - subclass.do_not_pickle = set(do_not_pickle).union(set(subclass_do_not_pickle)) - return super().__new__(subclass) - - def __init__(self, *args: Any, **kwargs: Any): - def parse(base: Imread = None, # noqa - slice: tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] = None, # noqa - shape: tuple[int, ...] 
= (0, 0, 0, 0, 0), # noqa - dtype: DTypeLike = None, # noqa - frame_decorator: Callable[[Imread, np.ndarray, int, int, int], np.ndarray] = None # noqa - ) -> tuple[Any, ...]: - return base, slice, shape, dtype, frame_decorator - - base, slice, shape, dtype, frame_decorator = parse(*args, **kwargs) # noqa - self.base = base or self - self.slice = slice - self._shape = Shape(shape) - self.dtype = dtype - self.frame_decorator = frame_decorator - self.transform = Transforms() - self.flags = dict(C_CONTIGUOUS=False, F_CONTIGUOUS=False, OWNDATA=False, WRITEABLE=False, - ALIGNED=False, WRITEBACKIFCOPY=False, UPDATEIFCOPY=False) - - def __call__(self, c: int = None, z: int = None, t: int = None, x: int = None, y: int = None) -> np.ndarray: - """ same as im[] but allowing keyword axes, but slices need to made with slice() or np.s_ """ - return self[{k: slice(v) if v is None else v for k, v in dict(c=c, z=z, t=t, x=x, y=y).items()}] - - def __copy__(self) -> Imread: - return self.copy() - - def __contains__(self, item: Number) -> bool: - def unique_yield(a: Iterable[Any], b: Iterable[Any]) -> Generator[Any, None, None]: - for k in a: - yield k - for k in b: - if k not in a: - yield k - - for idx in unique_yield([key[:3] for key in self.cache.keys()], - product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t']))): - yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - if item in np.asarray(self[in_idx]): - return True - return False - - def __enter__(self) -> Imread: - return self - - def __exit__(self, *args: Any, **kwargs: Any) -> None: - if not self.isclosed: - self.isclosed = True - if hasattr(self, 'close'): - self.close() - - def __getitem__(self, n: int | Sequence[int] | Sequence[slice] | slice | type(Ellipsis) | - dict[str, int | Sequence[int] | Sequence[slice] | slice | type(Ellipsis)] - ) -> Number | Imread | np.ndarray: - """ slice like a numpy array but return an Imread instance """ - if self.isclosed: - raise OSError('file is closed') - if isinstance(n, (slice, Number)): # None = : - n = (n,) - elif isinstance(n, type(Ellipsis)): - n = (None,) * len(self.axes) - elif isinstance(n, dict): # allow im[dict(z=0)] etc. - n = [n.get(i, slice(None)) for i in self.axes] - n = list(n) - - # deal with ... 
- ell = [i for i, e in enumerate(n) if isinstance(e, type(Ellipsis))] - if len(ell) > 1: - raise IndexError('an index can only have a single ellipsis (...)') - if len(ell): - if len(n) > self.ndim: - n.remove(Ellipsis) - else: - n[ell[0]] = None - while len(n) < self.ndim: - n.insert(ell[0], None) - while len(n) < self.ndim: - n.append(None) - - axes_idx = [self.shape.axes.find(i) for i in 'yxczt'] - n = [n[j] if 0 <= j < len(n) else None for j in axes_idx] # reorder n - - new_slice = [] - for s, e in zip(self.slice, n): - if e is None: - new_slice.append(s) - else: - new_slice.append(s[e]) - - # TODO: check output dimensionality when requested shape in some dimension is 1 - if all([isinstance(s, Number) or a < 0 and s.size == 1 for s, a in zip(new_slice, axes_idx)]): - return self.block(*new_slice).item() - else: - new = View(self) - new.slice = new_slice - new._shape = Shape([1 if isinstance(s, Number) else len(s) for s in new_slice]) - new.axes = ''.join(j for j in self.axes if j in [i for i, s in zip('yxczt', new_slice) - if not isinstance(s, Number)]) - return new - - def __getstate__(self) -> dict[str: Any]: - return ({key: value for key, value in self.__dict__.items() if key not in self.do_not_pickle} | - {'cache_size': self.cache.maxlen}) - - def __len__(self) -> int: - return self.shape[0] - - def __repr__(self) -> str: - return self.summary - - def __setstate__(self, state: dict[str, Any]) -> None: - """ What happens during unpickling """ - self.__dict__.update({key: value for key, value in state.items() if key != 'cache_size'}) - if isinstance(self, AbstractReader): - self.open() - self.cache = DequeDict(state.get('cache_size', 16)) - - def __str__(self) -> str: - return str(self.path) - - # @property - # def __array_interface__(self) -> dict[str, Any]: - # return dict(shape=tuple(self.shape), typestr=self.dtype.str, version=3, data=self.tobytes()) - - def tobytes(self) -> bytes: - return self.flatten().tobytes() - - def __array__(self, dtype: DTypeLike = None, copy: bool = None) -> np.ndarray: - if copy is False: - raise ValueError("`copy=False` isn't supported. 
A copy is always created.") - block = self.block(*self.slice) - axes_idx = [self.shape.axes.find(i) for i in 'yxczt'] - axes_squeeze = tuple({i for i, j in enumerate(axes_idx) if j == -1}.union( - {i for i, j in enumerate(self.slice) if isinstance(j, Number)})) - block = block.squeeze(axes_squeeze) - if dtype is not None: - block = block.astype(dtype) - if block.ndim == 0: - return block.item() - axes = ''.join(j for i, j in enumerate('yxczt') if i not in axes_squeeze) - return block.transpose([axes.find(i) for i in self.shape.axes if i in axes]) - - def __array_arg_fun__(self, fun: Callable[[ArrayLike, Optional[int]], Number | np.ndarray], - axis: int | str = None, out: np.ndarray = None) -> Number | np.ndarray: - """ frame-wise application of np.argmin and np.argmax """ - if axis is None: - value = arg = None - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - new = np.asarray(self[in_idx]) - new_arg = np.unravel_index(fun(new), new.shape) # type: ignore - new_value = new[new_arg] - if value is None: - arg = new_arg + idx - value = new_value - else: - i = fun((value, new_value)) # type: ignore - arg = (arg, new_arg + idx)[i] - value = (value, new_value)[i] - axes = ''.join(i for i in self.axes if i in 'yx') + 'czt' - arg = np.ravel_multi_index([arg[axes.find(i)] for i in self.axes], self.shape) - if out is None: - return arg - else: - out.itemset(arg) - return out - else: - if isinstance(axis, str): - axis_str, axis_idx = axis, self.axes.index(axis) - else: - axis_str, axis_idx = self.axes[axis], axis - if axis_str not in self.axes: - raise IndexError(f'Axis {axis_str} not in {self.axes}.') - out_shape = list(self.shape) - out_axes = list(self.axes) - out_shape.pop(axis_idx) - out_axes.pop(axis_idx) - if out is None: - out = np.zeros(out_shape, int) - if axis_str in 'yx': - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - new = self[in_idx] - out[out_idx] = fun(np.asarray(new), new.axes.find(axis_str)) - else: - value = np.zeros(out.shape, self.dtype) - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - new_value = self[in_idx] - new_arg = np.full_like(new_value, idx['czt'.find(axis_str)]) - if idx['czt'.find(axis_str)] == 0: - value[out_idx] = new_value - out[out_idx] = new_arg - else: - old_value = value[out_idx] - i = fun((old_value, new_value), 0) - value[out_idx] = np.where(i, new_value, old_value) - out[out_idx] = np.where(i, new_arg, out[out_idx]) - return out - - def __array_fun__(self, funs: Sequence[Callable[[ArrayLike], Number | np.ndarray]], axis: int | str = None, - dtype: DTypeLike = None, out: np.ndarray = None, keepdims: bool = False, - initials: list[Number | np.ndarray] = None, where: bool | int | np.ndarray = True, - ffuns: Sequence[Callable[[ArrayLike], np.ndarray]] = None, - cfun: Callable[..., np.ndarray] = None) -> Number | np.ndarray: - """ frame-wise application of np.min, np.max, np.sum, np.mean and their nan equivalents """ - p = re.compile(r'\d') - dtype = self.dtype if dtype is None else np.dtype(dtype) - if 
initials is None: - initials = [None for _ in funs] - if ffuns is None: - ffuns = [None for _ in funs] - - def ffun_(frame: ArrayLike) -> np.ndarray: - return np.asarray(frame) - ffuns = [ffun_ if ffun is None else ffun for ffun in ffuns] - if cfun is None: - def cfun(*res): # noqa - return res[0] - - # TODO: smarter transforms - if axis is None: - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - w = where if where is None or isinstance(where, bool) else where[in_idx] - initials = [fun(np.asarray(ffun(self[in_idx])), initial=initial, where=w) # type: ignore - for fun, ffun, initial in zip(funs, ffuns, initials)] - res = cfun(*initials) - res = (np.round(res) if dtype.kind in 'ui' else res).astype(p.sub('', dtype.name)) - if keepdims: - res = np.array(res, dtype, ndmin=self.ndim) - if out is None: - return res - else: - out.itemset(res) - return out - else: - if isinstance(axis, str): - axis_str, axis_idx = axis, self.axes.index(axis) - else: - axis_idx = axis % self.ndim - axis_str = self.axes[axis_idx] - if axis_str not in self.axes: - raise IndexError(f'Axis {axis_str} not in {self.axes}.') - out_shape = list(self.shape) - out_axes = list(self.axes) - if not keepdims: - out_shape.pop(axis_idx) - out_axes.pop(axis_idx) - if out is None: - out = np.zeros(out_shape, dtype) - if axis_str in 'yx': - yx = 'yx' if self.axes.find('x') > self.axes.find('y') else 'yx' - frame_ax = yx.find(axis_str) - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - w = where if where is None or isinstance(where, bool) else where[in_idx] - res = cfun(*[fun(ffun(self[in_idx]), frame_ax, initial=initial, where=w) # type: ignore - for fun, ffun, initial in zip(funs, ffuns, initials)]) - out[out_idx] = (np.round(res) if out.dtype.kind in 'ui' else res).astype(p.sub('', dtype.name)) - else: - tmps = [np.zeros(out_shape) for _ in ffuns] - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): - yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) - - if idx['czt'.find(axis_str)] == 0: - w = where if where is None or isinstance(where, bool) else (where[in_idx],) - for tmp, fun, ffun, initial in zip(tmps, funs, ffuns, initials): - tmp[out_idx] = fun((ffun(self[in_idx]),), 0, initial=initial, where=w) # type: ignore - else: - w = where if where is None or isinstance(where, bool) else \ - (np.ones_like(where[in_idx]), where[in_idx]) - for tmp, fun, ffun in zip(tmps, funs, ffuns): - tmp[out_idx] = fun((tmp[out_idx], ffun(self[in_idx])), 0, where=w) # type: ignore - out[...] 
= (np.round(cfun(*tmps)) if out.dtype.kind in 'ui' else - cfun(*tmps)).astype(p.sub('', dtype.name)) - return out - - @property - def axes(self) -> str: - return self.shape.axes - - @axes.setter - def axes(self, value: str) -> None: - shape = self.shape[value] - if isinstance(shape, Number): - shape = (shape,) - self._shape = Shape(shape, value) - - @property - def dtype(self) -> np.dtype: - return self._dtype - - @dtype.setter - def dtype(self, value: DTypeLike) -> None: - self._dtype = np.dtype(value) - - @cached_property - def extrametadata(self) -> Optional[Any]: - if isinstance(self.path, Path): - if self.path.with_suffix('.pzl2').exists(): - pname = self.path.with_suffix('.pzl2') - elif self.path.with_suffix('.pzl').exists(): - pname = self.path.with_suffix('.pzl') - else: - return - try: - return self.get_config(pname) - except Exception: # noqa - return - return - - @property - def ndim(self) -> int: - return len(self.shape) - - @property - def size(self) -> int: - return np.prod(self.shape) # type: ignore - - @property - def shape(self) -> Shape: - return self._shape - - @shape.setter - def shape(self, value: Shape | tuple[int, ...]) -> None: - if isinstance(value, Shape): - self._shape = value - else: - self._shape = Shape([value['yxczt'.find(i.lower())] for i in self.axes], self.axes) - - @property - def summary(self) -> str: - """ gives a helpful summary of the recorded experiment """ - s = [f'path/filename: {self.path}', - f'series/pos: {self.series}', - f"reader: {self.base.__class__.__module__.split('.')[-1]}"] - s.extend((f'dtype: {self.dtype}', - f'shape ({self.axes}):'.ljust(15) + f"{' x '.join(str(i) for i in self.shape)}")) - if self.pxsize_um: - s.append(f'pixel size: {1000 * self.pxsize_um:.2f} nm') - if self.zstack and self.deltaz_um: - s.append(f'z-interval: {1000 * self.deltaz_um:.2f} nm') - if self.exposuretime_s and not all(e is None for e in self.exposuretime_s): - s.append(f'exposuretime: {self.exposuretime_s[0]:.2f} s') - if self.timeseries and self.timeinterval: - s.append(f'time interval: {self.timeinterval:.3f} s') - if self.binning: - s.append('binning: {}x{}'.format(*self.binning)) - if self.laserwavelengths: - s.append('laser colors: ' + ' | '.join([' & '.join(len(w) * ('{:.0f}',)).format(*w) - for w in self.laserwavelengths]) + ' nm') - if self.laserpowers: - s.append('laser powers: ' + ' | '.join([' & '.join(len(p) * ('{:.3g}',)).format(*[100 * i for i in p]) - for p in self.laserpowers]) + ' %') - if self.objective and self.objective.model: - s.append(f'objective: {self.objective.model}') - if self.magnification: - s.append(f'magnification: {self.magnification}x') - if self.tubelens and self.tubelens.model: - s.append(f'tubelens: {self.tubelens.model}') - if self.filter: - s.append(f'filterset: {self.filter}') - if self.powermode: - s.append(f'powermode: {self.powermode}') - if self.collimator: - s.append('collimator: ' + (' {}' * len(self.collimator)).format(*self.collimator)) - if self.tirfangle: - s.append('TIRF angle: ' + (' {:.2f}°' * len(self.tirfangle)).format(*self.tirfangle)) - if self.gain: - s.append('gain: ' + (' {:.0f}' * len(self.gain)).format(*self.gain)) - if self.pcf: - s.append('pcf: ' + (' {:.2f}' * len(self.pcf)).format(*self.pcf)) - return '\n'.join(s) - - @property - def T(self) -> Imread: # noqa - return self.transpose() # type: ignore - - @cached_property - def timeseries(self) -> bool: - return self.shape['t'] > 1 - - @cached_property - def zstack(self) -> bool: - return self.shape['z'] > 1 - - @wraps(np.ndarray.argmax) - def 
argmax(self, *args, **kwargs): - return self.__array_arg_fun__(np.argmax, *args, **kwargs) - - @wraps(np.ndarray.argmin) - def argmin(self, *args, **kwargs): - return self.__array_arg_fun__(np.argmin, *args, **kwargs) - - @wraps(np.ndarray.max) - def max(self, axis=None, out=None, keepdims=False, initial=None, where=True, **_): - return self.__array_fun__([np.max], axis, None, out, keepdims, [initial], where) - - @wraps(np.ndarray.mean) - def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True, **_): - dtype = dtype or float - n = np.prod(self.shape) if axis is None else self.shape[axis] - - def sfun(frame: ArrayLike) -> np.ndarray: - return np.asarray(frame).astype(float) - - def cfun(res: np.ndarray) -> np.ndarray: - return res / n - - return self.__array_fun__([np.sum], axis, dtype, out, keepdims, None, where, [sfun], cfun) - - @wraps(np.ndarray.min) - def min(self, axis=None, out=None, keepdims=False, initial=None, where=True, **_): - return self.__array_fun__([np.min], axis, None, out, keepdims, [initial], where) - - @wraps(np.moveaxis) - def moveaxis(self, source, destination): - raise NotImplementedError('moveaxis is not implemented') - - @wraps(np.nanmax) - def nanmax(self, axis=None, out=None, keepdims=False, initial=None, where=True, **_): - return self.__array_fun__([np.nanmax], axis, None, out, keepdims, [initial], where) - - @wraps(np.nanmean) - def nanmean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True, **_): - dtype = dtype or float - - def sfun(frame): - return np.asarray(frame).astype(float) - - def nfun(frame): - return np.invert(np.isnan(frame)) - - return self.__array_fun__([np.nansum, np.sum], axis, dtype, out, keepdims, None, where, (sfun, nfun), truediv) - - @wraps(np.nanmin) - def nanmin(self, axis=None, out=None, keepdims=False, initial=None, where=True, **_): - return self.__array_fun__([np.nanmin], axis, None, out, keepdims, [initial], where) - - @wraps(np.nansum) - def nansum(self, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True, **_): - return self.__array_fun__([np.nansum], axis, dtype, out, keepdims, [initial], where) - - @wraps(np.nanstd) - def nanstd(self, axis=None, dtype=None, out=None, ddof=0, keepdims=None, *, where=None): - return self.nanvar(axis, dtype, out, ddof, keepdims, where=where, std=True) - - @wraps(np.nanvar) - def nanvar(self, axis=None, dtype=None, out=None, ddof=0, keepdims=None, *, where=True, std=False): - dtype = dtype or float - - def sfun(frame): - return np.asarray(frame).astype(float) - - def s2fun(frame): - return np.asarray(frame).astype(float) ** 2 - - def nfun(frame): - return np.invert(np.isnan(frame)) - - if std: - def cfun(s, s2, n): - return np.sqrt((s2 - s ** 2 / n) / (n - ddof)) - else: - def cfun(s, s2, n): - return (s2 - s ** 2 / n) / (n - ddof) - return self.__array_fun__([np.nansum, np.nansum, np.sum], axis, dtype, out, keepdims, None, where, - (sfun, s2fun, nfun), cfun) - - @wraps(np.ndarray.flatten) - def flatten(self, *args, **kwargs): - return np.asarray(self).flatten(*args, **kwargs) - - @wraps(np.ndarray.reshape) - def reshape(self, *args, **kwargs): - return np.asarray(self).reshape(*args, **kwargs) - - @wraps(np.ndarray.squeeze) - def squeeze(self, axes=None): - new = self.copy() - if axes is None: - axes = tuple(i for i, j in enumerate(new.shape) if j == 1) - elif isinstance(axes, Number): - axes = (axes,) - else: - axes = tuple(new.axes.find(ax) if isinstance(ax, str) else ax for ax in axes) - if any([new.shape[ax] != 1 for ax in axes]): - 
-            raise ValueError('cannot select an axis to squeeze out which has size not equal to one')
-        new.axes = ''.join(j for i, j in enumerate(new.axes) if i not in axes)
-        return new
-
-    @wraps(np.ndarray.std)
-    def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=None, *, where=True):
-        return self.var(axis, dtype, out, ddof, keepdims, where=where, std=True)  # type: ignore
-
-    @wraps(np.ndarray.sum)
-    def sum(self, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True, **_):
-        return self.__array_fun__([np.sum], axis, dtype, out, keepdims, [initial], where)
-
-    @wraps(np.ndarray.swapaxes)
-    def swapaxes(self, axis1, axis2):
-        new = self.copy()
-        # axes is a string, which does not support item assignment: swap in a list and rejoin
-        axes = list(new.axes)
-        if isinstance(axis1, str):
-            axis1 = axes.index(axis1)
-        if isinstance(axis2, str):
-            axis2 = axes.index(axis2)
-        axes[axis1], axes[axis2] = axes[axis2], axes[axis1]
-        new.axes = ''.join(axes)
-        return new
-
-    @wraps(np.ndarray.transpose)
-    def transpose(self, *axes):
-        new = self.copy()
-        if not axes:
-            new.axes = new.axes[::-1]
-        else:
-            new.axes = ''.join(ax if isinstance(ax, str) else new.axes[ax] for ax in axes)
-        return new
-
-    @wraps(np.ndarray.var)
-    def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=None, *, where=True, std=False):
-        dtype = dtype or float
-        n = np.prod(self.shape) if axis is None else self.shape[axis]
-
-        def sfun(frame):
-            return np.asarray(frame).astype(float)
-
-        def s2fun(frame):
-            return np.asarray(frame).astype(float) ** 2
-
-        if std:
-            def cfun(s, s2):
-                return np.sqrt((s2 - s ** 2 / n) / (n - ddof))
-        else:
-            def cfun(s, s2):
-                return (s2 - s ** 2 / n) / (n - ddof)
-        return self.__array_fun__([np.sum, np.sum], axis, dtype, out, keepdims, None, where, (sfun, s2fun), cfun)
-
-    def asarray(self) -> np.ndarray:
-        return self.__array__()
-
-    @wraps(np.ndarray.astype)
-    def astype(self, dtype, *_, **__):
-        new = self.copy()
-        new.dtype = dtype
-        return new
-
-    def block(self, y: int | Sequence[int] = None, x: int | Sequence[int] = None,
-              c: int | Sequence[int] = None, z: int | Sequence[int] = None,
-              t: int | Sequence[int] = None) -> np.ndarray:
-        """ returns 5D block of frames """
-        y, x, c, z, t = (np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1)
-                         for i, e in zip('yxczt', (y, x, c, z, t)))
-        d = np.empty((len(y), len(x), len(c), len(z), len(t)), self.dtype)
-        for (ci, cj), (zi, zj), (ti, tj) in product(enumerate(c), enumerate(z), enumerate(t)):
-            d[:, :, ci, zi, ti] = self.frame(cj, zj, tj)[y][:, x]  # type: ignore
-        return d
-
-    def copy(self) -> View:
-        return View(self)
-
-    def data(self, c: int | Sequence[int] = 0, z: int | Sequence[int] = 0, t: int | Sequence[int] = 0) -> np.ndarray:
-        """ returns 3D stack of frames """
-        c, z, t = (np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) for i, e in zip('czt', (c, z, t)))
-        return np.dstack([self.frame(ci, zi, ti) for ci, zi, ti in product(c, z, t)])
-
-    def frame(self, c: int = 0, z: int = 0, t: int = 0) -> np.ndarray:
-        """ returns single 2D frame """
-        c = self.get_channel(c)
-        c %= self.base.shape['c']
-        z %= self.base.shape['z']
-        t %= self.base.shape['t']
-
-        # cache last n (default 16) frames in memory for speed (~250x faster)
-        key = (c, z, t, self.transform, self.frame_decorator)
-        if self.cache.maxlen and key in self.cache:
-            self.cache.move_to_end(key)
-            f = self.cache[key]
-        else:
-            f = self.transform[self.channel_names[c], t].frame(self.__frame__(c, z, t))
-            if self.frame_decorator is not None:
-                f = self.frame_decorator(self, f, c, z, t)
-            if self.cache.maxlen:
-                self.cache[key] = f
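-        # return a copy of the (possibly cached) frame so callers cannot mutate cached data,
-        # casting only when a dtype override was requested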
-        if self.dtype is not None:
-            return f.copy().astype(self.dtype)
-        else:
-            return f.copy()
-
-    def get_channel(self, channel_name: str | int) -> int:
-        if not isinstance(channel_name, str):
-            return channel_name
-        else:
-            c = [i for i, c in enumerate(self.channel_names) if c.lower().startswith(channel_name.lower())]
-            assert len(c) > 0, f'Channel {channel_name} not found in {self.channel_names}'
-            assert len(c) < 2, f'Channel {channel_name} not unique in {self.channel_names}'
-            return c[0]
-
-    @staticmethod
-    def get_config(file: Path | str) -> Any:
-        """ Open a yml config file """
-        loader = yaml.SafeLoader
-        loader.add_implicit_resolver(
-            r'tag:yaml.org,2002:float',
-            re.compile(r'''^(?:
-                [-+]?([0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
-                |[-+]?([0-9][0-9_]*)([eE][-+]?[0-9]+)
-                |\.[0-9_]+(?:[eE][-+][0-9]+)?
-                |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
-                |[-+]?\.(?:inf|Inf|INF)
-                |\.(?:nan|NaN|NAN))$''', re.X),
-            list(r'-+0123456789.'))
-        with open(file) as f:
-            return yaml.load(f, loader)
-
-    def get_czt(self, c: int | Sequence[int], z: int | Sequence[int],
-                t: int | Sequence[int]) -> tuple[list[int], list[int], list[int]]:
-        czt = []
-        for i, n in zip('czt', (c, z, t)):
-            if n is None:
-                czt.append(list(range(self.shape[i])))
-            elif isinstance(n, range):
-                if n.stop < 0:
-                    stop = n.stop % self.shape[i]
-                elif n.stop > self.shape[i]:
-                    stop = self.shape[i]
-                else:
-                    stop = n.stop
-                czt.append(list(range(n.start % self.shape[i], stop, n.step)))
-            elif isinstance(n, Number):
-                czt.append([n % self.shape[i]])
-            else:
-                czt.append([k % self.shape[i] for k in n])
-        return [self.get_channel(c) for c in czt[0]], *czt[1:3]  # type: ignore
-
-    @staticmethod
-    def bioformats_ome(path: str | Path) -> OME:
-        """ Use java BioFormats to make an ome metadata structure. """
-        with multiprocessing.get_context('spawn').Pool(1) as pool:
-            return pool.map(bioformats_ome, (path,))[0]
-
-    @staticmethod
-    def fix_ome(ome: OME) -> OME:
-        # fix ome if necessary
-        for image in ome.images:
-            try:
-                if image.pixels.physical_size_z is None and len(set([plane.the_z
-                                                                     for plane in image.pixels.planes])) > 1:
-                    z = np.array([(plane.position_z * ureg.Quantity(plane.position_z_unit.value).to(ureg.m).magnitude,
-                                   plane.the_z)
-                                  for plane in image.pixels.planes if plane.the_c == 0 and plane.the_t == 0])
-                    i = np.argsort(z[:, 1])
-                    image.pixels.physical_size_z = np.nanmean(np.true_divide(*np.diff(z[i], axis=0).T)) * 1e6
-                    image.pixels.physical_size_z_unit = 'µm'  # type: ignore
-            except Exception:  # noqa
-                pass
-        return ome
-
-    @staticmethod
-    def read_ome(path: str | Path) -> Optional[OME]:
-        path = Path(path)
-        if path.with_suffix('.ome.xml').exists():
-            return OME.from_xml(path.with_suffix('.ome.xml'))
-
-    def get_ome(self) -> OME:
-        """ overload this """
-        return self.bioformats_ome(self.path)
-
-    @cached_property
-    def ome(self) -> OME:
-        cache = OmeCache()
-        if self.path not in cache:
-            ome = self.read_ome(self.path)
-            if ome is None:
-                ome = self.get_ome()
-            cache[self.path] = self.fix_ome(ome)
-        return cache[self.path]
-
-    def is_noise(self, volume: ArrayLike = None) -> bool:
-        """ True if volume only has noise """
-        if volume is None:
-            volume = self
-        fft = np.fft.fftn(volume)
-        corr = np.fft.fftshift(np.fft.ifftn(fft * fft.conj()).real / np.sum(volume ** 2))
-        return 1 - corr[tuple([0] * corr.ndim)] < 0.0067
-
-    @staticmethod
-    def kill_vm() -> None:
-        JVM().kill_vm()
-
-    def new(self, *args: Any, **kwargs: Any) -> View:
-        warnings.warn('Imread.new has been deprecated, use Imread.view instead.', DeprecationWarning, 2)
-        return self.view(*args, **kwargs)
-
-    def save_as_movie(self, fname: Path | str = None,
-                      c: int | Sequence[int] = None, z: int | Sequence[int] = None,  # noqa
-                      t: str | int | Sequence[int] = None,  # noqa
-                      colors: tuple[str] = None, brightnesses: tuple[float] = None,
-                      scale: int = None, bar: bool = True) -> None:
-        """ saves the image as a mp4 or mkv file """
-        from matplotlib.colors import to_rgb
-        from skvideo.io import FFmpegWriter
-
-        if t is None:
-            t = np.arange(self.shape['t'])
-        elif isinstance(t, str):
-            t = eval(f"np.arange(self.shape['t'])[{t}]")
-        elif np.isscalar(t):
-            t = (t,)
-
-        def get_ab(tyx: Imread, p: tuple[float, float] = (1, 99)) -> tuple[float, float]:
-            s = tyx.flatten()
-            s = s[s > 0]
-            a, b = np.percentile(s, p)
-            if a == b:
-                a, b = np.min(s), np.max(s)
-            if a == b:
-                a, b = 0, 1
-            return a, b
-
-        def cframe(frame: ArrayLike, color: str, a: float, b: float, scale: float = 1) -> np.ndarray:  # noqa
-            color = to_rgb(color)
-            frame = (frame - a) / (b - a)
-            frame = np.dstack([255 * frame * i for i in color])
-            return np.clip(np.round(frame), 0, 255).astype('uint8')
-
-        ab = list(zip(*[get_ab(i) for i in self.transpose('cztyx')]))  # type: ignore
-        colors = colors or ('r', 'g', 'b')[:self.shape['c']] + max(0, self.shape['c'] - 3) * ('w',)
-        brightnesses = brightnesses or (1,) * self.shape['c']
-        scale = scale or 1
-        shape_x = 2 * ((self.shape['x'] * scale + 1) // 2)
-        shape_y = 2 * ((self.shape['y'] * scale + 1) // 2)
-
-        with FFmpegWriter(
-                str(fname).format(name=self.path.stem, path=str(self.path.parent)),
-                outputdict={'-vcodec': 'libx264', '-preset': 'veryslow', '-pix_fmt': 'yuv420p', '-r': '7',
-                            '-vf': f'setpts={25 / 7}*PTS,scale={shape_x}:{shape_y}:flags=neighbor'}
-        ) as movie:
-            im = self.transpose('tzcyx')  # type: ignore
-            for ti in tqdm(t, desc='Saving movie', disable=not bar):
-                movie.writeFrame(np.max([cframe(yx, c, a, b / s, scale)
-                                         for yx, a, b, c, s in zip(im[ti].max('z'), *ab, colors, brightnesses)], 0))
-
-    def save_as_tiff(self, fname: Path | str = None, c: int | Sequence[int] = None, z: int | Sequence[int] = None,
-                     t: int | Sequence[int] = None, split: bool = False, bar: bool = True, pixel_type: str = 'uint16',
-                     **kwargs: Any) -> None:
-        """ saves the image as a tif file
-            split: split channels into different files """
-        if fname is None:
-            fname = self.path.with_suffix('.tif')
-        else:
-            fname = Path(str(fname).format(name=self.path.stem, path=str(self.path.parent)))
-        if fname == self.path:
-            raise FileExistsError(f'File {fname} exists already.')
-        if split:
-            for i in range(self.shape['c']):
-                if self.timeseries:
-                    self.save_as_tiff(fname.with_name(f'{fname.stem}_C{i:01d}').with_suffix('.tif'), i, 0, None, False,
-                                      bar, pixel_type)
-                else:
-                    self.save_as_tiff(fname.with_name(f'{fname.stem}_C{i:01d}').with_suffix('.tif'), i, None, 0, False,
-                                      bar, pixel_type)
-        else:
-            n = [c, z, t]
-            for i, ax in enumerate('czt'):
-                if n[i] is None:
-                    n[i] = range(self.shape[ax])
-                elif not isinstance(n[i], (tuple, list)):
-                    n[i] = (n[i],)
-
-            shape = [len(i) for i in n]
-            with TransformTiff(self, fname.with_suffix('.tif'), dtype=pixel_type,
-                               pxsize=self.pxsize_um, deltaz=self.deltaz_um, **kwargs) as tif:
-                for i, m in tqdm(zip(product(*[range(s) for s in shape]), product(*n)),  # noqa
-                                 total=np.prod(shape), desc='Saving tiff', disable=not bar):
-                    tif.save(m, *i)
-
-    def with_transform(self, channels: bool = True, drift: bool = False, file: Path | str = None,
-                       bead_files: Sequence[Path | str] = ()) -> View:
-        """
returns a view where channels and/or frames are registered with an affine transformation - channels: True/False register channels using bead_files - drift: True/False register frames to correct drift - file: load registration from file with name file, default: transform.yml in self.path.parent - bead_files: files used to register channels, default: files in self.path.parent, - with names starting with 'beads' - """ - view = self.view() - if file is None: - file = Path(view.path.parent) / 'transform.yml' - else: - file = Path(file) - if not bead_files: - try: - bead_files = Transforms.get_bead_files(view.path.parent) - except Exception: # noqa - if not file.exists(): - raise Exception('No transform file and no bead file found.') - bead_files = () - - if channels: - try: - view.transform = Transforms.from_file(file, T=drift) - except Exception: # noqa - view.transform = Transforms().with_beads(view.cyllens, bead_files) - if drift: - view.transform = view.transform.with_drift(view) - view.transform.save(file.with_suffix('.yml')) - view.transform.save_channel_transform_tiff(bead_files, file.with_suffix('.tif')) - elif drift: - try: - view.transform = Transforms.from_file(file, C=False) - except Exception: # noqa - view.transform = Transforms().with_drift(self) - view.transform.adapt(view.frameoffset, view.shape.yxczt, view.channel_names) - return view - - def set_cache_size(self, cache_size: int) -> None: - assert isinstance(cache_size, int) and cache_size >= 0 - self.cache.maxlen = cache_size - self.cache.truncate() - - @staticmethod - def split_path_series(path: Path | str) -> tuple[Path, int]: - if isinstance(path, str): - path = Path(path) - if isinstance(path, Path) and path.name.startswith('Pos') and path.name.lstrip('Pos').isdigit(): - return path.parent, int(path.name.lstrip('Pos')) - return path, 0 - - def view(self, *args: Any, **kwargs: Any) -> View: - return View(self, *args, **kwargs) - - -class View(Imread, ABC): - def __init__(self, base: Imread, dtype: DTypeLike = None) -> None: - super().__init__(base.base, base.slice, base.shape, dtype or base.dtype, base.frame_decorator) - self.transform = base.transform - - def __getattr__(self, item: str) -> Any: - if not hasattr(self.base, item): - raise AttributeError(f'{self.__class__} object has no attribute {item}') - return self.base.__getattribute__(item) - - -class AbstractReader(Imread, metaclass=ABCMeta): - priority = 99 - do_not_pickle = 'cache' - ureg = ureg - - @staticmethod - @abstractmethod - def _can_open(path: Path | str) -> bool: - """ Override this method, and return true when the subclass can open the file """ - return False - - @staticmethod - def get_positions(path: str | Path) -> Optional[list[int]]: - return None - - @abstractmethod - def __frame__(self, c: int, z: int, t: int) -> np.ndarray: - """ Override this, return the frame at c, z, t """ - return np.random.randint(0, 255, self.shape['yx']) - - def open(self) -> None: - """ Optionally override this, open file handles etc. - filehandles cannot be pickled and should be marked such by setting do_not_pickle = 'file_handle_name' """ - return - - def close(self) -> None: - """ Optionally override this, close file handles etc. 
""" - return - - def __init__(self, path: Path | str | Imread | Any = None, dtype: DTypeLike = None, axes: str = None) -> None: - if isinstance(path, Imread): - return - super().__init__() - self.isclosed = False - if isinstance(path, str): - path = Path(path) - self.path, self.series = self.split_path_series(path) - if isinstance(path, Path) and path.exists(): - self.title = self.path.name - self.acquisitiondate = datetime.fromtimestamp(self.path.stat().st_mtime).strftime('%y-%m-%dT%H:%M:%S') - else: # ndarray - self.title = self.__class__.__name__ - self.acquisitiondate = 'now' - - self.reader = None - self.pcf = None - self.powermode = None - self.collimator = None - self.tirfangle = None - self.cyllens = ['None', 'None'] - self.duolink = 'None' - self.detector = [0, 1] - self.track = [0] - self.cache = DequeDict(16) - self.frameoffset = 0, 0 # how far apart the centers of frame and sensor are - - self.open() - # extract some metadata from ome - instrument = self.ome.instruments[0] if self.ome.instruments else None - image = self.ome.images[self.series if len(self.ome.images) > 1 else 0] - pixels = image.pixels - self.shape = pixels.size_y, pixels.size_x, pixels.size_c, pixels.size_z, pixels.size_t - self.base_shape = Shape((pixels.size_y, pixels.size_x, pixels.size_c, pixels.size_z, pixels.size_t), 'yxczt') - self.dtype = pixels.type.value if dtype is None else dtype - self.pxsize = pixels.physical_size_x_quantity - try: - self.exposuretime = tuple(find(image.pixels.planes, the_c=c).exposure_time_quantity - for c in range(self.shape['c'])) - except AttributeError: - self.exposuretime = () - - if self.zstack: - self.deltaz = image.pixels.physical_size_z_quantity - self.deltaz_um = None if self.deltaz is None else self.deltaz.to(self.ureg.um).m - else: - self.deltaz = self.deltaz_um = None - if image.objective_settings: - self.objective = find(instrument.objectives, id=image.objective_settings.id) - else: - self.objective = None - try: - t0 = find(image.pixels.planes, the_c=0, the_t=0, the_z=0).delta_t - t1 = find(image.pixels.planes, the_c=0, the_t=self.shape['t'] - 1, the_z=0).delta_t - self.timeinterval = (t1 - t0) / (self.shape['t'] - 1) if self.shape['t'] > 1 and t1 > t0 else None - except AttributeError: - self.timeinterval = None - try: - self.binning = [int(i) for i in image.pixels.channels[0].detector_settings.binning.value.split('x')] - if self.pxsize is not None: - self.pxsize *= self.binning[0] - except (AttributeError, IndexError, ValueError): - self.binning = None - self.channel_names = [channel.name for channel in image.pixels.channels] - self.channel_names += [chr(97 + i) for i in range(len(self.channel_names), self.shape['c'])] - self.cnamelist = self.channel_names - try: - optovars = [objective for objective in instrument.objectives if 'tubelens' in objective.id.lower()] - except AttributeError: - optovars = [] - if len(optovars) == 0: - self.tubelens = None - else: - self.tubelens = optovars[0] - if self.objective: - if self.tubelens: - self.magnification = self.objective.nominal_magnification * self.tubelens.nominal_magnification - else: - self.magnification = self.objective.nominal_magnification - self.NA = self.objective.lens_na - else: - self.magnification = None - self.NA = None - - self.gain = [find(instrument.detectors, id=channel.detector_settings.id).amplification_gain - for channel in image.pixels.channels - if channel.detector_settings - and find(instrument.detectors, id=channel.detector_settings.id).amplification_gain] - self.laserwavelengths = 
[(channel.excitation_wavelength_quantity.to(self.ureg.nm).m,) - for channel in pixels.channels if channel.excitation_wavelength_quantity] - self.laserpowers = try_default(lambda: [(1 - channel.light_source_settings.attenuation,) - for channel in pixels.channels], []) - self.filter = try_default( # type: ignore - lambda: [find(instrument.filter_sets, id=channel.filter_set_ref.id).model - for channel in image.pixels.channels], None) - self.pxsize_um = None if self.pxsize is None else self.pxsize.to(self.ureg.um).m - self.exposuretime_s = [None if i is None else i.to(self.ureg.s).m for i in self.exposuretime] - - if axes is None: - self.axes = ''.join(i for i in 'cztyx' if self.shape[i] > 1) - elif axes.lower() == 'full': - self.axes = 'cztyx' - else: - self.axes = axes - self.slice = [np.arange(s, dtype=int) for s in self.shape.yxczt] - - m = self.extrametadata - if m is not None: - try: - self.cyllens = m['CylLens'] - self.duolink = m['DLFilterSet'].split(' & ')[m['DLFilterChannel']] - if 'FeedbackChannels' in m: - self.feedback = m['FeedbackChannels'] - else: - self.feedback = m['FeedbackChannel'] - except Exception: # noqa - self.cyllens = ['None', 'None'] - self.duolink = 'None' - self.feedback = [] - try: - self.cyllenschannels = np.where([self.cyllens[self.detector[c]].lower() != 'none' - for c in range(self.shape['c'])])[0].tolist() - except Exception: # noqa - pass - try: - s = int(re.findall(r'_(\d{3})_', self.duolink)[0]) * ureg.nm - except Exception: # noqa - s = 561 * ureg.nm - try: - sigma = [] - for c, d in enumerate(self.detector): - emission = (np.hstack(self.laserwavelengths[c]) + 22) * ureg.nm - sigma.append([emission[emission > s].max(initial=0), emission[emission < s].max(initial=0)][d]) - sigma = np.hstack(sigma) - sigma[sigma == 0] = 600 * ureg.nm - sigma /= 2 * self.NA * self.pxsize - self.sigma = sigma.magnitude.tolist() # type: ignore - except Exception: # noqa - self.sigma = [2] * self.shape['c'] - if not self.NA: - self.immersionN = 1 - elif 1.5 < self.NA: - self.immersionN = 1.661 - elif 1.3 < self.NA < 1.5: - self.immersionN = 1.518 - elif 1 < self.NA < 1.3: - self.immersionN = 1.33 - else: - self.immersionN = 1 - - p = re.compile(r'(\d+):(\d+)$') - try: - self.track, self.detector = zip(*[[int(i) for i in p.findall(find( - self.ome.images[self.series].pixels.channels, id=f'Channel:{c}').detector_settings.id)[0]] - for c in range(self.shape['c'])]) - except Exception: # noqa - pass - - -def main() -> None: - parser = ArgumentParser(description='Display info and save as tif') - parser.add_argument('-v', '--version', action='version', version=__version__) - parser.add_argument('file', help='image_file', type=str, nargs='*') - parser.add_argument('-w', '--write', help='path to tif/movie out, {folder}, {name} and {ext} take this from file in', - type=str, default=None) - parser.add_argument('-o', '--extract_ome', help='extract ome to xml file', action='store_true') - parser.add_argument('-r', '--register', help='register channels', action='store_true') - parser.add_argument('-c', '--channel', help='channel', type=int, default=None) - parser.add_argument('-z', '--zslice', help='z-slice', type=int, default=None) - parser.add_argument('-t', '--time', help='time (frames) in python slicing notation', type=str, default=None) - parser.add_argument('-s', '--split', help='split channels', action='store_true') - parser.add_argument('-f', '--force', help='force overwrite', action='store_true') - parser.add_argument('-C', '--movie-colors', help='colors for channels in movie', 
type=str, nargs='*') - parser.add_argument('-B', '--movie-brightnesses', help='scale brightness of each channel', - type=float, nargs='*') - parser.add_argument('-S', '--movie-scale', help='upscale movie xy size, int', type=float) - args = parser.parse_args() - - for file in tqdm(args.file, desc='operating on files', disable=len(args.file) == 1): - file = Path(file) - with Imread(file) as im: # noqa - if args.register: - im = im.with_transform() # noqa - if args.write: - write = Path(args.write.format(folder=str(file.parent), name=file.stem, ext=file.suffix)).absolute() # noqa - write.parent.mkdir(parents=True, exist_ok=True) - if write.exists() and not args.force: - print(f'File {args.write} exists already, add the -f flag if you want to overwrite it.') - elif write.suffix in ('.mkv', '.mp4'): - im.save_as_movie(write, args.channel, args.zslice, args.time, args.movie_colors, - args.movie_brightnesses, args.movie_scale, bar=len(args.file) == 1) - else: - im.save_as_tiff(write, args.channel, args.zslice, args.time, args.split, bar=len(args.file) == 1) - if args.extract_ome: - with open(im.path.with_suffix('.ome.xml'), 'w') as f: - f.write(im.ome.to_xml()) - if len(args.file) == 1: - print(im.summary) - - -from .readers import * # noqa diff --git a/ndbioimage/jvm.py b/ndbioimage/jvm.py deleted file mode 100644 index 777f324..0000000 --- a/ndbioimage/jvm.py +++ /dev/null @@ -1,77 +0,0 @@ -from pathlib import Path -from urllib import request - - -class JVMException(Exception): - pass - - -try: - class JVM: - """ There can be only one java virtual machine per python process, - so this is a singleton class to manage the jvm. - """ - _instance = None - vm_started = False - vm_killed = False - success = True - - def __new__(cls, *args): - if cls._instance is None: - cls._instance = object.__new__(cls) - return cls._instance - - def __init__(self, jars=None): - if not self.vm_started and not self.vm_killed: - try: - jar_path = Path(__file__).parent / 'jars' - if jars is None: - jars = {} - for jar, src in jars.items(): - if not (jar_path / jar).exists(): - JVM.download(src, jar_path / jar) - classpath = [str(jar_path / jar) for jar in jars.keys()] - - import jpype - jpype.startJVM(classpath=classpath) - except Exception: # noqa - self.vm_started = False - else: - self.vm_started = True - try: - import jpype.imports - from loci.common import DebugTools # noqa - from loci.formats import ChannelSeparator # noqa - from loci.formats import FormatTools # noqa - from loci.formats import ImageReader # noqa - from loci.formats import MetadataTools # noqa - - DebugTools.setRootLevel("ERROR") - - self.image_reader = ImageReader - self.channel_separator = ChannelSeparator - self.format_tools = FormatTools - self.metadata_tools = MetadataTools - except Exception: # noqa - pass - - if self.vm_killed: - raise Exception('The JVM was killed before, and cannot be restarted in this Python process.') - - @staticmethod - def download(src, dest): - print(f'Downloading {dest.name} to {dest}.') - dest.parent.mkdir(exist_ok=True) - dest.write_bytes(request.urlopen(src).read()) - - @classmethod - def kill_vm(cls): - self = cls._instance - if self is not None and self.vm_started and not self.vm_killed: - import jpype - jpype.shutdownJVM() # noqa - self.vm_started = False - self.vm_killed = True - -except ImportError: - JVM = None diff --git a/ndbioimage/readers/__init__.py b/ndbioimage/readers/__init__.py deleted file mode 100644 index d4ca3bc..0000000 --- a/ndbioimage/readers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__all__ 
= 'bfread', 'cziread', 'fijiread', 'ndread', 'seqread', 'tifread', 'metaseriesread' diff --git a/ndbioimage/readers/bfread.py b/ndbioimage/readers/bfread.py deleted file mode 100644 index 1bf6188..0000000 --- a/ndbioimage/readers/bfread.py +++ /dev/null @@ -1,208 +0,0 @@ -import multiprocessing -from abc import ABC -from multiprocessing import queues -from pathlib import Path -from traceback import format_exc - -import numpy as np - -from .. import JVM, AbstractReader, JVMException - -jars = {'bioformats_package.jar': 'https://downloads.openmicroscopy.org/bio-formats/latest/artifacts/' - 'bioformats_package.jar'} - - -class JVMReader: - def __init__(self, path: Path, series: int) -> None: - mp = multiprocessing.get_context('spawn') - self.path = path - self.series = series - self.queue_in = mp.Queue() - self.queue_out = mp.Queue() - self.done = mp.Event() - self.process = mp.Process(target=self.run) - self.process.start() - status, message = self.queue_out.get() - if status == 'status' and message == 'started': - self.is_alive = True - else: - raise JVMException(message) - - def close(self) -> None: - if self.is_alive: - self.done.set() - while not self.queue_in.empty(): - self.queue_in.get() - self.queue_in.close() - self.queue_in.join_thread() - while not self.queue_out.empty(): - print(self.queue_out.get()) - self.queue_out.close() - self.process.join() - self.process.close() - self.is_alive = False - - def frame(self, c: int, z: int, t: int) -> np.ndarray: - self.queue_in.put((c, z, t)) - status, message = self.queue_out.get() - if status == 'frame': - return message - else: - raise JVMException(message) - - def run(self) -> None: - """ Read planes from the image reader file. - adapted from python-bioformats/bioformats/formatreader.py - """ - jvm = None - try: - jvm = JVM(jars) - reader = jvm.image_reader() - ome_meta = jvm.metadata_tools.createOMEXMLMetadata() - reader.setMetadataStore(ome_meta) - reader.setId(str(self.path)) - reader.setSeries(self.series) - - open_bytes_func = reader.openBytes - width, height = int(reader.getSizeX()), int(reader.getSizeY()) - - pixel_type = reader.getPixelType() - little_endian = reader.isLittleEndian() - - if pixel_type == jvm.format_tools.INT8: - dtype = np.int8 - elif pixel_type == jvm.format_tools.UINT8: - dtype = np.uint8 - elif pixel_type == jvm.format_tools.UINT16: - dtype = 'u2' - elif pixel_type == jvm.format_tools.INT16: - dtype = 'i2' - elif pixel_type == jvm.format_tools.UINT32: - dtype = 'u4' - elif pixel_type == jvm.format_tools.INT32: - dtype = 'i4' - elif pixel_type == jvm.format_tools.FLOAT: - dtype = 'f4' - elif pixel_type == jvm.format_tools.DOUBLE: - dtype = 'f8' - else: - dtype = None - self.queue_out.put(('status', 'started')) - - while not self.done.is_set(): - try: - c, z, t = self.queue_in.get(True, 0.02) - if reader.isRGB() and reader.isInterleaved(): - index = reader.getIndex(z, 0, t) - image = np.frombuffer(open_bytes_func(index), dtype) - image.shape = (height, width, reader.getSizeC()) - if image.shape[2] > 3: - image = image[:, :, :3] - elif c is not None and reader.getRGBChannelCount() == 1: - index = reader.getIndex(z, c, t) - image = np.frombuffer(open_bytes_func(index), dtype) - image.shape = (height, width) - elif reader.getRGBChannelCount() > 1: - n_planes = reader.getRGBChannelCount() - rdr = jvm.channel_separator(reader) - planes = [np.frombuffer(rdr.openBytes(rdr.getIndex(z, i, t)), dtype) for i in range(n_planes)] - if len(planes) > 3: - planes = planes[:3] - elif len(planes) < 3: - # > 1 and < 3 means must 
be 2 - # see issue #775 - planes.append(np.zeros(planes[0].shape, planes[0].dtype)) - image = np.dstack(planes) - image.shape = (height, width, 3) - del rdr - elif reader.getSizeC() > 1: - images = [np.frombuffer(open_bytes_func(reader.getIndex(z, i, t)), dtype) - for i in range(reader.getSizeC())] - image = np.dstack(images) - image.shape = (height, width, reader.getSizeC()) - # if not channel_names is None: - # metadata = MetadataRetrieve(self.metadata) - # for i in range(self.reader.getSizeC()): - # index = self.reader.getIndex(z, 0, t) - # channel_name = metadata.getChannelName(index, i) - # if channel_name is None: - # channel_name = metadata.getChannelID(index, i) - # channel_names.append(channel_name) - elif reader.isIndexed(): - # - # The image data is indexes into a color lookup-table - # But sometimes the table is the identity table and just generates - # a monochrome RGB image - # - index = reader.getIndex(z, 0, t) - image = np.frombuffer(open_bytes_func(index), dtype) - if pixel_type in (jvm.format_tools.INT16, jvm.format_tools.UINT16): - lut = reader.get16BitLookupTable() - if lut is not None: - lut = np.array(lut) - # lut = np.array( - # [env.get_short_array_elements(d) - # for d in env.get_object_array_elements(lut)]) \ - # .transpose() - else: - lut = reader.get8BitLookupTable() - if lut is not None: - lut = np.array(lut) - # lut = np.array( - # [env.get_byte_array_elements(d) - # for d in env.get_object_array_elements(lut)]) \ - # .transpose() - image.shape = (height, width) - if (lut is not None) and not np.all(lut == np.arange(lut.shape[0])[:, np.newaxis]): - image = lut[image, :] - else: - index = reader.getIndex(z, 0, t) - image = np.frombuffer(open_bytes_func(index), dtype) - image.shape = (height, width) - - if image.ndim == 3: - self.queue_out.put(('frame', image[..., c])) - else: - self.queue_out.put(('frame', image)) - except queues.Empty: # noqa - continue - except (Exception,): - self.queue_out.put(('error', format_exc())) - finally: - if jvm is not None: - jvm.kill_vm() - - -def can_open(path: Path) -> bool: - try: - jvm = JVM(jars) - reader = jvm.image_reader() - reader.getFormat(str(path)) - return True - except (Exception,): - return False - finally: - jvm.kill_vm() # noqa - - -class Reader(AbstractReader, ABC): - """ This class is used as a last resort, when we don't have another way to open the file. We don't like it - because it requires the java vm. - """ - priority = 99 # panic and open with BioFormats - do_not_pickle = 'reader', 'key', 'jvm' - - @staticmethod - def _can_open(path: Path) -> bool: - """ Use java BioFormats to make an ome metadata structure. """ - with multiprocessing.get_context('spawn').Pool(1) as pool: - return pool.map(can_open, (path,))[0] - - def open(self) -> None: - self.reader = JVMReader(self.path, self.series) - - def __frame__(self, c: int, z: int, t: int) -> np.ndarray: - return self.reader.frame(c, z, t) - - def close(self) -> None: - self.reader.close() diff --git a/ndbioimage/readers/cziread.py b/ndbioimage/readers/cziread.py deleted file mode 100644 index c345d7b..0000000 --- a/ndbioimage/readers/cziread.py +++ /dev/null @@ -1,606 +0,0 @@ -import re -import warnings -from abc import ABC -from functools import cached_property -from io import BytesIO -from itertools import product -from pathlib import Path -from typing import Any, Callable, Optional, TypeVar - -import czifile -import imagecodecs -import numpy as np -from lxml import etree -from ome_types import OME, model -from tifffile import repeat_nd - -from .. 
import AbstractReader - -try: - # TODO: use zoom from imagecodecs implementation when available - from scipy.ndimage.interpolation import zoom -except ImportError: - try: - from ndimage.interpolation import zoom - except ImportError: - zoom = None - - -Element = TypeVar('Element') - - -def zstd_decode(data: bytes) -> bytes: # noqa - """ decode zstd bytes, copied from BioFormats ZeissCZIReader """ - def read_var_int(stream: BytesIO) -> int: # noqa - a = stream.read(1)[0] - if a & 128: - b = stream.read(1)[0] - if b & 128: - c = stream.read(1)[0] - return (c << 14) | ((b & 127) << 7) | (a & 127) - return (b << 7) | (a & 127) - return a & 255 - - try: - with BytesIO(data) as stream: - size_of_header = read_var_int(stream) - high_low_unpacking = False - while stream.tell() < size_of_header: - chunk_id = read_var_int(stream) - # only one chunk ID defined so far - if chunk_id == 1: - high_low_unpacking = (stream.read(1)[0] & 1) == 1 - else: - raise ValueError(f'Invalid chunk id: {chunk_id}') - pointer = stream.tell() - except Exception: # noqa - high_low_unpacking = False - pointer = 0 - - decoded = imagecodecs.zstd_decode(data[pointer:]) - if high_low_unpacking: - second_half = len(decoded) // 2 - return bytes([decoded[second_half + i // 2] if i % 2 else decoded[i // 2] for i in range(len(decoded))]) - else: - return decoded - - -def data(self, raw: bool = False, resize: bool = True, order: int = 0) -> np.ndarray: - """Read image data from file and return as numpy array.""" - DECOMPRESS = czifile.czifile.DECOMPRESS # noqa - DECOMPRESS[5] = imagecodecs.zstd_decode - DECOMPRESS[6] = zstd_decode - - de = self.directory_entry - fh = self._fh - if raw: - with fh.lock: - fh.seek(self.data_offset) - data = fh.read(self.data_size) # noqa - return data - if de.compression: - # if de.compression not in DECOMPRESS: - # raise ValueError('compression unknown or not supported') - with fh.lock: - fh.seek(self.data_offset) - data = fh.read(self.data_size) # noqa - data = DECOMPRESS[de.compression](data) # noqa - if de.compression == 2: - # LZW - data = np.fromstring(data, de.dtype) # noqa - elif de.compression in (5, 6): - # ZSTD - data = np.frombuffer(data, de.dtype) # noqa - else: - dtype = np.dtype(de.dtype) - with fh.lock: - fh.seek(self.data_offset) - data = fh.read_array(dtype, self.data_size // dtype.itemsize) # noqa - - data = data.reshape(de.stored_shape) # noqa - if de.compression != 4 and de.stored_shape[-1] in (3, 4): - if de.stored_shape[-1] == 3: - # BGR -> RGB - data = data[..., ::-1] # noqa - else: - # BGRA -> RGBA - tmp = data[..., 0].copy() - data[..., 0] = data[..., 2] - data[..., 2] = tmp - if de.stored_shape == de.shape or not resize: - return data - - # sub / supersampling - factors = [j / i for i, j in zip(de.stored_shape, de.shape)] - factors = [(int(round(f)) if abs(f - round(f)) < 0.0001 else f) - for f in factors] - - # use repeat if possible - if order == 0 and all(isinstance(f, int) for f in factors): - data = repeat_nd(data, factors).copy() # noqa - data.shape = de.shape - return data - - # remove leading dimensions with size 1 for speed - shape = list(de.stored_shape) - i = 0 - for s in shape: - if s != 1: - break - i += 1 - shape = shape[i:] - factors = factors[i:] - data.shape = shape - - # resize RGB components separately for speed - if zoom is None: - raise ImportError("cannot import 'zoom' from scipy or ndimage") - if shape[-1] in (3, 4) and factors[-1] == 1.0: - factors = factors[:-1] - old = data - data = np.empty(de.shape, de.dtype[-2:]) # noqa - for i in 
range(shape[-1]): - data[..., i] = zoom(old[..., i], zoom=factors, order=order) - else: - data = zoom(data, zoom=factors, order=order) # noqa - - data.shape = de.shape - return data - - -# monkeypatch zstd into czifile -czifile.czifile.SubBlockSegment.data = data - - -class Reader(AbstractReader, ABC): - priority = 0 - do_not_pickle = 'reader', 'filedict' - - @staticmethod - def _can_open(path: Path) -> bool: - return isinstance(path, Path) and path.suffix == '.czi' - - def open(self) -> None: - self.reader = czifile.CziFile(self.path) - filedict = {} - for directory_entry in self.reader.filtered_subblock_directory: - idx = self.get_index(directory_entry, self.reader.start) - if 'S' not in self.reader.axes or self.series in range(*idx[self.reader.axes.index('S')]): - for c in range(*idx[self.reader.axes.index('C')]): - for z in range(*idx[self.reader.axes.index('Z')]): - for t in range(*idx[self.reader.axes.index('T')]): - if (c, z, t) in filedict: - filedict[c, z, t].append(directory_entry) - else: - filedict[c, z, t] = [directory_entry] - if len(filedict) == 0: - raise FileNotFoundError(f'Series {self.series} not found in {self.path}.') - self.filedict = filedict # noqa - - def close(self) -> None: - self.reader.close() - - def get_ome(self) -> OME: - return OmeParse.get_ome(self.reader, self.filedict) - - def __frame__(self, c: int = 0, z: int = 0, t: int = 0) -> np.ndarray: - f = np.zeros(self.base_shape['yx'], self.dtype) - if (c, z, t) in self.filedict: - directory_entries = self.filedict[c, z, t] - x_min = min([f.start[f.axes.index('X')] for f in directory_entries]) - y_min = min([f.start[f.axes.index('Y')] for f in directory_entries]) - xy_min = {'X': x_min, 'Y': y_min} - for directory_entry in directory_entries: - subblock = directory_entry.data_segment() - tile = subblock.data(resize=True, order=0) - axes_min = [xy_min.get(ax, 0) for ax in directory_entry.axes] - index = [slice(i - j - m, i - j + k) - for i, j, k, m in zip(directory_entry.start, self.reader.start, tile.shape, axes_min)] - index = tuple(index[self.reader.axes.index(i)] for i in 'YX') - f[index] = tile.squeeze() - return f - - @staticmethod - def get_index(directory_entry: czifile.DirectoryEntryDV, start: tuple[int]) -> list[tuple[int, int]]: - return [(i - j, i - j + k) for i, j, k in zip(directory_entry.start, start, directory_entry.shape)] - - -class OmeParse: - size_x: int - size_y: int - size_c: int - size_z: int - size_t: int - - nm = model.UnitsLength.NANOMETER - um = model.UnitsLength.MICROMETER - - @classmethod - def get_ome(cls, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> OME: - new = cls(reader, filedict) - new.parse() - return new.ome - - def __init__(self, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> None: - self.reader = reader - self.filedict = filedict - xml = reader.metadata() - self.attachments = {i.attachment_entry.name: i.attachment_entry.data_segment() - for i in reader.attachments()} - self.tree = etree.fromstring(xml) - self.metadata = self.tree.find('Metadata') - version = self.metadata.find('Version') - if version is not None: - self.version = version.text - else: - self.version = self.metadata.find('Experiment').attrib['Version'] - - self.ome = OME() - self.information = self.metadata.find('Information') - self.display_setting = self.metadata.find('DisplaySetting') - self.experiment = self.metadata.find('Experiment') - self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock') - self.instrument = 
self.information.find('Instrument') - self.image = self.information.find('Image') - - if self.version == '1.0': - self.experiment = self.metadata.find('Experiment') - self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock') - self.multi_track_setup = self.acquisition_block.find('MultiTrackSetup') - else: - self.experiment = None - self.acquisition_block = None - self.multi_track_setup = None - - def parse(self) -> None: - self.get_experimenters() - self.get_instruments() - self.get_detectors() - self.get_objectives() - self.get_tubelenses() - self.get_light_sources() - self.get_filters() - self.get_pixels() - self.get_channels() - self.get_planes() - self.get_annotations() - - @staticmethod - def text(item: Optional[Element], default: str = "") -> str: - return default if item is None else item.text - - @staticmethod - def def_list(item: Any) -> list[Any]: - return [] if item is None else item - - @staticmethod - def try_default(fun: Callable[[Any, ...], Any] | type, default: Any = None, *args: Any, **kwargs: Any) -> Any: - try: - return fun(*args, **kwargs) - except Exception: # noqa - return default - - def get_experimenters(self) -> None: - if self.version == '1.0': - self.ome.experimenters = [ - model.Experimenter(id='Experimenter:0', - user_name=self.information.find('User').find('DisplayName').text)] - elif self.version in ('1.1', '1.2'): - self.ome.experimenters = [ - model.Experimenter(id='Experimenter:0', - user_name=self.information.find('Document').find('UserName').text)] - - def get_instruments(self) -> None: - if self.version == '1.0': - self.ome.instruments.append(model.Instrument(id=self.instrument.attrib['Id'])) - elif self.version in ('1.1', '1.2'): - for _ in self.instrument.find('Microscopes'): - self.ome.instruments.append(model.Instrument(id='Instrument:0')) - - def get_detectors(self) -> None: - if self.version == '1.0': - for detector in self.instrument.find('Detectors'): - try: - detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "") - except ValueError: - detector_type = model.Detector_Type.OTHER - - self.ome.instruments[0].detectors.append( - model.Detector( - id=detector.attrib['Id'], model=self.text(detector.find('Manufacturer').find('Model')), - amplification_gain=float(self.text(detector.find('AmplificationGain'))), - gain=float(self.text(detector.find('Gain'))), zoom=float(self.text(detector.find('Zoom'))), - type=detector_type - )) - elif self.version in ('1.1', '1.2'): - for detector in self.instrument.find('Detectors'): - try: - detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "") - except ValueError: - detector_type = model.Detector_Type.OTHER - - self.ome.instruments[0].detectors.append( - model.Detector( - id=detector.attrib['Id'].replace(' ', ''), - model=self.text(detector.find('Manufacturer').find('Model')), - type=detector_type - )) - - def get_objectives(self) -> None: - for objective in self.instrument.find('Objectives'): - self.ome.instruments[0].objectives.append( - model.Objective( - id=objective.attrib['Id'], - model=self.text(objective.find('Manufacturer').find('Model')), - immersion=self.text(objective.find('Immersion')), # type: ignore - lens_na=float(self.text(objective.find('LensNA'))), - nominal_magnification=float(self.text(objective.find('NominalMagnification'))))) - - def get_tubelenses(self) -> None: - if self.version == '1.0': - for idx, tube_lens in enumerate({self.text(track_setup.find('TubeLensPosition')) - for track_setup in 
self.multi_track_setup}): - try: - nominal_magnification = float(re.findall(r'\d+[,.]\d*', tube_lens)[0].replace(',', '.')) - except Exception: # noqa - nominal_magnification = 1.0 - - self.ome.instruments[0].objectives.append( - model.Objective(id=f'Objective:Tubelens:{idx}', model=tube_lens, - nominal_magnification=nominal_magnification)) - elif self.version in ('1.1', '1.2'): - for tubelens in self.def_list(self.instrument.find('TubeLenses')): - try: - nominal_magnification = float(re.findall(r'\d+(?:[,.]\d*)?', - tubelens.attrib['Name'])[0].replace(',', '.')) - except Exception: # noqa - nominal_magnification = 1.0 - - self.ome.instruments[0].objectives.append( - model.Objective( - id=f"Objective:{tubelens.attrib['Id']}", - model=tubelens.attrib['Name'], - nominal_magnification=nominal_magnification)) - - def get_light_sources(self) -> None: - if self.version == '1.0': - for light_source in self.def_list(self.instrument.find('LightSources')): - try: - if light_source.find('LightSourceType').find('Laser') is not None: - self.ome.instruments[0].lasers.append( - model.Laser( - id=light_source.attrib['Id'], - model=self.text(light_source.find('Manufacturer').find('Model')), - power=float(self.text(light_source.find('Power'))), - wavelength=float( - self.text(light_source.find('LightSourceType').find('Laser').find('Wavelength'))))) - except AttributeError: - pass - elif self.version in ('1.1', '1.2'): - for light_source in self.def_list(self.instrument.find('LightSources')): - try: - if light_source.find('LightSourceType').find('Laser') is not None: - self.ome.instruments[0].lasers.append( - model.Laser( - id=f"LightSource:{light_source.attrib['Id']}", - power=float(self.text(light_source.find('Power'))), - wavelength=float(light_source.attrib['Id'][-3:]))) # TODO: follow Id reference - except (AttributeError, ValueError): - pass - - def get_filters(self) -> None: - if self.version == '1.0': - for idx, filter_ in enumerate({self.text(beam_splitter.find('Filter')) - for track_setup in self.multi_track_setup - for beam_splitter in track_setup.find('BeamSplitters')}): - self.ome.instruments[0].filter_sets.append( - model.FilterSet(id=f'FilterSet:{idx}', model=filter_) - ) - - def get_pixels(self) -> None: - x_min = min([f.start[f.axes.index('X')] for f in self.filedict[0, 0, 0]]) - y_min = min([f.start[f.axes.index('Y')] for f in self.filedict[0, 0, 0]]) - x_max = max([f.start[f.axes.index('X')] + f.shape[f.axes.index('X')] for f in self.filedict[0, 0, 0]]) - y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]]) - self.size_x = x_max - x_min - self.size_y = y_max - y_min - self.size_c, self.size_z, self.size_t = (self.reader.shape[self.reader.axes.index(directory_entry)] - for directory_entry in 'CZT') - image = self.information.find('Image') - pixel_type = self.text(image.find('PixelType'), 'Gray16') - if pixel_type.startswith('Gray'): - pixel_type = 'uint' + pixel_type[4:] - objective_settings = image.find('ObjectiveSettings') - - self.ome.images.append( - model.Image( - id='Image:0', - name=f"{self.text(self.information.find('Document').find('Name'))} #1", - pixels=model.Pixels( - id='Pixels:0', size_x=self.size_x, size_y=self.size_y, - size_c=self.size_c, size_z=self.size_z, size_t=self.size_t, - dimension_order='XYCZT', type=pixel_type, # type: ignore - significant_bits=int(self.text(image.find('ComponentBitCount'))), - big_endian=False, interleaved=False, metadata_only=True), # type: ignore - 
experimenter_ref=model.ExperimenterRef(id='Experimenter:0'), - instrument_ref=model.InstrumentRef(id='Instrument:0'), - objective_settings=model.ObjectiveSettings( - id=objective_settings.find('ObjectiveRef').attrib['Id'], - medium=self.text(objective_settings.find('Medium')), # type: ignore - refractive_index=float(self.text(objective_settings.find('RefractiveIndex')))), - stage_label=model.StageLabel( - name=f'Scene position #0', - x=self.positions[0], x_unit=self.um, - y=self.positions[1], y_unit=self.um, - z=self.positions[2], z_unit=self.um))) - - for distance in self.metadata.find('Scaling').find('Items'): - if distance.attrib['Id'] == 'X': - self.ome.images[0].pixels.physical_size_x = float(self.text(distance.find('Value'))) * 1e6 - elif distance.attrib['Id'] == 'Y': - self.ome.images[0].pixels.physical_size_y = float(self.text(distance.find('Value'))) * 1e6 - elif self.size_z > 1 and distance.attrib['Id'] == 'Z': - self.ome.images[0].pixels.physical_size_z = float(self.text(distance.find('Value'))) * 1e6 - - @cached_property - def positions(self) -> tuple[float, float, Optional[float]]: - if self.version == '1.0': - scenes = self.image.find('Dimensions').find('S').find('Scenes') - positions = scenes[0].find('Positions')[0] - return float(positions.attrib['X']), float(positions.attrib['Y']), float(positions.attrib['Z']) - elif self.version in ('1.1', '1.2'): - try: # TODO - scenes = self.image.find('Dimensions').find('S').find('Scenes') - center_position = [float(pos) for pos in self.text(scenes[0].find('CenterPosition')).split(',')] - except AttributeError: - center_position = [0, 0] - return center_position[0], center_position[1], None - - @cached_property - def channels_im(self) -> dict: - return {channel.attrib['Id']: channel for channel in self.image.find('Dimensions').find('Channels')} - - @cached_property - def channels_ds(self) -> dict: - return {channel.attrib['Id']: channel for channel in self.display_setting.find('Channels')} - - @cached_property - def channels_ts(self) -> dict: - return {detector.attrib['Id']: track_setup - for track_setup in - self.experiment.find('ExperimentBlocks').find('AcquisitionBlock').find('MultiTrackSetup') - for detector in track_setup.find('Detectors')} - - def get_channels(self) -> None: - if self.version == '1.0': - for idx, (key, channel) in enumerate(self.channels_im.items()): - detector_settings = channel.find('DetectorSettings') - laser_scan_info = channel.find('LaserScanInfo') - detector = detector_settings.find('Detector') - try: - binning = model.Binning(self.text(detector_settings.find('Binning'))) - except ValueError: - binning = model.Binning.OTHER - - filterset = self.text(self.channels_ts[key].find('BeamSplitters')[0].find('Filter')) - filterset_idx = [filterset.model for filterset in self.ome.instruments[0].filter_sets].index(filterset) - - light_sources_settings = channel.find('LightSourcesSettings') - # no space in ome for multiple lightsources simultaneously - if len(light_sources_settings) > idx: - light_source_settings = light_sources_settings[idx] - else: - light_source_settings = light_sources_settings[0] - light_source_settings = model.LightSourceSettings( - id=light_source_settings.find('LightSource').attrib['Id'], - attenuation=float(self.text(light_source_settings.find('Attenuation'))), - wavelength=float(self.text(light_source_settings.find('Wavelength'))), - wavelength_unit=self.nm) - - self.ome.images[0].pixels.channels.append( - model.Channel( - id=f'Channel:{idx}', - name=channel.attrib['Name'], - 
acquisition_mode=self.text(channel.find('AcquisitionMode')), # type: ignore - color=model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')), - detector_settings=model.DetectorSettings(id=detector.attrib['Id'], binning=binning), - # emission_wavelength=text(channel.find('EmissionWavelength')), # TODO: fix - excitation_wavelength=light_source_settings.wavelength, - filter_set_ref=model.FilterSetRef(id=self.ome.instruments[0].filter_sets[filterset_idx].id), - illumination_type=self.text(channel.find('IlluminationType')), # type: ignore - light_source_settings=light_source_settings, - samples_per_pixel=int(self.text(laser_scan_info.find('Averaging'))))) - elif self.version in ('1.1', '1.2'): - for idx, (key, channel) in enumerate(self.channels_im.items()): - detector_settings = channel.find('DetectorSettings') - laser_scan_info = channel.find('LaserScanInfo') - detector = detector_settings.find('Detector') - try: - color = model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')) - except Exception: # noqa - color = None - try: - if (i := self.text(channel.find('EmissionWavelength'))) != '0': - emission_wavelength = float(i) - else: - emission_wavelength = None - except Exception: # noqa - emission_wavelength = None - if laser_scan_info is not None: - samples_per_pixel = int(self.text(laser_scan_info.find('Averaging'), '1')) - else: - samples_per_pixel = 1 - try: - binning = model.Binning(self.text(detector_settings.find('Binning'))) - except ValueError: - binning = model.Binning.OTHER - - light_sources_settings = channel.find('LightSourcesSettings') - # no space in ome for multiple lightsources simultaneously - if light_sources_settings is not None: - light_source_settings = light_sources_settings[0] - light_source_settings = model.LightSourceSettings( - id='LightSource:' + '_'.join([light_source_settings.find('LightSource').attrib['Id'] - for light_source_settings in light_sources_settings]), - attenuation=self.try_default(float, None, self.text(light_source_settings.find('Attenuation'))), - wavelength=self.try_default(float, None, self.text(light_source_settings.find('Wavelength'))), - wavelength_unit=self.nm) - else: - light_source_settings = None - - self.ome.images[0].pixels.channels.append( - model.Channel( - id=f'Channel:{idx}', - name=channel.attrib['Name'], - acquisition_mode=self.text(channel.find('AcquisitionMode')).replace( # type: ignore - 'SingleMoleculeLocalisation', 'SingleMoleculeImaging'), - color=color, - detector_settings=model.DetectorSettings( - id=detector.attrib['Id'].replace(' ', ""), - binning=binning), - emission_wavelength=emission_wavelength, - excitation_wavelength=self.try_default(float, None, - self.text(channel.find('ExcitationWavelength'))), - # filter_set_ref=model.FilterSetRef(id=ome.instruments[0].filter_sets[filterset_idx].id), - illumination_type=self.text(channel.find('IlluminationType')), # type: ignore - light_source_settings=light_source_settings, - samples_per_pixel=samples_per_pixel)) - - def get_planes(self) -> None: - try: - exposure_times = [float(self.text(channel.find('LaserScanInfo').find('FrameTime'))) - for channel in self.channels_im.values()] - except Exception: # noqa - exposure_times = [None] * len(self.channels_im) - delta_ts = self.attachments['TimeStamps'].data() - dt = np.diff(delta_ts) - if len(dt) and np.std(dt) / np.mean(dt) > 0.02: - dt = np.median(dt[dt > 0]) - delta_ts = dt * np.arange(len(delta_ts)) - warnings.warn(f'delta_t is inconsistent, using median value: {dt}') 
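-        # one OME Plane is built per (t, z, c) combination below; delta_t comes from the CZI
-        # 'TimeStamps' attachment and exposure_time from each channel's LaserScanInfo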
- - for t, z, c in product(range(self.size_t), range(self.size_z), range(self.size_c)): - self.ome.images[0].pixels.planes.append( - model.Plane(the_c=c, the_z=z, the_t=t, delta_t=delta_ts[t], - exposure_time=exposure_times[c], - position_x=self.positions[0], position_x_unit=self.um, - position_y=self.positions[1], position_y_unit=self.um, - position_z=self.positions[2], position_z_unit=self.um)) - - def get_annotations(self) -> None: - idx = 0 - for layer in [] if (ml := self.metadata.find('Layers')) is None else ml: - rectangle = layer.find('Elements').find('Rectangle') - if rectangle is not None: - geometry = rectangle.find('Geometry') - roi = model.ROI(id=f'ROI:{idx}', description=self.text(layer.find('Usage'))) - roi.union.append( - model.Rectangle( - id='Shape:0:0', - height=float(self.text(geometry.find('Height'))), - width=float(self.text(geometry.find('Width'))), - x=float(self.text(geometry.find('Left'))), - y=float(self.text(geometry.find('Top'))))) - self.ome.rois.append(roi) - self.ome.images[0].roi_refs.append(model.ROIRef(id=f'ROI:{idx}')) - idx += 1 diff --git a/ndbioimage/readers/fijiread.py b/ndbioimage/readers/fijiread.py deleted file mode 100644 index f11c0e7..0000000 --- a/ndbioimage/readers/fijiread.py +++ /dev/null @@ -1,59 +0,0 @@ -from abc import ABC -from itertools import product -from pathlib import Path -from struct import unpack -from warnings import warn - -import numpy as np -from ome_types import model -from tifffile import TiffFile - -from .. import AbstractReader - - -class Reader(AbstractReader, ABC): - """ Can read some tif files written with Fiji which are broken because Fiji didn't finish writing. """ - priority = 90 - do_not_pickle = 'reader' - - @staticmethod - def _can_open(path): - if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'): - with TiffFile(path) as tif: - return tif.is_imagej and not tif.is_bigtiff - else: - return False - - def __frame__(self, c, z, t): # Override this, return the frame at c, z, t - self.reader.filehandle.seek(self.offset + t * self.count) - return np.reshape(unpack(self.fmt, self.reader.filehandle.read(self.count)), self.base_shape['yx']) - - def open(self): - warn(f'File {self.path.name} is probably damaged, opening with fijiread.') - self.reader = TiffFile(self.path) - assert self.reader.pages[0].compression == 1, 'Can only read uncompressed tiff files.' - assert self.reader.pages[0].samplesperpixel == 1, 'Can only read 1 sample per pixel.' 
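-        # frames are assumed to be stored contiguously and uncompressed after the first page
-        # (checked by the asserts above), so __frame__ reads frame t by seeking
-        # offset + t * count; fmt is the struct format used to unpack one frame of count bytes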
- self.offset = self.reader.pages[0].dataoffsets[0] # noqa - self.count = self.reader.pages[0].databytecounts[0] # noqa - self.bytes_per_sample = self.reader.pages[0].bitspersample // 8 # noqa - self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1] # noqa - - def close(self): - self.reader.close() - - def get_ome(self): - size_y, size_x = self.reader.pages[0].shape - size_c, size_z = 1, 1 - size_t = int(np.floor((self.reader.filehandle.size - self.reader.pages[0].dataoffsets[0]) / self.count)) - pixel_type = model.PixelType(self.reader.pages[0].dtype.name) - ome = model.OME() - ome.instruments.append(model.Instrument()) - ome.images.append( - model.Image( - pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) - for c, z, t in product(range(size_c), range(size_z), range(size_t)): - ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0)) - return ome diff --git a/ndbioimage/readers/metaseriesread.py b/ndbioimage/readers/metaseriesread.py deleted file mode 100644 index f8087e4..0000000 --- a/ndbioimage/readers/metaseriesread.py +++ /dev/null @@ -1,80 +0,0 @@ -import re -from abc import ABC -from pathlib import Path -from typing import Optional - -import tifffile -from ome_types import model -from ome_types.units import _quantity_property # noqa - -from .. import AbstractReader - - -class Reader(AbstractReader, ABC): - priority = 20 - do_not_pickle = 'last_tif' - - @staticmethod - def _can_open(path): - return isinstance(path, Path) and (path.is_dir() or - (path.parent.is_dir() and path.name.lower().startswith('pos'))) - - @staticmethod - def get_positions(path: str | Path) -> Optional[list[int]]: - pat = re.compile(rf's(\d)_t\d+\.(tif|TIF)$') - return sorted({int(m.group(1)) for file in Path(path).iterdir() if (m := pat.search(file.name))}) - - def get_ome(self): - ome = model.OME() - tif = self.get_tif(0) - metadata = tif.metaseries_metadata - size_z = len(tif.pages) - page = tif.pages[0] - shape = {axis.lower(): size for axis, size in zip(page.axes, page.shape)} - size_x, size_y = shape['x'], shape['y'] - - ome.instruments.append(model.Instrument()) - - size_c = 1 - size_t = max(self.filedict.keys()) + 1 - pixel_type = f"uint{metadata['PlaneInfo']['bits-per-pixel']}" - ome.images.append( - model.Image( - pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, - size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) - return ome - - def open(self): - pat = re.compile(rf's{self.series}_t\d+\.(tif|TIF)$') - filelist = sorted([file for file in self.path.iterdir() if pat.search(file.name)]) - pattern = re.compile(r't(\d+)$') - self.filedict = {int(pattern.search(file.stem).group(1)) - 1: file for file in filelist} - if len(self.filedict) == 0: - raise FileNotFoundError - self.last_tif = 0, tifffile.TiffFile(self.filedict[0]) - - def close(self) -> None: - self.last_tif[1].close() - - def get_tif(self, t: int = None): - last_t, tif = self.last_tif - if (t is None or t == last_t) and not tif.filehandle.closed: - return tif - else: - tif.close() - tif = tifffile.TiffFile(self.filedict[t]) - self.last_tif = t, tif - return tif - - def __frame__(self, c=0, z=0, t=0): - tif = self.get_tif(t) - page = tif.pages[z] - if page.axes.upper() == 'YX': - return page.asarray() - 
elif page.axes.upper() == 'XY': - return page.asarray().T - else: - raise NotImplementedError(f'reading axes {page.axes} is not implemented') diff --git a/ndbioimage/readers/ndread.py b/ndbioimage/readers/ndread.py deleted file mode 100644 index 5a2b216..0000000 --- a/ndbioimage/readers/ndread.py +++ /dev/null @@ -1,53 +0,0 @@ -from abc import ABC -from itertools import product - -import numpy as np -from ome_types import model - -from .. import AbstractReader - - -class Reader(AbstractReader, ABC): - priority = 20 - - @staticmethod - def _can_open(path): - return isinstance(path, np.ndarray) and 1 <= path.ndim <= 5 - - def get_ome(self): - def shape(size_x=1, size_y=1, size_c=1, size_z=1, size_t=1): # noqa - return size_x, size_y, size_c, size_z, size_t - size_x, size_y, size_c, size_z, size_t = shape(*self.array.shape) - try: - pixel_type = model.PixelType(self.array.dtype.name) - except ValueError: - if self.array.dtype.name.startswith('int'): - pixel_type = model.PixelType('int32') - else: - pixel_type = model.PixelType('float') - - ome = model.OME() - ome.instruments.append(model.Instrument()) - ome.images.append( - model.Image( - pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) - for c, z, t in product(range(size_c), range(size_z), range(size_t)): - ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0)) - return ome - - def open(self): - if isinstance(self.path, np.ndarray): - self.array = np.array(self.path) - while self.array.ndim < 5: - self.array = np.expand_dims(self.array, -1) # noqa - self.path = 'numpy array' - - def __frame__(self, c, z, t): - frame = self.array[:, :, c, z, t] - if self.axes.find('y') > self.axes.find('x'): - return frame.T - else: - return frame diff --git a/ndbioimage/readers/seqread.py b/ndbioimage/readers/seqread.py deleted file mode 100644 index 4d74683..0000000 --- a/ndbioimage/readers/seqread.py +++ /dev/null @@ -1,146 +0,0 @@ -import re -from abc import ABC -from datetime import datetime -from itertools import product -from pathlib import Path - -import tifffile -import yaml -from ome_types import model -from ome_types.units import _quantity_property # noqa - -from .. 
import AbstractReader - - -def lazy_property(function, field, *arg_fields): - def lazy(self): - if self.__dict__.get(field) is None: - self.__dict__[field] = function(*[getattr(self, arg_field) for arg_field in arg_fields]) - try: - self.model_fields_set.add(field) - except Exception: # noqa - pass - return self.__dict__[field] - return property(lazy) - - -class Plane(model.Plane): - """ Lazily retrieve delta_t from metadata """ - def __init__(self, t0, file, **kwargs): # noqa - super().__init__(**kwargs) - # setting fields here because they would be removed by ome_types/pydantic after class definition - setattr(self.__class__, 'delta_t', lazy_property(self.get_delta_t, 'delta_t', 't0', 'file')) - setattr(self.__class__, 'delta_t_quantity', _quantity_property('delta_t')) - self.__dict__['t0'] = t0 # noqa - self.__dict__['file'] = file # noqa - - @staticmethod - def get_delta_t(t0, file): - with tifffile.TiffFile(file) as tif: - info = yaml.safe_load(tif.pages[0].tags[50839].value['Info']) - return float((datetime.strptime(info['Time'], '%Y-%m-%d %H:%M:%S %z') - t0).seconds) - - -class Reader(AbstractReader, ABC): - priority = 10 - - @staticmethod - def _can_open(path): - pat = re.compile(r'(?:\d+-)?Pos.*', re.IGNORECASE) - return (isinstance(path, Path) and path.is_dir() and - (pat.match(path.name) or any(file.is_dir() and pat.match(file.stem) for file in path.iterdir()))) - - def get_ome(self): - ome = model.OME() - with tifffile.TiffFile(self.filedict[0, 0, 0]) as tif: - metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()} - ome.experimenters.append( - model.Experimenter(id='Experimenter:0', user_name=metadata['Info']['Summary']['UserName'])) - objective_str = metadata['Info']['ZeissObjectiveTurret-Label'] - ome.instruments.append(model.Instrument()) - ome.instruments[0].objectives.append( - model.Objective( - id='Objective:0', manufacturer='Zeiss', model=objective_str, - nominal_magnification=float(re.findall(r'(\d+)x', objective_str)[0]), - lens_na=float(re.findall(r'/(\d\.\d+)', objective_str)[0]), - immersion=model.Objective_Immersion.OIL if 'oil' in objective_str.lower() else None)) - tubelens_str = metadata['Info']['ZeissOptovar-Label'] - ome.instruments[0].objectives.append( - model.Objective( - id='Objective:Tubelens:0', manufacturer='Zeiss', model=tubelens_str, - nominal_magnification=float(re.findall(r'\d?\d*[,.]?\d+(?=x$)', tubelens_str)[0].replace(',', '.')))) - ome.instruments[0].detectors.append( - model.Detector( - id='Detector:0', amplification_gain=100)) - ome.instruments[0].filter_sets.append( - model.FilterSet(id='FilterSet:0', model=metadata['Info']['ZeissReflectorTurret-Label'])) - - pxsize = metadata['Info']['PixelSizeUm'] - pxsize_cam = 6.5 if 'Hamamatsu' in metadata['Info']['Core-Camera'] else None - if pxsize == 0: - pxsize = pxsize_cam / ome.instruments[0].objectives[0].nominal_magnification - pixel_type = metadata['Info']['PixelType'].lower() - if pixel_type.startswith('gray'): - pixel_type = 'uint' + pixel_type[4:] - else: - pixel_type = 'uint16' # assume - - size_c, size_z, size_t = (max(i) + 1 for i in zip(*self.filedict.keys())) - t0 = datetime.strptime(metadata['Info']['Time'], '%Y-%m-%d %H:%M:%S %z') - ome.images.append( - model.Image( - pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, - size_x=metadata['Info']['Width'], size_y=metadata['Info']['Height'], - dimension_order='XYCZT', # type: ignore - type=pixel_type, physical_size_x=pxsize, physical_size_y=pxsize, - 
physical_size_z=metadata['Info']['Summary']['z-step_um']), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) - - for c, z, t in product(range(size_c), range(size_z), range(size_t)): - ome.images[0].pixels.planes.append( - Plane(t0, self.filedict[c, z, t], - the_c=c, the_z=z, the_t=t, exposure_time=metadata['Info']['Exposure-ms'] / 1000)) - - # compare channel names from metadata with filenames - pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE) - for c in range(size_c): - ome.images[0].pixels.channels.append( - model.Channel( - id=f'Channel:{c}', name=pattern_c.findall(self.filedict[c, 0, 0].stem)[0], - detector_settings=model.DetectorSettings( - id='Detector:0', binning=metadata['Info']['Hamamatsu_sCMOS-Binning']), - filter_set_ref=model.FilterSetRef(id='FilterSet:0'))) - return ome - - def open(self): - # /some_path/Pos4: path = /some_path, series = 4 - # /some_path/5-Pos_001_005: path = /some_path/5-Pos_001_005, series = 0 - if re.match(r'(?:\d+-)?Pos.*', self.path.name, re.IGNORECASE) is None: - pat = re.compile(rf'^(?:\d+-)?Pos{self.series}$', re.IGNORECASE) - files = sorted(file for file in self.path.iterdir() if pat.match(file.name)) - if len(files): - path = files[0] - else: - raise FileNotFoundError(self.path / pat.pattern) - else: - path = self.path - - pat = re.compile(r'^img_\d{3,}.*\d{3,}.*\.tif$', re.IGNORECASE) - filelist = sorted([file for file in path.iterdir() if pat.search(file.name)]) - with tifffile.TiffFile(self.path / filelist[0]) as tif: - metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()} - - # compare channel names from metadata with filenames - cnamelist = metadata['Info']['Summary']['ChNames'] - cnamelist = [c for c in cnamelist if any([c in f.name for f in filelist])] - - pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE) - pattern_z = re.compile(r'(\d{3,})$') - pattern_t = re.compile(r'img_(\d{3,})', re.IGNORECASE) - self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]), # noqa - int(pattern_z.findall(file.stem)[0]), - int(pattern_t.findall(file.stem)[0])): file for file in filelist} - - def __frame__(self, c=0, z=0, t=0): - return tifffile.imread(self.path / self.filedict[(c, z, t)]) diff --git a/ndbioimage/readers/tifread.py b/ndbioimage/readers/tifread.py deleted file mode 100644 index 5d3c033..0000000 --- a/ndbioimage/readers/tifread.py +++ /dev/null @@ -1,85 +0,0 @@ -from abc import ABC -from functools import cached_property -from itertools import product -from pathlib import Path - -import numpy as np -import tifffile -import yaml -from ome_types import model - -from .. 
import AbstractReader - - -class Reader(AbstractReader, ABC): - priority = 0 - do_not_pickle = 'reader' - - @staticmethod - def _can_open(path): - if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'): - with tifffile.TiffFile(path) as tif: - return tif.is_imagej and tif.pages[-1]._nextifd() == 0 # noqa - else: - return False - - @cached_property - def metadata(self): - return {key: yaml.safe_load(value) if isinstance(value, str) else value - for key, value in self.reader.imagej_metadata.items()} - - def get_ome(self): - page = self.reader.pages[0] - size_y = page.imagelength - size_x = page.imagewidth - if self.p_ndim == 3: - size_c = page.samplesperpixel - size_t = self.metadata.get('frames', 1) # // C - else: - size_c = self.metadata.get('channels', 1) - size_t = self.metadata.get('frames', 1) - size_z = self.metadata.get('slices', 1) - if 282 in page.tags and 296 in page.tags and page.tags[296].value == 1: - f = page.tags[282].value - pxsize = f[1] / f[0] - else: - pxsize = None - - dtype = page.dtype.name - if dtype not in ('int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32', - 'float', 'double', 'complex', 'double-complex', 'bit'): - dtype = 'float' - - interval_t = self.metadata.get('interval', 0) - - ome = model.OME() - ome.instruments.append(model.Instrument(id='Instrument:0')) - ome.instruments[0].objectives.append(model.Objective(id='Objective:0')) - ome.images.append( - model.Image( - id='Image:0', - pixels=model.Pixels( - id='Pixels:0', - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=dtype, # type: ignore - physical_size_x=pxsize, physical_size_y=pxsize), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) - for c, z, t in product(range(size_c), range(size_z), range(size_t)): - ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=interval_t * t)) - return ome - - def open(self): - self.reader = tifffile.TiffFile(self.path) - page = self.reader.pages[0] - self.p_ndim = page.ndim # noqa - if self.p_ndim == 3: - self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0] # noqa - - def close(self): - self.reader.close() - - def __frame__(self, c, z, t): - if self.p_ndim == 3: - return np.transpose(self.reader.asarray(z + t * self.base_shape['z']), self.p_transpose)[c] - else: - return self.reader.asarray(c + z * self.base_shape['c'] + t * self.base_shape['c'] * self.base_shape['z']) diff --git a/ndbioimage/transform.txt b/ndbioimage/transform.txt deleted file mode 100644 index a177cc0..0000000 --- a/ndbioimage/transform.txt +++ /dev/null @@ -1,7 +0,0 @@ -#Insight Transform File V1.0 -#Transform 0 -Transform: CompositeTransform_double_2_2 -#Transform 1 -Transform: AffineTransform_double_2_2 -Parameters: 1 0 0 1 0 0 -FixedParameters: 255.5 255.5 diff --git a/ndbioimage/transforms.py b/ndbioimage/transforms.py deleted file mode 100644 index 6415650..0000000 --- a/ndbioimage/transforms.py +++ /dev/null @@ -1,458 +0,0 @@ -import warnings -from copy import deepcopy -from pathlib import Path - -import numpy as np -import yaml -from parfor import Chunks, pmap -from skimage import filters -from tiffwrite import IJTiffFile -from tqdm.auto import tqdm - -try: - # best if SimpleElastix is installed: https://simpleelastix.readthedocs.io/GettingStarted.html - import SimpleITK as sitk # noqa -except ImportError: - sitk = None - -try: - from pandas import DataFrame, Series, concat -except ImportError: - DataFrame, Series, concat = None, None, None - - -if 
hasattr(yaml, 'full_load'): - yamlload = yaml.full_load -else: - yamlload = yaml.load - - -class Transforms(dict): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.default = Transform() - - @classmethod - def from_file(cls, file, C=True, T=True): - with open(Path(file).with_suffix('.yml')) as f: - return cls.from_dict(yamlload(f), C, T) - - @classmethod - def from_dict(cls, d, C=True, T=True): - new = cls() - for key, value in d.items(): - if isinstance(key, str) and C: - new[key.replace(r'\:', ':').replace('\\\\', '\\')] = Transform.from_dict(value) - elif T: - new[key] = Transform.from_dict(value) - return new - - @classmethod - def from_shifts(cls, shifts): - new = cls() - for key, shift in shifts.items(): - new[key] = Transform.from_shift(shift) - return new - - def __mul__(self, other): - new = Transforms() - if isinstance(other, Transforms): - for key0, value0 in self.items(): - for key1, value1 in other.items(): - new[key0 + key1] = value0 * value1 - return new - elif other is None: - return self - else: - for key in self.keys(): - new[key] = self[key] * other - return new - - def asdict(self): - return {key.replace('\\', '\\\\').replace(':', r'\:') if isinstance(key, str) else key: value.asdict() - for key, value in self.items()} - - def __getitem__(self, item): - return np.prod([self[i] for i in item[::-1]]) if isinstance(item, tuple) else super().__getitem__(item) - - def __missing__(self, key): - return self.default - - def __getstate__(self): - return self.__dict__ - - def __setstate__(self, state): - self.__dict__.update(state) - - def __hash__(self): - return hash(frozenset((*self.__dict__.items(), *self.items()))) - - def save(self, file): - with open(Path(file).with_suffix('.yml'), 'w') as f: - yaml.safe_dump(self.asdict(), f, default_flow_style=None) - - def copy(self): - return deepcopy(self) - - def adapt(self, origin, shape, channel_names): - def key_map(a, b): - def fun(b, key_a): - for key_b in b: - if key_b in key_a or key_a in key_b: - return key_a, key_b - - return {n[0]: n[1] for key_a in a if (n := fun(b, key_a))} - - for value in self.values(): - value.adapt(origin, shape) - self.default.adapt(origin, shape) - transform_channels = {key for key in self.keys() if isinstance(key, str)} - if set(channel_names) - transform_channels: - mapping = key_map(channel_names, transform_channels) - warnings.warn(f'The image file and the transform do not have the same channels,' - f' creating a mapping: {mapping}') - for key_im, key_t in mapping.items(): - self[key_im] = self[key_t] - - @property - def inverse(self): - # TODO: check for C@T - inverse = self.copy() - for key, value in self.items(): - inverse[key] = value.inverse - return inverse - - def coords_pandas(self, array, channel_names, columns=None): - if isinstance(array, DataFrame): - return concat([self.coords_pandas(row, channel_names, columns) for _, row in array.iterrows()], axis=1).T - elif isinstance(array, Series): - key = [] - if 'C' in array: - key.append(channel_names[int(array['C'])]) - if 'T' in array: - key.append(int(array['T'])) - return self[tuple(key)].coords(array, columns) - else: - raise TypeError('Not a pandas DataFrame or Series.') - - def with_beads(self, cyllens, bead_files): - assert len(bead_files) > 0, 'At least one file is needed to calculate the registration.' 
- transforms = [self.calculate_channel_transforms(file, cyllens) for file in bead_files] - for key in {key for transform in transforms for key in transform.keys()}: - new_transforms = [transform[key] for transform in transforms if key in transform] - if len(new_transforms) == 1: - self[key] = new_transforms[0] - else: - self[key] = Transform() - self[key].parameters = np.mean([t.parameters for t in new_transforms], 0) - self[key].dparameters = (np.std([t.parameters for t in new_transforms], 0) / - np.sqrt(len(new_transforms))).tolist() - return self - - @staticmethod - def get_bead_files(path): - from . import Imread - files = [] - for file in path.iterdir(): - if file.name.lower().startswith('beads'): - try: - with Imread(file): - files.append(file) - except Exception: - pass - files = sorted(files) - if not files: - raise Exception('No bead file found!') - checked_files = [] - for file in files: - try: - if file.is_dir(): - file /= 'Pos0' - with Imread(file): # check for errors opening the file - checked_files.append(file) - except (Exception,): - continue - if not checked_files: - raise Exception('No bead file found!') - return checked_files - - @staticmethod - def calculate_channel_transforms(bead_file, cyllens): - """ When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162 - in the horizontal direction """ - from . import Imread - - with Imread(bead_file, axes='zcyx') as im: # noqa - max_ims = im.max('z') - goodch = [c for c, max_im in enumerate(max_ims) if not im.is_noise(max_im)] - if not goodch: - goodch = list(range(len(max_ims))) - untransformed = [c for c in range(im.shape['c']) if cyllens[im.detector[c]].lower() == 'none'] - - good_and_untrans = sorted(set(goodch) & set(untransformed)) - if good_and_untrans: - masterch = good_and_untrans[0] - else: - masterch = goodch[0] - transform = Transform() - if not good_and_untrans: - matrix = transform.matrix - matrix[0, 0] = 0.86 - transform.matrix = matrix - transforms = Transforms() - for c in tqdm(goodch, desc='Calculating channel transforms'): # noqa - if c == masterch: - transforms[im.channel_names[c]] = transform - else: - transforms[im.channel_names[c]] = Transform.register(max_ims[masterch], max_ims[c]) * transform - return transforms - - @staticmethod - def save_channel_transform_tiff(bead_files, tiffile): - from . 
import Imread - n_channels = 0 - for file in bead_files: - with Imread(file) as im: - n_channels = max(n_channels, im.shape['c']) - with IJTiffFile(tiffile) as tif: - for t, file in enumerate(bead_files): - with Imread(file) as im: - with Imread(file).with_transform() as jm: - for c in range(im.shape['c']): - tif.save(np.hstack((im(c=c, t=0).max('z'), jm(c=c, t=0).max('z'))), c, 0, t) - - def with_drift(self, im): - """ Calculate shifts relative to the first frame - divide the sequence into groups, - compare each frame to the frame in the middle of the group and compare these middle frames to each other - """ - im = im.transpose('tzycx') - t_groups = [list(chunk) for chunk in Chunks(range(im.shape['t']), size=round(np.sqrt(im.shape['t'])))] - t_keys = [int(np.round(np.mean(t_group))) for t_group in t_groups] - t_pairs = [(int(np.round(np.mean(t_group))), frame) for t_group in t_groups for frame in t_group] - t_pairs.extend(zip(t_keys, t_keys[1:])) - fmaxz_keys = {t_key: filters.gaussian(im[t_key].max('z'), 5) for t_key in t_keys} - - def fun(t_key_t, im, fmaxz_keys): - t_key, t = t_key_t - if t_key == t: - return 0, 0 - else: - fmaxz = filters.gaussian(im[t].max('z'), 5) - return Transform.register(fmaxz_keys[t_key], fmaxz, 'translation').parameters[4:] - - shifts = np.array(pmap(fun, t_pairs, (im, fmaxz_keys), desc='Calculating image shifts.')) - shift_keys_cum = np.zeros(2) - for shift_keys, t_group in zip(np.vstack((-shifts[0], shifts[im.shape['t']:])), t_groups): - shift_keys_cum += shift_keys - shifts[t_group] += shift_keys_cum - - for i, shift in enumerate(shifts[:im.shape['t']]): - self[i] = Transform.from_shift(shift) - return self - - -class Transform: - def __init__(self): - if sitk is None: - self.transform = None - else: - self.transform = sitk.ReadTransform(str(Path(__file__).parent / 'transform.txt')) - self.dparameters = [0., 0., 0., 0., 0., 0.] - self.shape = [512., 512.] 
- self.origin = [255.5, 255.5] - self._last, self._inverse = None, None - - def __reduce__(self): - return self.from_dict, (self.asdict(),) - - def __repr__(self): - return self.asdict().__repr__() - - def __str__(self): - return self.asdict().__str__() - - @classmethod - def register(cls, fix, mov, kind=None): - """ kind: 'affine', 'translation', 'rigid' """ - if sitk is None: - raise ImportError('SimpleElastix is not installed: ' - 'https://simpleelastix.readthedocs.io/GettingStarted.html') - new = cls() - kind = kind or 'affine' - new.shape = fix.shape - fix, mov = new.cast_image(fix), new.cast_image(mov) - # TODO: implement RigidTransform - tfilter = sitk.ElastixImageFilter() - tfilter.LogToConsoleOff() - tfilter.SetFixedImage(fix) - tfilter.SetMovingImage(mov) - tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind)) - tfilter.Execute() - transform = tfilter.GetTransformParameterMap()[0] - if kind == 'affine': - new.parameters = [float(t) for t in transform['TransformParameters']] - new.shape = [float(t) for t in transform['Size']] - new.origin = [float(t) for t in transform['CenterOfRotationPoint']] - elif kind == 'translation': - new.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']] - new.shape = [float(t) for t in transform['Size']] - new.origin = [(t - 1) / 2 for t in new.shape] - else: - raise NotImplementedError(f'{kind} tranforms not implemented (yet)') - new.dparameters = 6 * [np.nan] - return new - - @classmethod - def from_shift(cls, shift): - return cls.from_array(np.array(((1, 0, shift[0]), (0, 1, shift[1]), (0, 0, 1)))) - - @classmethod - def from_array(cls, array): - new = cls() - new.matrix = array - return new - - @classmethod - def from_file(cls, file): - with open(Path(file).with_suffix('.yml')) as f: - return cls.from_dict(yamlload(f)) - - @classmethod - def from_dict(cls, d): - new = cls() - new.origin = [float(i) for i in d['CenterOfRotationPoint']] - new.parameters = [float(i) for i in d['TransformParameters']] - new.dparameters = [float(i) for i in d['dTransformParameters']] if 'dTransformParameters' in d else 6 * [np.nan] - new.shape = [float(i) for i in d['Size']] - return new - - def __mul__(self, other): # TODO: take care of dmatrix - result = self.copy() - if isinstance(other, Transform): - result.matrix = self.matrix @ other.matrix - result.dmatrix = self.dmatrix @ other.matrix + self.matrix @ other.dmatrix - else: - result.matrix = self.matrix @ other - result.dmatrix = self.dmatrix @ other - return result - - def is_unity(self): - return self.parameters == [1, 0, 0, 1, 0, 0] - - def copy(self): - return deepcopy(self) - - @staticmethod - def cast_image(im): - if not isinstance(im, sitk.Image): - im = sitk.GetImageFromArray(np.asarray(im)) - return im - - @staticmethod - def cast_array(im): - if isinstance(im, sitk.Image): - im = sitk.GetArrayFromImage(im) - return im - - @property - def matrix(self): - return np.array(((*self.parameters[:2], self.parameters[4]), - (*self.parameters[2:4], self.parameters[5]), - (0, 0, 1))) - - @matrix.setter - def matrix(self, value): - value = np.asarray(value) - self.parameters = [*value[0, :2], *value[1, :2], *value[:2, 2]] - - @property - def dmatrix(self): - return np.array(((*self.dparameters[:2], self.dparameters[4]), - (*self.dparameters[2:4], self.dparameters[5]), - (0, 0, 0))) - - @dmatrix.setter - def dmatrix(self, value): - value = np.asarray(value) - self.dparameters = [*value[0, :2], *value[1, :2], *value[:2, 2]] - - @property - def parameters(self): - if 
self.transform is not None: - return list(self.transform.GetParameters()) - - @parameters.setter - def parameters(self, value): - if self.transform is not None: - value = np.asarray(value) - self.transform.SetParameters(value.tolist()) - - @property - def origin(self): - if self.transform is not None: - return self.transform.GetFixedParameters() - - @origin.setter - def origin(self, value): - if self.transform is not None: - value = np.asarray(value) - self.transform.SetFixedParameters(value.tolist()) - - @property - def inverse(self): - if self.is_unity(): - return self - if self._last is None or self._last != self.asdict(): - self._last = self.asdict() - self._inverse = Transform.from_dict(self.asdict()) - self._inverse.transform = self._inverse.transform.GetInverse() - self._inverse._last = self._inverse.asdict() - self._inverse._inverse = self - return self._inverse - - def adapt(self, origin, shape): - self.origin -= np.array(origin) + (self.shape - np.array(shape)[:2]) / 2 - self.shape = shape[:2] - - def asdict(self): - return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters, - 'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()} - - def frame(self, im, default=0): - if self.is_unity(): - return im - else: - if sitk is None: - raise ImportError('SimpleElastix is not installed: ' - 'https://simpleelastix.readthedocs.io/GettingStarted.html') - dtype = im.dtype - im = im.astype('float') - intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor - return self.cast_array(sitk.Resample(self.cast_image(im), self.transform, intp, default)).astype(dtype) - - def coords(self, array, columns=None): - """ Transform coordinates in 2 column numpy array, - or in pandas DataFrame or Series objects in columns ['x', 'y'] - """ - if self.is_unity(): - return array.copy() - elif DataFrame is not None and isinstance(array, (DataFrame, Series)): - columns = columns or ['x', 'y'] - array = array.copy() - if isinstance(array, DataFrame): - array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy())) - elif isinstance(array, Series): - array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[0] - return array - else: # somehow we need to use the inverse here to get the same effect as when using self.frame - return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)]) - - def save(self, file): - """ save the parameters of the transform calculated - with affine_registration to a yaml file - """ - if not file[-3:] == 'yml': - file += '.yml' - with open(file, 'w') as f: - yaml.safe_dump(self.asdict(), f, default_flow_style=None) diff --git a/pyproject.toml b/pyproject.toml index 3937234..a26350b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,54 +1,15 @@ -[tool.poetry] -name = "ndbioimage" -version = "2025.1.2" -description = "Bio image reading, metadata and some affine registration." -authors = ["W. 
Pomp "]
-license = "GPLv3"
-readme = "README.md"
-keywords = ["bioformats", "imread", "numpy", "metadata"]
-include = ["transform.txt"]
-repository = "https://github.com/wimpomp/ndbioimage"
-exclude = ["ndbioimage/jars"]
-
-[tool.poetry.dependencies]
-python = "^3.10"
-numpy = ">=1.20.0"
-pandas = "*"
-tifffile = "*"
-czifile = "2019.7.2"
-tiffwrite = ">=2024.12.1"
-ome-types = ">=0.4.0"
-pint = "*"
-tqdm = "*"
-lxml = "*"
-pyyaml = "*"
-parfor = ">=2025.1.0"
-JPype1 = "*"
-SimpleITK-SimpleElastix = [
-    { version = "*", python = "<3.12" },
-    { version = "*", python = ">=3.12", markers = "sys_platform != 'darwin'" },
-    { version = "*", python = ">=3.12", markers = "platform_machine == 'aarch64'" },
-]
-scikit-image = "*"
-imagecodecs = "*"
-xsdata = "^23" # until pydantic is up-to-date
-matplotlib = { version = "*", optional = true }
-scikit-video = { version = "*", optional = true }
-pytest = { version = "*", optional = true }
-
-[tool.poetry.extras]
-test = ["pytest"]
-write = ["matplotlib", "scikit-video"]
-
-[tool.poetry.scripts]
-ndbioimage = "ndbioimage:main"
-
-[tool.pytest.ini_options]
-filterwarnings = ["ignore:::(colorcet)"]
-
-[tool.isort]
-line_length = 119
-
 [build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+requires = ["maturin>=1.8,<2.0"]
+build-backend = "maturin"
+
+[project]
+name = "ndbioimage_rs"
+requires-python = ">=3.8"
+classifiers = [
+    "Programming Language :: Rust",
+    "Programming Language :: Python :: Implementation :: CPython",
+    "Programming Language :: Python :: Implementation :: PyPy",
+]
+dynamic = ["version"]
+
+[tool.maturin]
+features = ["pyo3/extension-module"]
diff --git a/src/bioformats.rs b/src/bioformats.rs
new file mode 100644
index 0000000..43d81d5
--- /dev/null
+++ b/src/bioformats.rs
@@ -0,0 +1,142 @@
+use anyhow::Result;
+use j4rs::{Instance, InvocationArg, Jvm, JvmBuilder};
+use std::cell::OnceCell;
+use std::rc::Rc;
+
+thread_local! {
+    static JVM: OnceCell<Rc<Jvm>> = const { OnceCell::new() }
+}
+
+/// Ensure one JVM per thread.
+fn jvm() -> Rc<Jvm> {
+    JVM.with(|cell| {
+        cell.get_or_init(move || Rc::new(JvmBuilder::new().build().expect("Failed to build JVM")))
+            .clone()
+    })
+}
+
+macro_rules! method_return {
+    ($R:ty$(|c)?) => { Result<$R> };
+    () => { Result<()> };
+}
+
+macro_rules! method_arg {
+    ($n:tt: $t:ty|p) => {
+        InvocationArg::try_from($n)?.into_primitive()?
+    };
+    ($n:tt: $t:ty) => {
+        InvocationArg::try_from($n)?
+    };
+}
+
+macro_rules! method {
+    ($name:ident, $method:expr $(,[$($n:tt: $t:ty$(|$p:tt)?),*])? $(=> $tt:ty$(|$c:tt)?)?) => {
+        pub fn $name(&self, $($($n: $t),*)?) -> method_return!($($tt)?) {
+            let args: Vec<InvocationArg> = vec![$($( method_arg!($n:$t$(|$p)?) ),*)?];
+            let _result = jvm().invoke(&self.0, $method, &args)?;
+
+            macro_rules! method_result {
+                ($R:ty|c) => {
+                    Ok(jvm().to_rust(_result)?)
+                };
+                ($R:ty|d) => {
+                    Ok(jvm().to_rust_deserialized(_result)?)
+                };
+                ($R:ty) => {
+                    Ok(_result)
+                };
+                () => {
+                    Ok(())
+                };
+            }
+
+            method_result!($($tt$(|$c)?)?)
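+            // marker legend: `|p` (method_arg!) converts an argument into a
+            // Java primitive; `|c` and `|d` (method_result!) convert or
+            // deserialize the Java result into a Rust value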
+        }
+    };
+}
+
+pub struct DebugTools;
+
+impl DebugTools {
+    pub fn set_root_level(level: &str) -> Result<()> {
+        jvm().invoke_static(
+            "loci.common.DebugTools",
+            "setRootLevel",
+            &[InvocationArg::try_from(level)?],
+        )?;
+        Ok(())
+    }
+}
+
+pub struct ChannelSeparator(Instance);
+
+impl ChannelSeparator {
+    pub fn new(image_reader: &ImageReader) -> Result<Self> {
+        let jvm = jvm();
+        let channel_separator = jvm.create_instance(
+            "loci.formats.ChannelSeparator",
+            &[InvocationArg::from(jvm.clone_instance(&image_reader.0)?)],
+        )?;
+        Ok(ChannelSeparator(channel_separator))
+    }
+
+    pub fn open_bytes(&self, index: i32) -> Result<Vec<u8>> {
+        let bi8 = self.open_bi8(index)?;
+        Ok(unsafe { std::mem::transmute::<Vec<i8>, Vec<u8>>(bi8) })
+    }
+
+    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
+    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
+}
+
+pub struct ImageReader(Instance);
+
+impl Drop for ImageReader {
+    fn drop(&mut self) {
+        self.close().unwrap()
+    }
+}
+
+impl ImageReader {
+    pub fn new() -> Result<Self> {
+        let reader = jvm().create_instance("loci.formats.ImageReader", InvocationArg::empty())?;
+        Ok(ImageReader(reader))
+    }
+
+    pub fn open_bytes(&self, index: i32) -> Result<Vec<u8>> {
+        let bi8 = self.open_bi8(index)?;
+        Ok(unsafe { std::mem::transmute::<Vec<i8>, Vec<u8>>(bi8) })
+    }
+
+    method!(set_metadata_store, "setMetadataStore", [ome_data: Instance]);
+    method!(set_id, "setId", [id: &str]);
+    method!(set_series, "setSeries", [series: i32|p]);
+    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
+    method!(get_size_x, "getSizeX" => i32|c);
+    method!(get_size_y, "getSizeY" => i32|c);
+    method!(get_size_c, "getSizeC" => i32|c);
+    method!(get_size_t, "getSizeT" => i32|c);
+    method!(get_size_z, "getSizeZ" => i32|c);
+    method!(get_pixel_type, "getPixelType" => i32|c);
+    method!(is_little_endian, "isLittleEndian" => bool|c);
+    method!(is_rgb, "isRGB" => bool|c);
+    method!(is_interleaved, "isInterleaved" => bool|c);
+    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
+    method!(get_rgb_channel_count, "getRGBChannelCount" => i32|c);
+    method!(is_indexed, "isIndexed" => bool|c);
+    method!(get_8bit_lookup_table, "get8BitLookupTable" => Instance);
+    method!(get_16bit_lookup_table, "get16BitLookupTable" => Instance);
+    method!(close, "close");
+}
+
+pub struct MetadataTools(Instance);
+
+impl MetadataTools {
+    pub fn new() -> Result<Self> {
+        let meta_data_tools =
+            jvm().create_instance("loci.formats.MetadataTools", InvocationArg::empty())?;
+        Ok(MetadataTools(meta_data_tools))
+    }
+
+    method!(create_ome_xml_metadata, "createOMEXMLMetadata" => Instance);
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..3372ab1
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,366 @@
+mod bioformats;
+
+#[cfg(feature = "python")]
+mod py;
+
+use anyhow::{anyhow, Result};
+use bioformats::{DebugTools, ImageReader, MetadataTools};
+use ndarray::Array2;
+use num::{FromPrimitive, Zero};
+use std::any::type_name;
+use std::fmt::Debug;
+use std::path::{Path, PathBuf};
+
+/// Pixel types (u)int(8/16/32) or float(32/64)
+#[derive(Clone, Debug)]
+pub enum PixelType {
+    INT8 = 0,
+    UINT8 = 1,
+    INT16 = 2,
+    UINT16 = 3,
+    INT32 = 4,
+    UINT32 = 5,
+    FLOAT = 6,
+    DOUBLE = 7,
+}
+
+impl TryFrom<i32> for PixelType {
+    type Error = anyhow::Error;
+
+    fn try_from(value: i32) -> Result<Self> {
+        match value {
+            0 => Ok(PixelType::INT8),
+            1 => Ok(PixelType::UINT8),
+            2 => Ok(PixelType::INT16),
+            3 => Ok(PixelType::UINT16),
+            4 => Ok(PixelType::INT32),
+            5 => Ok(PixelType::UINT32),
+            6 => Ok(PixelType::FLOAT),
+            7 => Ok(PixelType::DOUBLE),
+            _ => Err(anyhow::anyhow!("Unknown pixel type {}", value)),
+        }
+    }
+}
+
+/// Struct containing frame data in one of eight pixel types. Cast to Array2 using try_into.
+#[derive(Clone, Debug)]
+pub enum Frame {
+    INT8(Array2<i8>),
+    UINT8(Array2<u8>),
+    INT16(Array2<i16>),
+    UINT16(Array2<u16>),
+    INT32(Array2<i32>),
+    UINT32(Array2<u32>),
+    FLOAT(Array2<f32>),
+    DOUBLE(Array2<f64>),
+}
+
+macro_rules! impl_frame_cast {
+    ($t:tt, $s:ident) => {
+        impl From<Array2<$t>> for Frame {
+            fn from(value: Array2<$t>) -> Self {
+                Frame::$s(value)
+            }
+        }
+    };
+}
+
+impl_frame_cast!(i8, INT8);
+impl_frame_cast!(u8, UINT8);
+impl_frame_cast!(i16, INT16);
+impl_frame_cast!(u16, UINT16);
+impl_frame_cast!(i32, INT32);
+impl_frame_cast!(u32, UINT32);
+impl_frame_cast!(f32, FLOAT);
+impl_frame_cast!(f64, DOUBLE);
+
+impl<T> TryInto<Array2<T>> for Frame
+where
+    T: FromPrimitive + Zero + 'static,
+{
+    type Error = anyhow::Error;
+
+    fn try_into(self) -> std::result::Result<Array2<T>, Self::Error> {
+        let mut err = Ok(());
+        let arr = match self {
+            Frame::INT8(v) => v.mapv_into_any(|x| {
+                T::from_i8(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::UINT8(v) => v.mapv_into_any(|x| {
+                T::from_u8(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::INT16(v) => v.mapv_into_any(|x| {
+                T::from_i16(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::UINT16(v) => v.mapv_into_any(|x| {
+                T::from_u16(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::INT32(v) => v.mapv_into_any(|x| {
+                T::from_i32(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::UINT32(v) => v.mapv_into_any(|x| {
+                T::from_u32(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::FLOAT(v) => v.mapv_into_any(|x| {
+                T::from_f32(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+            Frame::DOUBLE(v) => v.mapv_into_any(|x| {
+                T::from_f64(x).unwrap_or_else(|| {
+                    err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
+                    T::zero()
+                })
+            }),
+        };
+        match err {
+            Err(err) => Err(err),
+            Ok(()) => Ok(arr),
+        }
+    }
+}
+
+/// Reader interface to file. Use get_frame to get data.
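+///
+/// A minimal usage sketch (the file name, series index and pixel type here
+/// are placeholders, and the crate is assumed to be importable as
+/// `ndbioimage_rs`):
+///
+/// ```no_run
+/// use ndbioimage_rs::Reader;
+///
+/// let reader = Reader::new(std::path::Path::new("image.czi"), 0).unwrap();
+/// let frame = reader.get_frame(0, 0, 0).unwrap();
+/// let arr: ndarray::Array2<u16> = frame.try_into().unwrap();
+/// ```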
+pub struct Reader {
+    image_reader: ImageReader,
+    /// path to file
+    pub path: PathBuf,
+    /// which of the series in the file to open (if it contains more than one)
+    pub series: i32,
+    /// size x (horizontal)
+    pub size_x: usize,
+    /// size y (vertical)
+    pub size_y: usize,
+    /// size c (# channels)
+    pub size_c: usize,
+    /// size z (# slices)
+    pub size_z: usize,
+    /// size t (time/frames)
+    pub size_t: usize,
+    /// pixel type ((u)int(8/16/32) or float(32/64))
+    pub pixel_type: PixelType,
+    little_endian: bool,
+}
+
+impl Clone for Reader {
+    fn clone(&self) -> Self {
+        Reader::new(&self.path, self.series).unwrap()
+    }
+}
+
+impl Debug for Reader {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Reader")
+            .field("path", &self.path)
+            .field("series", &self.series)
+            .field("size_x", &self.size_x)
+            .field("size_y", &self.size_y)
+            .field("size_c", &self.size_c)
+            .field("size_z", &self.size_z)
+            .field("size_t", &self.size_t)
+            .field("pixel_type", &self.pixel_type)
+            .field("little_endian", &self.little_endian)
+            .finish()
+    }
+}
+
+impl Reader {
+    /// Create a new reader for the image file at path.
+    pub fn new(path: &Path, series: i32) -> Result<Self> {
+        DebugTools::set_root_level("ERROR")?;
+        let reader = ImageReader::new()?;
+        let meta_data_tools = MetadataTools::new()?;
+        let ome_meta = meta_data_tools.create_ome_xml_metadata()?;
+        reader.set_metadata_store(ome_meta)?;
+        reader.set_id(path.to_str().unwrap())?;
+        reader.set_series(series)?;
+        let size_x = reader.get_size_x()?;
+        let size_y = reader.get_size_y()?;
+        let size_c = reader.get_size_c()?;
+        let size_z = reader.get_size_z()?;
+        let size_t = reader.get_size_t()?;
+        let pixel_type = PixelType::try_from(reader.get_pixel_type()?)?;
+        let little_endian = reader.is_little_endian()?;
+        Ok(Reader {
+            image_reader: reader,
+            path: PathBuf::from(path),
+            series,
+            size_x: size_x as usize,
+            size_y: size_y as usize,
+            size_c: size_c as usize,
+            size_z: size_z as usize,
+            size_t: size_t as usize,
+            pixel_type,
+            little_endian,
+        })
+    }
+
+    fn deinterleave(&self, bytes: Vec<u8>, channel: usize) -> Result<Vec<u8>> {
+        let chunk_size = match self.pixel_type {
+            PixelType::INT8 => 1,
+            PixelType::UINT8 => 1,
+            PixelType::INT16 => 2,
+            PixelType::UINT16 => 2,
+            PixelType::INT32 => 4,
+            PixelType::UINT32 => 4,
+            PixelType::FLOAT => 4,
+            PixelType::DOUBLE => 8,
+        };
+        Ok(bytes
+            .chunks(chunk_size)
+            .skip(channel)
+            .step_by(self.size_c)
+            .flat_map(|a| a.to_vec())
+            .collect())
+    }
+
+    /// Retrieve the frame at channel c, slice z and time t.
+    pub fn get_frame(&self, c: usize, z: usize, t: usize) -> Result<Frame> {
+        let bytes = if self.image_reader.is_rgb()? & self.image_reader.is_interleaved()? {
+            let index = self.image_reader.get_index(z as i32, 0, t as i32)?;
+            self.deinterleave(self.image_reader.open_bytes(index)?, c)?
+        } else if self.image_reader.get_rgb_channel_count()? > 1 {
+            let channel_separator = bioformats::ChannelSeparator::new(&self.image_reader)?;
+            let index = channel_separator.get_index(z as i32, c as i32, t as i32)?;
+            channel_separator.open_bytes(index)?
+        } else if self.image_reader.is_indexed()? {
+            let index = self.image_reader.get_index(z as i32, 0, t as i32)?;
+            self.image_reader.open_bytes(index)?
+            // TODO: apply LUT
+            // let _bytes_lut = match self.pixel_type {
+            //     PixelType::INT8 | PixelType::UINT8 => {
+            //         let _lut = self.image_reader.get_8bit_lookup_table()?;
+            //     }
+            //     PixelType::INT16 | PixelType::UINT16 => {
+            //         let _lut = self.image_reader.get_16bit_lookup_table()?;
+            //     }
+            //     _ => {}
+            // };
+        } else {
+            let index = self.image_reader.get_index(z as i32, c as i32, t as i32)?;
+            self.image_reader.open_bytes(index)?
+        };
+        self.bytes_to_frame(bytes)
+    }
+
+    fn bytes_to_frame(&self, bytes: Vec<u8>) -> Result<Frame> {
+        macro_rules! get_frame {
+            ($t:tt, <$n:expr) => {
+                Ok(Frame::from(Array2::from_shape_vec(
+                    (self.size_y, self.size_x),
+                    bytes
+                        .chunks($n)
+                        .map(|x| $t::from_le_bytes(x.try_into().unwrap()))
+                        .collect(),
+                )?))
+            };
+            ($t:tt, >$n:expr) => {
+                Ok(Frame::from(Array2::from_shape_vec(
+                    (self.size_y, self.size_x),
+                    bytes
+                        .chunks($n)
+                        .map(|x| $t::from_be_bytes(x.try_into().unwrap()))
+                        .collect(),
+                )?))
+            };
+        }
+
+        match (&self.pixel_type, self.little_endian) {
+            (PixelType::INT8, true) => get_frame!(i8, <1),
+            (PixelType::UINT8, true) => get_frame!(u8, <1),
+            (PixelType::INT16, true) => get_frame!(i16, <2),
+            (PixelType::UINT16, true) => get_frame!(u16, <2),
+            (PixelType::INT32, true) => get_frame!(i32, <4),
+            (PixelType::UINT32, true) => get_frame!(u32, <4),
+            (PixelType::FLOAT, true) => get_frame!(f32, <4),
+            (PixelType::DOUBLE, true) => get_frame!(f64, <8),
+            (PixelType::INT8, false) => get_frame!(i8, >1),
+            (PixelType::UINT8, false) => get_frame!(u8, >1),
+            (PixelType::INT16, false) => get_frame!(i16, >2),
+            (PixelType::UINT16, false) => get_frame!(u16, >2),
+            (PixelType::INT32, false) => get_frame!(i32, >4),
+            (PixelType::UINT32, false) => get_frame!(u32, >4),
+            (PixelType::FLOAT, false) => get_frame!(f32, >4),
+            (PixelType::DOUBLE, false) => get_frame!(f64, >8),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rayon::prelude::*;
+
+    fn open(file: &str) -> Result<Reader> {
+        let path = std::env::current_dir()?.join("tests").join("files").join(file);
+        Reader::new(&path, 0)
+    }
+
+    fn get_pixel_type(file: &str) -> Result<String> {
+        let reader = open(file)?;
+        Ok(format!(
+            "file: {}, pixel type: {:?}",
+            file, reader.pixel_type
+        ))
+    }
+
+    fn get_frame(file: &str) -> Result<Frame> {
+        let reader = open(file)?;
+        reader.get_frame(0, 0, 0)
+    }
+
+    #[test]
+    fn read_ser() -> Result<()> {
+        let file = "Experiment-2029.czi";
+        let reader = open(file)?;
+        println!("size: {}, {}", reader.size_x, reader.size_y);
+        let frame = reader.get_frame(0, 0, 0)?;
+        if let Ok(arr) = <Frame as TryInto<Array2<u16>>>::try_into(frame) {
+            println!("{:?}", arr);
+        } else {
+            println!("could not convert Frame to Array");
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn read_par() -> Result<()> {
+        let files = vec!["Experiment-2029.czi", "test.tif"];
+        let pixel_type = files
+            .into_par_iter()
+            .map(|file| get_pixel_type(file).unwrap())
+            .collect::<Vec<_>>();
+        println!("{:?}", pixel_type);
+        Ok(())
+    }
+
+    #[test]
+    fn read_frame_par() -> Result<()> {
+        let files = vec!["Experiment-2029.czi", "test.tif"];
+        let frames = files
+            .into_par_iter()
+            .map(|file| get_frame(file).unwrap())
+            .collect::<Vec<_>>();
+        println!("{:?}", frames);
+        Ok(())
+    }
+}
diff --git a/src/py.rs b/src/py.rs
new file mode 100644
index 0000000..ca8ef08
--- /dev/null
+++ b/src/py.rs
@@ -0,0 +1,14 @@
+use pyo3::prelude::*;
+
+/// Formats the sum of two numbers as a string.
+#[pyfunction]
+fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
+    Ok((a + b).to_string())
+}
+
+/// A Python module implemented in Rust.
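+///
+/// Hypothetical use from Python, once the wheel is built with maturin:
+/// `import ndbioimage_rs; ndbioimage_rs.sum_as_string(1, 2)` returns `"3"`.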
+#[pymodule] +fn ndbioimage_rs(m: &Bound<'_, PyModule>) -> PyResult<()> { + m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; + Ok(()) +}
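+
+// A minimal sanity check for `sum_as_string` as a plain Rust function; a
+// sketch, only compiled together with this module when the crate's `python`
+// feature is enabled (see lib.rs).
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn sum_as_string_formats_the_sum() {
+        assert_eq!(sum_as_string(40, 2).unwrap(), "42");
+    }
+}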