- start rust rewrite

This commit is contained in:
Wim Pomp
2025-02-03 15:33:32 +01:00
parent 4a6a15686c
commit 3db6dc8ee1
22 changed files with 1487 additions and 3329 deletions

View File

@@ -1,22 +0,0 @@
name: PyTest
on: [workflow_call, push, pull_request]
jobs:
pytest:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
pip install .[test]
- name: Test with pytest
run: pytest

84
.gitignore vendored
View File

@@ -1,12 +1,72 @@
._*
*.pyc
/build/
*.egg-info
/venv/
.idea
/.pytest_cache/
/ndbioimage/_version.py
/ndbioimage/jars
/tests/files/*
/poetry.lock
/dist/
/target
# Byte-compiled / optimized / DLL files
__pycache__/
.pytest_cache/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
.venv/
env/
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
include/
man/
venv/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
pip-selfcheck.json
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
.DS_Store
# Sphinx documentation
docs/_build/
# PyCharm
.idea/
# VSCode
.vscode/
# Pyenv
.python-version

816
Cargo.lock generated Normal file
View File

@@ -0,0 +1,816 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "anyhow"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
[[package]]
name = "autocfg"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cc"
version = "1.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4730490333d58093109dc02c23174c3f4d490998c3fed3cc8e82d57afedb9cf"
dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
[[package]]
name = "dunce"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
[[package]]
name = "either"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
[[package]]
name = "fs_extra"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.98",
]
[[package]]
name = "futures-sink"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
]
[[package]]
name = "getrandom"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "glob"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "indoc"
version = "2.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
[[package]]
name = "itoa"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "j4rs"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dacf87fdd07b36f3124894a5bf20c8d418aaa28d4974d717672507f6d05cd31c"
dependencies = [
"cesu8",
"dunce",
"fs_extra",
"futures",
"java-locator",
"jni-sys",
"lazy_static",
"libc",
"libloading",
"log",
"serde",
"serde_json",
]
[[package]]
name = "java-locator"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09c46c1fe465c59b1474e665e85e1256c3893dd00927b8d55f63b09044c1e64f"
dependencies = [
"glob",
]
[[package]]
name = "jni-sys"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c30a312d782b8d56a1e0897d45c1af33f31f9b4a4d13d31207a8675e0223b818"
dependencies = [
"jni-sys-macros",
]
[[package]]
name = "jni-sys-macros"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c199962dfd5610ced8eca382606e349f7940a4ac7d867b58a046123411cbb4"
dependencies = [
"quote",
"syn 1.0.109",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.169"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a"
[[package]]
name = "libloading"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets",
]
[[package]]
name = "log"
version = "0.4.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
[[package]]
name = "matrixmultiply"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a"
dependencies = [
"autocfg",
"rawpointer",
]
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "memoffset"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
dependencies = [
"autocfg",
]
[[package]]
name = "ndarray"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841"
dependencies = [
"matrixmultiply",
"num-complex",
"num-integer",
"num-traits",
"portable-atomic",
"portable-atomic-util",
"rawpointer",
]
[[package]]
name = "ndbioimage_rs"
version = "2025.1.0"
dependencies = [
"anyhow",
"j4rs",
"ndarray",
"num",
"numpy",
"pyo3",
"rayon",
"retry",
]
[[package]]
name = "num"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23"
dependencies = [
"num-bigint",
"num-complex",
"num-integer",
"num-iter",
"num-rational",
"num-traits",
]
[[package]]
name = "num-bigint"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
dependencies = [
"num-integer",
"num-traits",
]
[[package]]
name = "num-complex"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495"
dependencies = [
"num-traits",
]
[[package]]
name = "num-integer"
version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
"num-traits",
]
[[package]]
name = "num-iter"
version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
dependencies = [
"autocfg",
"num-integer",
"num-traits",
]
[[package]]
name = "num-rational"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824"
dependencies = [
"num-bigint",
"num-integer",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "numpy"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94caae805f998a07d33af06e6a3891e38556051b8045c615470a71590e13e78"
dependencies = [
"libc",
"ndarray",
"num-complex",
"num-integer",
"num-traits",
"pyo3",
"rustc-hash",
]
[[package]]
name = "once_cell"
version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "portable-atomic"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6"
[[package]]
name = "portable-atomic-util"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507"
dependencies = [
"portable-atomic",
]
[[package]]
name = "ppv-lite86"
version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
dependencies = [
"zerocopy",
]
[[package]]
name = "proc-macro2"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57fe09249128b3173d092de9523eaa75136bf7ba85e0d69eca241c7939c933cc"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"once_cell",
"portable-atomic",
"pyo3-build-config",
"pyo3-ffi",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cd3927b5a78757a0d71aa9dff669f903b1eb64b54142a9bd9f757f8fde65fd7"
dependencies = [
"once_cell",
"python3-dll-a",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dab6bb2102bd8f991e7749f130a70d05dd557613e39ed2deeee8e9ca0c4d548d"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-macros"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91871864b353fd5ffcb3f91f2f703a22a9797c91b9ab497b1acac7b07ae509c7"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
"syn 2.0.98",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43abc3b80bc20f3facd86cd3c60beed58c3e2aa26213f3cda368de39c60a27e4"
dependencies = [
"heck",
"proc-macro2",
"pyo3-build-config",
"quote",
"syn 2.0.98",
]
[[package]]
name = "python3-dll-a"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b66f9171950e674e64bad3456e11bb3cca108e5c34844383cfe277f45c8a7a8"
dependencies = [
"cc",
]
[[package]]
name = "quote"
version = "1.0.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "rawpointer"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "retry"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9166d72162de3575f950507683fac47e30f6f2c3836b71b7fbc61aa517c9c5f4"
dependencies = [
"rand",
]
[[package]]
name = "rustc-hash"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497"
[[package]]
name = "ryu"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd"
[[package]]
name = "serde"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.217"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.98",
]
[[package]]
name = "serde_json"
version = "1.0.138"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "slab"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
dependencies = [
"autocfg",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "target-lexicon"
version = "0.12.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
[[package]]
name = "unicode-ident"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034"
[[package]]
name = "unindent"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "zerocopy"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.98",
]

33
Cargo.toml Normal file
View File

@@ -0,0 +1,33 @@
[package]
name = "ndbioimage_rs"
version = "2025.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "ndbioimage_rs"
crate-type = ["cdylib"]
[dependencies]
anyhow = "1.0.95"
j4rs = "0.22.0"
ndarray = "0.16.1"
num = "0.4.3"
numpy = { version = "0.23.0", optional = true }
[dependencies.pyo3]
version = "0.23.4"
features = ["extension-module", "abi3-py310", "generate-import-lib", "anyhow"]
optional = true
[dev-dependencies]
rayon = "1.10.0"
[build-dependencies]
j4rs = { version = "0.22", features = [] }
retry = { version = "2.0.0"}
anyhow = { version = "1.0.95"}
[features]
python = ["dep:pyo3", "dep:numpy"]
gpl-formats = []

View File

@@ -1,94 +1,4 @@
[![Pytest](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml/badge.svg)](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml)
# ndbioimage
Exposes (bio) images as a numpy ndarray-like object, but without loading the whole
image into memory, reading from the file only when needed. Some metadata is read
and stored in an [ome](https://genomebiology.biomedcentral.com/articles/10.1186/gb-2005-6-5-r47) structure.
Additionally, it can automatically calculate an affine transform that corrects for chromatic aberrations etc. and apply
it on the fly to the image.
Currently, it supports ImageJ tif files, czi files, Micro-Manager tif sequences and anything
[bioformats](https://www.openmicroscopy.org/bio-formats/) can handle.
## Installation
```
pip install ndbioimage
```
### Installation with option to write mp4 or mkv:
Work in progress! Make sure ffmpeg is installed.
```
pip install ndbioimage[write]
```
## Usage
### Python
- Reading an image file and plotting the frame at channel=2, time=1
```
import matplotlib.pyplot as plt
from ndbioimage import Imread
with Imread('image_file.tif', axes='ctyx', dtype=int) as im:
plt.imshow(im[2, 1])
```
- Showing some image metadata
```
from ndbioimage import Imread
from pprint import pprint
with Imread('image_file.tif') as im:
pprint(im)
```
- Slicing the image without loading the image into memory
```
from ndbioimage import Imread
with Imread('image_file.tif', axes='cztyx') as im:
sliced_im = im[1, :, :, 100:200, 100:200]
```
sliced_im is an instance of Imread which will load any image data from file only when needed
- Converting (part) of the image to a numpy ndarray
```
from ndbioimage import Imread
import numpy as np
with Imread('image_file.tif', axes='cztyx') as im:
array = np.asarray(im[0, 0])
```
### Command line
```ndbioimage --help```: show help
```ndbioimage image```: show metadata about image
```ndbioimage image -w {name}.tif -r```: copy image into image.tif (replacing {name} with image), while registering channels
```ndbioimage image -w image.mp4 -C cyan lime red``` copy image into image.mp4 (z will be max projected), make channel colors cyan lime and red
## Adding more formats
Readers for image formats subclass AbstractReader. When an image reader is imported, Imread will
automatically recognize it and use it to open the appropriate file format. Image readers
are required to implement the following methods:
- staticmethod _can_open(path): return True if path can be opened by this reader
- \_\_frame__(self, c, z, t): return the frame at channel=c, z-slice=z, time=t from the file
Optional methods:
- get_ome: reads metadata from file and adds them to an OME object imported
from the ome-types library
- open(self): maybe open some file handle
- close(self): close any file handles
Optional fields:
- priority (int): Imread will try readers with a lower number first, default: 99
- do_not_pickle (strings): any attributes that should not be included when the object is pickled,
for example: any file handles
# TODO
- more image formats
# Work in progress
Rust rewrite of the Python version.

28
build.rs Normal file
View File

@@ -0,0 +1,28 @@
// copied from https://github.com/AzHicham/bioformats-rs
use j4rs::{errors::J4RsError, JvmBuilder, MavenArtifact, MavenArtifactRepo, MavenSettings};
use retry::{delay, delay::Exponential, retry};
/// Build script entry point: deploys the required Bio-Formats Java artifacts
/// via Maven, retrying with jittered exponential backoff (1 s base, 4 attempts)
/// because artifact downloads over the network are flaky in CI.
///
/// # Errors
/// Returns an error if all retry attempts of [`deploy_java_artifacts`] fail.
fn main() -> anyhow::Result<()> {
    println!("cargo:rerun-if-changed=build.rs");
    // `Ok(retry(...)?)` was redundant (clippy::needless_question_mark);
    // propagate the error with `?` and return `Ok(())` explicitly.
    retry(
        Exponential::from_millis(1000).map(delay::jitter).take(4),
        deploy_java_artifacts,
    )?;
    Ok(())
}
/// Downloads the Bio-Formats Maven artifacts into the local j4rs jar cache.
///
/// Builds a JVM configured with the OME artifact repository, then deploys the
/// `bioformats_package` jar (and, when the `gpl-formats` feature is enabled,
/// the GPL-licensed `formats-gpl` jar as well).
///
/// # Errors
/// Returns a [`J4RsError`] if the JVM cannot be built or an artifact fails to deploy.
fn deploy_java_artifacts() -> Result<(), J4RsError> {
    let ome_repo = MavenArtifactRepo::from(
        "openmicroscopy::https://artifacts.openmicroscopy.org/artifactory/ome.releases",
    );
    let jvm = JvmBuilder::new()
        .with_maven_settings(MavenSettings::new(vec![ome_repo]))
        .build()?;
    let package = MavenArtifact::from("ome:bioformats_package:8.0.1");
    jvm.deploy_artifact(&package)?;
    #[cfg(feature = "gpl-formats")]
    {
        let gpl = MavenArtifact::from("ome:formats-gpl:8.0.1");
        jvm.deploy_artifact(&gpl)?;
    }
    Ok(())
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,77 +0,0 @@
from pathlib import Path
from urllib import request
class JVMException(Exception):
    """Raised when the Java virtual machine cannot be started, or when the
    JVM worker process reports an error instead of a result."""
# The whole class definition is wrapped in try/except ImportError so that the
# module still imports on systems where the optional Java bridge (jpype) and
# its transitive imports are unavailable; callers must check `JVM is not None`.
try:
    class JVM:
        """ There can be only one java virtual machine per python process,
        so this is a singleton class to manage the jvm.
        """
        # Singleton instance; class-level flags are shared by all "instances".
        _instance = None
        vm_started = False
        vm_killed = False
        # NOTE(review): `success` appears unused within this class — confirm
        # whether external code reads it before removing.
        success = True

        def __new__(cls, *args):
            # Classic singleton: construct the object only once, then always
            # return the same instance (constructor args are ignored here).
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            return cls._instance

        def __init__(self, jars=None):
            # jars: mapping of jar file name -> download URL. Missing jars are
            # downloaded next to this module before the JVM is started.
            if not self.vm_started and not self.vm_killed:
                try:
                    jar_path = Path(__file__).parent / 'jars'
                    if jars is None:
                        jars = {}
                    for jar, src in jars.items():
                        if not (jar_path / jar).exists():
                            JVM.download(src, jar_path / jar)
                    classpath = [str(jar_path / jar) for jar in jars.keys()]
                    # Deferred import: jpype is an optional dependency and
                    # starting the JVM is a one-way, per-process operation.
                    import jpype
                    jpype.startJVM(classpath=classpath)
                except Exception:  # noqa
                    # Best-effort: any failure (download, import, JVM start)
                    # leaves the singleton in the "not started" state.
                    self.vm_started = False
                else:
                    self.vm_started = True
                try:
                    # These imports resolve Java classes through jpype and only
                    # work after the JVM has started with bioformats on the
                    # classpath; cache the class handles on the singleton.
                    import jpype.imports
                    from loci.common import DebugTools  # noqa
                    from loci.formats import ChannelSeparator  # noqa
                    from loci.formats import FormatTools  # noqa
                    from loci.formats import ImageReader  # noqa
                    from loci.formats import MetadataTools  # noqa
                    DebugTools.setRootLevel("ERROR")
                    self.image_reader = ImageReader
                    self.channel_separator = ChannelSeparator
                    self.format_tools = FormatTools
                    self.metadata_tools = MetadataTools
                except Exception:  # noqa
                    # Deliberately swallowed: the singleton simply lacks the
                    # cached Java class handles if the imports fail.
                    pass
            if self.vm_killed:
                # jpype cannot restart a JVM within the same Python process.
                raise Exception('The JVM was killed before, and cannot be restarted in this Python process.')

        @staticmethod
        def download(src, dest):
            # Fetch a jar from `src` and write it to `dest`, creating the
            # parent directory if needed.
            print(f'Downloading {dest.name} to {dest}.')
            dest.parent.mkdir(exist_ok=True)
            dest.write_bytes(request.urlopen(src).read())

        @classmethod
        def kill_vm(cls):
            # Shut down the JVM (if running) and mark it permanently killed
            # for this process; see the restart restriction in __init__.
            self = cls._instance
            if self is not None and self.vm_started and not self.vm_killed:
                import jpype
                jpype.shutdownJVM()  # noqa
                self.vm_started = False
                self.vm_killed = True
except ImportError:
    # Sentinel: JVM support is unavailable in this environment.
    JVM = None

View File

@@ -1 +0,0 @@
__all__ = 'bfread', 'cziread', 'fijiread', 'ndread', 'seqread', 'tifread', 'metaseriesread'

View File

@@ -1,208 +0,0 @@
import multiprocessing
from abc import ABC
from multiprocessing import queues
from pathlib import Path
from traceback import format_exc
import numpy as np
from .. import JVM, AbstractReader, JVMException
# Jar name -> download URL, passed to JVM() so the bioformats package jar is
# fetched on first use (the URL is split via implicit string concatenation).
jars = {'bioformats_package.jar': 'https://downloads.openmicroscopy.org/bio-formats/latest/artifacts/'
                                  'bioformats_package.jar'}
class JVMReader:
def __init__(self, path: Path, series: int) -> None:
    """Start a worker process that hosts the JVM-backed reader for `path`.

    The JVM is run in a separate 'spawn' process (presumably because only one
    JVM may exist per Python process and it cannot be safely forked — TODO
    confirm) and communicates via two queues: requests in, frames/errors out.

    Raises:
        JVMException: if the worker fails to report a successful start.
    """
    mp = multiprocessing.get_context('spawn')
    self.path = path
    self.series = series
    self.queue_in = mp.Queue()
    self.queue_out = mp.Queue()
    self.done = mp.Event()
    self.process = mp.Process(target=self.run)
    self.process.start()
    # Handshake: block until the worker reports ('status', 'started'); any
    # other message is treated as a startup failure and re-raised here.
    status, message = self.queue_out.get()
    if status == 'status' and message == 'started':
        self.is_alive = True
    else:
        raise JVMException(message)
def close(self) -> None:
    """Shut down the worker process and release its queues.

    Safe to call only once meaningfully; subsequent calls are no-ops because
    `is_alive` is cleared at the end.
    """
    if self.is_alive:
        # Signal the worker loop to exit, then drain pending requests so the
        # queue's feeder thread can terminate cleanly before closing.
        self.done.set()
        while not self.queue_in.empty():
            self.queue_in.get()
        self.queue_in.close()
        self.queue_in.join_thread()
        # Drain (and surface) any leftover worker output before closing.
        while not self.queue_out.empty():
            print(self.queue_out.get())
        self.queue_out.close()
        self.process.join()
        self.process.close()
        self.is_alive = False
def frame(self, c: int, z: int, t: int) -> np.ndarray:
    """Request the plane at channel `c`, z-slice `z`, time `t` from the
    worker process and return it as a numpy array.

    Raises:
        JVMException: if the worker replies with anything other than a frame.
    """
    self.queue_in.put((c, z, t))
    status, payload = self.queue_out.get()
    # Guard clause: any non-'frame' reply carries an error message.
    if status != 'frame':
        raise JVMException(payload)
    return payload
def run(self) -> None:
""" Read planes from the image reader file.
adapted from python-bioformats/bioformats/formatreader.py
"""
jvm = None
try:
jvm = JVM(jars)
reader = jvm.image_reader()
ome_meta = jvm.metadata_tools.createOMEXMLMetadata()
reader.setMetadataStore(ome_meta)
reader.setId(str(self.path))
reader.setSeries(self.series)
open_bytes_func = reader.openBytes
width, height = int(reader.getSizeX()), int(reader.getSizeY())
pixel_type = reader.getPixelType()
little_endian = reader.isLittleEndian()
if pixel_type == jvm.format_tools.INT8:
dtype = np.int8
elif pixel_type == jvm.format_tools.UINT8:
dtype = np.uint8
elif pixel_type == jvm.format_tools.UINT16:
dtype = '<u2' if little_endian else '>u2'
elif pixel_type == jvm.format_tools.INT16:
dtype = '<i2' if little_endian else '>i2'
elif pixel_type == jvm.format_tools.UINT32:
dtype = '<u4' if little_endian else '>u4'
elif pixel_type == jvm.format_tools.INT32:
dtype = '<i4' if little_endian else '>i4'
elif pixel_type == jvm.format_tools.FLOAT:
dtype = '<f4' if little_endian else '>f4'
elif pixel_type == jvm.format_tools.DOUBLE:
dtype = '<f8' if little_endian else '>f8'
else:
dtype = None
self.queue_out.put(('status', 'started'))
while not self.done.is_set():
try:
c, z, t = self.queue_in.get(True, 0.02)
if reader.isRGB() and reader.isInterleaved():
index = reader.getIndex(z, 0, t)
image = np.frombuffer(open_bytes_func(index), dtype)
image.shape = (height, width, reader.getSizeC())
if image.shape[2] > 3:
image = image[:, :, :3]
elif c is not None and reader.getRGBChannelCount() == 1:
index = reader.getIndex(z, c, t)
image = np.frombuffer(open_bytes_func(index), dtype)
image.shape = (height, width)
elif reader.getRGBChannelCount() > 1:
n_planes = reader.getRGBChannelCount()
rdr = jvm.channel_separator(reader)
planes = [np.frombuffer(rdr.openBytes(rdr.getIndex(z, i, t)), dtype) for i in range(n_planes)]
if len(planes) > 3:
planes = planes[:3]
elif len(planes) < 3:
# > 1 and < 3 means must be 2
# see issue #775
planes.append(np.zeros(planes[0].shape, planes[0].dtype))
image = np.dstack(planes)
image.shape = (height, width, 3)
del rdr
elif reader.getSizeC() > 1:
images = [np.frombuffer(open_bytes_func(reader.getIndex(z, i, t)), dtype)
for i in range(reader.getSizeC())]
image = np.dstack(images)
image.shape = (height, width, reader.getSizeC())
# if not channel_names is None:
# metadata = MetadataRetrieve(self.metadata)
# for i in range(self.reader.getSizeC()):
# index = self.reader.getIndex(z, 0, t)
# channel_name = metadata.getChannelName(index, i)
# if channel_name is None:
# channel_name = metadata.getChannelID(index, i)
# channel_names.append(channel_name)
elif reader.isIndexed():
#
# The image data is indexes into a color lookup-table
# But sometimes the table is the identity table and just generates
# a monochrome RGB image
#
index = reader.getIndex(z, 0, t)
image = np.frombuffer(open_bytes_func(index), dtype)
if pixel_type in (jvm.format_tools.INT16, jvm.format_tools.UINT16):
lut = reader.get16BitLookupTable()
if lut is not None:
lut = np.array(lut)
# lut = np.array(
# [env.get_short_array_elements(d)
# for d in env.get_object_array_elements(lut)]) \
# .transpose()
else:
lut = reader.get8BitLookupTable()
if lut is not None:
lut = np.array(lut)
# lut = np.array(
# [env.get_byte_array_elements(d)
# for d in env.get_object_array_elements(lut)]) \
# .transpose()
image.shape = (height, width)
if (lut is not None) and not np.all(lut == np.arange(lut.shape[0])[:, np.newaxis]):
image = lut[image, :]
else:
index = reader.getIndex(z, 0, t)
image = np.frombuffer(open_bytes_func(index), dtype)
image.shape = (height, width)
if image.ndim == 3:
self.queue_out.put(('frame', image[..., c]))
else:
self.queue_out.put(('frame', image))
except queues.Empty: # noqa
continue
except (Exception,):
self.queue_out.put(('error', format_exc()))
finally:
if jvm is not None:
jvm.kill_vm()
def can_open(path: Path) -> bool:
    """ Return whether java BioFormats recognizes the format of the file at path.

        Meant to run in its own (spawned) process so the JVM can be discarded
        afterwards without affecting the calling process.
    """
    jvm = None
    try:
        jvm = JVM(jars)
        reader = jvm.image_reader()
        reader.getFormat(str(path))
        return True
    except (Exception,):
        return False
    finally:
        # guard against JVM(jars) itself raising: previously `jvm` could be
        # unbound here, turning any startup failure into a NameError
        if jvm is not None:
            jvm.kill_vm()
class Reader(AbstractReader, ABC):
    """ Fallback reader that opens files through java BioFormats in a subprocess.

        Used only as a last resort because it requires the java vm.
    """
    priority = 99  # panic and open with BioFormats
    do_not_pickle = 'reader', 'key', 'jvm'
    @staticmethod
    def _can_open(path: Path) -> bool:
        """ Ask java BioFormats, in a throw-away spawned process, whether it knows this format. """
        with multiprocessing.get_context('spawn').Pool(1) as pool:
            return pool.apply(can_open, (path,))
    def open(self) -> None:
        # all actual reading is delegated to a JVMReader subprocess
        self.reader = JVMReader(self.path, self.series)
    def __frame__(self, c: int, z: int, t: int) -> np.ndarray:
        return self.reader.frame(c, z, t)
    def close(self) -> None:
        self.reader.close()

View File

@@ -1,606 +0,0 @@
import re
import warnings
from abc import ABC
from functools import cached_property
from io import BytesIO
from itertools import product
from pathlib import Path
from typing import Any, Callable, Optional, TypeVar
import czifile
import imagecodecs
import numpy as np
from lxml import etree
from ome_types import OME, model
from tifffile import repeat_nd
from .. import AbstractReader
try:
# TODO: use zoom from imagecodecs implementation when available
from scipy.ndimage.interpolation import zoom
except ImportError:
try:
from ndimage.interpolation import zoom
except ImportError:
zoom = None
Element = TypeVar('Element')
def zstd_decode(data: bytes) -> bytes: # noqa
""" decode zstd bytes, copied from BioFormats ZeissCZIReader """
def read_var_int(stream: BytesIO) -> int: # noqa
a = stream.read(1)[0]
if a & 128:
b = stream.read(1)[0]
if b & 128:
c = stream.read(1)[0]
return (c << 14) | ((b & 127) << 7) | (a & 127)
return (b << 7) | (a & 127)
return a & 255
try:
with BytesIO(data) as stream:
size_of_header = read_var_int(stream)
high_low_unpacking = False
while stream.tell() < size_of_header:
chunk_id = read_var_int(stream)
# only one chunk ID defined so far
if chunk_id == 1:
high_low_unpacking = (stream.read(1)[0] & 1) == 1
else:
raise ValueError(f'Invalid chunk id: {chunk_id}')
pointer = stream.tell()
except Exception: # noqa
high_low_unpacking = False
pointer = 0
decoded = imagecodecs.zstd_decode(data[pointer:])
if high_low_unpacking:
second_half = len(decoded) // 2
return bytes([decoded[second_half + i // 2] if i % 2 else decoded[i // 2] for i in range(len(decoded))])
else:
return decoded
def data(self, raw: bool = False, resize: bool = True, order: int = 0) -> np.ndarray:
    """Read image data from file and return as numpy array.

    Replacement for czifile.SubBlockSegment.data that adds zstd (compression
    types 5 and 6) support via imagecodecs; otherwise follows the original:
    optionally return raw bytes, decompress, fix BGR(A) channel order, and
    sub/supersample to the declared shape when stored and declared shapes differ.
    """
    DECOMPRESS = czifile.czifile.DECOMPRESS  # noqa
    # register the zstd decoders: 5 = plain zstd, 6 = zstd with Zeiss header (see zstd_decode)
    DECOMPRESS[5] = imagecodecs.zstd_decode
    DECOMPRESS[6] = zstd_decode
    de = self.directory_entry
    fh = self._fh
    if raw:
        # return the undecoded bytes straight from the file
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read(self.data_size)  # noqa
        return data
    if de.compression:
        # if de.compression not in DECOMPRESS:
        #     raise ValueError('compression unknown or not supported')
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read(self.data_size)  # noqa
        data = DECOMPRESS[de.compression](data)  # noqa
        if de.compression == 2:
            # LZW
            data = np.fromstring(data, de.dtype)  # noqa
        elif de.compression in (5, 6):
            # ZSTD
            data = np.frombuffer(data, de.dtype)  # noqa
    else:
        # uncompressed: read directly into an array
        dtype = np.dtype(de.dtype)
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read_array(dtype, self.data_size // dtype.itemsize)  # noqa
    data = data.reshape(de.stored_shape)  # noqa
    # compression type 4 (JPEG) already delivers RGB; otherwise swap BGR(A) -> RGB(A)
    if de.compression != 4 and de.stored_shape[-1] in (3, 4):
        if de.stored_shape[-1] == 3:
            # BGR -> RGB
            data = data[..., ::-1]  # noqa
        else:
            # BGRA -> RGBA
            tmp = data[..., 0].copy()
            data[..., 0] = data[..., 2]
            data[..., 2] = tmp
    if de.stored_shape == de.shape or not resize:
        return data
    # sub / supersampling
    factors = [j / i for i, j in zip(de.stored_shape, de.shape)]
    factors = [(int(round(f)) if abs(f - round(f)) < 0.0001 else f)
               for f in factors]
    # use repeat if possible
    if order == 0 and all(isinstance(f, int) for f in factors):
        data = repeat_nd(data, factors).copy()  # noqa
        data.shape = de.shape
        return data
    # remove leading dimensions with size 1 for speed
    shape = list(de.stored_shape)
    i = 0
    for s in shape:
        if s != 1:
            break
        i += 1
    shape = shape[i:]
    factors = factors[i:]
    data.shape = shape
    # resize RGB components separately for speed
    if zoom is None:
        raise ImportError("cannot import 'zoom' from scipy or ndimage")
    if shape[-1] in (3, 4) and factors[-1] == 1.0:
        factors = factors[:-1]
        old = data
        data = np.empty(de.shape, de.dtype[-2:])  # noqa
        for i in range(shape[-1]):
            data[..., i] = zoom(old[..., i], zoom=factors, order=order)
    else:
        data = zoom(data, zoom=factors, order=order)  # noqa
        data.shape = de.shape
    return data
# monkeypatch zstd into czifile: czifile itself cannot decompress zstd subblocks,
# so replace SubBlockSegment.data with the extended implementation above
czifile.czifile.SubBlockSegment.data = data
class Reader(AbstractReader, ABC):
    """ Reader for Zeiss .czi files, built on czifile.

        A czi file stores the image as tiled subblocks; open() indexes them per
        (c, z, t) and __frame__ stitches the tiles of one plane together.
    """
    priority = 0
    do_not_pickle = 'reader', 'filedict'
    @staticmethod
    def _can_open(path: Path) -> bool:
        return isinstance(path, Path) and path.suffix == '.czi'
    def open(self) -> None:
        self.reader = czifile.CziFile(self.path)
        # map (c, z, t) -> list of subblock directory entries (tiles) for that plane
        filedict = {}
        for directory_entry in self.reader.filtered_subblock_directory:
            idx = self.get_index(directory_entry, self.reader.start)
            # keep only entries belonging to the requested series (scene), if any
            if 'S' not in self.reader.axes or self.series in range(*idx[self.reader.axes.index('S')]):
                for c in range(*idx[self.reader.axes.index('C')]):
                    for z in range(*idx[self.reader.axes.index('Z')]):
                        for t in range(*idx[self.reader.axes.index('T')]):
                            if (c, z, t) in filedict:
                                filedict[c, z, t].append(directory_entry)
                            else:
                                filedict[c, z, t] = [directory_entry]
        if len(filedict) == 0:
            raise FileNotFoundError(f'Series {self.series} not found in {self.path}.')
        self.filedict = filedict  # noqa
    def close(self) -> None:
        self.reader.close()
    def get_ome(self) -> OME:
        return OmeParse.get_ome(self.reader, self.filedict)
    def __frame__(self, c: int = 0, z: int = 0, t: int = 0) -> np.ndarray:
        # start from a zeroed plane and paste every tile at its (y, x) offset
        f = np.zeros(self.base_shape['yx'], self.dtype)
        if (c, z, t) in self.filedict:
            directory_entries = self.filedict[c, z, t]
            # top-left corner of the bounding box of all tiles in this plane
            x_min = min([f.start[f.axes.index('X')] for f in directory_entries])
            y_min = min([f.start[f.axes.index('Y')] for f in directory_entries])
            xy_min = {'X': x_min, 'Y': y_min}
            for directory_entry in directory_entries:
                subblock = directory_entry.data_segment()
                tile = subblock.data(resize=True, order=0)
                axes_min = [xy_min.get(ax, 0) for ax in directory_entry.axes]
                # tile position relative to the plane's bounding box
                index = [slice(i - j - m, i - j + k)
                         for i, j, k, m in zip(directory_entry.start, self.reader.start, tile.shape, axes_min)]
                index = tuple(index[self.reader.axes.index(i)] for i in 'YX')
                f[index] = tile.squeeze()
        return f
    @staticmethod
    def get_index(directory_entry: czifile.DirectoryEntryDV, start: tuple[int]) -> list[tuple[int, int]]:
        # per axis: (begin, end) of the entry relative to the file's global start
        return [(i - j, i - j + k) for i, j, k in zip(directory_entry.start, start, directory_entry.shape)]
class OmeParse:
    """ Builds an ome_types.OME metadata structure from the XML metadata in a czi file.

        Zeiss used (at least) metadata versions 1.0, 1.1 and 1.2 with different
        layouts; most getters therefore branch on self.version. Use the
        get_ome classmethod rather than instantiating directly.
    """
    # image sizes, filled in by get_pixels
    size_x: int
    size_y: int
    size_c: int
    size_z: int
    size_t: int
    nm = model.UnitsLength.NANOMETER
    um = model.UnitsLength.MICROMETER
    @classmethod
    def get_ome(cls, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> OME:
        """ Parse the metadata of reader into an OME structure. """
        new = cls(reader, filedict)
        new.parse()
        return new.ome
    def __init__(self, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> None:
        self.reader = reader
        self.filedict = filedict
        xml = reader.metadata()
        self.attachments = {i.attachment_entry.name: i.attachment_entry.data_segment()
                            for i in reader.attachments()}
        self.tree = etree.fromstring(xml)
        self.metadata = self.tree.find('Metadata')
        # the version element moved between metadata layouts
        version = self.metadata.find('Version')
        if version is not None:
            self.version = version.text
        else:
            self.version = self.metadata.find('Experiment').attrib['Version']
        self.ome = OME()
        self.information = self.metadata.find('Information')
        self.display_setting = self.metadata.find('DisplaySetting')
        self.experiment = self.metadata.find('Experiment')
        self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock')
        self.instrument = self.information.find('Instrument')
        self.image = self.information.find('Image')
        # NOTE(review): experiment/acquisition_block are re-assigned below, version 1.0
        # keeps them, newer versions set them to None
        if self.version == '1.0':
            self.experiment = self.metadata.find('Experiment')
            self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock')
            self.multi_track_setup = self.acquisition_block.find('MultiTrackSetup')
        else:
            self.experiment = None
            self.acquisition_block = None
            self.multi_track_setup = None
    def parse(self) -> None:
        """ Fill self.ome by running all getters in dependency order. """
        self.get_experimenters()
        self.get_instruments()
        self.get_detectors()
        self.get_objectives()
        self.get_tubelenses()
        self.get_light_sources()
        self.get_filters()
        self.get_pixels()
        self.get_channels()
        self.get_planes()
        self.get_annotations()
    @staticmethod
    def text(item: Optional[Element], default: str = "") -> str:
        # safe .text access for possibly missing xml elements
        return default if item is None else item.text
    @staticmethod
    def def_list(item: Any) -> list[Any]:
        # iterate over a possibly missing xml element
        return [] if item is None else item
    @staticmethod
    def try_default(fun: Callable[[Any, ...], Any] | type, default: Any = None, *args: Any, **kwargs: Any) -> Any:
        # call fun, returning default on any exception
        try:
            return fun(*args, **kwargs)
        except Exception:  # noqa
            return default
    def get_experimenters(self) -> None:
        """ Experimenter name; stored under User (1.0) or Document (1.1/1.2). """
        if self.version == '1.0':
            self.ome.experimenters = [
                model.Experimenter(id='Experimenter:0',
                                   user_name=self.information.find('User').find('DisplayName').text)]
        elif self.version in ('1.1', '1.2'):
            self.ome.experimenters = [
                model.Experimenter(id='Experimenter:0',
                                   user_name=self.information.find('Document').find('UserName').text)]
    def get_instruments(self) -> None:
        """ One Instrument entry per microscope. """
        if self.version == '1.0':
            self.ome.instruments.append(model.Instrument(id=self.instrument.attrib['Id']))
        elif self.version in ('1.1', '1.2'):
            for _ in self.instrument.find('Microscopes'):
                self.ome.instruments.append(model.Instrument(id='Instrument:0'))
    def get_detectors(self) -> None:
        """ Detectors; 1.0 stores gain/zoom, 1.1/1.2 only type and model. """
        if self.version == '1.0':
            for detector in self.instrument.find('Detectors'):
                try:
                    detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "")
                except ValueError:
                    detector_type = model.Detector_Type.OTHER
                self.ome.instruments[0].detectors.append(
                    model.Detector(
                        id=detector.attrib['Id'], model=self.text(detector.find('Manufacturer').find('Model')),
                        amplification_gain=float(self.text(detector.find('AmplificationGain'))),
                        gain=float(self.text(detector.find('Gain'))), zoom=float(self.text(detector.find('Zoom'))),
                        type=detector_type
                    ))
        elif self.version in ('1.1', '1.2'):
            for detector in self.instrument.find('Detectors'):
                try:
                    detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "")
                except ValueError:
                    detector_type = model.Detector_Type.OTHER
                self.ome.instruments[0].detectors.append(
                    model.Detector(
                        # ome ids may not contain spaces
                        id=detector.attrib['Id'].replace(' ', ''),
                        model=self.text(detector.find('Manufacturer').find('Model')),
                        type=detector_type
                    ))
    def get_objectives(self) -> None:
        """ Objectives; same layout in all known versions. """
        for objective in self.instrument.find('Objectives'):
            self.ome.instruments[0].objectives.append(
                model.Objective(
                    id=objective.attrib['Id'],
                    model=self.text(objective.find('Manufacturer').find('Model')),
                    immersion=self.text(objective.find('Immersion')),  # type: ignore
                    lens_na=float(self.text(objective.find('LensNA'))),
                    nominal_magnification=float(self.text(objective.find('NominalMagnification')))))
    def get_tubelenses(self) -> None:
        """ Tube lenses, stored as extra objectives; magnification parsed from the name. """
        if self.version == '1.0':
            for idx, tube_lens in enumerate({self.text(track_setup.find('TubeLensPosition'))
                                             for track_setup in self.multi_track_setup}):
                try:
                    nominal_magnification = float(re.findall(r'\d+[,.]\d*', tube_lens)[0].replace(',', '.'))
                except Exception:  # noqa
                    nominal_magnification = 1.0
                self.ome.instruments[0].objectives.append(
                    model.Objective(id=f'Objective:Tubelens:{idx}', model=tube_lens,
                                    nominal_magnification=nominal_magnification))
        elif self.version in ('1.1', '1.2'):
            for tubelens in self.def_list(self.instrument.find('TubeLenses')):
                try:
                    nominal_magnification = float(re.findall(r'\d+(?:[,.]\d*)?',
                                                             tubelens.attrib['Name'])[0].replace(',', '.'))
                except Exception:  # noqa
                    nominal_magnification = 1.0
                self.ome.instruments[0].objectives.append(
                    model.Objective(
                        id=f"Objective:{tubelens.attrib['Id']}",
                        model=tubelens.attrib['Name'],
                        nominal_magnification=nominal_magnification))
    def get_light_sources(self) -> None:
        """ Lasers; other light source types are silently skipped. """
        if self.version == '1.0':
            for light_source in self.def_list(self.instrument.find('LightSources')):
                try:
                    if light_source.find('LightSourceType').find('Laser') is not None:
                        self.ome.instruments[0].lasers.append(
                            model.Laser(
                                id=light_source.attrib['Id'],
                                model=self.text(light_source.find('Manufacturer').find('Model')),
                                power=float(self.text(light_source.find('Power'))),
                                wavelength=float(
                                    self.text(light_source.find('LightSourceType').find('Laser').find('Wavelength')))))
                except AttributeError:
                    pass
        elif self.version in ('1.1', '1.2'):
            for light_source in self.def_list(self.instrument.find('LightSources')):
                try:
                    if light_source.find('LightSourceType').find('Laser') is not None:
                        self.ome.instruments[0].lasers.append(
                            model.Laser(
                                id=f"LightSource:{light_source.attrib['Id']}",
                                power=float(self.text(light_source.find('Power'))),
                                wavelength=float(light_source.attrib['Id'][-3:])))  # TODO: follow Id reference
                except (AttributeError, ValueError):
                    pass
    def get_filters(self) -> None:
        """ Filter sets; only present in version 1.0 metadata. """
        if self.version == '1.0':
            for idx, filter_ in enumerate({self.text(beam_splitter.find('Filter'))
                                           for track_setup in self.multi_track_setup
                                           for beam_splitter in track_setup.find('BeamSplitters')}):
                self.ome.instruments[0].filter_sets.append(
                    model.FilterSet(id=f'FilterSet:{idx}', model=filter_)
                )
    def get_pixels(self) -> None:
        """ The Image/Pixels entry: sizes from the subblock bounding box, physical sizes from Scaling. """
        # bounding box of all tiles of the first plane defines the image size
        x_min = min([f.start[f.axes.index('X')] for f in self.filedict[0, 0, 0]])
        y_min = min([f.start[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
        x_max = max([f.start[f.axes.index('X')] + f.shape[f.axes.index('X')] for f in self.filedict[0, 0, 0]])
        y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
        self.size_x = x_max - x_min
        self.size_y = y_max - y_min
        self.size_c, self.size_z, self.size_t = (self.reader.shape[self.reader.axes.index(directory_entry)]
                                                 for directory_entry in 'CZT')
        image = self.information.find('Image')
        pixel_type = self.text(image.find('PixelType'), 'Gray16')
        if pixel_type.startswith('Gray'):
            # czi 'Gray16' etc. -> ome 'uint16'
            pixel_type = 'uint' + pixel_type[4:]
        objective_settings = image.find('ObjectiveSettings')
        self.ome.images.append(
            model.Image(
                id='Image:0',
                name=f"{self.text(self.information.find('Document').find('Name'))} #1",
                pixels=model.Pixels(
                    id='Pixels:0', size_x=self.size_x, size_y=self.size_y,
                    size_c=self.size_c, size_z=self.size_z, size_t=self.size_t,
                    dimension_order='XYCZT', type=pixel_type,  # type: ignore
                    significant_bits=int(self.text(image.find('ComponentBitCount'))),
                    big_endian=False, interleaved=False, metadata_only=True),  # type: ignore
                experimenter_ref=model.ExperimenterRef(id='Experimenter:0'),
                instrument_ref=model.InstrumentRef(id='Instrument:0'),
                objective_settings=model.ObjectiveSettings(
                    id=objective_settings.find('ObjectiveRef').attrib['Id'],
                    medium=self.text(objective_settings.find('Medium')),  # type: ignore
                    refractive_index=float(self.text(objective_settings.find('RefractiveIndex')))),
                stage_label=model.StageLabel(
                    name=f'Scene position #0',
                    x=self.positions[0], x_unit=self.um,
                    y=self.positions[1], y_unit=self.um,
                    z=self.positions[2], z_unit=self.um)))
        # Scaling distances are in m; ome physical sizes in um
        for distance in self.metadata.find('Scaling').find('Items'):
            if distance.attrib['Id'] == 'X':
                self.ome.images[0].pixels.physical_size_x = float(self.text(distance.find('Value'))) * 1e6
            elif distance.attrib['Id'] == 'Y':
                self.ome.images[0].pixels.physical_size_y = float(self.text(distance.find('Value'))) * 1e6
            elif self.size_z > 1 and distance.attrib['Id'] == 'Z':
                self.ome.images[0].pixels.physical_size_z = float(self.text(distance.find('Value'))) * 1e6
    @cached_property
    def positions(self) -> tuple[float, float, Optional[float]]:
        """ Stage position (x, y, z) of the first scene; z unknown in 1.1/1.2. """
        if self.version == '1.0':
            scenes = self.image.find('Dimensions').find('S').find('Scenes')
            positions = scenes[0].find('Positions')[0]
            return float(positions.attrib['X']), float(positions.attrib['Y']), float(positions.attrib['Z'])
        elif self.version in ('1.1', '1.2'):
            try:  # TODO
                scenes = self.image.find('Dimensions').find('S').find('Scenes')
                center_position = [float(pos) for pos in self.text(scenes[0].find('CenterPosition')).split(',')]
            except AttributeError:
                center_position = [0, 0]
            return center_position[0], center_position[1], None
    @cached_property
    def channels_im(self) -> dict:
        # channel xml elements from Information/Image, by Id
        return {channel.attrib['Id']: channel for channel in self.image.find('Dimensions').find('Channels')}
    @cached_property
    def channels_ds(self) -> dict:
        # channel xml elements from DisplaySetting, by Id
        return {channel.attrib['Id']: channel for channel in self.display_setting.find('Channels')}
    @cached_property
    def channels_ts(self) -> dict:
        # track setup per detector Id (version 1.0 only)
        return {detector.attrib['Id']: track_setup
                for track_setup in
                self.experiment.find('ExperimentBlocks').find('AcquisitionBlock').find('MultiTrackSetup')
                for detector in track_setup.find('Detectors')}
    def get_channels(self) -> None:
        """ Per-channel acquisition settings (detector, light source, wavelengths, binning). """
        if self.version == '1.0':
            for idx, (key, channel) in enumerate(self.channels_im.items()):
                detector_settings = channel.find('DetectorSettings')
                laser_scan_info = channel.find('LaserScanInfo')
                detector = detector_settings.find('Detector')
                try:
                    binning = model.Binning(self.text(detector_settings.find('Binning')))
                except ValueError:
                    binning = model.Binning.OTHER
                filterset = self.text(self.channels_ts[key].find('BeamSplitters')[0].find('Filter'))
                filterset_idx = [filterset.model for filterset in self.ome.instruments[0].filter_sets].index(filterset)
                light_sources_settings = channel.find('LightSourcesSettings')
                # no space in ome for multiple lightsources simultaneously
                if len(light_sources_settings) > idx:
                    light_source_settings = light_sources_settings[idx]
                else:
                    light_source_settings = light_sources_settings[0]
                light_source_settings = model.LightSourceSettings(
                    id=light_source_settings.find('LightSource').attrib['Id'],
                    attenuation=float(self.text(light_source_settings.find('Attenuation'))),
                    wavelength=float(self.text(light_source_settings.find('Wavelength'))),
                    wavelength_unit=self.nm)
                self.ome.images[0].pixels.channels.append(
                    model.Channel(
                        id=f'Channel:{idx}',
                        name=channel.attrib['Name'],
                        acquisition_mode=self.text(channel.find('AcquisitionMode')),  # type: ignore
                        color=model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')),
                        detector_settings=model.DetectorSettings(id=detector.attrib['Id'], binning=binning),
                        # emission_wavelength=text(channel.find('EmissionWavelength')),  # TODO: fix
                        excitation_wavelength=light_source_settings.wavelength,
                        filter_set_ref=model.FilterSetRef(id=self.ome.instruments[0].filter_sets[filterset_idx].id),
                        illumination_type=self.text(channel.find('IlluminationType')),  # type: ignore
                        light_source_settings=light_source_settings,
                        samples_per_pixel=int(self.text(laser_scan_info.find('Averaging')))))
        elif self.version in ('1.1', '1.2'):
            for idx, (key, channel) in enumerate(self.channels_im.items()):
                detector_settings = channel.find('DetectorSettings')
                laser_scan_info = channel.find('LaserScanInfo')
                detector = detector_settings.find('Detector')
                try:
                    color = model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white'))
                except Exception:  # noqa
                    color = None
                try:
                    # '0' means: no emission wavelength recorded
                    if (i := self.text(channel.find('EmissionWavelength'))) != '0':
                        emission_wavelength = float(i)
                    else:
                        emission_wavelength = None
                except Exception:  # noqa
                    emission_wavelength = None
                if laser_scan_info is not None:
                    samples_per_pixel = int(self.text(laser_scan_info.find('Averaging'), '1'))
                else:
                    samples_per_pixel = 1
                try:
                    binning = model.Binning(self.text(detector_settings.find('Binning')))
                except ValueError:
                    binning = model.Binning.OTHER
                light_sources_settings = channel.find('LightSourcesSettings')
                # no space in ome for multiple lightsources simultaneously
                if light_sources_settings is not None:
                    light_source_settings = light_sources_settings[0]
                    light_source_settings = model.LightSourceSettings(
                        id='LightSource:' + '_'.join([light_source_settings.find('LightSource').attrib['Id']
                                                      for light_source_settings in light_sources_settings]),
                        attenuation=self.try_default(float, None, self.text(light_source_settings.find('Attenuation'))),
                        wavelength=self.try_default(float, None, self.text(light_source_settings.find('Wavelength'))),
                        wavelength_unit=self.nm)
                else:
                    light_source_settings = None
                self.ome.images[0].pixels.channels.append(
                    model.Channel(
                        id=f'Channel:{idx}',
                        name=channel.attrib['Name'],
                        acquisition_mode=self.text(channel.find('AcquisitionMode')).replace(  # type: ignore
                            'SingleMoleculeLocalisation', 'SingleMoleculeImaging'),
                        color=color,
                        detector_settings=model.DetectorSettings(
                            id=detector.attrib['Id'].replace(' ', ""),
                            binning=binning),
                        emission_wavelength=emission_wavelength,
                        excitation_wavelength=self.try_default(float, None,
                                                               self.text(channel.find('ExcitationWavelength'))),
                        # filter_set_ref=model.FilterSetRef(id=ome.instruments[0].filter_sets[filterset_idx].id),
                        illumination_type=self.text(channel.find('IlluminationType')),  # type: ignore
                        light_source_settings=light_source_settings,
                        samples_per_pixel=samples_per_pixel))
    def get_planes(self) -> None:
        """ One Plane per (t, z, c), timestamps from the TimeStamps attachment. """
        try:
            exposure_times = [float(self.text(channel.find('LaserScanInfo').find('FrameTime')))
                              for channel in self.channels_im.values()]
        except Exception:  # noqa
            exposure_times = [None] * len(self.channels_im)
        delta_ts = self.attachments['TimeStamps'].data()
        dt = np.diff(delta_ts)
        # recorded timestamps are sometimes unreliable; rebuild from the median interval
        if len(dt) and np.std(dt) / np.mean(dt) > 0.02:
            dt = np.median(dt[dt > 0])
            delta_ts = dt * np.arange(len(delta_ts))
            warnings.warn(f'delta_t is inconsistent, using median value: {dt}')
        for t, z, c in product(range(self.size_t), range(self.size_z), range(self.size_c)):
            self.ome.images[0].pixels.planes.append(
                model.Plane(the_c=c, the_z=z, the_t=t, delta_t=delta_ts[t],
                            exposure_time=exposure_times[c],
                            position_x=self.positions[0], position_x_unit=self.um,
                            position_y=self.positions[1], position_y_unit=self.um,
                            position_z=self.positions[2], position_z_unit=self.um))
    def get_annotations(self) -> None:
        """ Rectangle layers become ome ROIs attached to the image. """
        idx = 0
        for layer in [] if (ml := self.metadata.find('Layers')) is None else ml:
            rectangle = layer.find('Elements').find('Rectangle')
            if rectangle is not None:
                geometry = rectangle.find('Geometry')
                roi = model.ROI(id=f'ROI:{idx}', description=self.text(layer.find('Usage')))
                roi.union.append(
                    model.Rectangle(
                        id='Shape:0:0',
                        height=float(self.text(geometry.find('Height'))),
                        width=float(self.text(geometry.find('Width'))),
                        x=float(self.text(geometry.find('Left'))),
                        y=float(self.text(geometry.find('Top')))))
                self.ome.rois.append(roi)
                self.ome.images[0].roi_refs.append(model.ROIRef(id=f'ROI:{idx}'))
                idx += 1

View File

@@ -1,59 +0,0 @@
from abc import ABC
from itertools import product
from pathlib import Path
from struct import unpack
from warnings import warn
import numpy as np
from ome_types import model
from tifffile import TiffFile
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """ Can read some tif files written with Fiji which are broken because Fiji didn't finish writing.

        Relies on ImageJ writing uncompressed frames contiguously: the t-th frame
        is read directly from the file at a fixed offset + t * frame size.
    """
    priority = 90
    do_not_pickle = 'reader'
    @staticmethod
    def _can_open(path):
        if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'):
            with TiffFile(path) as tif:
                return tif.is_imagej and not tif.is_bigtiff
        else:
            return False
    def __frame__(self, c, z, t):  # Override this, return the frame at c, z, t
        # frames are stored back to back; seek straight to frame t and unpack it
        self.reader.filehandle.seek(self.offset + t * self.count)
        return np.reshape(unpack(self.fmt, self.reader.filehandle.read(self.count)), self.base_shape['yx'])
    def open(self):
        warn(f'File {self.path.name} is probably damaged, opening with fijiread.')
        self.reader = TiffFile(self.path)
        assert self.reader.pages[0].compression == 1, 'Can only read uncompressed tiff files.'
        assert self.reader.pages[0].samplesperpixel == 1, 'Can only read 1 sample per pixel.'
        # layout of frame 0, reused for all frames
        self.offset = self.reader.pages[0].dataoffsets[0]  # noqa
        self.count = self.reader.pages[0].databytecounts[0]  # noqa
        self.bytes_per_sample = self.reader.pages[0].bitspersample // 8  # noqa
        # struct format: one code per sample; 'BHILQ'[n - 1] maps 1/2/4 bytes to B/H/L
        # NOTE(review): 3- or 8-byte samples would map to the wrong code or raise
        # IndexError — assumed not to occur for ImageJ tifs, confirm
        self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1]  # noqa
    def close(self):
        self.reader.close()
    def get_ome(self):
        size_y, size_x = self.reader.pages[0].shape
        size_c, size_z = 1, 1
        # frame count from the file size: everything after the first data offset is frames
        size_t = int(np.floor((self.reader.filehandle.size - self.reader.pages[0].dataoffsets[0]) / self.count))
        pixel_type = model.PixelType(self.reader.pages[0].dtype.name)
        ome = model.OME()
        ome.instruments.append(model.Instrument())
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0))
        return ome

View File

@@ -1,80 +0,0 @@
import re
from abc import ABC
from pathlib import Path
from typing import Optional
import tifffile
from ome_types import model
from ome_types.units import _quantity_property # noqa
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """ Reads MetaMorph/metaseries tif sequences: a folder with one file per time
        point named s{series}_t{time}.tif, z slices stored as pages in each file.
    """
    priority = 20
    do_not_pickle = 'last_tif'
    @staticmethod
    def _can_open(path):
        # a folder, or something inside a 'pos...' position folder
        return isinstance(path, Path) and (path.is_dir() or
                                           (path.parent.is_dir() and path.name.lower().startswith('pos')))
    @staticmethod
    def get_positions(path: str | Path) -> Optional[list[int]]:
        """ Return the sorted series (stage position) numbers found in the folder. """
        # \d+ (not \d) so that series numbers >= 10 are found too; open() below
        # interpolates arbitrary series numbers, so they can legitimately occur
        pat = re.compile(r's(\d+)_t\d+\.(tif|TIF)$')
        return sorted({int(m.group(1)) for file in Path(path).iterdir() if (m := pat.search(file.name))})
    def get_ome(self):
        """ Build minimal ome metadata from the first file of the sequence. """
        ome = model.OME()
        tif = self.get_tif(0)
        metadata = tif.metaseries_metadata
        size_z = len(tif.pages)  # one z slice per page
        page = tif.pages[0]
        shape = {axis.lower(): size for axis, size in zip(page.axes, page.shape)}
        size_x, size_y = shape['x'], shape['y']
        ome.instruments.append(model.Instrument())
        size_c = 1
        size_t = max(self.filedict.keys()) + 1  # one file per time point
        pixel_type = f"uint{metadata['PlaneInfo']['bits-per-pixel']}"
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t,
                    size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        return ome
    def open(self):
        # collect the files belonging to this series and index them by 0-based time point
        pat = re.compile(rf's{self.series}_t\d+\.(tif|TIF)$')
        filelist = sorted([file for file in self.path.iterdir() if pat.search(file.name)])
        pattern = re.compile(r't(\d+)$')
        self.filedict = {int(pattern.search(file.stem).group(1)) - 1: file for file in filelist}
        if len(self.filedict) == 0:
            raise FileNotFoundError
        self.last_tif = 0, tifffile.TiffFile(self.filedict[0])
    def close(self) -> None:
        self.last_tif[1].close()
    def get_tif(self, t: Optional[int] = None):
        """ Return the TiffFile for time point t, reusing the most recently opened file. """
        last_t, tif = self.last_tif
        if (t is None or t == last_t) and not tif.filehandle.closed:
            return tif
        else:
            tif.close()
            tif = tifffile.TiffFile(self.filedict[t])
            self.last_tif = t, tif
            return tif
    def __frame__(self, c=0, z=0, t=0):
        tif = self.get_tif(t)
        page = tif.pages[z]
        # pages may store either y- or x-major data; always return yx
        if page.axes.upper() == 'YX':
            return page.asarray()
        elif page.axes.upper() == 'XY':
            return page.asarray().T
        else:
            raise NotImplementedError(f'reading axes {page.axes} is not implemented')

View File

@@ -1,53 +0,0 @@
from abc import ABC
from itertools import product
import numpy as np
from ome_types import model
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """ Presents an in-memory numpy array (1 to 5 dimensional) as an image. """
    priority = 20
    @staticmethod
    def _can_open(path):
        return isinstance(path, np.ndarray) and 1 <= path.ndim <= 5
    def get_ome(self):
        """ Construct minimal ome metadata describing the wrapped array. """
        # pad missing trailing axes of the (x, y, c, z, t) shape with size 1
        sizes = dict(zip('xyczt', self.array.shape))
        size_x, size_y, size_c, size_z, size_t = (sizes.get(ax, 1) for ax in 'xyczt')
        try:
            pixel_type = model.PixelType(self.array.dtype.name)
        except ValueError:
            # dtype has no exact ome equivalent: fall back to a generic type
            if self.array.dtype.name.startswith('int'):
                pixel_type = model.PixelType('int32')
            else:
                pixel_type = model.PixelType('float')
        ome = model.OME()
        ome.instruments.append(model.Instrument())
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0))
        return ome
    def open(self):
        if isinstance(self.path, np.ndarray):
            # copy the input and expand it to the full 5d (x, y, c, z, t) shape
            array = np.array(self.path)
            while array.ndim < 5:
                array = np.expand_dims(array, -1)
            self.array = array  # noqa
            self.path = 'numpy array'
    def __frame__(self, c, z, t):
        frame = self.array[:, :, c, z, t]
        # the array is stored x-major; transpose when the requested axes put y first
        return frame.T if self.axes.find('y') > self.axes.find('x') else frame

View File

@@ -1,146 +0,0 @@
import re
from abc import ABC
from datetime import datetime
from itertools import product
from pathlib import Path
import tifffile
import yaml
from ome_types import model
from ome_types.units import _quantity_property # noqa
from .. import AbstractReader
def lazy_property(function, field, *arg_fields):
    """Build a property that computes ``function`` once and caches the result.

    The value is stored under ``field`` in the instance ``__dict__``; the
    arguments of ``function`` are looked up on the instance via ``arg_fields``.
    """
    def lazy(self):
        cached = self.__dict__.get(field)
        if cached is None:
            args = [getattr(self, name) for name in arg_fields]
            cached = function(*args)
            self.__dict__[field] = cached
            try:
                # keep pydantic's bookkeeping of explicitly-set fields in sync
                self.model_fields_set.add(field)
            except Exception:  # noqa
                pass
        return cached
    return property(lazy)
class Plane(model.Plane):
    """ Lazily retrieve delta_t from metadata """

    def __init__(self, t0, file, **kwargs):  # noqa
        """t0: acquisition start time; file: tif holding this plane's metadata."""
        super().__init__(**kwargs)
        # setting fields here because they would be removed by ome_types/pydantic after class definition
        setattr(self.__class__, 'delta_t', lazy_property(self.get_delta_t, 'delta_t', 't0', 'file'))
        setattr(self.__class__, 'delta_t_quantity', _quantity_property('delta_t'))
        self.__dict__['t0'] = t0  # noqa
        self.__dict__['file'] = file  # noqa

    @staticmethod
    def get_delta_t(t0, file):
        """Seconds between t0 and the acquisition time stored in the tif file.

        Uses total_seconds() instead of .seconds: the latter returns only the
        seconds component of the timedelta (0..86399), silently dropping whole
        days and misbehaving for negative intervals.
        """
        with tifffile.TiffFile(file) as tif:
            # tag 50839 holds Micro-Manager's per-page metadata
            info = yaml.safe_load(tif.pages[0].tags[50839].value['Info'])
            return float((datetime.strptime(info['Time'], '%Y-%m-%d %H:%M:%S %z') - t0).total_seconds())
class Reader(AbstractReader, ABC):
    """Reader for Micro-Manager image sequences stored as one tif file per plane."""
    priority = 10

    @staticmethod
    def _can_open(path):
        # a position directory ('Pos0', '5-Pos_001_005', ...) or a directory
        # containing such position directories
        pat = re.compile(r'(?:\d+-)?Pos.*', re.IGNORECASE)
        return (isinstance(path, Path) and path.is_dir() and
                (pat.match(path.name) or any(file.is_dir() and pat.match(file.stem) for file in path.iterdir())))

    def get_ome(self):
        """Build OME metadata from the Micro-Manager tags of the first tif file."""
        ome = model.OME()
        with tifffile.TiffFile(self.filedict[0, 0, 0]) as tif:
            # tag 50839 holds Micro-Manager's per-page metadata as yaml/json strings
            metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()}
        ome.experimenters.append(
            model.Experimenter(id='Experimenter:0', user_name=metadata['Info']['Summary']['UserName']))
        # magnification and NA are parsed from the objective label, e.g. '63x/1.4 Oil'
        objective_str = metadata['Info']['ZeissObjectiveTurret-Label']
        ome.instruments.append(model.Instrument())
        ome.instruments[0].objectives.append(
            model.Objective(
                id='Objective:0', manufacturer='Zeiss', model=objective_str,
                nominal_magnification=float(re.findall(r'(\d+)x', objective_str)[0]),
                lens_na=float(re.findall(r'/(\d\.\d+)', objective_str)[0]),
                immersion=model.Objective_Immersion.OIL if 'oil' in objective_str.lower() else None))
        tubelens_str = metadata['Info']['ZeissOptovar-Label']
        ome.instruments[0].objectives.append(
            model.Objective(
                id='Objective:Tubelens:0', manufacturer='Zeiss', model=tubelens_str,
                nominal_magnification=float(re.findall(r'\d?\d*[,.]?\d+(?=x$)', tubelens_str)[0].replace(',', '.'))))
        ome.instruments[0].detectors.append(
            model.Detector(
                id='Detector:0', amplification_gain=100))
        ome.instruments[0].filter_sets.append(
            model.FilterSet(id='FilterSet:0', model=metadata['Info']['ZeissReflectorTurret-Label']))
        pxsize = metadata['Info']['PixelSizeUm']
        # fall back to camera pixel size / magnification when uncalibrated;
        # NOTE(review): pxsize_cam is None for non-Hamamatsu cameras, which
        # would raise below if PixelSizeUm is 0 -- confirm intended
        pxsize_cam = 6.5 if 'Hamamatsu' in metadata['Info']['Core-Camera'] else None
        if pxsize == 0:
            pxsize = pxsize_cam / ome.instruments[0].objectives[0].nominal_magnification
        pixel_type = metadata['Info']['PixelType'].lower()
        if pixel_type.startswith('gray'):
            # e.g. 'gray16' -> 'uint16'
            pixel_type = 'uint' + pixel_type[4:]
        else:
            pixel_type = 'uint16'  # assume
        size_c, size_z, size_t = (max(i) + 1 for i in zip(*self.filedict.keys()))
        t0 = datetime.strptime(metadata['Info']['Time'], '%Y-%m-%d %H:%M:%S %z')
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t,
                    size_x=metadata['Info']['Width'], size_y=metadata['Info']['Height'],
                    dimension_order='XYCZT',  # type: ignore
                    type=pixel_type, physical_size_x=pxsize, physical_size_y=pxsize,
                    physical_size_z=metadata['Info']['Summary']['z-step_um']),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            # planes fetch their delta_t lazily from the per-file metadata
            ome.images[0].pixels.planes.append(
                Plane(t0, self.filedict[c, z, t],
                      the_c=c, the_z=z, the_t=t, exposure_time=metadata['Info']['Exposure-ms'] / 1000))
        # compare channel names from metadata with filenames
        pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE)
        for c in range(size_c):
            ome.images[0].pixels.channels.append(
                model.Channel(
                    id=f'Channel:{c}', name=pattern_c.findall(self.filedict[c, 0, 0].stem)[0],
                    detector_settings=model.DetectorSettings(
                        id='Detector:0', binning=metadata['Info']['Hamamatsu_sCMOS-Binning']),
                    filter_set_ref=model.FilterSetRef(id='FilterSet:0')))
        return ome

    def open(self):
        """Locate the position directory and index all plane files by (c, z, t)."""
        # /some_path/Pos4: path = /some_path, series = 4
        # /some_path/5-Pos_001_005: path = /some_path/5-Pos_001_005, series = 0
        if re.match(r'(?:\d+-)?Pos.*', self.path.name, re.IGNORECASE) is None:
            pat = re.compile(rf'^(?:\d+-)?Pos{self.series}$', re.IGNORECASE)
            files = sorted(file for file in self.path.iterdir() if pat.match(file.name))
            if len(files):
                path = files[0]
            else:
                raise FileNotFoundError(self.path / pat.pattern)
        else:
            path = self.path
        pat = re.compile(r'^img_\d{3,}.*\d{3,}.*\.tif$', re.IGNORECASE)
        filelist = sorted([file for file in path.iterdir() if pat.search(file.name)])
        # NOTE(review): filelist entries are absolute paths, so self.path / filelist[0]
        # resolves to filelist[0] itself even when path != self.path -- confirm intended
        with tifffile.TiffFile(self.path / filelist[0]) as tif:
            metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()}
        # compare channel names from metadata with filenames
        cnamelist = metadata['Info']['Summary']['ChNames']
        cnamelist = [c for c in cnamelist if any([c in f.name for f in filelist])]
        # file name layout appears to be img_{t}_{channel}_{z}.tif (3+ digit numbers)
        pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE)
        pattern_z = re.compile(r'(\d{3,})$')
        pattern_t = re.compile(r'img_(\d{3,})', re.IGNORECASE)
        self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]),  # noqa
                          int(pattern_z.findall(file.stem)[0]),
                          int(pattern_t.findall(file.stem)[0])): file for file in filelist}

    def __frame__(self, c=0, z=0, t=0):
        """Read the single-plane tif file for (c, z, t)."""
        return tifffile.imread(self.path / self.filedict[(c, z, t)])

View File

@@ -1,85 +0,0 @@
from abc import ABC
from functools import cached_property
from itertools import product
from pathlib import Path
import numpy as np
import tifffile
import yaml
from ome_types import model
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """Reader for tif files written by ImageJ (hyperstacks)."""
    priority = 0
    # the open tifffile handle cannot be pickled
    do_not_pickle = 'reader'

    @staticmethod
    def _can_open(path):
        if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'):
            with tifffile.TiffFile(path) as tif:
                # _nextifd() == 0 means the last page terminates the IFD chain,
                # i.e. the file is complete; NOTE(review): _nextifd is a private
                # tifffile API and may change between versions
                return tif.is_imagej and tif.pages[-1]._nextifd() == 0  # noqa
        else:
            return False

    @cached_property
    def metadata(self):
        # ImageJ metadata values may be yaml/str: parse strings, keep the rest
        return {key: yaml.safe_load(value) if isinstance(value, str) else value
                for key, value in self.reader.imagej_metadata.items()}

    def get_ome(self):
        """Build OME metadata from the first page and the ImageJ metadata dict."""
        page = self.reader.pages[0]
        size_y = page.imagelength
        size_x = page.imagewidth
        if self.p_ndim == 3:
            # 3-dim pages carry the channels as samples within one page
            size_c = page.samplesperpixel
            size_t = self.metadata.get('frames', 1)  # // C
        else:
            size_c = self.metadata.get('channels', 1)
            size_t = self.metadata.get('frames', 1)
        size_z = self.metadata.get('slices', 1)
        # tag 282 = XResolution (rational), tag 296 = ResolutionUnit; a unit of
        # 1 means 'none', in which case the inverse resolution is the px size
        if 282 in page.tags and 296 in page.tags and page.tags[296].value == 1:
            f = page.tags[282].value
            pxsize = f[1] / f[0]
        else:
            pxsize = None
        dtype = page.dtype.name
        if dtype not in ('int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32',
                         'float', 'double', 'complex', 'double-complex', 'bit'):
            dtype = 'float'
        interval_t = self.metadata.get('interval', 0)
        ome = model.OME()
        ome.instruments.append(model.Instrument(id='Instrument:0'))
        ome.instruments[0].objectives.append(model.Objective(id='Objective:0'))
        ome.images.append(
            model.Image(
                id='Image:0',
                pixels=model.Pixels(
                    id='Pixels:0',
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=dtype,  # type: ignore
                    physical_size_x=pxsize, physical_size_y=pxsize),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=interval_t * t))
        return ome

    def open(self):
        self.reader = tifffile.TiffFile(self.path)
        page = self.reader.pages[0]
        self.p_ndim = page.ndim  # noqa
        if self.p_ndim == 3:
            # axis order needed to transpose a page into S, Y, X order
            self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0]  # noqa

    def close(self):
        self.reader.close()

    def __frame__(self, c, z, t):
        # page index math assumes pages are ordered c-fastest, then z, then t
        # (czt) for scalar pages, and zt for 3-dim (samples = c) pages
        if self.p_ndim == 3:
            return np.transpose(self.reader.asarray(z + t * self.base_shape['z']), self.p_transpose)[c]
        else:
            return self.reader.asarray(c + z * self.base_shape['c'] + t * self.base_shape['c'] * self.base_shape['z'])

View File

@@ -1,7 +0,0 @@
#Insight Transform File V1.0
#Transform 0
Transform: CompositeTransform_double_2_2
#Transform 1
Transform: AffineTransform_double_2_2
Parameters: 1 0 0 1 0 0
FixedParameters: 255.5 255.5

View File

@@ -1,458 +0,0 @@
import warnings
from copy import deepcopy
from pathlib import Path
import numpy as np
import yaml
from parfor import Chunks, pmap
from skimage import filters
from tiffwrite import IJTiffFile
from tqdm.auto import tqdm
try:
# best if SimpleElastix is installed: https://simpleelastix.readthedocs.io/GettingStarted.html
import SimpleITK as sitk # noqa
except ImportError:
sitk = None
try:
from pandas import DataFrame, Series, concat
except ImportError:
DataFrame, Series, concat = None, None, None
# yaml.full_load exists on PyYAML >= 5.1; fall back to yaml.load otherwise
yamlload = yaml.full_load if hasattr(yaml, 'full_load') else yaml.load
class Transforms(dict):
    """dict of Transform objects keyed by channel name (str) and/or time point (int).

    Tuple keys chain the transforms of the individual keys; missing keys fall
    back to ``self.default`` (the identity transform).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.default = Transform()

    @classmethod
    def from_file(cls, file, C=True, T=True):
        """Load transforms from a yaml file; C/T select channel/time entries."""
        with open(Path(file).with_suffix('.yml')) as f:
            return cls.from_dict(yamlload(f), C, T)

    @classmethod
    def from_dict(cls, d, C=True, T=True):
        """Rebuild from a dict as produced by asdict()."""
        new = cls()
        for key, value in d.items():
            if isinstance(key, str) and C:
                # undo the key escaping applied in asdict()
                new[key.replace(r'\:', ':').replace('\\\\', '\\')] = Transform.from_dict(value)
            elif T:
                new[key] = Transform.from_dict(value)
        return new

    @classmethod
    def from_shifts(cls, shifts):
        """Make pure-translation transforms from a mapping key -> shift."""
        new = cls()
        for key, shift in shifts.items():
            new[key] = Transform.from_shift(shift)
        return new

    def __mul__(self, other):
        # Transforms * Transforms: compose every key pair (key0 + key1);
        # Transforms * single transform: apply it to every entry; * None: no-op
        new = Transforms()
        if isinstance(other, Transforms):
            for key0, value0 in self.items():
                for key1, value1 in other.items():
                    new[key0 + key1] = value0 * value1
            return new
        elif other is None:
            return self
        else:
            for key in self.keys():
                new[key] = self[key] * other
            return new

    def asdict(self):
        # escape ':' and '\' in string keys so they survive the yaml round-trip
        return {key.replace('\\', '\\\\').replace(':', r'\:') if isinstance(key, str) else key: value.asdict()
                for key, value in self.items()}

    def __getitem__(self, item):
        # tuple key: chain (matrix-multiply) the transforms, last key first
        return np.prod([self[i] for i in item[::-1]]) if isinstance(item, tuple) else super().__getitem__(item)

    def __missing__(self, key):
        # unknown keys map to the identity transform
        return self.default

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __hash__(self):
        return hash(frozenset((*self.__dict__.items(), *self.items())))

    def save(self, file):
        """Save as yaml (extension forced to .yml)."""
        with open(Path(file).with_suffix('.yml'), 'w') as f:
            yaml.safe_dump(self.asdict(), f, default_flow_style=None)

    def copy(self):
        return deepcopy(self)

    def adapt(self, origin, shape, channel_names):
        """Adjust all transforms to a crop (origin/shape) and map the image's
        channel names onto the channel keys present in the transforms."""
        def key_map(a, b):
            # pair each name in a with the first name in b where one contains the other
            def fun(b, key_a):
                for key_b in b:
                    if key_b in key_a or key_a in key_b:
                        return key_a, key_b
            return {n[0]: n[1] for key_a in a if (n := fun(b, key_a))}
        for value in self.values():
            value.adapt(origin, shape)
        self.default.adapt(origin, shape)
        transform_channels = {key for key in self.keys() if isinstance(key, str)}
        if set(channel_names) - transform_channels:
            mapping = key_map(channel_names, transform_channels)
            warnings.warn(f'The image file and the transform do not have the same channels,'
                          f' creating a mapping: {mapping}')
            for key_im, key_t in mapping.items():
                self[key_im] = self[key_t]

    @property
    def inverse(self):
        """New Transforms with every entry inverted."""
        # TODO: check for C@T
        inverse = self.copy()
        for key, value in self.items():
            inverse[key] = value.inverse
        return inverse

    def coords_pandas(self, array, channel_names, columns=None):
        """Transform coordinates in a DataFrame/Series, picking the transform
        from the row's C (channel) and T (time) fields."""
        if isinstance(array, DataFrame):
            return concat([self.coords_pandas(row, channel_names, columns) for _, row in array.iterrows()], axis=1).T
        elif isinstance(array, Series):
            key = []
            if 'C' in array:
                key.append(channel_names[int(array['C'])])
            if 'T' in array:
                key.append(int(array['T']))
            return self[tuple(key)].coords(array, columns)
        else:
            raise TypeError('Not a pandas DataFrame or Series.')

    def with_beads(self, cyllens, bead_files):
        """Add channel transforms computed from bead files; transforms found for
        the same key are averaged, the sem is stored in dparameters."""
        assert len(bead_files) > 0, 'At least one file is needed to calculate the registration.'
        transforms = [self.calculate_channel_transforms(file, cyllens) for file in bead_files]
        for key in {key for transform in transforms for key in transform.keys()}:
            new_transforms = [transform[key] for transform in transforms if key in transform]
            if len(new_transforms) == 1:
                self[key] = new_transforms[0]
            else:
                self[key] = Transform()
                self[key].parameters = np.mean([t.parameters for t in new_transforms], 0)
                self[key].dparameters = (np.std([t.parameters for t in new_transforms], 0) /
                                         np.sqrt(len(new_transforms))).tolist()
        return self

    @staticmethod
    def get_bead_files(path):
        """Return openable files/directories in path whose name starts with
        'beads'; directories are entered via their 'Pos0' subdirectory."""
        from . import Imread
        files = []
        for file in path.iterdir():
            if file.name.lower().startswith('beads'):
                try:
                    with Imread(file):
                        files.append(file)
                except Exception:
                    pass
        files = sorted(files)
        if not files:
            raise Exception('No bead file found!')
        checked_files = []
        for file in files:
            try:
                if file.is_dir():
                    file /= 'Pos0'
                with Imread(file):  # check for errors opening the file
                    checked_files.append(file)
            except (Exception,):
                continue
        if not checked_files:
            raise Exception('No bead file found!')
        return checked_files

    @staticmethod
    def calculate_channel_transforms(bead_file, cyllens):
        """ When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162
        in the horizontal direction """
        from . import Imread
        with Imread(bead_file, axes='zcyx') as im:  # noqa
            max_ims = im.max('z')
            # channels whose max-projection contains signal (not just noise)
            goodch = [c for c, max_im in enumerate(max_ims) if not im.is_noise(max_im)]
            if not goodch:
                goodch = list(range(len(max_ims)))
            untransformed = [c for c in range(im.shape['c']) if cyllens[im.detector[c]].lower() == 'none']
            good_and_untrans = sorted(set(goodch) & set(untransformed))
            if good_and_untrans:
                masterch = good_and_untrans[0]
            else:
                masterch = goodch[0]
            transform = Transform()
            if not good_and_untrans:
                # NOTE(review): docstring mentions a factor 1.162, but the code
                # scales x by 0.86 (~1/1.162) -- confirm which is intended
                matrix = transform.matrix
                matrix[0, 0] = 0.86
                transform.matrix = matrix
            transforms = Transforms()
            for c in tqdm(goodch, desc='Calculating channel transforms'):  # noqa
                if c == masterch:
                    transforms[im.channel_names[c]] = transform
                else:
                    transforms[im.channel_names[c]] = Transform.register(max_ims[masterch], max_ims[c]) * transform
            return transforms

    @staticmethod
    def save_channel_transform_tiff(bead_files, tiffile):
        """Save a tif showing untransformed and transformed max-projections
        side by side, one time point per bead file."""
        from . import Imread
        n_channels = 0
        for file in bead_files:
            with Imread(file) as im:
                n_channels = max(n_channels, im.shape['c'])
        with IJTiffFile(tiffile) as tif:
            for t, file in enumerate(bead_files):
                with Imread(file) as im:
                    with Imread(file).with_transform() as jm:
                        for c in range(im.shape['c']):
                            tif.save(np.hstack((im(c=c, t=0).max('z'), jm(c=c, t=0).max('z'))), c, 0, t)

    def with_drift(self, im):
        """ Calculate shifts relative to the first frame
        divide the sequence into groups,
        compare each frame to the frame in the middle of the group and compare these middle frames to each other
        """
        im = im.transpose('tzycx')
        t_groups = [list(chunk) for chunk in Chunks(range(im.shape['t']), size=round(np.sqrt(im.shape['t'])))]
        # key frame = the middle frame of each group
        t_keys = [int(np.round(np.mean(t_group))) for t_group in t_groups]
        t_pairs = [(int(np.round(np.mean(t_group))), frame) for t_group in t_groups for frame in t_group]
        t_pairs.extend(zip(t_keys, t_keys[1:]))
        # smoothed max-projections of the key frames, reused across comparisons
        fmaxz_keys = {t_key: filters.gaussian(im[t_key].max('z'), 5) for t_key in t_keys}

        def fun(t_key_t, im, fmaxz_keys):
            # translation of frame t relative to its group's key frame
            t_key, t = t_key_t
            if t_key == t:
                return 0, 0
            else:
                fmaxz = filters.gaussian(im[t].max('z'), 5)
                return Transform.register(fmaxz_keys[t_key], fmaxz, 'translation').parameters[4:]
        shifts = np.array(pmap(fun, t_pairs, (im, fmaxz_keys), desc='Calculating image shifts.'))
        # accumulate key-to-key shifts so every frame becomes relative to frame 0
        shift_keys_cum = np.zeros(2)
        for shift_keys, t_group in zip(np.vstack((-shifts[0], shifts[im.shape['t']:])), t_groups):
            shift_keys_cum += shift_keys
            shifts[t_group] += shift_keys_cum
        for i, shift in enumerate(shifts[:im.shape['t']]):
            self[i] = Transform.from_shift(shift)
        return self
class Transform:
    """2D affine transform backed by a SimpleITK/SimpleElastix transform.

    parameters are [m00, m01, m10, m11, tx, ty]; dparameters hold their errors.
    """
    def __init__(self):
        if sitk is None:
            self.transform = None
        else:
            # identity transform template shipped next to this module
            self.transform = sitk.ReadTransform(str(Path(__file__).parent / 'transform.txt'))
        self.dparameters = [0., 0., 0., 0., 0., 0.]
        self.shape = [512., 512.]
        self.origin = [255.5, 255.5]
        # cache for inverse: (asdict() at caching time, cached inverse)
        self._last, self._inverse = None, None

    def __reduce__(self):
        return self.from_dict, (self.asdict(),)

    def __repr__(self):
        return self.asdict().__repr__()

    def __str__(self):
        return self.asdict().__str__()

    @classmethod
    def register(cls, fix, mov, kind=None):
        """ kind: 'affine', 'translation', 'rigid' """
        if sitk is None:
            raise ImportError('SimpleElastix is not installed: '
                              'https://simpleelastix.readthedocs.io/GettingStarted.html')
        new = cls()
        kind = kind or 'affine'
        new.shape = fix.shape
        fix, mov = new.cast_image(fix), new.cast_image(mov)
        # TODO: implement RigidTransform
        tfilter = sitk.ElastixImageFilter()
        tfilter.LogToConsoleOff()
        tfilter.SetFixedImage(fix)
        tfilter.SetMovingImage(mov)
        tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind))
        tfilter.Execute()
        transform = tfilter.GetTransformParameterMap()[0]
        if kind == 'affine':
            new.parameters = [float(t) for t in transform['TransformParameters']]
            new.shape = [float(t) for t in transform['Size']]
            new.origin = [float(t) for t in transform['CenterOfRotationPoint']]
        elif kind == 'translation':
            # translation only: identity matrix plus the fitted shift
            new.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']]
            new.shape = [float(t) for t in transform['Size']]
            new.origin = [(t - 1) / 2 for t in new.shape]
        else:
            raise NotImplementedError(f'{kind} tranforms not implemented (yet)')
        # registration does not provide parameter errors
        new.dparameters = 6 * [np.nan]
        return new

    @classmethod
    def from_shift(cls, shift):
        """Pure translation by (shift[0], shift[1])."""
        return cls.from_array(np.array(((1, 0, shift[0]), (0, 1, shift[1]), (0, 0, 1))))

    @classmethod
    def from_array(cls, array):
        """Build from a 3x3 homogeneous matrix."""
        new = cls()
        new.matrix = array
        return new

    @classmethod
    def from_file(cls, file):
        with open(Path(file).with_suffix('.yml')) as f:
            return cls.from_dict(yamlload(f))

    @classmethod
    def from_dict(cls, d):
        """Rebuild from a dict as produced by asdict()."""
        new = cls()
        new.origin = [float(i) for i in d['CenterOfRotationPoint']]
        new.parameters = [float(i) for i in d['TransformParameters']]
        new.dparameters = [float(i) for i in d['dTransformParameters']] if 'dTransformParameters' in d else 6 * [np.nan]
        new.shape = [float(i) for i in d['Size']]
        return new

    def __mul__(self, other):  # TODO: take care of dmatrix
        """Compose transforms (matrix product); other: Transform or 3x3 array."""
        result = self.copy()
        if isinstance(other, Transform):
            result.matrix = self.matrix @ other.matrix
            # product rule for the propagated errors
            result.dmatrix = self.dmatrix @ other.matrix + self.matrix @ other.dmatrix
        else:
            result.matrix = self.matrix @ other
            result.dmatrix = self.dmatrix @ other
        return result

    def is_unity(self):
        """True if this is the identity transform."""
        return self.parameters == [1, 0, 0, 1, 0, 0]

    def copy(self):
        return deepcopy(self)

    @staticmethod
    def cast_image(im):
        # array-like -> sitk.Image (no-op for images)
        if not isinstance(im, sitk.Image):
            im = sitk.GetImageFromArray(np.asarray(im))
        return im

    @staticmethod
    def cast_array(im):
        # sitk.Image -> numpy array (no-op for arrays)
        if isinstance(im, sitk.Image):
            im = sitk.GetArrayFromImage(im)
        return im

    @property
    def matrix(self):
        # 3x3 homogeneous matrix built from the 6 affine parameters
        return np.array(((*self.parameters[:2], self.parameters[4]),
                         (*self.parameters[2:4], self.parameters[5]),
                         (0, 0, 1)))

    @matrix.setter
    def matrix(self, value):
        value = np.asarray(value)
        self.parameters = [*value[0, :2], *value[1, :2], *value[:2, 2]]

    @property
    def dmatrix(self):
        # same layout as matrix, but holding the parameter errors
        return np.array(((*self.dparameters[:2], self.dparameters[4]),
                         (*self.dparameters[2:4], self.dparameters[5]),
                         (0, 0, 0)))

    @dmatrix.setter
    def dmatrix(self, value):
        value = np.asarray(value)
        self.dparameters = [*value[0, :2], *value[1, :2], *value[:2, 2]]

    @property
    def parameters(self):
        # None when SimpleElastix is not installed
        if self.transform is not None:
            return list(self.transform.GetParameters())

    @parameters.setter
    def parameters(self, value):
        if self.transform is not None:
            value = np.asarray(value)
            self.transform.SetParameters(value.tolist())

    @property
    def origin(self):
        # center of rotation, stored as the sitk fixed parameters
        if self.transform is not None:
            return self.transform.GetFixedParameters()

    @origin.setter
    def origin(self, value):
        if self.transform is not None:
            value = np.asarray(value)
            self.transform.SetFixedParameters(value.tolist())

    @property
    def inverse(self):
        """Inverse transform; cached until the parameters change."""
        if self.is_unity():
            return self
        if self._last is None or self._last != self.asdict():
            self._last = self.asdict()
            self._inverse = Transform.from_dict(self.asdict())
            self._inverse.transform = self._inverse.transform.GetInverse()
            self._inverse._last = self._inverse.asdict()
            self._inverse._inverse = self
        return self._inverse

    def adapt(self, origin, shape):
        """Move the rotation center to account for a crop at origin with shape."""
        self.origin -= np.array(origin) + (self.shape - np.array(shape)[:2]) / 2
        self.shape = shape[:2]

    def asdict(self):
        # nan errors are stored as 1e99 because yaml cannot represent nan portably
        return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters,
                'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()}

    def frame(self, im, default=0):
        """Apply the transform to a 2D frame; 'default' fills pixels mapped
        from outside the image."""
        if self.is_unity():
            return im
        else:
            if sitk is None:
                raise ImportError('SimpleElastix is not installed: '
                                  'https://simpleelastix.readthedocs.io/GettingStarted.html')
            dtype = im.dtype
            im = im.astype('float')
            # smooth interpolation for float images, nearest-neighbor otherwise
            intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor
            return self.cast_array(sitk.Resample(self.cast_image(im), self.transform, intp, default)).astype(dtype)

    def coords(self, array, columns=None):
        """ Transform coordinates in 2 column numpy array,
        or in pandas DataFrame or Series objects in columns ['x', 'y']
        """
        if self.is_unity():
            return array.copy()
        elif DataFrame is not None and isinstance(array, (DataFrame, Series)):
            columns = columns or ['x', 'y']
            array = array.copy()
            if isinstance(array, DataFrame):
                array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))
            elif isinstance(array, Series):
                array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[0]
            return array
        else:  # somehow we need to use the inverse here to get the same effect as when using self.frame
            return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)])

    def save(self, file):
        """ save the parameters of the transform calculated
        with affine_registration to a yaml file
        """
        if not file[-3:] == 'yml':
            file += '.yml'
        with open(file, 'w') as f:
            yaml.safe_dump(self.asdict(), f, default_flow_style=None)

View File

@@ -1,54 +1,15 @@
[tool.poetry]
name = "ndbioimage"
version = "2025.1.2"
description = "Bio image reading, metadata and some affine registration."
authors = ["W. Pomp <w.pomp@nki.nl>"]
license = "GPLv3"
readme = "README.md"
keywords = ["bioformats", "imread", "numpy", "metadata"]
include = ["transform.txt"]
repository = "https://github.com/wimpomp/ndbioimage"
exclude = ["ndbioimage/jars"]
[tool.poetry.dependencies]
python = "^3.10"
numpy = ">=1.20.0"
pandas = "*"
tifffile = "*"
czifile = "2019.7.2"
tiffwrite = ">=2024.12.1"
ome-types = ">=0.4.0"
pint = "*"
tqdm = "*"
lxml = "*"
pyyaml = "*"
parfor = ">=2025.1.0"
JPype1 = "*"
SimpleITK-SimpleElastix = [
{ version = "*", python = "<3.12" },
{ version = "*", python = ">=3.12", markers = "sys_platform != 'darwin'" },
{ version = "*", python = ">=3.12", markers = "platform_machine == 'aarch64'" },
]
scikit-image = "*"
imagecodecs = "*"
xsdata = "^23" # until pydantic is up-to-date
matplotlib = { version = "*", optional = true }
scikit-video = { version = "*", optional = true }
pytest = { version = "*", optional = true }
[tool.poetry.extras]
test = ["pytest"]
write = ["matplotlib", "scikit-video"]
[tool.poetry.scripts]
ndbioimage = "ndbioimage:main"
[tool.pytest.ini_options]
filterwarnings = ["ignore:::(colorcet)"]
[tool.isort]
line_length = 119
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
requires = ["maturin>=1.8,<2.0"]
build-backend = "maturin"
[project]
name = "ndbioimage_rs"
requires-python = ">=3.8"
classifiers = [
"Programming Language :: Rust",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
dynamic = ["version"]
[tool.maturin]
features = ["pyo3/extension-module"]

142
src/bioformats.rs Normal file
View File

@@ -0,0 +1,142 @@
use anyhow::Result;
use j4rs::{Instance, InvocationArg, Jvm, JvmBuilder};
use std::cell::OnceCell;
use std::rc::Rc;
thread_local! {
    // One lazily-built JVM handle per thread.
    static JVM: OnceCell<Rc<Jvm>> = const { OnceCell::new() }
}

/// Ensure 1 jvm per thread
///
/// Returns the thread-local JVM, building it on first use.
/// Panics if the JVM cannot be built.
fn jvm() -> Rc<Jvm> {
    JVM.with(|cell| {
        cell.get_or_init(move || Rc::new(JvmBuilder::new().build().expect("Failed to build JVM")))
            .clone()
    })
}
// Return type of a generated method: `Result<R>` when a return type is given
// (optionally carrying a `|c` marker), `Result<()>` otherwise.
macro_rules! method_return {
    ($R:ty$(|c)?) => { Result<$R> };
    () => { Result<()> };
}

// Converts one declared argument into an `InvocationArg`; the `|p` suffix
// marks arguments that must be passed to Java as primitives.
macro_rules! method_arg {
    ($n:tt: $t:ty|p) => {
        InvocationArg::try_from($n)?.into_primitive()?
    };
    ($n:tt: $t:ty) => {
        InvocationArg::try_from($n)?
    };
}

// Generates a `pub fn` that invokes the Java method `$method` on `self.0`.
// Syntax: method!(name, "javaName" [, [arg: Type|p, ...]] [=> Ret|c]);
//   |p on an arg: pass as Java primitive; on the return type, |c converts the
//   result to a Rust value and |d deserializes it; no `=>` clause means the
//   method returns `Result<()>`.
macro_rules! method {
    ($name:ident, $method:expr $(,[$($n:tt: $t:ty$(|$p:tt)?),*])? $(=> $tt:ty$(|$c:tt)?)?) => {
        pub fn $name(&self, $($($n: $t),*)?) -> method_return!($($tt)?) {
            let args: Vec<InvocationArg> = vec![$($( method_arg!($n:$t$(|$p)?) ),*)?];
            let _result = jvm().invoke(&self.0, $method, &args)?;
            // Inner macro: post-process the Java result according to the
            // conversion marker attached to the return type.
            macro_rules! method_result {
                ($R:ty|c) => {
                    Ok(jvm().to_rust(_result)?)
                };
                ($R:ty|d) => {
                    Ok(jvm().to_rust_deserialized(_result)?)
                };
                ($R:ty) => {
                    Ok(_result)
                };
                () => {
                    Ok(())
                };
            }
            method_result!($($tt$(|$c)?)?)
        }
    };
}
/// Wrapper around Bio-Formats' `loci.common.DebugTools`.
pub struct DebugTools;

impl DebugTools {
    /// Set the root log level of the Java side (e.g. "ERROR").
    pub fn set_root_level(level: &str) -> Result<()> {
        let args = [InvocationArg::try_from(level)?];
        jvm().invoke_static("loci.common.DebugTools", "setRootLevel", &args)?;
        Ok(())
    }
}
/// Wrapper around `loci.formats.ChannelSeparator`, which splits interleaved
/// (e.g. RGB) data into separate channels.
pub struct ChannelSeparator(Instance);

impl ChannelSeparator {
    /// Build a channel separator wrapping (a clone of) `image_reader`.
    pub fn new(image_reader: &ImageReader) -> Result<Self> {
        let jvm = jvm();
        let channel_separator = jvm.create_instance(
            "loci.formats.ChannelSeparator",
            &[InvocationArg::from(jvm.clone_instance(&image_reader.0)?)],
        )?;
        Ok(ChannelSeparator(channel_separator))
    }

    /// Read plane `index` as raw unsigned bytes.
    pub fn open_bytes(&self, index: i32) -> Result<Vec<u8>> {
        let bi8 = self.open_bi8(index)?;
        // Reinterpret Vec<i8> as Vec<u8> via raw parts: transmuting the Vec
        // itself is undefined behavior because Vec's layout is unspecified.
        let mut bi8 = std::mem::ManuallyDrop::new(bi8);
        // SAFETY: i8 and u8 have identical size and alignment, and every bit
        // pattern is valid for both; ptr/len/capacity come from a live Vec
        // whose ownership is released through ManuallyDrop.
        Ok(unsafe { Vec::from_raw_parts(bi8.as_mut_ptr() as *mut u8, bi8.len(), bi8.capacity()) })
    }

    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
}
/// Wrapper around `loci.formats.ImageReader`, the Bio-Formats reader facade.
pub struct ImageReader(Instance);

impl Drop for ImageReader {
    fn drop(&mut self) {
        // Best effort only: panicking in drop aborts the process when it
        // happens during unwinding, so a failed close is ignored here
        // instead of unwrapped.
        let _ = self.close();
    }
}

impl ImageReader {
    /// Instantiate the Java-side `ImageReader`.
    pub fn new() -> Result<Self> {
        let reader = jvm().create_instance("loci.formats.ImageReader", InvocationArg::empty())?;
        Ok(ImageReader(reader))
    }

    /// Read plane `index` as raw unsigned bytes.
    pub fn open_bytes(&self, index: i32) -> Result<Vec<u8>> {
        let bi8 = self.open_bi8(index)?;
        // Reinterpret Vec<i8> as Vec<u8> via raw parts: transmuting the Vec
        // itself is undefined behavior because Vec's layout is unspecified.
        let mut bi8 = std::mem::ManuallyDrop::new(bi8);
        // SAFETY: i8 and u8 have identical size and alignment, and every bit
        // pattern is valid for both; ptr/len/capacity come from a live Vec
        // whose ownership is released through ManuallyDrop.
        Ok(unsafe { Vec::from_raw_parts(bi8.as_mut_ptr() as *mut u8, bi8.len(), bi8.capacity()) })
    }

    method!(set_metadata_store, "setMetadataStore", [ome_data: Instance]);
    method!(set_id, "setId", [id: &str]);
    method!(set_series, "setSeries", [series: i32|p]);
    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
    method!(get_size_x, "getSizeX" => i32|c);
    method!(get_size_y, "getSizeY" => i32|c);
    method!(get_size_c, "getSizeC" => i32|c);
    method!(get_size_t, "getSizeT" => i32|c);
    method!(get_size_z, "getSizeZ" => i32|c);
    method!(get_pixel_type, "getPixelType" => i32|c);
    method!(is_little_endian, "isLittleEndian" => bool|c);
    method!(is_rgb, "isRGB" => bool|c);
    method!(is_interleaved, "isInterleaved" => bool|c);
    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
    method!(get_rgb_channel_count, "getRGBChannelCount" => i32|c);
    method!(is_indexed, "isIndexed" => bool|c);
    method!(get_8bit_lookup_table, "get8BitLookupTable" => Instance);
    method!(get_16bit_lookup_table, "get16BitLookupTable" => Instance);
    method!(close, "close");
}
/// Wrapper around `loci.formats.MetadataTools`.
pub struct MetadataTools(Instance);

impl MetadataTools {
    /// Instantiate the Java-side `MetadataTools`.
    pub fn new() -> Result<Self> {
        let instance = jvm().create_instance("loci.formats.MetadataTools", InvocationArg::empty())?;
        Ok(Self(instance))
    }

    method!(create_ome_xml_metadata, "createOMEXMLMetadata" => Instance);
}

366
src/lib.rs Normal file
View File

@@ -0,0 +1,366 @@
mod bioformats;
#[cfg(feature = "python")]
mod py;
use anyhow::{anyhow, Result};
use bioformats::{DebugTools, ImageReader, MetadataTools};
use ndarray::Array2;
use num::{FromPrimitive, Zero};
use std::any::type_name;
use std::fmt::Debug;
use std::path::{Path, PathBuf};
/// Pixel types (u)int(8/16/32) or float(32/64)
///
/// Discriminants match the integer pixel-type codes used on the Java side
/// (see the `TryFrom<i32>` impl).
#[derive(Clone, Debug)]
pub enum PixelType {
    INT8 = 0,
    UINT8 = 1,
    INT16 = 2,
    UINT16 = 3,
    INT32 = 4,
    UINT32 = 5,
    FLOAT = 6,
    DOUBLE = 7,
}
impl TryFrom<i32> for PixelType {
type Error = anyhow::Error;
fn try_from(value: i32) -> Result<Self, Self::Error> {
match value {
0 => Ok(PixelType::INT8),
1 => Ok(PixelType::UINT8),
2 => Ok(PixelType::INT16),
3 => Ok(PixelType::UINT16),
4 => Ok(PixelType::INT32),
5 => Ok(PixelType::UINT32),
6 => Ok(PixelType::FLOAT),
7 => Ok(PixelType::DOUBLE),
_ => Err(anyhow::anyhow!("Unknown pixel type {}", value)),
}
}
}
/// Struct containing frame data in one of eight pixel types. Cast to Array2<T> using try_into.
#[derive(Clone, Debug)]
pub enum Frame {
    INT8(Array2<i8>),
    UINT8(Array2<u8>),
    INT16(Array2<i16>),
    UINT16(Array2<u16>),
    INT32(Array2<i32>),
    UINT32(Array2<u32>),
    FLOAT(Array2<f32>),
    DOUBLE(Array2<f64>),
}

// Implements `From<Array2<$t>> for Frame`, wrapping the array in variant `$s`.
macro_rules! impl_frame_cast {
    ($t:tt, $s:ident) => {
        impl From<Array2<$t>> for Frame {
            fn from(value: Array2<$t>) -> Self {
                Frame::$s(value)
            }
        }
    };
}

impl_frame_cast!(i8, INT8);
impl_frame_cast!(u8, UINT8);
impl_frame_cast!(i16, INT16);
impl_frame_cast!(u16, UINT16);
impl_frame_cast!(i32, INT32);
impl_frame_cast!(u32, UINT32);
impl_frame_cast!(f32, FLOAT);
impl_frame_cast!(f64, DOUBLE);
impl<T> TryInto<Array2<T>> for Frame
where
T: FromPrimitive + Zero + 'static,
{
type Error = anyhow::Error;
fn try_into(self) -> std::result::Result<Array2<T>, Self::Error> {
let mut err = Ok(());
let arr = match self {
Frame::INT8(v) => v.mapv_into_any(|x| {
T::from_i8(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::UINT8(v) => v.mapv_into_any(|x| {
T::from_u8(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::INT16(v) => v.mapv_into_any(|x| {
T::from_i16(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::UINT16(v) => v.mapv_into_any(|x| {
T::from_u16(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::INT32(v) => v.mapv_into_any(|x| {
T::from_i32(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::UINT32(v) => v.mapv_into_any(|x| {
T::from_u32(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::FLOAT(v) => v.mapv_into_any(|x| {
T::from_f32(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
Frame::DOUBLE(v) => v.mapv_into_any(|x| {
T::from_f64(x).unwrap_or_else(|| {
err = Err(anyhow!("cannot convert {} into {}", x, type_name::<T>()));
T::zero()
})
}),
};
match err {
Err(err) => Err(err),
Ok(()) => Ok(arr),
}
}
}
/// Reader interface to file. Use get_frame to get data.
pub struct Reader {
    // JVM-side Bio-Formats reader this struct wraps
    image_reader: ImageReader,
    /// path to file
    pub path: PathBuf,
    /// which (if more) than 1 of the series in the file to open
    pub series: i32,
    /// size x (horizontal)
    pub size_x: usize,
    /// size y (vertical)
    pub size_y: usize,
    /// size c (# channels)
    pub size_c: usize,
    /// size z (# slices)
    pub size_z: usize,
    /// size t (time/frames)
    pub size_t: usize,
    /// pixel type ((u)int(8/16/32) or float(32/64))
    pub pixel_type: PixelType,
    // byte order of the raw pixel data as reported by Bio-Formats
    little_endian: bool,
}
impl Clone for Reader {
    /// Clone by reopening the same file and series (the JVM-side reader
    /// cannot be shared).
    ///
    /// `Clone::clone` cannot return a `Result`, so a failure to reopen the
    /// file panics; `expect` documents the cause instead of a bare `unwrap`.
    fn clone(&self) -> Self {
        Reader::new(&self.path, self.series)
            .expect("failed to reopen image file while cloning Reader")
    }
}
impl Debug for Reader {
    // Manual impl: `image_reader` (a JVM handle) is not Debug, so only the
    // plain fields are listed.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Reader")
            .field("path", &self.path)
            .field("series", &self.series)
            .field("size_x", &self.size_x)
            .field("size_y", &self.size_y)
            .field("size_c", &self.size_c)
            .field("size_z", &self.size_z)
            .field("size_t", &self.size_t)
            .field("pixel_type", &self.pixel_type)
            .field("little_endian", &self.little_endian)
            .finish()
    }
}
impl Reader {
/// Create new reader for image file at path.
pub fn new(path: &Path, series: i32) -> Result<Self> {
DebugTools::set_root_level("ERROR")?;
let reader = ImageReader::new()?;
let meta_data_tools = MetadataTools::new()?;
let ome_meta = meta_data_tools.create_ome_xml_metadata()?;
reader.set_metadata_store(ome_meta)?;
reader.set_id(path.to_str().unwrap())?;
reader.set_series(series)?;
let size_x = reader.get_size_x()?;
let size_y = reader.get_size_y()?;
let size_c = reader.get_size_c()?;
let size_z = reader.get_size_z()?;
let size_t = reader.get_size_t()?;
let pixel_type = PixelType::try_from(reader.get_pixel_type()?)?;
let little_endian = reader.is_little_endian()?;
Ok(Reader {
image_reader: reader,
path: PathBuf::from(path),
series,
size_x: size_x as usize,
size_y: size_y as usize,
size_c: size_c as usize,
size_z: size_z as usize,
size_t: size_t as usize,
pixel_type,
little_endian,
})
}
fn deinterleave(&self, bytes: Vec<u8>, channel: usize) -> Result<Vec<u8>> {
let chunk_size = match self.pixel_type {
PixelType::INT8 => 1,
PixelType::UINT8 => 1,
PixelType::INT16 => 2,
PixelType::UINT16 => 2,
PixelType::INT32 => 4,
PixelType::UINT32 => 4,
PixelType::FLOAT => 4,
PixelType::DOUBLE => 8,
};
Ok(bytes
.chunks(chunk_size)
.skip(channel)
.step_by(self.size_c)
.flat_map(|a| a.to_vec())
.collect())
}
/// Retrieve fame at channel c, slize z and time t.
pub fn get_frame(&self, c: usize, z: usize, t: usize) -> Result<Frame> {
let bytes = if self.image_reader.is_rgb()? & self.image_reader.is_interleaved()? {
let index = self.image_reader.get_index(z as i32, 0, t as i32)?;
self.deinterleave(self.image_reader.open_bytes(index)?, c)?
} else if self.image_reader.get_rgb_channel_count()? > 1 {
let channel_separator = bioformats::ChannelSeparator::new(&self.image_reader)?;
let index = channel_separator.get_index(z as i32, c as i32, t as i32)?;
channel_separator.open_bytes(index)?
} else if self.image_reader.is_indexed()? {
let index = self.image_reader.get_index(z as i32, 0, t as i32)?;
self.image_reader.open_bytes(index)?
// TODO: apply LUT
// let _bytes_lut = match self.pixel_type {
// PixelType::INT8 | PixelType::UINT8 => {
// let _lut = self.image_reader.get_8bit_lookup_table()?;
// }
// PixelType::INT16 | PixelType::UINT16 => {
// let _lut = self.image_reader.get_16bit_lookup_table()?;
// }
// _ => {}
// };
} else {
let index = self.image_reader.get_index(z as i32, c as i32, t as i32)?;
self.image_reader.open_bytes(index)?
};
self.bytes_to_frame(bytes)
}
fn bytes_to_frame(&self, bytes: Vec<u8>) -> Result<Frame> {
macro_rules! get_frame {
($t:tt, <$n:expr) => {
Ok(Frame::from(Array2::from_shape_vec(
(self.size_y, self.size_x),
bytes
.chunks($n)
.map(|x| $t::from_le_bytes(x.try_into().unwrap()))
.collect(),
)?))
};
($t:tt, >$n:expr) => {
Ok(Frame::from(Array2::from_shape_vec(
(self.size_y, self.size_x),
bytes
.chunks($n)
.map(|x| $t::from_be_bytes(x.try_into().unwrap()))
.collect(),
)?))
};
}
match (&self.pixel_type, self.little_endian) {
(PixelType::INT8, true) => get_frame!(i8, <1),
(PixelType::UINT8, true) => get_frame!(u8, <1),
(PixelType::INT16, true) => get_frame!(i16, <2),
(PixelType::UINT16, true) => get_frame!(u16, <2),
(PixelType::INT32, true) => get_frame!(i32, <4),
(PixelType::UINT32, true) => get_frame!(u32, <4),
(PixelType::FLOAT, true) => get_frame!(f32, <4),
(PixelType::DOUBLE, true) => get_frame!(f64, <8),
(PixelType::INT8, false) => get_frame!(i8, >1),
(PixelType::UINT8, false) => get_frame!(u8, >1),
(PixelType::INT16, false) => get_frame!(i16, >2),
(PixelType::UINT16, false) => get_frame!(u16, >2),
(PixelType::INT32, false) => get_frame!(i32, >4),
(PixelType::UINT32, false) => get_frame!(u32, >4),
(PixelType::FLOAT, false) => get_frame!(f32, >4),
(PixelType::DOUBLE, false) => get_frame!(f64, >8),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use rayon::prelude::*;

    /// Open a test file from `tests/files` as series 0.
    fn open(file: &str) -> Result<Reader> {
        let path = std::env::current_dir()?.join("tests").join("files").join(file);
        Reader::new(&path, 0)
    }

    /// Describe a file's pixel type as a human-readable string.
    fn get_pixel_type(file: &str) -> Result<String> {
        let reader = open(file)?;
        Ok(format!(
            "file: {}, pixel type: {:?}",
            file, reader.pixel_type
        ))
    }

    /// Read the first frame (c = 0, z = 0, t = 0) of a file.
    fn get_frame(file: &str) -> Result<Frame> {
        let reader = open(file)?;
        reader.get_frame(0, 0, 0)
    }

    #[test]
    fn read_ser() -> Result<()> {
        let file = "Experiment-2029.czi";
        let reader = open(file)?;
        // was printing size_y twice; print x then y
        println!("size: {}, {}", reader.size_x, reader.size_y);
        let frame = reader.get_frame(0, 0, 0)?;
        if let Ok(arr) = <Frame as TryInto<Array2<i8>>>::try_into(frame) {
            println!("{:?}", arr);
        } else {
            println!("could not convert Frame to Array<i8>");
        }
        Ok(())
    }

    #[test]
    fn read_par() -> Result<()> {
        let files = vec!["Experiment-2029.czi", "test.tif"];
        // collect into Result and propagate instead of unwrapping in the map
        let pixel_type = files
            .into_par_iter()
            .map(get_pixel_type)
            .collect::<Result<Vec<_>>>()?;
        println!("{:?}", pixel_type);
        Ok(())
    }

    #[test]
    fn read_frame_par() -> Result<()> {
        let files = vec!["Experiment-2029.czi", "test.tif"];
        let frames = files
            .into_par_iter()
            .map(get_frame)
            .collect::<Result<Vec<_>>>()?;
        println!("{:?}", frames);
        Ok(())
    }
}

14
src/py.rs Normal file
View File

@@ -0,0 +1,14 @@
use pyo3::prelude::*;
/// Formats the sum of two numbers as string.
#[pyfunction]
fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
    let total = a + b;
    Ok(format!("{total}"))
}
/// A Python module implemented in Rust.
///
/// The function name must match the `lib.name` configured for the extension
/// module, or the Python import will fail to find the init symbol.
#[pymodule]
fn ndbioimage_rs(m: &Bound<'_, PyModule>) -> PyResult<()> {
    // expose sum_as_string() to Python
    m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
    Ok(())
}