Compare commits

22 Commits
main ... rs

Author SHA1 Message Date
Wim Pomp
d2c725440c - bump ome-metadata dependency
- add extract-ome cli subcommand
2026-01-04 17:03:14 +01:00
Wim Pomp
b1f8dd998b - test ome reading from some example files 2026-01-04 15:13:35 +01:00
Wim Pomp
8db873c425 - bump ome-metadata dependency
- make Reader::new error instead of panic if file cannot be read
2026-01-04 15:09:23 +01:00
Wim Pomp
3c14168878 - implement custom error types
- less restrictive dependency versions
- some extra features and bugfixes for movie writing
- make python tests work again
2026-01-04 13:59:57 +01:00
Wim Pomp
3dc8e6af04 - bump bioformats to 8.3.0
- rust: command line binary, save as mp4, save as tiff, ome metadata, more methods for View, bugfixes, less unsafe code
- python: ome as dict
2025-08-21 19:45:02 +02:00
Wim Pomp
24af64ac7e - make modules public 2025-04-27 20:14:57 +02:00
Wim Pomp
5195ccfcb5 - implement sliced views, including min, max, sum and mean operations 2025-04-27 20:07:49 +02:00
Wim Pomp
87e9715f97 - remove all readers but bioformats
- open folder with sequence
2025-02-19 21:28:59 +01:00
Wim Pomp
2247a994be - change to MIT license
- thread local image reader instances
- add keywords and categories
- add python dependencies
- README
2025-02-17 19:53:34 +01:00
Wim Pomp
e5c6361086 Merge remote-tracking branch 'origin/rs' into rs 2025-02-16 23:03:55 +01:00
Wim Pomp
83ea9722f6 - some workarounds to get jars and shared libs in the right place for python
- add most ndbioimage python code and use rs code as bfread
2025-02-16 23:03:48 +01:00
Wim Pomp
372b816f93 - some workarounds to get jars and shared libs in the right place for python
- add most ndbioimage python code and use rs code as bfread
2025-02-16 23:02:40 +01:00
Wim Pomp
fefdd6448b - added ome_xml method
- some pyo3 methods
2025-02-08 20:22:45 +01:00
Wim Pomp
a3dfc075a8 - do not try java things when building for docs.rs 2025-02-03 16:27:24 +01:00
Wim Pomp
15eae99272 - do not try java things when building for docs.rs 2025-02-03 16:22:49 +01:00
wimpomp
1a8c3f22e5 Delete Cargo.lock 2025-02-03 16:21:58 +01:00
Wim Pomp
b612d33a35 - do not try java things when building for docs.rs 2025-02-03 16:17:14 +01:00
Wim Pomp
3c22cf743a - rename ndbioimage 2025-02-03 15:52:03 +01:00
Wim Pomp
dd5e2d393f - exclude tests on publish 2025-02-03 15:48:27 +01:00
Wim Pomp
45aa72d14c - exclude tests on publish 2025-02-03 15:45:29 +01:00
Wim Pomp
1aad79b441 - start rust rewrite 2025-02-03 15:41:25 +01:00
Wim Pomp
3db6dc8ee1 - start rust rewrite 2025-02-03 15:33:32 +01:00
39 changed files with 6336 additions and 3581 deletions

View File

@@ -1,22 +0,0 @@
name: PyTest
on: [workflow_call, push, pull_request]
jobs:
pytest:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
pip install .[test]
- name: Test with pytest
run: pytest

85
.gitignore vendored
View File

@@ -1,12 +1,75 @@
._*
*.pyc
/build/
*.egg-info
/venv/
.idea
/.pytest_cache/
/ndbioimage/_version.py
/ndbioimage/jars
/target
/Cargo.lock
# Byte-compiled / optimized / DLL files
__pycache__/
.pytest_cache/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
.Python
.venv/
env/
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
include/
man/
venv/
*.egg-info/
.installed.cfg
*.egg
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
pip-selfcheck.json
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Rope
.ropeproject
# Django stuff:
*.log
*.pot
.DS_Store
# Sphinx documentation
docs/_build/
# PyCharm
.idea/
# VSCode
.vscode/
# Pyenv
.python-version
/tests/files/*
/poetry.lock
/dist/

70
Cargo.toml Normal file
View File

@@ -0,0 +1,70 @@
[package]
name = "ndbioimage"
version = "2026.1.2"
edition = "2024"
rust-version = "1.85.1"
authors = ["Wim Pomp <w.pomp@nki.nl>"]
license = "MIT"
description = "Read bio image formats using the bio-formats java package."
homepage = "https://github.com/wimpomp/ndbioimage/tree/rs"
repository = "https://github.com/wimpomp/ndbioimage/tree/rs"
documentation = "https://docs.rs/ndbioimage"
readme = "README.md"
keywords = ["bioformats", "imread", "ndarray", "metadata"]
categories = ["multimedia::images", "science"]
exclude = ["/tests"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "ndbioimage"
crate-type = ["cdylib", "rlib"]
[dependencies]
clap = { version = "4", features = ["derive"] }
ffmpeg-sidecar = { version = "2", optional = true }
itertools = "0.14"
indexmap = { version = "2", features = ["serde"] }
indicatif = { version = "0.18", features = ["rayon"], optional = true }
j4rs = "0.24"
ndarray = { version = "0.17", features = ["serde"] }
num = "0.4"
numpy = { version = "0.27", optional = true }
ordered-float = "5"
rayon = { version = "1", optional = true }
serde = { version = "1", features = ["rc"] }
serde_json = { version = "1", optional = true }
serde_with = "3"
tiffwrite = { version = "2025.12.0", optional = true}
thread_local = "1"
ome-metadata = "0.4"
lazy_static = "1"
thiserror = "2"
[dependencies.pyo3]
version = "0.27"
features = ["extension-module", "abi3-py310", "generate-import-lib", "anyhow"]
optional = true
[dev-dependencies]
downloader = "0.2"
rayon = "1"
regex = "1"
reqwest = { version = "0.13", features = ["blocking"] }
[build-dependencies]
j4rs = "0.24"
ffmpeg-sidecar = "2"
retry = "2"
[features]
# Enables formats for which code in bioformats with a GPL license is needed
gpl-formats = []
# Enables python ffi using pyO3
python = ["dep:pyo3", "dep:numpy", "dep:serde_json"]
# Enables writing as tiff
tiff = ["dep:tiffwrite", "dep:indicatif", "dep:rayon"]
# Enables writing as mp4 using ffmpeg
movie = ["dep:ffmpeg-sidecar"]
[package.metadata.docs.rs]
features = ["gpl-formats", "tiff", "movie"]

693
LICENSE
View File

@@ -1,674 +1,19 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
Copyright (c) 2025 Wim Pomp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,6 +1,9 @@
# ndbioimage
[![Pytest](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml/badge.svg)](https://github.com/wimpomp/ndbioimage/actions/workflows/pytest.yml)
# ndbioimage
## Work in progress
Rust rewrite of python version. Read bio image formats using the bio-formats java package.
[https://www.openmicroscopy.org/bio-formats/](https://www.openmicroscopy.org/bio-formats/)
Exposes (bio) images as a numpy ndarray-like object, but without loading the whole
image into memory, reading from the file only when needed. Some metadata is read
@@ -65,30 +68,37 @@ with Imread('image_file.tif', axes='cztyx') as im:
array = np.asarray(im[0, 0])
```
### Rust
```
use ndarray::Array2;
use ndbioimage::{Frame, Reader};
let path = "/path/to/file";
let reader = Reader::new(&path, 0)?;
println!("size: {}, {}", reader.size_x, reader.size_y);
let frame = reader.get_frame(0, 0, 0).unwrap();
if let Ok(arr) = <Frame as TryInto<Array2<i8>>>::try_into(frame) {
println!("{:?}", arr);
} else {
println!("could not convert Frame to Array<i8>");
}
let xml = reader.get_ome_xml().unwrap();
println!("{}", xml);
```
```
use ndarray::Array2;
use ndbioimage::Reader;
let path = "/path/to/file";
let reader = Reader::new(&path, 0)?;
let view = reader.view();
let view = view.max_proj(3)?;
let array = view.as_array::<u16>()?;
```
### Command line
```ndbioimage --help```: show help
```ndbioimage image```: show metadata about image
```ndbioimage image -w {name}.tif -r```: copy image into image.tif (replacing {name} with image), while registering channels
```ndbioimage image -w image.mp4 -C cyan lime red``` copy image into image.mp4 (z will be max projected), make channel colors cyan lime and red
## Adding more formats
Readers for image formats subclass AbstractReader. When an image reader is imported, Imread will
automatically recognize it and use it to open the appropriate file format. Image readers
are required to implement the following methods:
- staticmethod _can_open(path): return True if path can be opened by this reader
- \_\_frame__(self, c, z, t): return the frame at channel=c, z-slice=z, time=t from the file
Optional methods:
- get_ome: reads metadata from file and adds them to an OME object imported
from the ome-types library
- open(self): maybe open some file handle
- close(self): close any file handles
Optional fields:
- priority (int): Imread will try readers with a lower number first, default: 99
- do_not_pickle (strings): any attributes that should not be included when the object is pickled,
for example: any file handles
# TODO
- more image formats

131
build.rs Normal file
View File

@@ -0,0 +1,131 @@
#[cfg(not(feature = "python"))]
use j4rs::{JvmBuilder, MavenArtifact, MavenArtifactRepo, MavenSettings, errors::J4RsError};
#[cfg(not(feature = "python"))]
use retry::{delay, delay::Exponential, retry};
use std::error::Error;
#[cfg(not(feature = "python"))]
use std::fmt::Display;
#[cfg(not(feature = "python"))]
use std::fmt::Formatter;
#[cfg(not(feature = "python"))]
use std::path::PathBuf;
#[cfg(not(feature = "python"))]
use std::{env, fs};
#[cfg(feature = "python")]
use j4rs::Jvm;
#[cfg(feature = "movie")]
use ffmpeg_sidecar::download::auto_download;
/// Errors that can occur while preparing the build environment.
#[cfg(not(feature = "python"))]
#[derive(Clone, Debug)]
enum BuildError {
    /// The bioformats_package jar was not present in the jassets directory
    /// after the maven deployment step.
    BioFormatsNotDownloaded,
}

#[cfg(not(feature = "python"))]
impl Display for BuildError {
    // Single-variant enum, so the message is unconditional.
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), std::fmt::Error> {
        write!(fmt, "Bioformats package not downloaded")
    }
}

#[cfg(not(feature = "python"))]
impl Error for BuildError {}
/// Build script entry point.
///
/// Does nothing on docs.rs (no network access there). Otherwise:
/// - with the `movie` feature: download an ffmpeg binary,
/// - without the `python` feature: fetch the bioformats jar via maven
///   (with retries) and verify it landed in the jassets directory,
/// - with the `python` feature: copy the j4rs jars/shared libs into the
///   python package tree so maturin can bundle them.
fn main() -> Result<(), Box<dyn Error>> {
    println!("cargo::rerun-if-changed=build.rs");
    if std::env::var("DOCS_RS").is_err() {
        #[cfg(feature = "movie")]
        auto_download()?;
        #[cfg(not(feature = "python"))]
        {
            // Maven downloads can be flaky; retry with exponential backoff + jitter.
            retry(
                Exponential::from_millis(1000).map(delay::jitter).take(4),
                deploy_java_artifacts,
            )?;
            let path = default_jassets_path()?;
            if !path.join("bioformats_package-8.3.0.jar").exists() {
                Err(BuildError::BioFormatsNotDownloaded)?;
            }
        }
        #[cfg(feature = "python")]
        {
            let py_src_path = std::env::current_dir()?.join("py").join("ndbioimage");
            let py_jassets_path = py_src_path.join("jassets");
            let py_deps_path = py_src_path.join("deps");
            // Start from a clean slate so stale jars/libs are not bundled.
            if py_jassets_path.exists() {
                std::fs::remove_dir_all(&py_jassets_path)?;
            }
            if py_deps_path.exists() {
                std::fs::remove_dir_all(&py_deps_path)?;
            }
            Jvm::copy_j4rs_libs_under(py_src_path.to_str().unwrap())?;
            // rename else maturin will ignore them
            for file in std::fs::read_dir(&py_deps_path)? {
                let f = file?.path().to_str().unwrap().to_string();
                if !f.ends_with("_") {
                    std::fs::rename(&f, std::format!("{f}_"))?;
                }
            }
            // remove so we don't include too much accidentally
            for file in std::fs::read_dir(&py_jassets_path)? {
                let f = file?.path();
                if !f.file_name().unwrap().to_str().unwrap().starts_with("j4rs") {
                    std::fs::remove_file(&f)?;
                }
            }
        }
    }
    Ok(())
}
/// Locate the j4rs `jassets` directory by walking up from OUT_DIR (when
/// running as a build script) or from the current executable otherwise.
#[cfg(not(feature = "python"))]
fn default_jassets_path() -> Result<PathBuf, J4RsError> {
    // OUT_DIR is only set while cargo runs this as a build script.
    let origin = match env::var("OUT_DIR") {
        Ok(out_dir) => PathBuf::from(out_dir),
        Err(_) => env::current_exe()?,
    };
    let mut dir = fs::canonicalize(origin)?;
    // Walk towards the filesystem root, scanning each ancestor for "jassets".
    while dir.pop() {
        for entry in fs::read_dir(&dir)? {
            let candidate = entry?.path();
            if candidate.file_name().map_or(false, |name| name == "jassets") {
                return Ok(candidate);
            }
        }
    }
    Err(J4RsError::GeneralError(
        "Can not find jassets directory".to_owned(),
    ))
}
/// Download the bioformats java artifacts from the openmicroscopy maven
/// repository into the local j4rs jassets directory.
#[cfg(not(feature = "python"))]
fn deploy_java_artifacts() -> Result<(), J4RsError> {
    // The jvm handle is only used to drive the maven deployment.
    let jvm = JvmBuilder::new()
        .skip_setting_native_lib()
        .with_maven_settings(MavenSettings::new(vec![MavenArtifactRepo::from(
            "openmicroscopy::https://artifacts.openmicroscopy.org/artifactory/ome.releases",
        )]))
        .build()?;
    jvm.deploy_artifact(&MavenArtifact::from("ome:bioformats_package:8.3.0"))?;
    // formats-gpl is only fetched when GPL-licensed formats are enabled.
    #[cfg(feature = "gpl-formats")]
    jvm.deploy_artifact(&MavenArtifact::from("ome:formats-gpl:8.3.0"))?;
    Ok(())
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,77 +0,0 @@
from pathlib import Path
from urllib import request
class JVMException(Exception):
    """Raised when the java virtual machine fails to start or to serve frames."""
try:
    class JVM:
        """ There can be only one java virtual machine per python process,
        so this is a singleton class to manage the jvm.
        """
        _instance = None   # singleton instance
        vm_started = False  # a jvm is running in this process
        vm_killed = False   # a jvm was shut down; it cannot be started again
        success = True  # NOTE(review): appears unused in this class — confirm

        def __new__(cls, *args):
            # Singleton: every instantiation returns the same object.
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            return cls._instance

        def __init__(self, jars=None):
            # Only start the jvm once; later instantiations reuse it.
            if not self.vm_started and not self.vm_killed:
                try:
                    jar_path = Path(__file__).parent / 'jars'
                    if jars is None:
                        jars = {}
                    # Fetch any jars that are not cached locally yet.
                    for jar, src in jars.items():
                        if not (jar_path / jar).exists():
                            JVM.download(src, jar_path / jar)
                    classpath = [str(jar_path / jar) for jar in jars.keys()]
                    import jpype
                    jpype.startJVM(classpath=classpath)
                except Exception:  # noqa
                    self.vm_started = False
                else:
                    self.vm_started = True
                # Import the BioFormats java classes; best effort — on failure
                # the attributes below are simply left unset.
                try:
                    import jpype.imports
                    from loci.common import DebugTools  # noqa
                    from loci.formats import ChannelSeparator  # noqa
                    from loci.formats import FormatTools  # noqa
                    from loci.formats import ImageReader  # noqa
                    from loci.formats import MetadataTools  # noqa
                    DebugTools.setRootLevel("ERROR")
                    self.image_reader = ImageReader
                    self.channel_separator = ChannelSeparator
                    self.format_tools = FormatTools
                    self.metadata_tools = MetadataTools
                except Exception:  # noqa
                    pass
            if self.vm_killed:
                raise Exception('The JVM was killed before, and cannot be restarted in this Python process.')

        @staticmethod
        def download(src, dest):
            """Download a jar from src into dest, creating the folder if needed."""
            print(f'Downloading {dest.name} to {dest}.')
            dest.parent.mkdir(exist_ok=True)
            dest.write_bytes(request.urlopen(src).read())

        @classmethod
        def kill_vm(cls):
            """Shut the jvm down; it cannot be restarted in this python process."""
            self = cls._instance
            if self is not None and self.vm_started and not self.vm_killed:
                import jpype
                jpype.shutdownJVM()  # noqa
                self.vm_started = False
                self.vm_killed = True
except ImportError:
    # jpype is optional; without it the java-backed reader is disabled.
    JVM = None

View File

@@ -1 +0,0 @@
# Names of the reader submodules re-exported by this package.
__all__ = 'bfread', 'cziread', 'fijiread', 'ndread', 'seqread', 'tifread', 'metaseriesread'

View File

@@ -1,208 +0,0 @@
import multiprocessing
from abc import ABC
from multiprocessing import queues
from pathlib import Path
from traceback import format_exc
import numpy as np
from .. import JVM, AbstractReader, JVMException
# Jar(s) required by the JVM reader and where to download them from.
jars = {'bioformats_package.jar': 'https://downloads.openmicroscopy.org/bio-formats/latest/artifacts/'
                                  'bioformats_package.jar'}
class JVMReader:
    """Run a BioFormats ImageReader in a separate (spawned) process.

    jpype can start only one jvm per python process and cannot restart it
    once killed, so the reader lives in its own process; frame requests and
    replies are exchanged through multiprocessing queues.
    """

    def __init__(self, path: Path, series: int) -> None:
        mp = multiprocessing.get_context('spawn')
        self.path = path
        self.series = series
        self.queue_in = mp.Queue()    # (c, z, t) frame requests
        self.queue_out = mp.Queue()   # ('status'|'frame'|'error', payload) replies
        self.done = mp.Event()
        self.process = mp.Process(target=self.run)
        self.process.start()
        # Block until the child reports that the jvm and reader are up.
        status, message = self.queue_out.get()
        if status == 'status' and message == 'started':
            self.is_alive = True
        else:
            raise JVMException(message)

    def close(self) -> None:
        """Signal the child process to stop, drain the queues and join."""
        if self.is_alive:
            self.done.set()
            while not self.queue_in.empty():
                self.queue_in.get()
            self.queue_in.close()
            self.queue_in.join_thread()
            while not self.queue_out.empty():
                print(self.queue_out.get())
            self.queue_out.close()
            self.process.join()
            self.process.close()
            self.is_alive = False

    def frame(self, c: int, z: int, t: int) -> np.ndarray:
        """Request the plane at channel c, z-slice z, time t from the child."""
        self.queue_in.put((c, z, t))
        status, message = self.queue_out.get()
        if status == 'frame':
            return message
        else:
            raise JVMException(message)

    def run(self) -> None:
        """ Read planes from the image reader file.
        adapted from python-bioformats/bioformats/formatreader.py
        """
        jvm = None
        try:
            jvm = JVM(jars)
            reader = jvm.image_reader()
            ome_meta = jvm.metadata_tools.createOMEXMLMetadata()
            reader.setMetadataStore(ome_meta)
            reader.setId(str(self.path))
            reader.setSeries(self.series)
            open_bytes_func = reader.openBytes
            width, height = int(reader.getSizeX()), int(reader.getSizeY())
            pixel_type = reader.getPixelType()
            little_endian = reader.isLittleEndian()
            # Translate the BioFormats pixel type into a numpy dtype,
            # honouring the file's endianness.
            if pixel_type == jvm.format_tools.INT8:
                dtype = np.int8
            elif pixel_type == jvm.format_tools.UINT8:
                dtype = np.uint8
            elif pixel_type == jvm.format_tools.UINT16:
                dtype = '<u2' if little_endian else '>u2'
            elif pixel_type == jvm.format_tools.INT16:
                dtype = '<i2' if little_endian else '>i2'
            elif pixel_type == jvm.format_tools.UINT32:
                dtype = '<u4' if little_endian else '>u4'
            elif pixel_type == jvm.format_tools.INT32:
                dtype = '<i4' if little_endian else '>i4'
            elif pixel_type == jvm.format_tools.FLOAT:
                dtype = '<f4' if little_endian else '>f4'
            elif pixel_type == jvm.format_tools.DOUBLE:
                dtype = '<f8' if little_endian else '>f8'
            else:
                dtype = None
            # Tell the parent process that the jvm and reader are ready.
            self.queue_out.put(('status', 'started'))
            while not self.done.is_set():
                try:
                    c, z, t = self.queue_in.get(True, 0.02)
                    if reader.isRGB() and reader.isInterleaved():
                        # Interleaved RGB: one plane holds all channels.
                        index = reader.getIndex(z, 0, t)
                        image = np.frombuffer(open_bytes_func(index), dtype)
                        image.shape = (height, width, reader.getSizeC())
                        if image.shape[2] > 3:
                            image = image[:, :, :3]
                    elif c is not None and reader.getRGBChannelCount() == 1:
                        # Monochrome: read the requested channel plane directly.
                        index = reader.getIndex(z, c, t)
                        image = np.frombuffer(open_bytes_func(index), dtype)
                        image.shape = (height, width)
                    elif reader.getRGBChannelCount() > 1:
                        # Non-interleaved RGB: separate channels, then stack.
                        n_planes = reader.getRGBChannelCount()
                        rdr = jvm.channel_separator(reader)
                        planes = [np.frombuffer(rdr.openBytes(rdr.getIndex(z, i, t)), dtype) for i in range(n_planes)]
                        if len(planes) > 3:
                            planes = planes[:3]
                        elif len(planes) < 3:
                            # > 1 and < 3 means must be 2
                            # see issue #775
                            planes.append(np.zeros(planes[0].shape, planes[0].dtype))
                        image = np.dstack(planes)
                        image.shape = (height, width, 3)
                        del rdr
                    elif reader.getSizeC() > 1:
                        # Multiple channels stored as separate planes: stack them all.
                        images = [np.frombuffer(open_bytes_func(reader.getIndex(z, i, t)), dtype)
                                  for i in range(reader.getSizeC())]
                        image = np.dstack(images)
                        image.shape = (height, width, reader.getSizeC())
                        # if not channel_names is None:
                        #     metadata = MetadataRetrieve(self.metadata)
                        #     for i in range(self.reader.getSizeC()):
                        #         index = self.reader.getIndex(z, 0, t)
                        #         channel_name = metadata.getChannelName(index, i)
                        #         if channel_name is None:
                        #             channel_name = metadata.getChannelID(index, i)
                        #         channel_names.append(channel_name)
                    elif reader.isIndexed():
                        #
                        # The image data is indexes into a color lookup-table
                        # But sometimes the table is the identity table and just generates
                        # a monochrome RGB image
                        #
                        index = reader.getIndex(z, 0, t)
                        image = np.frombuffer(open_bytes_func(index), dtype)
                        if pixel_type in (jvm.format_tools.INT16, jvm.format_tools.UINT16):
                            lut = reader.get16BitLookupTable()
                            if lut is not None:
                                lut = np.array(lut)
                                # lut = np.array(
                                #     [env.get_short_array_elements(d)
                                #      for d in env.get_object_array_elements(lut)]) \
                                #     .transpose()
                        else:
                            lut = reader.get8BitLookupTable()
                            if lut is not None:
                                lut = np.array(lut)
                                # lut = np.array(
                                #     [env.get_byte_array_elements(d)
                                #      for d in env.get_object_array_elements(lut)]) \
                                #     .transpose()
                        image.shape = (height, width)
                        # Skip applying an identity lut: it would only widen the data.
                        if (lut is not None) and not np.all(lut == np.arange(lut.shape[0])[:, np.newaxis]):
                            image = lut[image, :]
                    else:
                        index = reader.getIndex(z, 0, t)
                        image = np.frombuffer(open_bytes_func(index), dtype)
                        image.shape = (height, width)
                    # Reply with a single 2d plane; pick channel c from rgb images.
                    if image.ndim == 3:
                        self.queue_out.put(('frame', image[..., c]))
                    else:
                        self.queue_out.put(('frame', image))
                except queues.Empty:  # noqa
                    continue
        except (Exception,):
            # Ship the traceback to the parent, which raises it as JVMException.
            self.queue_out.put(('error', format_exc()))
        finally:
            if jvm is not None:
                jvm.kill_vm()
def can_open(path: Path) -> bool:
    """Return whether java BioFormats recognizes the file format of path.

    Intended to run in its own spawned process, because the jvm cannot be
    restarted once it has been shut down.
    """
    # Bugfix: jvm was unbound when JVM(jars) itself raised, so the finally
    # clause crashed with a NameError instead of returning False.
    jvm = None
    try:
        jvm = JVM(jars)
        reader = jvm.image_reader()
        reader.getFormat(str(path))
        return True
    except (Exception,):
        return False
    finally:
        if jvm is not None:
            jvm.kill_vm()
class Reader(AbstractReader, ABC):
    """Fallback reader that opens files through java BioFormats.

    Used as a last resort when no other reader can open the file, since it
    needs a java virtual machine running in a helper process.
    """
    priority = 99  # panic and open with BioFormats
    do_not_pickle = 'reader', 'key', 'jvm'

    @staticmethod
    def _can_open(path: Path) -> bool:
        """ Use java BioFormats to make an ome metadata structure. """
        # Probe in a separate spawned process so the jvm never taints this one.
        ctx = multiprocessing.get_context('spawn')
        with ctx.Pool(1) as pool:
            result, = pool.map(can_open, (path,))
        return result

    def open(self) -> None:
        # Start the helper process hosting the BioFormats reader.
        self.reader = JVMReader(self.path, self.series)

    def __frame__(self, c: int, z: int, t: int) -> np.ndarray:
        # Delegate the plane request to the helper process.
        return self.reader.frame(c, z, t)

    def close(self) -> None:
        self.reader.close()

View File

@@ -1,606 +0,0 @@
import re
import warnings
from abc import ABC
from functools import cached_property
from io import BytesIO
from itertools import product
from pathlib import Path
from typing import Any, Callable, Optional, TypeVar
import czifile
import imagecodecs
import numpy as np
from lxml import etree
from ome_types import OME, model
from tifffile import repeat_nd
from .. import AbstractReader
try:
# TODO: use zoom from imagecodecs implementation when available
from scipy.ndimage.interpolation import zoom
except ImportError:
try:
from ndimage.interpolation import zoom
except ImportError:
zoom = None
Element = TypeVar('Element')
def zstd_decode(data: bytes) -> bytes:  # noqa
    """ decode zstd bytes, copied from BioFormats ZeissCZIReader """
    def read_var_int(stream: BytesIO) -> int:  # noqa
        # Read a little-endian base-128 varint of at most three bytes.
        a = stream.read(1)[0]
        if a & 128:
            b = stream.read(1)[0]
            if b & 128:
                c = stream.read(1)[0]
                return (c << 14) | ((b & 127) << 7) | (a & 127)
            return (b << 7) | (a & 127)
        return a & 255

    # Parse the optional czi header in front of the zstd payload; on any
    # parse failure fall back to plain zstd decoding of the whole buffer.
    try:
        with BytesIO(data) as stream:
            size_of_header = read_var_int(stream)
            high_low_unpacking = False
            while stream.tell() < size_of_header:
                chunk_id = read_var_int(stream)
                # only one chunk ID defined so far
                if chunk_id == 1:
                    high_low_unpacking = (stream.read(1)[0] & 1) == 1
                else:
                    raise ValueError(f'Invalid chunk id: {chunk_id}')
            pointer = stream.tell()
    except Exception:  # noqa
        high_low_unpacking = False
        pointer = 0
    decoded = imagecodecs.zstd_decode(data[pointer:])
    if high_low_unpacking:
        # Pixels were split into a low-byte half and a high-byte half;
        # re-interleave them back into normal byte order.
        second_half = len(decoded) // 2
        return bytes([decoded[second_half + i // 2] if i % 2 else decoded[i // 2] for i in range(len(decoded))])
    else:
        return decoded
def data(self, raw: bool = False, resize: bool = True, order: int = 0) -> np.ndarray:
    """Read image data from file and return as numpy array.

    Replacement for czifile.SubBlockSegment.data that adds zstd support;
    monkeypatched into czifile below.
    """
    # Register the zstd decoders in czifile's decompression table.
    DECOMPRESS = czifile.czifile.DECOMPRESS  # noqa
    DECOMPRESS[5] = imagecodecs.zstd_decode
    DECOMPRESS[6] = zstd_decode
    de = self.directory_entry
    fh = self._fh
    if raw:
        # Return the undecoded bytes straight from the file.
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read(self.data_size)  # noqa
        return data
    if de.compression:
        # if de.compression not in DECOMPRESS:
        #     raise ValueError('compression unknown or not supported')
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read(self.data_size)  # noqa
        data = DECOMPRESS[de.compression](data)  # noqa
        if de.compression == 2:
            # LZW
            data = np.fromstring(data, de.dtype)  # noqa
        elif de.compression in (5, 6):
            # ZSTD
            data = np.frombuffer(data, de.dtype)  # noqa
    else:
        dtype = np.dtype(de.dtype)
        with fh.lock:
            fh.seek(self.data_offset)
            data = fh.read_array(dtype, self.data_size // dtype.itemsize)  # noqa
    data = data.reshape(de.stored_shape)  # noqa
    if de.compression != 4 and de.stored_shape[-1] in (3, 4):
        if de.stored_shape[-1] == 3:
            # BGR -> RGB
            data = data[..., ::-1]  # noqa
        else:
            # BGRA -> RGBA
            tmp = data[..., 0].copy()
            data[..., 0] = data[..., 2]
            data[..., 2] = tmp
    if de.stored_shape == de.shape or not resize:
        return data
    # sub / supersampling
    factors = [j / i for i, j in zip(de.stored_shape, de.shape)]
    factors = [(int(round(f)) if abs(f - round(f)) < 0.0001 else f)
               for f in factors]
    # use repeat if possible
    if order == 0 and all(isinstance(f, int) for f in factors):
        data = repeat_nd(data, factors).copy()  # noqa
        data.shape = de.shape
        return data
    # remove leading dimensions with size 1 for speed
    shape = list(de.stored_shape)
    i = 0
    for s in shape:
        if s != 1:
            break
        i += 1
    shape = shape[i:]
    factors = factors[i:]
    data.shape = shape
    # resize RGB components separately for speed
    if zoom is None:
        raise ImportError("cannot import 'zoom' from scipy or ndimage")
    if shape[-1] in (3, 4) and factors[-1] == 1.0:
        factors = factors[:-1]
        old = data
        data = np.empty(de.shape, de.dtype[-2:])  # noqa
        for i in range(shape[-1]):
            data[..., i] = zoom(old[..., i], zoom=factors, order=order)
    else:
        data = zoom(data, zoom=factors, order=order)  # noqa
    data.shape = de.shape
    return data


# monkeypatch zstd into czifile
czifile.czifile.SubBlockSegment.data = data
class Reader(AbstractReader, ABC):
    """Reader for Zeiss .czi files, based on the czifile package."""
    # Low number: try this reader before the generic BioFormats fallback.
    priority = 0
    do_not_pickle = 'reader', 'filedict'

    @staticmethod
    def _can_open(path: Path) -> bool:
        return isinstance(path, Path) and path.suffix == '.czi'

    def open(self) -> None:
        # Index all subblocks (tiles) of the requested series by (c, z, t).
        self.reader = czifile.CziFile(self.path)
        filedict = {}
        for directory_entry in self.reader.filtered_subblock_directory:
            idx = self.get_index(directory_entry, self.reader.start)
            if 'S' not in self.reader.axes or self.series in range(*idx[self.reader.axes.index('S')]):
                for c in range(*idx[self.reader.axes.index('C')]):
                    for z in range(*idx[self.reader.axes.index('Z')]):
                        for t in range(*idx[self.reader.axes.index('T')]):
                            if (c, z, t) in filedict:
                                filedict[c, z, t].append(directory_entry)
                            else:
                                filedict[c, z, t] = [directory_entry]
        if len(filedict) == 0:
            raise FileNotFoundError(f'Series {self.series} not found in {self.path}.')
        self.filedict = filedict  # noqa

    def close(self) -> None:
        self.reader.close()

    def get_ome(self) -> OME:
        """Parse the czi xml metadata into an OME model object."""
        return OmeParse.get_ome(self.reader, self.filedict)

    def __frame__(self, c: int = 0, z: int = 0, t: int = 0) -> np.ndarray:
        # Stitch all subblocks (tiles) belonging to (c, z, t) into one frame.
        f = np.zeros(self.base_shape['yx'], self.dtype)
        if (c, z, t) in self.filedict:
            directory_entries = self.filedict[c, z, t]
            # Top-left corner of the mosaic, used to shift tiles to origin 0.
            # NOTE: 'f' is reused as the comprehension variable here (it shadows
            # the frame array only inside the comprehensions).
            x_min = min([f.start[f.axes.index('X')] for f in directory_entries])
            y_min = min([f.start[f.axes.index('Y')] for f in directory_entries])
            xy_min = {'X': x_min, 'Y': y_min}
            for directory_entry in directory_entries:
                subblock = directory_entry.data_segment()
                tile = subblock.data(resize=True, order=0)
                axes_min = [xy_min.get(ax, 0) for ax in directory_entry.axes]
                # Destination slice of this tile within the stitched frame.
                index = [slice(i - j - m, i - j + k)
                         for i, j, k, m in zip(directory_entry.start, self.reader.start, tile.shape, axes_min)]
                index = tuple(index[self.reader.axes.index(i)] for i in 'YX')
                f[index] = tile.squeeze()
        return f

    @staticmethod
    def get_index(directory_entry: czifile.DirectoryEntryDV, start: tuple[int]) -> list[tuple[int, int]]:
        """Return per-axis (begin, end) ranges of a subblock, relative to start."""
        return [(i - j, i - j + k) for i, j, k in zip(directory_entry.start, start, directory_entry.shape)]
class OmeParse:
size_x: int
size_y: int
size_c: int
size_z: int
size_t: int
nm = model.UnitsLength.NANOMETER
um = model.UnitsLength.MICROMETER
    @classmethod
    def get_ome(cls, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> OME:
        """Parse the czi metadata of reader into an OME model object."""
        new = cls(reader, filedict)
        new.parse()
        return new.ome
    def __init__(self, reader: czifile.CziFile, filedict: dict[tuple[int, int, int], Any]) -> None:
        """Load the czi xml metadata and cache the elements the getters use."""
        self.reader = reader
        self.filedict = filedict
        xml = reader.metadata()
        self.attachments = {i.attachment_entry.name: i.attachment_entry.data_segment()
                            for i in reader.attachments()}
        self.tree = etree.fromstring(xml)
        self.metadata = self.tree.find('Metadata')
        # The metadata layout differs between schema versions; record which one.
        version = self.metadata.find('Version')
        if version is not None:
            self.version = version.text
        else:
            self.version = self.metadata.find('Experiment').attrib['Version']
        self.ome = OME()
        self.information = self.metadata.find('Information')
        self.display_setting = self.metadata.find('DisplaySetting')
        self.experiment = self.metadata.find('Experiment')
        self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock')
        self.instrument = self.information.find('Instrument')
        self.image = self.information.find('Image')
        # NOTE(review): experiment/acquisition_block set above are overwritten
        # here; only version 1.0 keeps them, other versions get None.
        if self.version == '1.0':
            self.experiment = self.metadata.find('Experiment')
            self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock')
            self.multi_track_setup = self.acquisition_block.find('MultiTrackSetup')
        else:
            self.experiment = None
            self.acquisition_block = None
            self.multi_track_setup = None
def parse(self) -> None:
self.get_experimenters()
self.get_instruments()
self.get_detectors()
self.get_objectives()
self.get_tubelenses()
self.get_light_sources()
self.get_filters()
self.get_pixels()
self.get_channels()
self.get_planes()
self.get_annotations()
@staticmethod
def text(item: Optional[Element], default: str = "") -> str:
return default if item is None else item.text
@staticmethod
def def_list(item: Any) -> list[Any]:
return [] if item is None else item
    @staticmethod
    def try_default(fun: Callable[[Any, ...], Any] | type, default: Any = None, *args: Any, **kwargs: Any) -> Any:
        """Call fun(*args, **kwargs); on any exception return default instead."""
        try:
            return fun(*args, **kwargs)
        except Exception:  # noqa
            return default
    def get_experimenters(self) -> None:
        # The user name lives under User in v1.0 and under Document in v1.1/1.2.
        if self.version == '1.0':
            self.ome.experimenters = [
                model.Experimenter(id='Experimenter:0',
                                   user_name=self.information.find('User').find('DisplayName').text)]
        elif self.version in ('1.1', '1.2'):
            self.ome.experimenters = [
                model.Experimenter(id='Experimenter:0',
                                   user_name=self.information.find('Document').find('UserName').text)]

    def get_instruments(self) -> None:
        if self.version == '1.0':
            self.ome.instruments.append(model.Instrument(id=self.instrument.attrib['Id']))
        elif self.version in ('1.1', '1.2'):
            # One Instrument per microscope entry; all get the id 'Instrument:0'.
            for _ in self.instrument.find('Microscopes'):
                self.ome.instruments.append(model.Instrument(id='Instrument:0'))

    def get_detectors(self) -> None:
        if self.version == '1.0':
            for detector in self.instrument.find('Detectors'):
                # Fall back to OTHER when the czi type is not a valid OME type.
                try:
                    detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "")
                except ValueError:
                    detector_type = model.Detector_Type.OTHER
                self.ome.instruments[0].detectors.append(
                    model.Detector(
                        id=detector.attrib['Id'], model=self.text(detector.find('Manufacturer').find('Model')),
                        amplification_gain=float(self.text(detector.find('AmplificationGain'))),
                        gain=float(self.text(detector.find('Gain'))), zoom=float(self.text(detector.find('Zoom'))),
                        type=detector_type
                    ))
        elif self.version in ('1.1', '1.2'):
            for detector in self.instrument.find('Detectors'):
                try:
                    detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "")
                except ValueError:
                    detector_type = model.Detector_Type.OTHER
                self.ome.instruments[0].detectors.append(
                    model.Detector(
                        # v1.1+ detector ids may contain spaces; strip them for OME.
                        id=detector.attrib['Id'].replace(' ', ''),
                        model=self.text(detector.find('Manufacturer').find('Model')),
                        type=detector_type
                    ))
    def get_objectives(self) -> None:
        for objective in self.instrument.find('Objectives'):
            self.ome.instruments[0].objectives.append(
                model.Objective(
                    id=objective.attrib['Id'],
                    model=self.text(objective.find('Manufacturer').find('Model')),
                    immersion=self.text(objective.find('Immersion')),  # type: ignore
                    lens_na=float(self.text(objective.find('LensNA'))),
                    nominal_magnification=float(self.text(objective.find('NominalMagnification')))))

    def get_tubelenses(self) -> None:
        # Tube lenses are stored as extra Objective entries; their magnification
        # is parsed out of the lens name, defaulting to 1.0 when absent.
        if self.version == '1.0':
            for idx, tube_lens in enumerate({self.text(track_setup.find('TubeLensPosition'))
                                             for track_setup in self.multi_track_setup}):
                try:
                    nominal_magnification = float(re.findall(r'\d+[,.]\d*', tube_lens)[0].replace(',', '.'))
                except Exception:  # noqa
                    nominal_magnification = 1.0
                self.ome.instruments[0].objectives.append(
                    model.Objective(id=f'Objective:Tubelens:{idx}', model=tube_lens,
                                    nominal_magnification=nominal_magnification))
        elif self.version in ('1.1', '1.2'):
            for tubelens in self.def_list(self.instrument.find('TubeLenses')):
                try:
                    nominal_magnification = float(re.findall(r'\d+(?:[,.]\d*)?',
                                                             tubelens.attrib['Name'])[0].replace(',', '.'))
                except Exception:  # noqa
                    nominal_magnification = 1.0
                self.ome.instruments[0].objectives.append(
                    model.Objective(
                        id=f"Objective:{tubelens.attrib['Id']}",
                        model=tubelens.attrib['Name'],
                        nominal_magnification=nominal_magnification))

    def get_light_sources(self) -> None:
        # Only laser light sources are parsed; entries missing the expected
        # sub-elements are skipped silently.
        if self.version == '1.0':
            for light_source in self.def_list(self.instrument.find('LightSources')):
                try:
                    if light_source.find('LightSourceType').find('Laser') is not None:
                        self.ome.instruments[0].lasers.append(
                            model.Laser(
                                id=light_source.attrib['Id'],
                                model=self.text(light_source.find('Manufacturer').find('Model')),
                                power=float(self.text(light_source.find('Power'))),
                                wavelength=float(
                                    self.text(light_source.find('LightSourceType').find('Laser').find('Wavelength')))))
                except AttributeError:
                    pass
        elif self.version in ('1.1', '1.2'):
            for light_source in self.def_list(self.instrument.find('LightSources')):
                try:
                    if light_source.find('LightSourceType').find('Laser') is not None:
                        self.ome.instruments[0].lasers.append(
                            model.Laser(
                                id=f"LightSource:{light_source.attrib['Id']}",
                                power=float(self.text(light_source.find('Power'))),
                                wavelength=float(light_source.attrib['Id'][-3:])))  # TODO: follow Id reference
                except (AttributeError, ValueError):
                    pass

    def get_filters(self) -> None:
        # Only the v1.0 schema exposes beam-splitter filters here.
        if self.version == '1.0':
            for idx, filter_ in enumerate({self.text(beam_splitter.find('Filter'))
                                           for track_setup in self.multi_track_setup
                                           for beam_splitter in track_setup.find('BeamSplitters')}):
                self.ome.instruments[0].filter_sets.append(
                    model.FilterSet(id=f'FilterSet:{idx}', model=filter_)
                )
def get_pixels(self) -> None:
x_min = min([f.start[f.axes.index('X')] for f in self.filedict[0, 0, 0]])
y_min = min([f.start[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
x_max = max([f.start[f.axes.index('X')] + f.shape[f.axes.index('X')] for f in self.filedict[0, 0, 0]])
y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]])
self.size_x = x_max - x_min
self.size_y = y_max - y_min
self.size_c, self.size_z, self.size_t = (self.reader.shape[self.reader.axes.index(directory_entry)]
for directory_entry in 'CZT')
image = self.information.find('Image')
pixel_type = self.text(image.find('PixelType'), 'Gray16')
if pixel_type.startswith('Gray'):
pixel_type = 'uint' + pixel_type[4:]
objective_settings = image.find('ObjectiveSettings')
self.ome.images.append(
model.Image(
id='Image:0',
name=f"{self.text(self.information.find('Document').find('Name'))} #1",
pixels=model.Pixels(
id='Pixels:0', size_x=self.size_x, size_y=self.size_y,
size_c=self.size_c, size_z=self.size_z, size_t=self.size_t,
dimension_order='XYCZT', type=pixel_type, # type: ignore
significant_bits=int(self.text(image.find('ComponentBitCount'))),
big_endian=False, interleaved=False, metadata_only=True), # type: ignore
experimenter_ref=model.ExperimenterRef(id='Experimenter:0'),
instrument_ref=model.InstrumentRef(id='Instrument:0'),
objective_settings=model.ObjectiveSettings(
id=objective_settings.find('ObjectiveRef').attrib['Id'],
medium=self.text(objective_settings.find('Medium')), # type: ignore
refractive_index=float(self.text(objective_settings.find('RefractiveIndex')))),
stage_label=model.StageLabel(
name=f'Scene position #0',
x=self.positions[0], x_unit=self.um,
y=self.positions[1], y_unit=self.um,
z=self.positions[2], z_unit=self.um)))
for distance in self.metadata.find('Scaling').find('Items'):
if distance.attrib['Id'] == 'X':
self.ome.images[0].pixels.physical_size_x = float(self.text(distance.find('Value'))) * 1e6
elif distance.attrib['Id'] == 'Y':
self.ome.images[0].pixels.physical_size_y = float(self.text(distance.find('Value'))) * 1e6
elif self.size_z > 1 and distance.attrib['Id'] == 'Z':
self.ome.images[0].pixels.physical_size_z = float(self.text(distance.find('Value'))) * 1e6
    @cached_property
    def positions(self) -> tuple[float, float, Optional[float]]:
        """Stage position (x, y, z) of the first scene; z is None for czi 1.1/1.2."""
        if self.version == '1.0':
            scenes = self.image.find('Dimensions').find('S').find('Scenes')
            positions = scenes[0].find('Positions')[0]
            return float(positions.attrib['X']), float(positions.attrib['Y']), float(positions.attrib['Z'])
        elif self.version in ('1.1', '1.2'):
            try:  # TODO
                scenes = self.image.find('Dimensions').find('S').find('Scenes')
                center_position = [float(pos) for pos in self.text(scenes[0].find('CenterPosition')).split(',')]
            except AttributeError:
                # no scene information present: fall back to the origin
                center_position = [0, 0]
            return center_position[0], center_position[1], None
@cached_property
def channels_im(self) -> dict:
return {channel.attrib['Id']: channel for channel in self.image.find('Dimensions').find('Channels')}
@cached_property
def channels_ds(self) -> dict:
return {channel.attrib['Id']: channel for channel in self.display_setting.find('Channels')}
@cached_property
def channels_ts(self) -> dict:
return {detector.attrib['Id']: track_setup
for track_setup in
self.experiment.find('ExperimentBlocks').find('AcquisitionBlock').find('MultiTrackSetup')
for detector in track_setup.find('Detectors')}
    def get_channels(self) -> None:
        """Append one model.Channel per image channel to ome.images[0].pixels.

        czi 1.0 carries full detector/filter/light-source info; 1.1/1.2 metadata
        is sparser, so several fields fall back to None or defaults.
        """
        if self.version == '1.0':
            for idx, (key, channel) in enumerate(self.channels_im.items()):
                detector_settings = channel.find('DetectorSettings')
                laser_scan_info = channel.find('LaserScanInfo')
                detector = detector_settings.find('Detector')
                try:
                    binning = model.Binning(self.text(detector_settings.find('Binning')))
                except ValueError:
                    # binning string not one of the ome enum values
                    binning = model.Binning.OTHER
                # look up the filter set created by get_filters via its model name
                filterset = self.text(self.channels_ts[key].find('BeamSplitters')[0].find('Filter'))
                filterset_idx = [filterset.model for filterset in self.ome.instruments[0].filter_sets].index(filterset)
                light_sources_settings = channel.find('LightSourcesSettings')
                # no space in ome for multiple lightsources simultaneously
                if len(light_sources_settings) > idx:
                    light_source_settings = light_sources_settings[idx]
                else:
                    light_source_settings = light_sources_settings[0]
                light_source_settings = model.LightSourceSettings(
                    id=light_source_settings.find('LightSource').attrib['Id'],
                    attenuation=float(self.text(light_source_settings.find('Attenuation'))),
                    wavelength=float(self.text(light_source_settings.find('Wavelength'))),
                    wavelength_unit=self.nm)
                self.ome.images[0].pixels.channels.append(
                    model.Channel(
                        id=f'Channel:{idx}',
                        name=channel.attrib['Name'],
                        acquisition_mode=self.text(channel.find('AcquisitionMode')),  # type: ignore
                        color=model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')),
                        detector_settings=model.DetectorSettings(id=detector.attrib['Id'], binning=binning),
                        # emission_wavelength=text(channel.find('EmissionWavelength')),  # TODO: fix
                        excitation_wavelength=light_source_settings.wavelength,
                        filter_set_ref=model.FilterSetRef(id=self.ome.instruments[0].filter_sets[filterset_idx].id),
                        illumination_type=self.text(channel.find('IlluminationType')),  # type: ignore
                        light_source_settings=light_source_settings,
                        samples_per_pixel=int(self.text(laser_scan_info.find('Averaging')))))
        elif self.version in ('1.1', '1.2'):
            for idx, (key, channel) in enumerate(self.channels_im.items()):
                detector_settings = channel.find('DetectorSettings')
                laser_scan_info = channel.find('LaserScanInfo')
                detector = detector_settings.find('Detector')
                try:
                    color = model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white'))
                except Exception:  # noqa
                    color = None
                try:
                    # '0' means 'not recorded' in this metadata
                    if (i := self.text(channel.find('EmissionWavelength'))) != '0':
                        emission_wavelength = float(i)
                    else:
                        emission_wavelength = None
                except Exception:  # noqa
                    emission_wavelength = None
                if laser_scan_info is not None:
                    samples_per_pixel = int(self.text(laser_scan_info.find('Averaging'), '1'))
                else:
                    samples_per_pixel = 1
                try:
                    binning = model.Binning(self.text(detector_settings.find('Binning')))
                except ValueError:
                    binning = model.Binning.OTHER
                light_sources_settings = channel.find('LightSourcesSettings')
                # no space in ome for multiple lightsources simultaneously
                if light_sources_settings is not None:
                    light_source_settings = light_sources_settings[0]
                    # id concatenates all source ids; the other fields come from the first source
                    light_source_settings = model.LightSourceSettings(
                        id='LightSource:' + '_'.join([light_source_settings.find('LightSource').attrib['Id']
                                                      for light_source_settings in light_sources_settings]),
                        attenuation=self.try_default(float, None, self.text(light_source_settings.find('Attenuation'))),
                        wavelength=self.try_default(float, None, self.text(light_source_settings.find('Wavelength'))),
                        wavelength_unit=self.nm)
                else:
                    light_source_settings = None
                self.ome.images[0].pixels.channels.append(
                    model.Channel(
                        id=f'Channel:{idx}',
                        name=channel.attrib['Name'],
                        acquisition_mode=self.text(channel.find('AcquisitionMode')).replace(  # type: ignore
                            'SingleMoleculeLocalisation', 'SingleMoleculeImaging'),
                        color=color,
                        detector_settings=model.DetectorSettings(
                            id=detector.attrib['Id'].replace(' ', ""),
                            binning=binning),
                        emission_wavelength=emission_wavelength,
                        excitation_wavelength=self.try_default(float, None,
                                                               self.text(channel.find('ExcitationWavelength'))),
                        # filter_set_ref=model.FilterSetRef(id=ome.instruments[0].filter_sets[filterset_idx].id),
                        illumination_type=self.text(channel.find('IlluminationType')),  # type: ignore
                        light_source_settings=light_source_settings,
                        samples_per_pixel=samples_per_pixel))
    def get_planes(self) -> None:
        """Append a model.Plane per (t, z, c) with timestamps, exposure and stage position."""
        try:
            exposure_times = [float(self.text(channel.find('LaserScanInfo').find('FrameTime')))
                              for channel in self.channels_im.values()]
        except Exception:  # noqa
            # exposure time unavailable for at least one channel: leave all unset
            exposure_times = [None] * len(self.channels_im)
        delta_ts = self.attachments['TimeStamps'].data()
        dt = np.diff(delta_ts)
        # if the recorded frame intervals vary by more than 2%, rebuild a regular
        # time axis from the median positive interval
        if len(dt) and np.std(dt) / np.mean(dt) > 0.02:
            dt = np.median(dt[dt > 0])
            delta_ts = dt * np.arange(len(delta_ts))
            warnings.warn(f'delta_t is inconsistent, using median value: {dt}')
        for t, z, c in product(range(self.size_t), range(self.size_z), range(self.size_c)):
            self.ome.images[0].pixels.planes.append(
                model.Plane(the_c=c, the_z=z, the_t=t, delta_t=delta_ts[t],
                            exposure_time=exposure_times[c],
                            position_x=self.positions[0], position_x_unit=self.um,
                            position_y=self.positions[1], position_y_unit=self.um,
                            position_z=self.positions[2], position_z_unit=self.um))
def get_annotations(self) -> None:
idx = 0
for layer in [] if (ml := self.metadata.find('Layers')) is None else ml:
rectangle = layer.find('Elements').find('Rectangle')
if rectangle is not None:
geometry = rectangle.find('Geometry')
roi = model.ROI(id=f'ROI:{idx}', description=self.text(layer.find('Usage')))
roi.union.append(
model.Rectangle(
id='Shape:0:0',
height=float(self.text(geometry.find('Height'))),
width=float(self.text(geometry.find('Width'))),
x=float(self.text(geometry.find('Left'))),
y=float(self.text(geometry.find('Top')))))
self.ome.rois.append(roi)
self.ome.images[0].roi_refs.append(model.ROIRef(id=f'ROI:{idx}'))
idx += 1

View File

@@ -1,59 +0,0 @@
from abc import ABC
from itertools import product
from pathlib import Path
from struct import unpack
from warnings import warn
import numpy as np
from ome_types import model
from tifffile import TiffFile
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """ Can read some tif files written with Fiji which are broken because Fiji didn't finish writing. """
    priority = 90
    do_not_pickle = 'reader'
    @staticmethod
    def _can_open(path):
        # only plain (non-big) imagej tiffs are candidates for this salvage reader
        if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'):
            with TiffFile(path) as tif:
                return tif.is_imagej and not tif.is_bigtiff
        else:
            return False
    def __frame__(self, c, z, t):  # Override this, return the frame at c, z, t
        # frames are assumed to be stored contiguously after the first page's data
        # offset, each self.count bytes long; c and z are ignored (single plane files)
        self.reader.filehandle.seek(self.offset + t * self.count)
        return np.reshape(unpack(self.fmt, self.reader.filehandle.read(self.count)), self.base_shape['yx'])
    def open(self):
        warn(f'File {self.path.name} is probably damaged, opening with fijiread.')
        self.reader = TiffFile(self.path)
        assert self.reader.pages[0].compression == 1, 'Can only read uncompressed tiff files.'
        assert self.reader.pages[0].samplesperpixel == 1, 'Can only read 1 sample per pixel.'
        self.offset = self.reader.pages[0].dataoffsets[0]  # noqa
        self.count = self.reader.pages[0].databytecounts[0]  # noqa
        self.bytes_per_sample = self.reader.pages[0].bitspersample // 8  # noqa
        # struct format: byteorder + one unsigned int code per sample
        # NOTE(review): 'BHILQ'[n - 1] only handles 1, 2, 3, 4 and 5 byte samples -- verify
        self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1]  # noqa
    def close(self):
        self.reader.close()
    def get_ome(self):
        # minimal ome: sizes derived from the first page and the file size
        size_y, size_x = self.reader.pages[0].shape
        size_c, size_z = 1, 1
        # number of whole frames that fit in the remainder of the (truncated) file
        size_t = int(np.floor((self.reader.filehandle.size - self.reader.pages[0].dataoffsets[0]) / self.count))
        pixel_type = model.PixelType(self.reader.pages[0].dtype.name)
        ome = model.OME()
        ome.instruments.append(model.Instrument())
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0))
        return ome

View File

@@ -1,80 +0,0 @@
import re
from abc import ABC
from pathlib import Path
from typing import Optional
import tifffile
from ome_types import model
from ome_types.units import _quantity_property # noqa
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """Reader for folders of MetaSeries tif files named like 's<series>_t<frame>.tif'.

    Keeps the most recently used TiffFile open (do_not_pickle='last_tif') so
    consecutive frames from the same time point avoid reopening the file.
    """
    priority = 20
    do_not_pickle = 'last_tif'
    @staticmethod
    def _can_open(path):
        return isinstance(path, Path) and (path.is_dir() or
                                           (path.parent.is_dir() and path.name.lower().startswith('pos')))
    @staticmethod
    def get_positions(path: str | Path) -> Optional[list[int]]:
        # NOTE(review): r's(\d)_t' only matches single-digit series numbers -- verify
        pat = re.compile(rf's(\d)_t\d+\.(tif|TIF)$')
        return sorted({int(m.group(1)) for file in Path(path).iterdir() if (m := pat.search(file.name))})
    def get_ome(self):
        ome = model.OME()
        tif = self.get_tif(0)
        metadata = tif.metaseries_metadata
        # one file per time point; z planes are the pages within a file
        size_z = len(tif.pages)
        page = tif.pages[0]
        shape = {axis.lower(): size for axis, size in zip(page.axes, page.shape)}
        size_x, size_y = shape['x'], shape['y']
        ome.instruments.append(model.Instrument())
        size_c = 1
        size_t = max(self.filedict.keys()) + 1
        pixel_type = f"uint{metadata['PlaneInfo']['bits-per-pixel']}"
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t,
                    size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        return ome
    def open(self):
        # map 0-based time index -> file (file names are 1-based: t0001, t0002, ...)
        pat = re.compile(rf's{self.series}_t\d+\.(tif|TIF)$')
        filelist = sorted([file for file in self.path.iterdir() if pat.search(file.name)])
        pattern = re.compile(r't(\d+)$')
        self.filedict = {int(pattern.search(file.stem).group(1)) - 1: file for file in filelist}
        if len(self.filedict) == 0:
            raise FileNotFoundError
        self.last_tif = 0, tifffile.TiffFile(self.filedict[0])
    def close(self) -> None:
        self.last_tif[1].close()
    def get_tif(self, t: int = None):
        """Return an open TiffFile for time point t, reusing the cached one when possible."""
        last_t, tif = self.last_tif
        if (t is None or t == last_t) and not tif.filehandle.closed:
            return tif
        else:
            tif.close()
            tif = tifffile.TiffFile(self.filedict[t])
            self.last_tif = t, tif
            return tif
    def __frame__(self, c=0, z=0, t=0):
        # pages may be stored either YX or XY; always return YX
        tif = self.get_tif(t)
        page = tif.pages[z]
        if page.axes.upper() == 'YX':
            return page.asarray()
        elif page.axes.upper() == 'XY':
            return page.asarray().T
        else:
            raise NotImplementedError(f'reading axes {page.axes} is not implemented')

View File

@@ -1,53 +0,0 @@
from abc import ABC
from itertools import product
import numpy as np
from ome_types import model
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """Reader that wraps an in-memory numpy array (1 to 5 dimensional) as an image."""
    priority = 20
    @staticmethod
    def _can_open(path):
        # 'path' may be an ndarray instead of a real path
        return isinstance(path, np.ndarray) and 1 <= path.ndim <= 5
    def get_ome(self):
        # helper supplies default size 1 for any axis the array is missing
        def shape(size_x=1, size_y=1, size_c=1, size_z=1, size_t=1):  # noqa
            return size_x, size_y, size_c, size_z, size_t
        size_x, size_y, size_c, size_z, size_t = shape(*self.array.shape)
        try:
            pixel_type = model.PixelType(self.array.dtype.name)
        except ValueError:
            # dtype has no direct ome equivalent: pick the closest supported type
            if self.array.dtype.name.startswith('int'):
                pixel_type = model.PixelType('int32')
            else:
                pixel_type = model.PixelType('float')
        ome = model.OME()
        ome.instruments.append(model.Instrument())
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=pixel_type),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0))
        return ome
    def open(self):
        if isinstance(self.path, np.ndarray):
            self.array = np.array(self.path)
            # pad trailing singleton dimensions up to the canonical 5 axes
            while self.array.ndim < 5:
                self.array = np.expand_dims(self.array, -1)  # noqa
            self.path = 'numpy array'
    def __frame__(self, c, z, t):
        frame = self.array[:, :, c, z, t]
        # transpose when the requested axes order puts x before y
        if self.axes.find('y') > self.axes.find('x'):
            return frame.T
        else:
            return frame

View File

@@ -1,146 +0,0 @@
import re
from abc import ABC
from datetime import datetime
from itertools import product
from pathlib import Path
import tifffile
import yaml
from ome_types import model
from ome_types.units import _quantity_property # noqa
from .. import AbstractReader
def lazy_property(function, field, *arg_fields):
    """Return a property that computes *field* on first access from the attributes
    named in *arg_fields* and caches the result in the instance ``__dict__``."""
    def getter(self):
        if self.__dict__.get(field) is None:
            args = [getattr(self, name) for name in arg_fields]
            self.__dict__[field] = function(*args)
            try:
                # let pydantic/ome_types know the field now has a value
                self.model_fields_set.add(field)
            except Exception:  # noqa
                pass
        return self.__dict__[field]
    return property(getter)
class Plane(model.Plane):
    """ Lazily retrieve delta_t from metadata """
    def __init__(self, t0, file, **kwargs):  # noqa
        super().__init__(**kwargs)
        # setting fields here because they would be removed by ome_types/pydantic after class definition
        setattr(self.__class__, 'delta_t', lazy_property(self.get_delta_t, 'delta_t', 't0', 'file'))
        setattr(self.__class__, 'delta_t_quantity', _quantity_property('delta_t'))
        # store directly in __dict__ to bypass pydantic's field validation
        self.__dict__['t0'] = t0  # noqa
        self.__dict__['file'] = file  # noqa
    @staticmethod
    def get_delta_t(t0, file):
        """Time since acquisition start t0, in seconds, read from the tif's imagej Info tag."""
        with tifffile.TiffFile(file) as tif:
            info = yaml.safe_load(tif.pages[0].tags[50839].value['Info'])
            return float((datetime.strptime(info['Time'], '%Y-%m-%d %H:%M:%S %z') - t0).seconds)
class Reader(AbstractReader, ABC):
    """Reader for micromanager 'Pos' folders of individual tif files, one file per
    (channel, z, t) plane, with metadata in the imagej tag (50839).
    """
    priority = 10
    @staticmethod
    def _can_open(path):
        # a 'Pos...' folder itself, or a folder containing 'Pos...' subfolders
        pat = re.compile(r'(?:\d+-)?Pos.*', re.IGNORECASE)
        return (isinstance(path, Path) and path.is_dir() and
                (pat.match(path.name) or any(file.is_dir() and pat.match(file.stem) for file in path.iterdir())))
    def get_ome(self):
        ome = model.OME()
        with tifffile.TiffFile(self.filedict[0, 0, 0]) as tif:
            metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()}
        ome.experimenters.append(
            model.Experimenter(id='Experimenter:0', user_name=metadata['Info']['Summary']['UserName']))
        # objective and tube lens magnification are parsed from their Zeiss label strings
        objective_str = metadata['Info']['ZeissObjectiveTurret-Label']
        ome.instruments.append(model.Instrument())
        ome.instruments[0].objectives.append(
            model.Objective(
                id='Objective:0', manufacturer='Zeiss', model=objective_str,
                nominal_magnification=float(re.findall(r'(\d+)x', objective_str)[0]),
                lens_na=float(re.findall(r'/(\d\.\d+)', objective_str)[0]),
                immersion=model.Objective_Immersion.OIL if 'oil' in objective_str.lower() else None))
        tubelens_str = metadata['Info']['ZeissOptovar-Label']
        ome.instruments[0].objectives.append(
            model.Objective(
                id='Objective:Tubelens:0', manufacturer='Zeiss', model=tubelens_str,
                nominal_magnification=float(re.findall(r'\d?\d*[,.]?\d+(?=x$)', tubelens_str)[0].replace(',', '.'))))
        ome.instruments[0].detectors.append(
            model.Detector(
                id='Detector:0', amplification_gain=100))
        ome.instruments[0].filter_sets.append(
            model.FilterSet(id='FilterSet:0', model=metadata['Info']['ZeissReflectorTurret-Label']))
        pxsize = metadata['Info']['PixelSizeUm']
        # fall back to camera pixel size / magnification when micromanager reports 0
        # NOTE(review): pxsize_cam is None for non-Hamamatsu cameras, which would fail here -- verify
        pxsize_cam = 6.5 if 'Hamamatsu' in metadata['Info']['Core-Camera'] else None
        if pxsize == 0:
            pxsize = pxsize_cam / ome.instruments[0].objectives[0].nominal_magnification
        pixel_type = metadata['Info']['PixelType'].lower()
        if pixel_type.startswith('gray'):
            pixel_type = 'uint' + pixel_type[4:]
        else:
            pixel_type = 'uint16'  # assume
        size_c, size_z, size_t = (max(i) + 1 for i in zip(*self.filedict.keys()))
        t0 = datetime.strptime(metadata['Info']['Time'], '%Y-%m-%d %H:%M:%S %z')
        ome.images.append(
            model.Image(
                pixels=model.Pixels(
                    size_c=size_c, size_z=size_z, size_t=size_t,
                    size_x=metadata['Info']['Width'], size_y=metadata['Info']['Height'],
                    dimension_order='XYCZT',  # type: ignore
                    type=pixel_type, physical_size_x=pxsize, physical_size_y=pxsize,
                    physical_size_z=metadata['Info']['Summary']['z-step_um']),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            # custom Plane subclass reads delta_t lazily from each file's metadata
            ome.images[0].pixels.planes.append(
                Plane(t0, self.filedict[c, z, t],
                      the_c=c, the_z=z, the_t=t, exposure_time=metadata['Info']['Exposure-ms'] / 1000))
        # compare channel names from metadata with filenames
        pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE)
        for c in range(size_c):
            ome.images[0].pixels.channels.append(
                model.Channel(
                    id=f'Channel:{c}', name=pattern_c.findall(self.filedict[c, 0, 0].stem)[0],
                    detector_settings=model.DetectorSettings(
                        id='Detector:0', binning=metadata['Info']['Hamamatsu_sCMOS-Binning']),
                    filter_set_ref=model.FilterSetRef(id='FilterSet:0')))
        return ome
    def open(self):
        # /some_path/Pos4: path = /some_path, series = 4
        # /some_path/5-Pos_001_005: path = /some_path/5-Pos_001_005, series = 0
        if re.match(r'(?:\d+-)?Pos.*', self.path.name, re.IGNORECASE) is None:
            pat = re.compile(rf'^(?:\d+-)?Pos{self.series}$', re.IGNORECASE)
            files = sorted(file for file in self.path.iterdir() if pat.match(file.name))
            if len(files):
                path = files[0]
            else:
                raise FileNotFoundError(self.path / pat.pattern)
        else:
            path = self.path
        pat = re.compile(r'^img_\d{3,}.*\d{3,}.*\.tif$', re.IGNORECASE)
        filelist = sorted([file for file in path.iterdir() if pat.search(file.name)])
        with tifffile.TiffFile(self.path / filelist[0]) as tif:
            metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()}
        # compare channel names from metadata with filenames
        cnamelist = metadata['Info']['Summary']['ChNames']
        cnamelist = [c for c in cnamelist if any([c in f.name for f in filelist])]
        # build (c, z, t) -> file mapping from the img_<t>_<channel>_<z> naming scheme
        pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE)
        pattern_z = re.compile(r'(\d{3,})$')
        pattern_t = re.compile(r'img_(\d{3,})', re.IGNORECASE)
        self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]),  # noqa
                          int(pattern_z.findall(file.stem)[0]),
                          int(pattern_t.findall(file.stem)[0])): file for file in filelist}
    def __frame__(self, c=0, z=0, t=0):
        return tifffile.imread(self.path / self.filedict[(c, z, t)])

View File

@@ -1,85 +0,0 @@
from abc import ABC
from functools import cached_property
from itertools import product
from pathlib import Path
import numpy as np
import tifffile
import yaml
from ome_types import model
from .. import AbstractReader
class Reader(AbstractReader, ABC):
    """Reader for complete (fully written) imagej tif files via tifffile."""
    priority = 0
    do_not_pickle = 'reader'
    @staticmethod
    def _can_open(path):
        if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'):
            with tifffile.TiffFile(path) as tif:
                # _nextifd() == 0 means the ifd chain is properly terminated (file complete)
                return tif.is_imagej and tif.pages[-1]._nextifd() == 0  # noqa
        else:
            return False
    @cached_property
    def metadata(self):
        # imagej metadata values may be yaml strings; parse those, pass others through
        return {key: yaml.safe_load(value) if isinstance(value, str) else value
                for key, value in self.reader.imagej_metadata.items()}
    def get_ome(self):
        page = self.reader.pages[0]
        size_y = page.imagelength
        size_x = page.imagewidth
        if self.p_ndim == 3:
            # channels stored as samples within one page
            size_c = page.samplesperpixel
            size_t = self.metadata.get('frames', 1)  # // C
        else:
            size_c = self.metadata.get('channels', 1)
            size_t = self.metadata.get('frames', 1)
        size_z = self.metadata.get('slices', 1)
        # tags 282/296: XResolution / ResolutionUnit; unit 1 means 'no absolute unit'
        if 282 in page.tags and 296 in page.tags and page.tags[296].value == 1:
            f = page.tags[282].value
            pxsize = f[1] / f[0]
        else:
            pxsize = None
        dtype = page.dtype.name
        if dtype not in ('int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32',
                         'float', 'double', 'complex', 'double-complex', 'bit'):
            dtype = 'float'
        interval_t = self.metadata.get('interval', 0)
        ome = model.OME()
        ome.instruments.append(model.Instrument(id='Instrument:0'))
        ome.instruments[0].objectives.append(model.Objective(id='Objective:0'))
        ome.images.append(
            model.Image(
                id='Image:0',
                pixels=model.Pixels(
                    id='Pixels:0',
                    size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y,
                    dimension_order='XYCZT', type=dtype,  # type: ignore
                    physical_size_x=pxsize, physical_size_y=pxsize),
                objective_settings=model.ObjectiveSettings(id='Objective:0')))
        for c, z, t in product(range(size_c), range(size_z), range(size_t)):
            ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=interval_t * t))
        return ome
    def open(self):
        self.reader = tifffile.TiffFile(self.path)
        page = self.reader.pages[0]
        self.p_ndim = page.ndim  # noqa
        if self.p_ndim == 3:
            # precompute the permutation that brings page axes into SYX order
            self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0]  # noqa
    def close(self):
        self.reader.close()
    def __frame__(self, c, z, t):
        if self.p_ndim == 3:
            # channel is a sample axis within the page
            return np.transpose(self.reader.asarray(z + t * self.base_shape['z']), self.p_transpose)[c]
        else:
            return self.reader.asarray(c + z * self.base_shape['c'] + t * self.base_shape['c'] * self.base_shape['z'])

624
py/ndbioimage/__init__.py Executable file
View File

@@ -0,0 +1,624 @@
from __future__ import annotations
import os
import warnings
from abc import ABC
from argparse import ArgumentParser
from collections import OrderedDict
from functools import cached_property, wraps
from importlib.metadata import version
from itertools import product
from pathlib import Path
from typing import Any, Callable, Optional, Sequence, TypeVar
import numpy as np
from numpy.typing import ArrayLike, DTypeLike
from tiffwrite import FrameInfo, IJTiffParallel
from tqdm.auto import tqdm
from ome_metadata import Ome
from ome_metadata.ome_metadata_rs import Length # noqa
from . import ndbioimage_rs as rs # noqa
from .transforms import Transform, Transforms # noqa: F401
try:
    # package version from installed metadata; falls back below when not installed
    __version__ = version(Path(__file__).parent.name)
except Exception:  # noqa
    __version__ = "unknown"
try:
    # record the git commit hash when running from a source checkout
    with open(Path(__file__).parent.parent / ".git" / "HEAD") as g:
        head = g.read().split(":")[1].strip()
    with open(Path(__file__).parent.parent / ".git" / head) as h:
        __git_commit_hash__ = h.read().rstrip("\n")
except Exception:  # noqa
    __git_commit_hash__ = "unknown"
warnings.filterwarnings("ignore", "Reference to unknown ID")
# any scalar numeric type accepted throughout this module
Number = int | float | np.integer | np.floating
# files in deps/ ship with a trailing underscore; rename them into place on first
# import -- NOTE(review): presumably a workaround for wheel/packaging tools, verify
for dep_file in (Path(__file__).parent / "deps").glob("*_"):
    dep_file.rename(str(dep_file)[:-1])
# fetch the bioformats jar on first use if it is not bundled
if not list((Path(__file__).parent / "jassets").glob("bioformats*.jar")):
    rs.download_bioformats(True)
class ReaderNotFoundError(Exception):
    """Raised when no suitable reader can be found for a given input."""
class TransformTiff(IJTiffParallel):
    """transform frames in a parallel process to speed up saving"""
    def __init__(self, image: Imread, *args: Any, **kwargs: Any) -> None:
        # the source image whose frames are fetched in the worker processes
        self.image = image
        super().__init__(*args, **kwargs)
    def parallel(self, frame: tuple[int, int, int]) -> tuple[FrameInfo]:
        # frame is a (c, z, t) index into self.image; emit it at tile position 0, 0, 0
        return ((np.asarray(self.image(*frame)), 0, 0, 0),)
class DequeDict(OrderedDict):
    """An OrderedDict that keeps at most *maxlen* items, discarding the oldest
    (FIFO) whenever the limit is exceeded; maxlen=None means unbounded.
    """
    def __init__(self, maxlen: Optional[int] = None, *args: Any, **kwargs: Any) -> None:
        # fix: annotation was 'int = None' (implicit Optional, deprecated by PEP 484)
        self.maxlen = maxlen
        super().__init__(*args, **kwargs)
    def __setitem__(self, *args: Any, **kwargs: Any) -> None:
        super().__setitem__(*args, **kwargs)
        self.truncate()
    def truncate(self) -> None:
        """Drop the oldest entries until len(self) <= maxlen."""
        if self.maxlen is not None:
            while len(self) > self.maxlen:
                self.popitem(False)  # last=False: pop from the front (oldest)
    def update(self, *args: Any, **kwargs: Any) -> None:
        super().update(*args, **kwargs)  # type: ignore
        self.truncate()
def find(obj: Sequence[Any], **kwargs: Any) -> Any:
    """Return the first item in *obj* whose attributes equal all given
    keyword/value pairs, or None when nothing matches.

    Items lacking one of the requested attributes are skipped rather than raising.
    """
    for item in obj:
        try:
            # generator instead of a materialized list: short-circuits on first mismatch
            if all(getattr(item, key) == value for key, value in kwargs.items()):
                return item
        except AttributeError:
            pass
    return None
R = TypeVar("R")
def try_default(fun: Callable[..., R], default: Any, *args: Any, **kwargs: Any) -> R:
    """Call ``fun(*args, **kwargs)``; on any exception return *default* instead."""
    try:
        result = fun(*args, **kwargs)
    except Exception:  # noqa  -- deliberate catch-all: this is best-effort by design
        return default
    return result
class Shape(tuple):
    """A tuple of sizes paired with an axis string, indexable by axis name.

    ``shape['x']`` yields the size along x (1 when the axis is absent);
    ``shape['yx']`` yields a tuple of sizes; integer/slice indexing works as
    on a plain tuple.
    """
    def __new__(cls, shape: Sequence[int] | Shape, axes: str = "yxczt") -> Shape:
        if isinstance(shape, Shape):
            # copying another Shape: inherit its axes
            axes = shape.axes  # type: ignore
        instance = super().__new__(cls, shape)
        instance.axes = axes.lower()
        return instance  # type: ignore
    def __getitem__(self, n: int | str) -> int | tuple[int]:
        if not isinstance(n, str):
            return super().__getitem__(n)
        if len(n) != 1:
            # multi-axis lookup: one size per requested axis
            return tuple(self[axis] for axis in n)  # type: ignore
        axis = n.lower()
        return self[self.axes.find(axis)] if axis in self.axes else 1
    @cached_property
    def yxczt(self) -> tuple[int, int, int, int, int]:
        # canonical 5-tuple, missing axes filled in with size 1
        return tuple(self[axis] for axis in "yxczt")  # type: ignore
class OmeCache(DequeDict):
    """prevent (potentially expensive) rereading of ome data by caching"""
    # process-wide singleton; __new__ always hands back the same instance
    instance = None
    def __new__(cls) -> OmeCache:
        if cls.instance is None:
            cls.instance = super().__new__(cls)
        return cls.instance
    def __init__(self) -> None:
        # bounded to 64 entries (DequeDict evicts the oldest)
        super().__init__(64)
    def __reduce__(self) -> tuple[type, tuple]:
        # unpickling re-enters __new__ and thus yields the singleton again
        return self.__class__, ()
    def __getitem__(self, path: Path | str | tuple) -> Ome:
        # keys are (path, lstat, sidecar-lstat) tuples so a changed file misses the cache
        if isinstance(path, tuple):
            return super().__getitem__(path)
        else:
            return super().__getitem__(self.path_and_lstat(path))
    def __setitem__(self, path: Path | str | tuple, value: Ome) -> None:
        if isinstance(path, tuple):
            super().__setitem__(path, value)
        else:
            super().__setitem__(self.path_and_lstat(path), value)
    def __contains__(self, path: Path | str | tuple) -> bool:
        if isinstance(path, tuple):
            return super().__contains__(path)
        else:
            return super().__contains__(self.path_and_lstat(path))
    @staticmethod
    def path_and_lstat(
        path: str | Path,
    ) -> tuple[Path, Optional[os.stat_result], Optional[os.stat_result]]:
        """Cache key: the path plus lstat results of the file and its .ome.xml sidecar."""
        path = Path(path)
        return (
            path,
            (path.lstat() if path.exists() else None),
            (
                path.with_suffix(".ome.xml").lstat()
                if path.with_suffix(".ome.xml").exists()
                else None
            ),
        )
def get_positions(path: str | Path) -> Optional[list[int]]:  # noqa
    """Return the series indices available at *path*; discovery is not implemented yet."""
    # TODO: implement series discovery
    return None
class Imread(rs.View, np.lib.mixins.NDArrayOperatorsMixin, ABC):
"""class to read image files, while taking good care of important metadata,
currently optimized for .czi files, but can open anything that bioformats can handle
path: path to the image file
optional:
    axes: order of axes, default: cztyx, but omitting any axes with length 1
dtype: datatype to be used when returning frames
Examples:
    >> im = Imread('/path/to/file.image', axes='czt')
>> im
<< shows summary
>> im.shape
<< (15, 26, 1000, 1000)
>> im.axes
<< 'ztyx'
>> plt.imshow(im[1, 0])
<< plots frame at position z=1, t=0 (python type indexing)
>> plt.imshow(im[:, 0].max('z'))
<< plots max-z projection at t=0
>> im.pxsize
<< 0.09708737864077668 image-plane pixel size in um
>> im.laserwavelengths
<< [642, 488]
>> im.laserpowers
<< [0.02, 0.0005] in %
See __init__ and other functions for more ideas.
# TODO: argmax, argmin, nanmax, nanmin, nanmean, nansum, nanstd, nanvar, std, var, squeeze
"""
    def __getitem__(self, item):
        # slicing yields another rs.View; wrap it so the result is an Imread again
        new = super().__getitem__(item)
        return Imread(new) if isinstance(new, rs.View) else new
def __copy__(self):
Imread(super().__copy__())
def copy(self):
Imread(super().copy())
def astype(self):
Imread(super().astype())
    def squeeze(self):
        # drop singleton axes; wrap the resulting view back into an Imread
        new = super().squeeze()
        return Imread(new) if isinstance(new, rs.View) else new
    def min(self, *args, **kwargs) -> Imread | float:
        # reduce via the underlying rs.View; axis reductions stay Imread, full reductions become scalar
        new = super().min(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    def max(self, *args, **kwargs) -> Imread | float:
        # reduce via the underlying rs.View; axis reductions stay Imread, full reductions become scalar
        new = super().max(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    def mean(self, *args, **kwargs) -> Imread | float:
        # reduce via the underlying rs.View; axis reductions stay Imread, full reductions become scalar
        new = super().mean(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    def sum(self, *args, **kwargs) -> Imread | float:
        # reduce via the underlying rs.View; axis reductions stay Imread, full reductions become scalar
        new = super().sum(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    def transpose(self, *args, **kwargs) -> Imread | float:
        # reorder axes on the underlying view; wrap the result as an Imread
        new = super().transpose(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    def swap_axes(self, *args, **kwargs) -> Imread | float:
        # swap two axes on the underlying view; wrap the result as an Imread
        new = super().swap_axes(*args, **kwargs)
        return Imread(new) if isinstance(new, rs.View) else new
    @property
    def T(self) -> Imread | float:
        # transposed view, analogous to numpy's ndarray.T
        return Imread(super().T)
    @staticmethod
    def get_positions(path: str | Path) -> Optional[list[int]]:  # noqa
        # TODO: implement series discovery (mirrors the module-level get_positions)
        return None
@staticmethod
def as_axis(axis):
if axis is None:
return None
elif isinstance(axis, int):
return axis
else:
return str(axis)
    @wraps(np.moveaxis)
    def moveaxis(self, source, destination):
        # deliberately unsupported on lazy views; use transpose/swap_axes instead
        raise NotImplementedError("moveaxis is not implemented")
    @wraps(np.ndarray.flatten)
    def flatten(self, *args, **kwargs) -> np.ndarray:
        # materializes the whole image into memory before flattening
        return np.asarray(self).flatten(*args, **kwargs)
    @wraps(np.ndarray.reshape)
    def reshape(self, *args, **kwargs) -> np.ndarray:
        # materializes the whole image into memory before reshaping
        return np.asarray(self).reshape(*args, **kwargs)  # noqa
    def as_array(self) -> np.ndarray:
        """Materialize the image as a numpy array."""
        return self.__array__()
    @wraps(np.ndarray.astype)
    def astype(self, dtype: DTypeLike, *_, **__) -> Imread:
        # extra positional/keyword args from the numpy signature are accepted but ignored
        return Imread(super().astype(str(np.dtype(dtype))))
    @staticmethod
    def fix_ome(ome: Ome) -> Ome:
        """Fill in missing physical_size_z by differencing the plane z positions.

        Any failure (missing units, malformed planes, ...) leaves the image untouched.
        """
        # fix ome if necessary
        for image in ome.image:
            try:
                if (
                    image.pixels.physical_size_z is None
                    and len(set([plane.the_z for plane in image.pixels.planes])) > 1
                ):
                    # (position in um, z index) for the planes of the first channel/time point
                    z = np.array(
                        [
                            (
                                plane.position_z_unit.convert("um", plane.position_z),
                                plane.the_z,
                            )
                            for plane in image.pixels.planes
                            if plane.the_c == 0 and plane.the_t == 0
                        ]
                    )
                    i = np.argsort(z[:, 1])
                    # mean position step per z index step
                    # NOTE(review): the * 1e6 factor on top of the um conversion looks
                    # like a unit mismatch -- verify against ome_metadata's expectations
                    image.pixels.physical_size_z = (
                        np.nanmean(np.true_divide(*np.diff(z[i], axis=0).T)) * 1e6
                    )
                    image.pixels.physical_size_z_unit = Length("um")  # type: ignore
            except Exception:  # noqa
                pass
        return ome
@staticmethod
def read_ome(path: str | Path) -> Optional[Ome]:
path = Path(path) # type: ignore
if path.with_suffix(".ome.xml").exists():
return Ome.from_xml(path.with_suffix(".ome.xml").read_text())
return None
    def get_ome(self) -> Ome:
        """OME metadata structure"""
        # parsed from the xml produced by the rust-side reader
        return Ome.from_xml(self.get_ome_xml())
    @cached_property
    def ome(self) -> Ome:
        """OME metadata for this file, via the process-wide OmeCache.

        A sidecar .ome.xml file takes precedence over metadata extracted from
        the image file itself; the result is patched up by fix_ome.
        """
        cache = OmeCache()
        if self.path not in cache:
            ome = self.read_ome(self.path)
            if ome is None:
                ome = self.get_ome()
            cache[self.path] = self.fix_ome(ome)
        return cache[self.path]
def is_noise(self, volume: ArrayLike = None) -> bool:
    """True if volume only has noise"""
    data = self if volume is None else volume
    spectrum = np.fft.fftn(data)
    power = spectrum * spectrum.conj()
    corr = np.fft.fftshift(np.fft.ifftn(power).real / np.sum(data**2))
    corner = tuple([0] * corr.ndim)
    return 1 - corr[corner] < 0.0067
@staticmethod
def kill_vm() -> None:
    """Does nothing; presumably retained so callers of the old JVM-based API keep working."""
    pass
def save_as_movie(
    self,
    fname: Path | str = None,
    c: int | Sequence[int] = None,  # noqa
    z: int | Sequence[int] = None,  # noqa
    t: str | int | Sequence[int] = None,  # noqa
    colors: tuple[str] = None,
    brightnesses: tuple[float] = None,
    scale: int = None,
    bar: bool = True,
) -> None:
    """saves the image as a mp4 or mkv file

    fname: output path; {name} and {path} are filled from the source file
    c, z: currently unused (kept for a signature parallel to save_as_tiff)
    t: frames to include; a str is interpreted as python slicing notation
    colors: one matplotlib color per channel, default r, g, b then white
    brightnesses: per-channel divisor applied to the upper display limit
    scale: integer xy upscaling factor, applied by the ffmpeg scale filter
    bar: show a progress bar
    """
    # optional dependencies, only needed for movie export
    from matplotlib.colors import to_rgb
    from skvideo.io import FFmpegWriter

    if t is None:
        t = np.arange(self.shape["t"])
    elif isinstance(t, str):
        # NOTE(review): eval on a caller-supplied string; only use with trusted input
        t = eval(f"np.arange(self.shape['t'])[{t}]")
    elif np.isscalar(t):
        t = (t,)

    def get_ab(
        tyx: Imread, p: tuple[float, float] = (1, 99)
    ) -> tuple[float, float]:
        # display range from percentiles of the positive pixels,
        # with fallbacks for flat data
        s = tyx.flatten()
        s = s[s > 0]
        a, b = np.percentile(s, p)
        if a == b:
            a, b = np.min(s), np.max(s)
        if a == b:
            a, b = 0, 1
        return a, b

    def cframe(
        frame: ArrayLike,
        color: str,
        a: float,
        b: float,
        scale: float = 1,  # noqa
    ) -> np.ndarray:
        # map a 2d frame to rgb uint8 between display limits a and b;
        # scale is unused here: upscaling happens in the ffmpeg scale filter
        color = to_rgb(color)
        frame = (frame - a) / (b - a)
        frame = np.dstack([255 * frame * i for i in color])
        return np.clip(np.round(frame), 0, 255).astype("uint8")

    # per-channel display limits over the full cztyx stack
    ab = list(zip(*[get_ab(i) for i in self.transpose("cztyx")]))  # type: ignore
    colors = colors or ("r", "g", "b")[: self.shape["c"]] + max(
        0, self.shape["c"] - 3
    ) * ("w",)
    brightnesses = brightnesses or (1,) * self.shape["c"]
    scale = scale or 1
    # yuv420p requires even frame dimensions
    shape_x = 2 * ((self.shape["x"] * scale + 1) // 2)
    shape_y = 2 * ((self.shape["y"] * scale + 1) // 2)
    with FFmpegWriter(
        str(fname).format(name=self.path.stem, path=str(self.path.parent)),
        outputdict={
            "-vcodec": "libx264",
            "-preset": "veryslow",
            "-pix_fmt": "yuv420p",
            "-r": "7",
            "-vf": f"setpts={25 / 7}*PTS,scale={shape_x}:{shape_y}:flags=neighbor",
        },
    ) as movie:
        im = self.transpose("tzcyx")  # type: ignore
        for ti in tqdm(t, desc="Saving movie", disable=not bar):
            # z max projection, one color per channel, then max-blend channels
            movie.writeFrame(
                np.max(
                    [
                        cframe(yx, c, a, b / s, scale)
                        for yx, a, b, c, s in zip(
                            im[ti].max("z"), *ab, colors, brightnesses
                        )
                    ],
                    0,
                )
            )
def save_as_tiff(
    self,
    fname: Path | str = None,
    c: int | Sequence[int] = None,
    z: int | Sequence[int] = None,
    t: int | Sequence[int] = None,
    split: bool = False,
    bar: bool = True,
    pixel_type: str = "uint16",
    **kwargs: Any,
) -> None:
    """saves the image as a tif file
    split: split channels into different files

    fname: destination; {name} and {path} are filled from the source file's
        stem and parent; defaults to the source path with a .tif suffix
    c, z, t: channel/slice/frame selections, int or sequence; None selects all
    pixel_type: numpy dtype name for the saved pixels
    """
    # fix: test for a missing fname BEFORE formatting -- formatting first
    # turned None into the literal path "None", making the default dead code
    if fname is None:
        fname = self.path.with_suffix(".tif")
    else:
        fname = Path(
            str(fname).format(name=self.path.stem, path=str(self.path.parent))
        )
    if fname == self.path:
        raise FileExistsError(f"File {fname} exists already.")
    if split:
        # one file per channel; keep the time axis for timeseries, z otherwise
        for i in range(self.shape["c"]):
            if self.timeseries:
                z_sel, t_sel = 0, None
            else:
                z_sel, t_sel = None, 0
            self.save_as_tiff(
                fname.with_name(f"{fname.stem}_C{i:01d}").with_suffix(".tif"),
                i,
                z_sel,
                t_sel,
                False,
                bar,
                pixel_type,
            )
    else:
        # normalize each of c, z, t to an iterable of indices
        n = [c, z, t]
        for i, ax in enumerate("czt"):
            if n[i] is None:
                n[i] = range(self.shape[ax])
            elif not isinstance(n[i], (tuple, list)):
                n[i] = (n[i],)
        shape = [len(i) for i in n]
        with TransformTiff(
            self,
            fname.with_suffix(".tif"),
            dtype=pixel_type,
            pxsize=self.pxsize,
            deltaz=self.deltaz,
            **kwargs,
        ) as tif:
            # iterate output indices and source indices in lockstep
            for i, m in tqdm(  # noqa
                zip(product(*[range(s) for s in shape]), product(*n)),  # noqa
                total=np.prod(shape),
                desc="Saving tiff",
                disable=not bar,
            ):
                tif.save(m, *i)  # type: ignore
def with_transform(
    self,
    channels: bool = True,
    drift: bool = False,
    file: Path | str = None,
    bead_files: Sequence[Path | str] = (),
) -> Imread:
    """returns a view where channels and/or frames are registered with an affine transformation
    channels: True/False register channels using bead_files
    drift: True/False register frames to correct drift
    file: load registration from file with name file, default: transform.yml in self.path.parent
    bead_files: files used to register channels, default: files in self.path.parent,
    with names starting with 'beads'
    """
    # NOTE(review): the commented code below is the previous implementation,
    # kept as a reference until transforms are ported to the new backend
    raise NotImplementedError("transforms are not yet implemented")
    # view = self.copy()
    # if file is None:
    #     file = Path(view.path.parent) / 'transform.yml'
    # else:
    #     file = Path(file)
    # if not bead_files:
    #     try:
    #         bead_files = Transforms.get_bead_files(view.path.parent)
    #     except Exception:  # noqa
    #         if not file.exists():
    #             raise Exception('No transform file and no bead file found.')
    #         bead_files = ()
    #
    # if channels:
    #     try:
    #         view.transform = Transforms.from_file(file, T=drift)
    #     except Exception:  # noqa
    #         view.transform = Transforms().with_beads(view.cyllens, bead_files)
    #         if drift:
    #             view.transform = view.transform.with_drift(view)
    #         view.transform.save(file.with_suffix('.yml'))
    #         view.transform.save_channel_transform_tiff(bead_files, file.with_suffix('.tif'))
    # elif drift:
    #     try:
    #         view.transform = Transforms.from_file(file, C=False)
    #     except Exception:  # noqa
    #         view.transform = Transforms().with_drift(self)
    # view.transform.adapt(view.frameoffset, view.shape.yxczt, view.channel_names)
    # return view
def main() -> None:
    """Command-line entry point: print file info, extract OME metadata,
    and/or convert image files to tiff or movie output."""
    parser = ArgumentParser(description="Display info and save as tif")
    parser.add_argument("-v", "--version", action="version", version=__version__)
    parser.add_argument("file", help="image_file", type=str, nargs="*")
    parser.add_argument(
        "-w",
        "--write",
        help="path to tif/movie out, {folder}, {name} and {ext} take this from file in",
        type=str,
        default=None,
    )
    parser.add_argument(
        "-o", "--extract_ome", help="extract ome to xml file", action="store_true"
    )
    parser.add_argument(
        "-r", "--register", help="register channels", action="store_true"
    )
    parser.add_argument("-c", "--channel", help="channel", type=int, default=None)
    parser.add_argument("-z", "--zslice", help="z-slice", type=int, default=None)
    parser.add_argument(
        "-t",
        "--time",
        help="time (frames) in python slicing notation",
        type=str,
        default=None,
    )
    parser.add_argument("-s", "--split", help="split channels", action="store_true")
    parser.add_argument("-f", "--force", help="force overwrite", action="store_true")
    parser.add_argument(
        "-C", "--movie-colors", help="colors for channels in movie", type=str, nargs="*"
    )
    parser.add_argument(
        "-B",
        "--movie-brightnesses",
        help="scale brightness of each channel",
        type=float,
        nargs="*",
    )
    parser.add_argument(
        "-S", "--movie-scale", help="upscale movie xy size, int", type=float
    )
    args = parser.parse_args()

    # outer progress bar only when operating on multiple files
    for file in tqdm(args.file, desc="operating on files", disable=len(args.file) == 1):
        file = Path(file)
        with Imread(file) as im:  # noqa
            if args.register:
                im = im.with_transform()  # noqa
            if args.write:
                # fill {folder}/{name}/{ext} placeholders from the input file
                write = Path(
                    args.write.format(
                        folder=str(file.parent), name=file.stem, ext=file.suffix
                    )
                ).absolute()  # noqa
                write.parent.mkdir(parents=True, exist_ok=True)
                if write.exists() and not args.force:
                    print(
                        f"File {args.write} exists already, add the -f flag if you want to overwrite it."
                    )
                elif write.suffix in (".mkv", ".mp4"):
                    # movie output; per-file progress bar only for a single input
                    im.save_as_movie(
                        write,
                        args.channel,
                        args.zslice,
                        args.time,
                        args.movie_colors,
                        args.movie_brightnesses,
                        args.movie_scale,
                        bar=len(args.file) == 1,
                    )
                else:
                    # any other suffix is treated as tiff output
                    im.save_as_tiff(
                        write,
                        args.channel,
                        args.zslice,
                        args.time,
                        args.split,
                        bar=len(args.file) == 1,
                    )
            if args.extract_ome:
                # write the OME metadata next to the source file
                with open(im.path.with_suffix(".ome.xml"), "w") as f:
                    f.write(im.ome.to_xml())
            if len(args.file) == 1:
                print(im.summary)

39
py/ndbioimage/ome.py Normal file
View File

@@ -0,0 +1,39 @@
from __future__ import annotations
from ome_metadata import ome_metadata_rs as rs # noqa
from collections import UserDict, UserList
class Ome(UserDict):
    """Dict-like wrapper around OME metadata offering attribute access;
    nested dicts and lists are wrapped as Ome and OmeList on access."""

    @staticmethod
    def from_xml(xml: str) -> Ome:
        """Create the OME structure from an XML string"""
        ome = Ome()
        ome.update(rs.ome(str(xml)))
        return ome

    def __dir__(self) -> list[str]:
        # expose metadata keys for tab completion alongside regular attributes
        return list(self.keys()) + list(super().__dir__())

    def __getattr__(self, key: str) -> Ome | OmeList | int | float | str:
        try:
            value = self[key]
        except KeyError:
            raise AttributeError(f"'Ome' object has no attribute '{key}'")
        if isinstance(value, dict):
            return Ome(**value)
        if isinstance(value, list):
            return OmeList(value)
        return value
class OmeList(UserList):
    """List wrapper that converts nested dicts/lists to Ome/OmeList on access."""

    def __getitem__(self, item: int) -> Ome | OmeList | int | float | str:
        value = super().__getitem__(item)
        if isinstance(value, dict):
            return Ome(**value)
        if isinstance(value, list):
            return OmeList(value)
        return value

View File

@@ -21,7 +21,7 @@ except ImportError:
DataFrame, Series, concat = None, None, None
if hasattr(yaml, 'full_load'):
if hasattr(yaml, "full_load"):
yamlload = yaml.full_load
else:
yamlload = yaml.load
@@ -34,7 +34,7 @@ class Transforms(dict):
@classmethod
def from_file(cls, file, C=True, T=True):
with open(Path(file).with_suffix('.yml')) as f:
with open(Path(file).with_suffix(".yml")) as f:
return cls.from_dict(yamlload(f), C, T)
@classmethod
@@ -42,7 +42,9 @@ class Transforms(dict):
new = cls()
for key, value in d.items():
if isinstance(key, str) and C:
new[key.replace(r'\:', ':').replace('\\\\', '\\')] = Transform.from_dict(value)
new[key.replace(r"\:", ":").replace("\\\\", "\\")] = (
Transform.from_dict(value)
)
elif T:
new[key] = Transform.from_dict(value)
return new
@@ -69,11 +71,19 @@ class Transforms(dict):
return new
def asdict(self):
return {key.replace('\\', '\\\\').replace(':', r'\:') if isinstance(key, str) else key: value.asdict()
for key, value in self.items()}
return {
key.replace("\\", "\\\\").replace(":", r"\:")
if isinstance(key, str)
else key: value.asdict()
for key, value in self.items()
}
def __getitem__(self, item):
return np.prod([self[i] for i in item[::-1]]) if isinstance(item, tuple) else super().__getitem__(item)
return (
np.prod([self[i] for i in item[::-1]])
if isinstance(item, tuple)
else super().__getitem__(item)
)
def __missing__(self, key):
return self.default
@@ -88,7 +98,7 @@ class Transforms(dict):
return hash(frozenset((*self.__dict__.items(), *self.items())))
def save(self, file):
with open(Path(file).with_suffix('.yml'), 'w') as f:
with open(Path(file).with_suffix(".yml"), "w") as f:
yaml.safe_dump(self.asdict(), f, default_flow_style=None)
def copy(self):
@@ -109,8 +119,10 @@ class Transforms(dict):
transform_channels = {key for key in self.keys() if isinstance(key, str)}
if set(channel_names) - transform_channels:
mapping = key_map(channel_names, transform_channels)
warnings.warn(f'The image file and the transform do not have the same channels,'
f' creating a mapping: {mapping}')
warnings.warn(
f"The image file and the transform do not have the same channels,"
f" creating a mapping: {mapping}"
)
for key_im, key_t in mapping.items():
self[key_im] = self[key_t]
@@ -124,37 +136,54 @@ class Transforms(dict):
def coords_pandas(self, array, channel_names, columns=None):
if isinstance(array, DataFrame):
return concat([self.coords_pandas(row, channel_names, columns) for _, row in array.iterrows()], axis=1).T
return concat(
[
self.coords_pandas(row, channel_names, columns)
for _, row in array.iterrows()
],
axis=1,
).T
elif isinstance(array, Series):
key = []
if 'C' in array:
key.append(channel_names[int(array['C'])])
if 'T' in array:
key.append(int(array['T']))
if "C" in array:
key.append(channel_names[int(array["C"])])
if "T" in array:
key.append(int(array["T"]))
return self[tuple(key)].coords(array, columns)
else:
raise TypeError('Not a pandas DataFrame or Series.')
raise TypeError("Not a pandas DataFrame or Series.")
def with_beads(self, cyllens, bead_files):
assert len(bead_files) > 0, 'At least one file is needed to calculate the registration.'
transforms = [self.calculate_channel_transforms(file, cyllens) for file in bead_files]
assert len(bead_files) > 0, (
"At least one file is needed to calculate the registration."
)
transforms = [
self.calculate_channel_transforms(file, cyllens) for file in bead_files
]
for key in {key for transform in transforms for key in transform.keys()}:
new_transforms = [transform[key] for transform in transforms if key in transform]
new_transforms = [
transform[key] for transform in transforms if key in transform
]
if len(new_transforms) == 1:
self[key] = new_transforms[0]
else:
self[key] = Transform()
self[key].parameters = np.mean([t.parameters for t in new_transforms], 0)
self[key].dparameters = (np.std([t.parameters for t in new_transforms], 0) /
np.sqrt(len(new_transforms))).tolist()
self[key].parameters = np.mean(
[t.parameters for t in new_transforms], 0
)
self[key].dparameters = (
np.std([t.parameters for t in new_transforms], 0)
/ np.sqrt(len(new_transforms))
).tolist()
return self
@staticmethod
def get_bead_files(path):
from . import Imread
files = []
for file in path.iterdir():
if file.name.lower().startswith('beads'):
if file.name.lower().startswith("beads"):
try:
with Imread(file):
files.append(file)
@@ -162,32 +191,36 @@ class Transforms(dict):
pass
files = sorted(files)
if not files:
raise Exception('No bead file found!')
raise Exception("No bead file found!")
checked_files = []
for file in files:
try:
if file.is_dir():
file /= 'Pos0'
file /= "Pos0"
with Imread(file): # check for errors opening the file
checked_files.append(file)
except (Exception,):
continue
if not checked_files:
raise Exception('No bead file found!')
raise Exception("No bead file found!")
return checked_files
@staticmethod
def calculate_channel_transforms(bead_file, cyllens):
""" When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162
in the horizontal direction """
"""When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162
in the horizontal direction"""
from . import Imread
with Imread(bead_file, axes='zcyx') as im: # noqa
max_ims = im.max('z')
with Imread(bead_file, axes="zcyx") as im: # noqa
max_ims = im.max("z")
goodch = [c for c, max_im in enumerate(max_ims) if not im.is_noise(max_im)]
if not goodch:
goodch = list(range(len(max_ims)))
untransformed = [c for c in range(im.shape['c']) if cyllens[im.detector[c]].lower() == 'none']
untransformed = [
c
for c in range(im.shape["c"])
if cyllens[im.detector[c]].lower() == "none"
]
good_and_untrans = sorted(set(goodch) & set(untransformed))
if good_and_untrans:
@@ -200,54 +233,81 @@ class Transforms(dict):
matrix[0, 0] = 0.86
transform.matrix = matrix
transforms = Transforms()
for c in tqdm(goodch, desc='Calculating channel transforms'): # noqa
for c in tqdm(goodch, desc="Calculating channel transforms"): # noqa
if c == masterch:
transforms[im.channel_names[c]] = transform
else:
transforms[im.channel_names[c]] = Transform.register(max_ims[masterch], max_ims[c]) * transform
transforms[im.channel_names[c]] = (
Transform.register(max_ims[masterch], max_ims[c]) * transform
)
return transforms
@staticmethod
def save_channel_transform_tiff(bead_files, tiffile):
from . import Imread
n_channels = 0
for file in bead_files:
with Imread(file) as im:
n_channels = max(n_channels, im.shape['c'])
n_channels = max(n_channels, im.shape["c"])
with IJTiffFile(tiffile) as tif:
for t, file in enumerate(bead_files):
with Imread(file) as im:
with Imread(file).with_transform() as jm:
for c in range(im.shape['c']):
tif.save(np.hstack((im(c=c, t=0).max('z'), jm(c=c, t=0).max('z'))), c, 0, t)
for c in range(im.shape["c"]):
tif.save(
np.hstack(
(im(c=c, t=0).max("z"), jm(c=c, t=0).max("z"))
),
c,
0,
t,
)
def with_drift(self, im):
""" Calculate shifts relative to the first frame
divide the sequence into groups,
compare each frame to the frame in the middle of the group and compare these middle frames to each other
"""Calculate shifts relative to the first frame
divide the sequence into groups,
compare each frame to the frame in the middle of the group and compare these middle frames to each other
"""
im = im.transpose('tzycx')
t_groups = [list(chunk) for chunk in Chunks(range(im.shape['t']), size=round(np.sqrt(im.shape['t'])))]
im = im.transpose("tzycx")
t_groups = [
list(chunk)
for chunk in Chunks(
range(im.shape["t"]), size=round(np.sqrt(im.shape["t"]))
)
]
t_keys = [int(np.round(np.mean(t_group))) for t_group in t_groups]
t_pairs = [(int(np.round(np.mean(t_group))), frame) for t_group in t_groups for frame in t_group]
t_pairs = [
(int(np.round(np.mean(t_group))), frame)
for t_group in t_groups
for frame in t_group
]
t_pairs.extend(zip(t_keys, t_keys[1:]))
fmaxz_keys = {t_key: filters.gaussian(im[t_key].max('z'), 5) for t_key in t_keys}
fmaxz_keys = {
t_key: filters.gaussian(im[t_key].max("z"), 5) for t_key in t_keys
}
def fun(t_key_t, im, fmaxz_keys):
t_key, t = t_key_t
if t_key == t:
return 0, 0
else:
fmaxz = filters.gaussian(im[t].max('z'), 5)
return Transform.register(fmaxz_keys[t_key], fmaxz, 'translation').parameters[4:]
fmaxz = filters.gaussian(im[t].max("z"), 5)
return Transform.register(
fmaxz_keys[t_key], fmaxz, "translation"
).parameters[4:]
shifts = np.array(pmap(fun, t_pairs, (im, fmaxz_keys), desc='Calculating image shifts.'))
shifts = np.array(
pmap(fun, t_pairs, (im, fmaxz_keys), desc="Calculating image shifts.")
)
shift_keys_cum = np.zeros(2)
for shift_keys, t_group in zip(np.vstack((-shifts[0], shifts[im.shape['t']:])), t_groups):
for shift_keys, t_group in zip(
np.vstack((-shifts[0], shifts[im.shape["t"] :])), t_groups
):
shift_keys_cum += shift_keys
shifts[t_group] += shift_keys_cum
for i, shift in enumerate(shifts[:im.shape['t']]):
for i, shift in enumerate(shifts[: im.shape["t"]]):
self[i] = Transform.from_shift(shift)
return self
@@ -257,9 +317,11 @@ class Transform:
if sitk is None:
self.transform = None
else:
self.transform = sitk.ReadTransform(str(Path(__file__).parent / 'transform.txt'))
self.dparameters = [0., 0., 0., 0., 0., 0.]
self.shape = [512., 512.]
self.transform = sitk.ReadTransform(
str(Path(__file__).parent / "transform.txt")
)
self.dparameters = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self.shape = [512.0, 512.0]
self.origin = [255.5, 255.5]
self._last, self._inverse = None, None
@@ -274,12 +336,14 @@ class Transform:
@classmethod
def register(cls, fix, mov, kind=None):
""" kind: 'affine', 'translation', 'rigid' """
"""kind: 'affine', 'translation', 'rigid'"""
if sitk is None:
raise ImportError('SimpleElastix is not installed: '
'https://simpleelastix.readthedocs.io/GettingStarted.html')
raise ImportError(
"SimpleElastix is not installed: "
"https://simpleelastix.readthedocs.io/GettingStarted.html"
)
new = cls()
kind = kind or 'affine'
kind = kind or "affine"
new.shape = fix.shape
fix, mov = new.cast_image(fix), new.cast_image(mov)
# TODO: implement RigidTransform
@@ -290,16 +354,18 @@ class Transform:
tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind))
tfilter.Execute()
transform = tfilter.GetTransformParameterMap()[0]
if kind == 'affine':
new.parameters = [float(t) for t in transform['TransformParameters']]
new.shape = [float(t) for t in transform['Size']]
new.origin = [float(t) for t in transform['CenterOfRotationPoint']]
elif kind == 'translation':
new.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']]
new.shape = [float(t) for t in transform['Size']]
if kind == "affine":
new.parameters = [float(t) for t in transform["TransformParameters"]]
new.shape = [float(t) for t in transform["Size"]]
new.origin = [float(t) for t in transform["CenterOfRotationPoint"]]
elif kind == "translation":
new.parameters = [1.0, 0.0, 0.0, 1.0] + [
float(t) for t in transform["TransformParameters"]
]
new.shape = [float(t) for t in transform["Size"]]
new.origin = [(t - 1) / 2 for t in new.shape]
else:
raise NotImplementedError(f'{kind} tranforms not implemented (yet)')
raise NotImplementedError(f"{kind} tranforms not implemented (yet)")
new.dparameters = 6 * [np.nan]
return new
@@ -315,16 +381,35 @@ class Transform:
@classmethod
def from_file(cls, file):
with open(Path(file).with_suffix('.yml')) as f:
with open(Path(file).with_suffix(".yml")) as f:
return cls.from_dict(yamlload(f))
@classmethod
def from_dict(cls, d):
new = cls()
new.origin = [float(i) for i in d['CenterOfRotationPoint']]
new.parameters = [float(i) for i in d['TransformParameters']]
new.dparameters = [float(i) for i in d['dTransformParameters']] if 'dTransformParameters' in d else 6 * [np.nan]
new.shape = [float(i) for i in d['Size']]
new.origin = (
None
if d["CenterOfRotationPoint"] is None
else [float(i) for i in d["CenterOfRotationPoint"]]
)
new.parameters = (
(1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
if d["TransformParameters"] is None
else [float(i) for i in d["TransformParameters"]]
)
new.dparameters = (
[
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) if i is None else float(i)
for i in d["dTransformParameters"]
]
if "dTransformParameters" in d
else 6 * [np.nan] and d["dTransformParameters"] is not None
)
new.shape = (
None
if d["Size"] is None
else [None if i is None else float(i) for i in d["Size"]]
)
return new
def __mul__(self, other): # TODO: take care of dmatrix
@@ -357,9 +442,13 @@ class Transform:
@property
def matrix(self):
return np.array(((*self.parameters[:2], self.parameters[4]),
(*self.parameters[2:4], self.parameters[5]),
(0, 0, 1)))
return np.array(
(
(*self.parameters[:2], self.parameters[4]),
(*self.parameters[2:4], self.parameters[5]),
(0, 0, 1),
)
)
@matrix.setter
def matrix(self, value):
@@ -368,9 +457,13 @@ class Transform:
@property
def dmatrix(self):
return np.array(((*self.dparameters[:2], self.dparameters[4]),
(*self.dparameters[2:4], self.dparameters[5]),
(0, 0, 0)))
return np.array(
(
(*self.dparameters[:2], self.dparameters[4]),
(*self.dparameters[2:4], self.dparameters[5]),
(0, 0, 0),
)
)
@dmatrix.setter
def dmatrix(self, value):
@@ -381,6 +474,8 @@ class Transform:
def parameters(self):
if self.transform is not None:
return list(self.transform.GetParameters())
else:
return [1.0, 0.0, 0.0, 1.0, 0.0, 0.0]
@parameters.setter
def parameters(self, value):
@@ -416,43 +511,62 @@ class Transform:
self.shape = shape[:2]
def asdict(self):
return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters,
'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()}
return {
"CenterOfRotationPoint": self.origin,
"Size": self.shape,
"TransformParameters": self.parameters,
"dTransformParameters": np.nan_to_num(self.dparameters, nan=1e99).tolist(),
}
def frame(self, im, default=0):
if self.is_unity():
return im
else:
if sitk is None:
raise ImportError('SimpleElastix is not installed: '
'https://simpleelastix.readthedocs.io/GettingStarted.html')
raise ImportError(
"SimpleElastix is not installed: "
"https://simpleelastix.readthedocs.io/GettingStarted.html"
)
dtype = im.dtype
im = im.astype('float')
intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor
return self.cast_array(sitk.Resample(self.cast_image(im), self.transform, intp, default)).astype(dtype)
im = im.astype("float")
intp = (
sitk.sitkBSpline
if np.issubdtype(dtype, np.floating)
else sitk.sitkNearestNeighbor
)
return self.cast_array(
sitk.Resample(self.cast_image(im), self.transform, intp, default)
).astype(dtype)
def coords(self, array, columns=None):
""" Transform coordinates in 2 column numpy array,
or in pandas DataFrame or Series objects in columns ['x', 'y']
"""Transform coordinates in 2 column numpy array,
or in pandas DataFrame or Series objects in columns ['x', 'y']
"""
if self.is_unity():
return array.copy()
elif DataFrame is not None and isinstance(array, (DataFrame, Series)):
columns = columns or ['x', 'y']
columns = columns or ["x", "y"]
array = array.copy()
if isinstance(array, DataFrame):
array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))
elif isinstance(array, Series):
array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[0]
array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[
0
]
return array
else: # somehow we need to use the inverse here to get the same effect as when using self.frame
return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)])
return np.array(
[
self.inverse.transform.TransformPoint(i.tolist())
for i in np.asarray(array)
]
)
def save(self, file):
""" save the parameters of the transform calculated
with affine_registration to a yaml file
"""save the parameters of the transform calculated
with affine_registration to a yaml file
"""
if not file[-3:] == 'yml':
file += '.yml'
with open(file, 'w') as f:
if not file[-3:] == "yml":
file += ".yml"
with open(file, "w") as f:
yaml.safe_dump(self.asdict(), f, default_flow_style=None)

View File

@@ -1,54 +1,56 @@
[tool.poetry]
[build-system]
requires = ["maturin>=1.8,<2.0"]
build-backend = "maturin"
[project]
name = "ndbioimage"
version = "2025.1.2"
description = "Bio image reading, metadata and some affine registration."
authors = ["W. Pomp <w.pomp@nki.nl>"]
license = "GPLv3"
authors = [
{ name = "W. Pomp", email = "w.pomp@nki.nl" }
]
license = "MIT"
readme = "README.md"
keywords = ["bioformats", "imread", "numpy", "metadata"]
include = ["transform.txt"]
repository = "https://github.com/wimpomp/ndbioimage"
exclude = ["ndbioimage/jars"]
[tool.poetry.dependencies]
python = "^3.10"
numpy = ">=1.20.0"
pandas = "*"
tifffile = "*"
czifile = "2019.7.2"
tiffwrite = ">=2024.12.1"
ome-types = ">=0.4.0"
pint = "*"
tqdm = "*"
lxml = "*"
pyyaml = "*"
parfor = ">=2025.1.0"
JPype1 = "*"
SimpleITK-SimpleElastix = [
{ version = "*", python = "<3.12" },
{ version = "*", python = ">=3.12", markers = "sys_platform != 'darwin'" },
{ version = "*", python = ">=3.12", markers = "platform_machine == 'aarch64'" },
requires-python = ">=3.10"
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Rust",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
]
dynamic = ["version"]
dependencies = [
"numpy",
"tiffwrite",
"ome-metadata >= 0.2.1",
"tqdm",
]
scikit-image = "*"
imagecodecs = "*"
xsdata = "^23" # until pydantic is up-to-date
matplotlib = { version = "*", optional = true }
scikit-video = { version = "*", optional = true }
pytest = { version = "*", optional = true }
[tool.poetry.extras]
[project.optional-dependencies]
test = ["pytest"]
write = ["matplotlib", "scikit-video"]
[tool.poetry.scripts]
[project.urls]
Repository = "https://github.com/wimpomp/ndbioimage/tree/rs"
[project.scripts]
ndbioimage = "ndbioimage:main"
[tool.pytest.ini_options]
filterwarnings = ["ignore:::(colorcet)"]
[tool.maturin]
python-source = "py"
features = ["pyo3/extension-module", "python", "gpl-formats"]
module-name = "ndbioimage.ndbioimage_rs"
exclude = ["py/ndbioimage/jassets/*", "py/ndbioimage/deps/*"]
strip = true
[tool.isort]
line_length = 119
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

251
src/axes.rs Normal file
View File

@@ -0,0 +1,251 @@
use crate::error::Error;
use crate::stats::MinMax;
use ndarray::{Array, Dimension, Ix2, SliceInfo, SliceInfoElem};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::{DeserializeAs, SerializeAs};
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
/// a trait to find axis indices from any object
pub trait Ax {
    /// C: 0, Z: 1, T: 2, Y: 3, X: 4
    fn n(&self) -> usize;
    /// the indices of axes in self.axes, which always has all of CZTYX;
    /// Err when the axis cannot be located
    fn pos(&self, axes: &[Axis], slice: &[SliceInfoElem]) -> Result<usize, Error>;
    /// the indices of axes in self.axes, which always has all of CZTYX, but skip axes with an operation
    fn pos_op(
        &self,
        axes: &[Axis],
        slice: &[SliceInfoElem],
        op_axes: &[Axis],
    ) -> Result<usize, Error>;
}
/// Enum for CZTYX axes or a new axis
/// (discriminants are the canonical axis order: C=0, Z=1, T=2, Y=3, X=4)
#[derive(Clone, Copy, Debug, Eq, Ord, PartialOrd, Serialize, Deserialize)]
pub enum Axis {
    C,
    Z,
    T,
    Y,
    X,
    /// an axis to be inserted, not present in the source data
    New,
}
impl Hash for Axis {
    /// hash by discriminant so equal variants hash equally,
    /// consistent with the manual PartialEq impl in this module
    fn hash<H: Hasher>(&self, state: &mut H) {
        (*self as usize).hash(state);
    }
}
impl FromStr for Axis {
    type Err = Error;

    /// parse an axis name case-insensitively ("c", "Z", "new", ...)
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_uppercase().as_str() {
            "C" => Ok(Axis::C),
            "Z" => Ok(Axis::Z),
            "T" => Ok(Axis::T),
            "Y" => Ok(Axis::Y),
            "X" => Ok(Axis::X),
            "NEW" => Ok(Axis::New),
            _ => Err(Error::InvalidAxis(s.to_string())),
        }
    }
}
impl Display for Axis {
    /// single-letter representation; note that `New` prints as "N"
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                Axis::C => "C",
                Axis::Z => "Z",
                Axis::T => "T",
                Axis::Y => "Y",
                Axis::X => "X",
                Axis::New => "N",
            }
        )
    }
}
impl Ax for Axis {
    /// the canonical index of this axis (C=0 .. X=4)
    fn n(&self) -> usize {
        *self as usize
    }

    /// locate this named axis within `axes`; the slice is irrelevant
    /// because the axis is identified by name, not by position
    fn pos(&self, axes: &[Axis], _slice: &[SliceInfoElem]) -> Result<usize, Error> {
        if let Some(pos) = axes.iter().position(|a| a == self) {
            Ok(pos)
        } else {
            Err(Error::AxisNotFound(
                format!("{:?}", self),
                format!("{:?}", axes),
            ))
        }
    }

    /// same as `pos`: operations do not change which named axis is which
    fn pos_op(
        &self,
        axes: &[Axis],
        _slice: &[SliceInfoElem],
        _op_axes: &[Axis],
    ) -> Result<usize, Error> {
        self.pos(axes, _slice)
    }
}
impl Ax for usize {
    fn n(&self) -> usize {
        *self
    }

    /// map a positional index over the *remaining* (non-scalar-indexed)
    /// dimensions back to a position in the full CZTYX slice
    fn pos(&self, _axes: &[Axis], slice: &[SliceInfoElem]) -> Result<usize, Error> {
        let idx: Vec<_> = slice
            .iter()
            .enumerate()
            .filter_map(|(i, s)| if s.is_index() { None } else { Some(i) })
            .collect();
        // NOTE(review): `idx[*self]` panics when self is out of range;
        // consider returning an Err instead -- confirm callers guarantee bounds
        Ok(idx[*self])
    }

    /// like `pos`, but dimensions consumed by an operation are also skipped
    fn pos_op(
        &self,
        axes: &[Axis],
        slice: &[SliceInfoElem],
        op_axes: &[Axis],
    ) -> Result<usize, Error> {
        let idx: Vec<_> = axes
            .iter()
            .zip(slice.iter())
            .enumerate()
            .filter_map(|(i, (ax, s))| {
                if s.is_index() | op_axes.contains(ax) {
                    None
                } else {
                    Some(i)
                }
            })
            .collect();
        // bounds are only checked in debug builds; release builds panic on overflow
        debug_assert!(*self < idx.len(), "self: {}, idx: {:?}", self, idx);
        Ok(idx[*self])
    }
}
/// reduction to apply along a single axis of a view
#[derive(Clone, Debug, Serialize, Deserialize)]
pub(crate) enum Operation {
    Max,
    Min,
    Sum,
    Mean,
}
impl Operation {
    /// apply this reduction along `axis` of `array`,
    /// dispatching to the corresponding MinMax method
    pub(crate) fn operate<T, D>(
        &self,
        array: Array<T, D>,
        axis: usize,
    ) -> Result<<Array<T, D> as MinMax>::Output, Error>
    where
        D: Dimension,
        Array<T, D>: MinMax,
    {
        match self {
            Operation::Max => array.max(axis),
            Operation::Min => array.min(axis),
            Operation::Sum => array.sum(axis),
            Operation::Mean => array.mean(axis),
        }
    }
}
impl PartialEq for Axis {
    /// compare by discriminant; for these unit variants this matches
    /// what a derived PartialEq would do and keeps Hash consistent
    fn eq(&self, other: &Self) -> bool {
        (*self as u8) == (*other as u8)
    }
}
pub(crate) fn slice_info<D: Dimension>(
info: &[SliceInfoElem],
) -> Result<SliceInfo<&[SliceInfoElem], Ix2, D>, Error> {
match info.try_into() {
Ok(slice) => Ok(slice),
Err(err) => Err(Error::TryInto(err.to_string())),
}
}
/// local mirror of ndarray's `SliceInfoElem` so serde impls can be derived
/// for a type defined in another crate (serde's "remote derive" pattern)
#[derive(Serialize, Deserialize)]
#[serde(remote = "SliceInfoElem")]
pub(crate) enum SliceInfoElemDef {
    Slice {
        start: isize,
        end: Option<isize>,
        step: isize,
    },
    Index(isize),
    NewAxis,
}

impl SerializeAs<SliceInfoElem> for SliceInfoElemDef {
    /// let serde_with serialize fields holding `SliceInfoElem` via the mirror
    fn serialize_as<S>(source: &SliceInfoElem, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        SliceInfoElemDef::serialize(source, serializer)
    }
}

impl<'de> DeserializeAs<'de, SliceInfoElem> for SliceInfoElemDef {
    /// let serde_with deserialize fields holding `SliceInfoElem` via the mirror
    fn deserialize_as<D>(deserializer: D) -> Result<SliceInfoElem, D::Error>
    where
        D: Deserializer<'de>,
    {
        SliceInfoElemDef::deserialize(deserializer)
    }
}
/// a python-like slice with start, exclusive end and step
#[derive(Clone, Debug)]
pub(crate) struct Slice {
    start: isize,
    end: isize,
    step: isize,
}

impl Slice {
    /// slice over start..end with the given step
    pub(crate) fn new(start: isize, end: isize, step: isize) -> Self {
        Self { start, end, step }
    }

    /// a slice that yields nothing
    pub(crate) fn empty() -> Self {
        Self {
            start: 0,
            end: 0,
            step: 1,
        }
    }
}
impl Iterator for Slice {
    type Item = isize;

    /// yield start, start+step, ... while a full step still fits before end.
    /// NOTE(review): a negative step yields nothing and a zero step would
    /// loop forever -- presumably callers only construct positive steps; confirm
    fn next(&mut self) -> Option<Self::Item> {
        if self.end - self.start >= self.step {
            let r = self.start;
            self.start += self.step;
            Some(r)
        } else {
            None
        }
    }
}
impl IntoIterator for &Slice {
    type Item = isize;
    type IntoIter = Slice;

    /// iterate a borrowed Slice by cloning it, leaving the original untouched
    fn into_iter(self) -> Self::IntoIter {
        self.clone()
    }
}

230
src/bioformats.rs Normal file
View File

@@ -0,0 +1,230 @@
use crate::error::Error;
use j4rs::{Instance, InvocationArg, Jvm, JvmBuilder};
use std::cell::OnceCell;
use std::rc::Rc;
thread_local! {
    // one JVM handle per thread; j4rs Jvm is not shared across threads
    static JVM: OnceCell<Rc<Jvm>> = const { OnceCell::new() }
}

/// Ensure 1 jvm per thread
///
/// Lazily builds the per-thread JVM, locating the `jassets` directory either
/// next to the python package (feature "python") or next to the executable,
/// falling back to the parent directory. Panics when no `jassets` directory
/// can be found or the JVM fails to build.
fn jvm() -> Rc<Jvm> {
    JVM.with(|cell| {
        cell.get_or_init(move || {
            #[cfg(feature = "python")]
            let path = crate::py::ndbioimage_file();
            #[cfg(not(feature = "python"))]
            let path = std::env::current_exe()
                .unwrap()
                .parent()
                .unwrap()
                .to_path_buf();
            // accept jassets either inside `path` or one level up
            let class_path = if path.join("jassets").exists() {
                path.as_path()
            } else {
                path.parent().unwrap()
            };
            if !class_path.join("jassets").exists() {
                panic!(
                    "jassets directory does not exist in {}",
                    class_path.display()
                );
            }
            Rc::new(
                JvmBuilder::new()
                    .skip_setting_native_lib()
                    .with_base_path(class_path.to_str().unwrap())
                    .build()
                    .expect("Failed to build JVM"),
            )
        })
        .clone()
    })
}
/// Download the bioformats jars from the openmicroscopy maven repository
/// into a "jassets" directory, optionally including the GPL-licensed
/// formats package.
pub fn download_bioformats(gpl_formats: bool) -> Result<(), Error> {
    #[cfg(feature = "python")]
    let path = crate::py::ndbioimage_file();
    #[cfg(not(feature = "python"))]
    let path = std::env::current_exe()?.parent().unwrap().to_path_buf();
    // NOTE(review): jassets is always installed in `path.parent()`, while
    // `jvm()` first looks in `path` itself and only then in its parent —
    // confirm both resolve to the same directory in all configurations.
    let class_path = path.parent().unwrap();
    let jassets = class_path.join("jassets");
    if !jassets.exists() {
        std::fs::create_dir_all(jassets)?;
    }
    println!("installing jassets in {}", class_path.display());
    // A throw-away JVM with the ome artifact repository configured is used
    // only to deploy the maven artifacts.
    let jvm = JvmBuilder::new()
        .skip_setting_native_lib()
        .with_base_path(class_path.to_str().unwrap())
        .with_maven_settings(j4rs::MavenSettings::new(vec![
            j4rs::MavenArtifactRepo::from(
                "openmicroscopy::https://artifacts.openmicroscopy.org/artifactory/ome.releases",
            ),
        ]))
        .build()?;
    jvm.deploy_artifact(&j4rs::MavenArtifact::from("ome:bioformats_package:8.3.0"))?;
    if gpl_formats {
        jvm.deploy_artifact(&j4rs::MavenArtifact::from("ome:formats-gpl:8.3.0"))?;
    }
    Ok(())
}
// Expands to the return type of a generated wrapper method:
// `Result<R, Error>` when a return type was given, `Result<(), Error>`
// otherwise.  Used by the `method!` macro below.
macro_rules! method_return {
    ($R:ty$(|c)?) => { Result<$R, Error> };
    () => { Result<(), Error> };
}
// Converts one method argument into a j4rs `InvocationArg`; a `|p` suffix
// marks arguments that must be passed to java as primitives.
macro_rules! method_arg {
    ($n:tt: $t:ty|p) => {
        InvocationArg::try_from($n)?.into_primitive()?
    };
    ($n:tt: $t:ty) => {
        InvocationArg::try_from($n)?
    };
}
// Generates a thin rust wrapper method around a java method call on the
// wrapped `Instance` in `self.0`:
//
//   method!(rust_name, "javaName", [arg: Type|p, ...] => Return|c);
//
// `|p` passes an argument as a java primitive; `=> R|c` converts the java
// result to rust via `to_rust`, `=> R|d` via `to_rust_deserialized`,
// `=> R` returns the raw `Instance`, and omitting `=>` returns `()`.
macro_rules! method {
    ($name:ident, $method:expr $(,[$($n:tt: $t:ty$(|$p:tt)?),*])? $(=> $tt:ty$(|$c:tt)?)?) => {
        #[allow(dead_code)]
        pub(crate) fn $name(&self, $($($n: $t),*)?) -> method_return!($($tt)?) {
            let args: Vec<InvocationArg> = vec![$($( method_arg!($n:$t$(|$p)?) ),*)?];
            let _result = jvm().invoke(&self.0, $method, &args)?;
            // Inner helper: chooses the result conversion based on the
            // optional `|c`/`|d` suffix of the declared return type.
            macro_rules! method_result {
                ($R:ty|c) => {
                    Ok(jvm().to_rust(_result)?)
                };
                ($R:ty|d) => {
                    Ok(jvm().to_rust_deserialized(_result)?)
                };
                ($R:ty) => {
                    Ok(_result)
                };
                () => {
                    Ok(())
                };
            }
            method_result!($($tt$(|$c)?)?)
        }
    };
}
/// Reinterpret a `Vec<T>` as a `Vec<U>` without copying the buffer.
///
/// Only sound when `T` and `U` have identical size and alignment (the use
/// case here is java's signed `i8` bytes <-> `u8`).  The asserts enforce
/// this at runtime: a mismatch would corrupt the allocator's bookkeeping
/// when the returned `Vec` is dropped.
fn transmute_vec<T, U>(vec: Vec<T>) -> Vec<U> {
    assert_eq!(
        std::mem::size_of::<T>(),
        std::mem::size_of::<U>(),
        "transmute_vec: element size mismatch"
    );
    assert_eq!(
        std::mem::align_of::<T>(),
        std::mem::align_of::<U>(),
        "transmute_vec: element alignment mismatch"
    );
    // Prevent the original Vec from freeing the buffer; ownership of the
    // allocation moves to the new Vec built from the same raw parts.
    let mut vec = std::mem::ManuallyDrop::new(vec);
    unsafe {
        Vec::from_raw_parts(vec.as_mut_ptr() as *mut U, vec.len(), vec.capacity())
    }
}
/// Wrapper around bioformats java class loci.common.DebugTools
pub struct DebugTools;
impl DebugTools {
    /// Set the debug root level; one of ERROR, DEBUG, TRACE, INFO, OFF.
    pub fn set_root_level(level: &str) -> Result<(), Error> {
        let args = [InvocationArg::try_from(level)?];
        jvm().invoke_static("loci.common.DebugTools", "setRootLevel", &args)?;
        Ok(())
    }
}
/// Wrapper around bioformats java class loci.formats.ChannelSeparator
pub(crate) struct ChannelSeparator(Instance);
impl ChannelSeparator {
    /// Wrap an existing `ImageReader` in a java ChannelSeparator.
    pub(crate) fn new(image_reader: &ImageReader) -> Result<Self, Error> {
        let jvm = jvm();
        let channel_separator = jvm.create_instance(
            "loci.formats.ChannelSeparator",
            // Clone the java-side reference so the reader stays usable.
            &[InvocationArg::from(jvm.clone_instance(&image_reader.0)?)],
        )?;
        Ok(ChannelSeparator(channel_separator))
    }
    /// Read the bytes of the plane at `index`, reinterpreting java's signed
    /// bytes as `u8` without copying.
    pub(crate) fn open_bytes(&self, index: i32) -> Result<Vec<u8>, Error> {
        Ok(transmute_vec(self.open_bi8(index)?))
    }
    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
}
/// Wrapper around bioformats java class loci.formats.ImageReader;
/// the underlying java reader is closed automatically on drop.
pub struct ImageReader(Instance);
impl Drop for ImageReader {
    /// Close the underlying java reader when the wrapper is dropped.
    fn drop(&mut self) {
        // Panicking inside Drop aborts the process when we are already
        // unwinding, so a failure to close is reported but not unwrapped.
        if let Err(err) = self.close() {
            eprintln!("failed to close ImageReader: {err}");
        }
    }
}
impl ImageReader {
    /// Create a new java `loci.formats.ImageReader` instance.
    pub(crate) fn new() -> Result<Self, Error> {
        let reader = jvm().create_instance("loci.formats.ImageReader", InvocationArg::empty())?;
        Ok(ImageReader(reader))
    }
    /// Read the bytes of the plane at `index`, reinterpreting java's signed
    /// bytes as `u8` without copying.
    pub(crate) fn open_bytes(&self, index: i32) -> Result<Vec<u8>, Error> {
        Ok(transmute_vec(self.open_bi8(index)?))
    }
    /// The OME metadata of the current file as an xml string, obtained by
    /// casting the metadata store to an OMEPyramidStore and calling dumpXML.
    pub(crate) fn ome_xml(&self) -> Result<String, Error> {
        let mds = self.get_metadata_store()?;
        Ok(jvm()
            .chain(&mds)?
            .cast("loci.formats.ome.OMEPyramidStore")?
            .invoke("dumpXML", InvocationArg::empty())?
            .to_rust()?)
    }
    // Thin wrappers around the corresponding java methods (see `method!`).
    method!(set_metadata_store, "setMetadataStore", [ome_data: Instance]);
    method!(get_metadata_store, "getMetadataStore" => Instance);
    method!(set_id, "setId", [id: &str]);
    method!(set_series, "setSeries", [series: i32|p]);
    method!(open_bi8, "openBytes", [index: i32|p] => Vec<i8>|c);
    method!(get_size_x, "getSizeX" => i32|c);
    method!(get_size_y, "getSizeY" => i32|c);
    method!(get_size_c, "getSizeC" => i32|c);
    method!(get_size_t, "getSizeT" => i32|c);
    method!(get_size_z, "getSizeZ" => i32|c);
    method!(get_pixel_type, "getPixelType" => i32|c);
    method!(is_little_endian, "isLittleEndian" => bool|c);
    method!(is_rgb, "isRGB" => bool|c);
    method!(is_interleaved, "isInterleaved" => bool|c);
    method!(get_index, "getIndex", [z: i32|p, c: i32|p, t: i32|p] => i32|c);
    method!(get_rgb_channel_count, "getRGBChannelCount" => i32|c);
    method!(is_indexed, "isIndexed" => bool|c);
    method!(get_8bit_lookup_table, "get8BitLookupTable" => Instance);
    method!(get_16bit_lookup_table, "get16BitLookupTable" => Instance);
    method!(close, "close");
}
/// Wrapper around bioformats java class loci.formats.MetadataTools
pub(crate) struct MetadataTools(Instance);
impl MetadataTools {
    /// Create a new java `loci.formats.MetadataTools` instance.
    pub(crate) fn new() -> Result<Self, Error> {
        let instance =
            jvm().create_instance("loci.formats.MetadataTools", InvocationArg::empty())?;
        Ok(Self(instance))
    }
    method!(create_ome_xml_metadata, "createOMEXMLMetadata" => Instance);
}

207
src/colors.rs Normal file
View File

@@ -0,0 +1,207 @@
use crate::error::Error;
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::fmt::Display;
use std::str::FromStr;
lazy_static! {
    /// Lookup table from color names (matplotlib-style single letters plus
    /// the CSS named colors) to their "#RRGGBB" hex representation.
    static ref COLORS: HashMap<String, String> = [
        ("b", "#0000FF"),
        ("g", "#008000"),
        ("r", "#FF0000"),
        ("c", "#00BFBF"),
        ("m", "#BF00BF"),
        ("y", "#BFBF00"),
        ("k", "#000000"),
        ("w", "#FFFFFF"),
        ("aliceblue", "#F0F8FF"),
        ("antiquewhite", "#FAEBD7"),
        ("aqua", "#00FFFF"),
        ("aquamarine", "#7FFFD4"),
        ("azure", "#F0FFFF"),
        ("beige", "#F5F5DC"),
        ("bisque", "#FFE4C4"),
        ("black", "#000000"),
        ("blanchedalmond", "#FFEBCD"),
        ("blue", "#0000FF"),
        ("blueviolet", "#8A2BE2"),
        ("brown", "#A52A2A"),
        ("burlywood", "#DEB887"),
        ("cadetblue", "#5F9EA0"),
        ("chartreuse", "#7FFF00"),
        ("chocolate", "#D2691E"),
        ("coral", "#FF7F50"),
        ("cornflowerblue", "#6495ED"),
        ("cornsilk", "#FFF8DC"),
        ("crimson", "#DC143C"),
        ("cyan", "#00FFFF"),
        ("darkblue", "#00008B"),
        ("darkcyan", "#008B8B"),
        ("darkgoldenrod", "#B8860B"),
        ("darkgray", "#A9A9A9"),
        ("darkgreen", "#006400"),
        ("darkgrey", "#A9A9A9"),
        ("darkkhaki", "#BDB76B"),
        ("darkmagenta", "#8B008B"),
        ("darkolivegreen", "#556B2F"),
        ("darkorange", "#FF8C00"),
        ("darkorchid", "#9932CC"),
        ("darkred", "#8B0000"),
        ("darksalmon", "#E9967A"),
        ("darkseagreen", "#8FBC8F"),
        ("darkslateblue", "#483D8B"),
        ("darkslategray", "#2F4F4F"),
        ("darkslategrey", "#2F4F4F"),
        ("darkturquoise", "#00CED1"),
        ("darkviolet", "#9400D3"),
        ("deeppink", "#FF1493"),
        ("deepskyblue", "#00BFFF"),
        ("dimgray", "#696969"),
        ("dimgrey", "#696969"),
        ("dodgerblue", "#1E90FF"),
        ("firebrick", "#B22222"),
        ("floralwhite", "#FFFAF0"),
        ("forestgreen", "#228B22"),
        ("fuchsia", "#FF00FF"),
        ("gainsboro", "#DCDCDC"),
        ("ghostwhite", "#F8F8FF"),
        ("gold", "#FFD700"),
        ("goldenrod", "#DAA520"),
        ("gray", "#808080"),
        ("green", "#008000"),
        ("greenyellow", "#ADFF2F"),
        ("grey", "#808080"),
        ("honeydew", "#F0FFF0"),
        ("hotpink", "#FF69B4"),
        ("indianred", "#CD5C5C"),
        ("indigo", "#4B0082"),
        ("ivory", "#FFFFF0"),
        ("khaki", "#F0E68C"),
        ("lavender", "#E6E6FA"),
        ("lavenderblush", "#FFF0F5"),
        ("lawngreen", "#7CFC00"),
        ("lemonchiffon", "#FFFACD"),
        ("lightblue", "#ADD8E6"),
        ("lightcoral", "#F08080"),
        ("lightcyan", "#E0FFFF"),
        ("lightgoldenrodyellow", "#FAFAD2"),
        ("lightgray", "#D3D3D3"),
        ("lightgreen", "#90EE90"),
        ("lightgrey", "#D3D3D3"),
        ("lightpink", "#FFB6C1"),
        ("lightsalmon", "#FFA07A"),
        ("lightseagreen", "#20B2AA"),
        ("lightskyblue", "#87CEFA"),
        ("lightslategray", "#778899"),
        ("lightslategrey", "#778899"),
        ("lightsteelblue", "#B0C4DE"),
        ("lightyellow", "#FFFFE0"),
        ("lime", "#00FF00"),
        ("limegreen", "#32CD32"),
        ("linen", "#FAF0E6"),
        ("magenta", "#FF00FF"),
        ("maroon", "#800000"),
        ("mediumaquamarine", "#66CDAA"),
        ("mediumblue", "#0000CD"),
        ("mediumorchid", "#BA55D3"),
        ("mediumpurple", "#9370DB"),
        ("mediumseagreen", "#3CB371"),
        ("mediumslateblue", "#7B68EE"),
        ("mediumspringgreen", "#00FA9A"),
        ("mediumturquoise", "#48D1CC"),
        ("mediumvioletred", "#C71585"),
        ("midnightblue", "#191970"),
        ("mintcream", "#F5FFFA"),
        ("mistyrose", "#FFE4E1"),
        ("moccasin", "#FFE4B5"),
        ("navajowhite", "#FFDEAD"),
        ("navy", "#000080"),
        ("oldlace", "#FDF5E6"),
        ("olive", "#808000"),
        ("olivedrab", "#6B8E23"),
        ("orange", "#FFA500"),
        ("orangered", "#FF4500"),
        ("orchid", "#DA70D6"),
        ("palegoldenrod", "#EEE8AA"),
        ("palegreen", "#98FB98"),
        ("paleturquoise", "#AFEEEE"),
        ("palevioletred", "#DB7093"),
        ("papayawhip", "#FFEFD5"),
        ("peachpuff", "#FFDAB9"),
        ("peru", "#CD853F"),
        ("pink", "#FFC0CB"),
        ("plum", "#DDA0DD"),
        ("powderblue", "#B0E0E6"),
        ("purple", "#800080"),
        ("rebeccapurple", "#663399"),
        ("red", "#FF0000"),
        ("rosybrown", "#BC8F8F"),
        ("royalblue", "#4169E1"),
        ("saddlebrown", "#8B4513"),
        ("salmon", "#FA8072"),
        ("sandybrown", "#F4A460"),
        ("seagreen", "#2E8B57"),
        ("seashell", "#FFF5EE"),
        ("sienna", "#A0522D"),
        ("silver", "#C0C0C0"),
        ("skyblue", "#87CEEB"),
        ("slateblue", "#6A5ACD"),
        ("slategray", "#708090"),
        ("slategrey", "#708090"),
        ("snow", "#FFFAFA"),
        ("springgreen", "#00FF7F"),
        ("steelblue", "#4682B4"),
        ("tan", "#D2B48C"),
        ("teal", "#008080"),
        ("thistle", "#D8BFD8"),
        ("tomato", "#FF6347"),
        ("turquoise", "#40E0D0"),
        ("violet", "#EE82EE"),
        ("wheat", "#F5DEB3"),
        ("white", "#FFFFFF"),
        ("whitesmoke", "#F5F5F5"),
        ("yellow", "#FFFF00"),
        ("yellowgreen", "#9ACD32"),
    ]
    .into_iter()
    .map(|(name, hex)| (name.to_string(), hex.to_string()))
    .collect();
}
/// An RGB color with 8 bits per channel; parsed from names or "#RRGGBB"
/// strings and displayed back as "#RRGGBB".
#[derive(Clone, Debug)]
pub struct Color {
    r: u8,
    g: u8,
    b: u8,
}
impl FromStr for Color {
    type Err = Error;
    /// Parse a color from a known name (e.g. "red", "g") or a "#RRGGBB"
    /// hex string; anything else yields `Error::InvalidColor`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Resolve names through the lookup table first.
        let s = if !s.starts_with("#") {
            if let Some(s) = COLORS.get(s) {
                s
            } else {
                return Err(Error::InvalidColor(s.to_string()));
            }
        } else {
            s
        };
        // BUG FIX: the byte-range slices below used to panic on short input
        // such as "#FFF" (or on non-ASCII text); validate the shape first
        // and return an error instead.
        if s.len() != 7 || !s.is_ascii() {
            return Err(Error::InvalidColor(s.to_string()));
        }
        let r = u8::from_str_radix(&s[1..3], 16)?;
        let g = u8::from_str_radix(&s[3..5], 16)?;
        let b = u8::from_str_radix(&s[5..7], 16)?;
        Ok(Self { r, g, b })
    }
}
impl Display for Color {
    /// Format as an uppercase "#RRGGBB" hex string.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { r, g, b } = self;
        write!(f, "#{r:02X}{g:02X}{b:02X}")
    }
}
impl Color {
    /// The color as an `[r, g, b]` byte vector.
    pub fn to_rgb(&self) -> Vec<u8> {
        [self.r, self.g, self.b].to_vec()
    }
}

65
src/error.rs Normal file
View File

@@ -0,0 +1,65 @@
use thiserror::Error;
/// Crate-wide error type, collecting the errors of the underlying libraries
/// (io, ndarray, j4rs, ome, tiff, ...) and the crate's own failure modes.
#[derive(Debug, Error)]
pub enum Error {
    #[error(transparent)]
    IO(#[from] std::io::Error),
    #[error(transparent)]
    Shape(#[from] ndarray::ShapeError),
    #[error(transparent)]
    J4rs(#[from] j4rs::errors::J4RsError),
    #[error(transparent)]
    Infallible(#[from] std::convert::Infallible),
    #[error(transparent)]
    ParseIntError(#[from] std::num::ParseIntError),
    #[error(transparent)]
    Ome(#[from] ome_metadata::error::Error),
    #[cfg(feature = "tiff")]
    #[error(transparent)]
    TemplateError(#[from] indicatif::style::TemplateError),
    #[cfg(feature = "tiff")]
    #[error(transparent)]
    TiffWrite(#[from] tiffwrite::error::Error),
    #[error("invalid axis: {0}")]
    InvalidAxis(String),
    #[error("axis {0} not found in axes {1}")]
    AxisNotFound(String, String),
    #[error("conversion error: {0}")]
    TryInto(String),
    #[error("file already exists {0}")]
    FileAlreadyExists(String),
    #[error("could not download ffmpeg: {0}")]
    Ffmpeg(String),
    #[error("index {0} out of bounds {1}")]
    OutOfBounds(isize, isize),
    #[error("axis {0} has length {1}, but was not included")]
    OutOfBoundsAxis(String, usize),
    // BUG FIX: the format string was "{0} != {0}", printing the first
    // dimensionality twice; it now shows both mismatched values.
    #[error("dimensionality mismatch: {0} != {1}")]
    DimensionalityMismatch(usize, usize),
    #[error("axis {0}: {1} is already operated on!")]
    AxisAlreadyOperated(usize, String),
    #[error("not enough free dimensions")]
    NotEnoughFreeDimensions,
    #[error("cannot cast {0} to {1}")]
    Cast(String, String),
    #[error("empty view")]
    EmptyView,
    #[error("invalid color: {0}")]
    InvalidColor(String),
    #[error("no image or pixels found")]
    NoImageOrPixels,
    #[error("invalid attenuation value: {0}")]
    InvalidAttenuation(String),
    #[error("not a valid file name")]
    InvalidFileName,
    #[error("unknown pixel type {0}")]
    UnknownPixelType(String),
    #[error("no mean")]
    NoMean,
    #[error("tiff is locked")]
    TiffLock,
    #[error("not implemented: {0}")]
    NotImplemented(String),
    #[error("cannot parse: {0}")]
    Parse(String),
}

414
src/lib.rs Normal file
View File

@@ -0,0 +1,414 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
mod bioformats;
pub mod axes;
pub mod metadata;
#[cfg(feature = "python")]
mod py;
pub mod reader;
pub mod stats;
pub mod view;
pub mod colors;
pub mod error;
#[cfg(feature = "movie")]
pub mod movie;
#[cfg(feature = "tiff")]
pub mod tiff;
pub use bioformats::download_bioformats;
#[cfg(test)]
mod tests {
use crate::axes::Axis;
use crate::error::Error;
use crate::reader::{Frame, Reader};
use crate::stats::MinMax;
use crate::view::Item;
use downloader::{Download, Downloader};
use ndarray::{Array, Array4, Array5, NewAxis};
use ndarray::{Array2, s};
use rayon::prelude::*;
/// Open a test file from tests/files relative to the crate root, series 0.
fn open(file: &str) -> Result<Reader, Error> {
    let path = std::env::current_dir()?
        .join("tests")
        .join("files")
        .join(file);
    Reader::new(&path, 0)
}
#[test]
fn read_ome() -> Result<(), Box<dyn std::error::Error>> {
    // Download the small (< 10 MB) artificial OME-TIFF example files from
    // openmicroscopy.org (cached in tests/files/ome) and check that the ome
    // metadata of most of them can be read.
    let path = std::env::current_dir()?.join("tests/files/ome");
    std::fs::create_dir_all(&path)?;
    let url =
        "https://downloads.openmicroscopy.org/images/OME-TIFF/2016-06/bioformats-artificial/";
    let page = reqwest::blocking::get(url)?.text()?;
    // Scrape file links and sizes from the apache-style directory listing.
    let pat = regex::Regex::new(
        r#"<a\s+href\s*=\s*"([^"<>]+)">[^<>]+</a>\s+\d{2}-\w{3}-\d{4}\s+\d{2}:\d{2}\s+(\d+)"#,
    )?;
    let mut downloads = Vec::new();
    let mut files = Vec::new();
    for line in page.lines() {
        if let Some(cap) = pat.captures(line) {
            let link = cap[1].trim().to_string();
            let size = cap[2].trim().parse::<usize>()?;
            if size < 10 * 1024usize.pow(2) {
                if !path.join(&link).exists() {
                    downloads.push(Download::new(&format!("{}{}", url, link)));
                }
                files.push(path.join(link));
            }
        }
    }
    if !downloads.is_empty() {
        let mut downloader = Downloader::builder().download_folder(&path).build()?;
        downloader.download(&downloads)?;
    }
    let mut count = 0;
    for file in files {
        // Not every example file is expected to open, but most should.
        if let Ok(reader) = Reader::new(&file, 0) {
            let _ome = reader.get_ome()?;
            count += 1;
        }
    }
    println!("count: {}", count);
    assert!(count > 30);
    Ok(())
}
/// Helper: the debug representation of a file's pixel type.
fn get_pixel_type(file: &str) -> Result<String, Error> {
    let reader = open(file)?;
    Ok(format!(
        "file: {}, pixel type: {:?}",
        file, reader.pixel_type
    ))
}
/// Helper: the first frame (c=0, z=0, t=0) of a file.
fn get_frame(file: &str) -> Result<Frame, Error> {
    let reader = open(file)?;
    reader.get_frame(0, 0, 0)
}
#[test]
fn read_ser() -> Result<(), Error> {
    let file = "Experiment-2029.czi";
    let reader = open(file)?;
    // BUG FIX: size_y was printed twice; print width and height.
    println!("size: {}, {}", reader.size_x, reader.size_y);
    let frame = reader.get_frame(0, 0, 0)?;
    // The czi file holds u16 pixels, so conversion to i8 may legitimately fail.
    if let Ok(arr) = <Frame as TryInto<Array2<i8>>>::try_into(frame) {
        println!("{:?}", arr);
    } else {
        println!("could not convert Frame to Array<i8>");
    }
    Ok(())
}
#[test]
fn read_par() -> Result<(), Error> {
    // Read pixel types of several files in parallel to exercise the
    // thread-local jvm instances.
    let files = vec!["Experiment-2029.czi", "test.tif"];
    let pixel_type = files
        .into_par_iter()
        .map(|file| get_pixel_type(file).unwrap())
        .collect::<Vec<_>>();
    println!("{:?}", pixel_type);
    Ok(())
}
#[test]
fn read_frame_par() -> Result<(), Error> {
    // Read frames from several files in parallel.
    let files = vec!["Experiment-2029.czi", "test.tif"];
    let frames = files
        .into_par_iter()
        .map(|file| get_frame(file).unwrap())
        .collect::<Vec<_>>();
    println!("{:?}", frames);
    Ok(())
}
#[test]
fn read_sequence() -> Result<(), Error> {
    // Opening a single file of an image sequence should expose the other
    // time points of the sequence as well.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    println!("reader: {:?}", reader);
    let frame = reader.get_frame(0, 4, 0)?;
    println!("frame: {:?}", frame);
    let frame = reader.get_frame(0, 2, 0)?;
    println!("frame: {:?}", frame);
    Ok(())
}
#[test]
fn read_sequence1() -> Result<(), Error> {
    // Another sequence layout (multi-position acquisition).
    let file = "4-Pos_001_002/img_000000000_Cy3-Cy3_filter_000.tif";
    let reader = open(file)?;
    println!("reader: {:?}", reader);
    Ok(())
}
#[test]
fn ome_xml() -> Result<(), Error> {
    // The ome metadata must be retrievable as an xml string.
    let file = "Experiment-2029.czi";
    let reader = open(file)?;
    let xml = reader.get_ome_xml()?;
    println!("{}", xml);
    Ok(())
}
#[test]
fn view() -> Result<(), Error> {
    // A slice of a view must contain the same data as the frame read
    // directly from the reader.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    let view = reader.view();
    let a = view.slice(s![0, 5, 0, .., ..])?;
    let b = reader.get_frame(0, 5, 0)?;
    let c: Array2<isize> = a.try_into()?;
    let d: Array2<isize> = b.try_into()?;
    assert_eq!(c, d);
    Ok(())
}
#[test]
fn view_shape() -> Result<(), Error> {
    // Slicing must produce the expected shape along each axis.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    let view = reader.view();
    let a = view.slice(s![0, ..5, 0, .., 100..200])?;
    let shape = a.shape();
    assert_eq!(shape, vec![5, 1024, 100]);
    Ok(())
}
#[test]
fn view_new_axis() -> Result<(), Error> {
    // NewAxis in a slice must behave the same for a View as for an ndarray.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    let view = reader.view();
    let a = Array5::<u8>::zeros((1, 9, 1, 1024, 1024));
    let a = a.slice(s![0, ..5, 0, NewAxis, 100..200, ..]);
    let v = view.slice(s![0, ..5, 0, NewAxis, 100..200, ..])?;
    assert_eq!(v.shape(), a.shape());
    let a = a.slice(s![NewAxis, .., .., NewAxis, .., .., NewAxis]);
    let v = v.slice(s![NewAxis, .., .., NewAxis, .., .., NewAxis])?;
    assert_eq!(v.shape(), a.shape());
    Ok(())
}
#[test]
fn view_permute_axes() -> Result<(), Error> {
    // swap_axes and permute_axes must mirror the equivalent ndarray ops.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    let view = reader.view();
    let s = view.shape();
    let mut a = Array5::<u8>::zeros((s[0], s[1], s[2], s[3], s[4]));
    assert_eq!(view.shape(), a.shape());
    let b: Array5<usize> = view.clone().try_into()?;
    assert_eq!(b.shape(), a.shape());
    let view = view.swap_axes(Axis::C, Axis::Z)?;
    a.swap_axes(0, 1);
    assert_eq!(view.shape(), a.shape());
    let b: Array5<usize> = view.clone().try_into()?;
    assert_eq!(b.shape(), a.shape());
    let view = view.permute_axes(&[Axis::X, Axis::Z, Axis::Y])?;
    let a = a.permuted_axes([4, 1, 2, 0, 3]);
    assert_eq!(view.shape(), a.shape());
    let b: Array5<usize> = view.clone().try_into()?;
    assert_eq!(b.shape(), a.shape());
    Ok(())
}
// Generates a test asserting that View::max_proj along axis $b gives the
// same result as MinMax::max on the equivalent ndarray.
macro_rules! test_max {
    ($($name:ident: $b:expr $(,)?)*) => {
        $(
            #[test]
            fn $name() -> Result<(), Error> {
                let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
                let reader = open(file)?;
                let view = reader.view();
                let array: Array5<usize> = view.clone().try_into()?;
                let view = view.max_proj($b)?;
                let a: Array4<usize> = view.clone().try_into()?;
                let b = array.max($b)?;
                assert_eq!(a.shape(), b.shape());
                assert_eq!(a, b);
                Ok(())
            }
        )*
    };
}
// One max-projection test per axis (czt yx order).
test_max! {
    max_c: 0
    max_z: 1
    max_t: 2
    max_y: 3
    max_x: 4
}
// Generates a test asserting that slicing a View gives the same result as
// slicing the equivalent ndarray with the same slice expression.
macro_rules! test_index {
    ($($name:ident: $b:expr $(,)?)*) => {
        $(
            #[test]
            fn $name() -> Result<(), Error> {
                let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
                let reader = open(file)?;
                let view = reader.view();
                let v4: Array<usize, _> = view.slice($b)?.try_into()?;
                let a5: Array5<usize> = reader.view().try_into()?;
                let a4 = a5.slice($b).to_owned();
                assert_eq!(a4, v4);
                Ok(())
            }
        )*
    };
}
// All combinations of indexing 0 to 5 of the 5 axes with `0` or `..`.
test_index! {
    index_0: s![.., .., .., .., ..]
    index_1: s![0, .., .., .., ..]
    index_2: s![.., 0, .., .., ..]
    index_3: s![.., .., 0, .., ..]
    index_4: s![.., .., .., 0, ..]
    index_5: s![.., .., .., .., 0]
    index_6: s![0, 0, .., .., ..]
    index_7: s![0, .., 0, .., ..]
    index_8: s![0, .., .., 0, ..]
    index_9: s![0, .., .., .., 0]
    index_a: s![.., 0, 0, .., ..]
    index_b: s![.., 0, .., 0, ..]
    index_c: s![.., 0, .., .., 0]
    index_d: s![.., .., 0, 0, ..]
    index_e: s![.., .., 0, .., 0]
    index_f: s![.., .., .., 0, 0]
    index_g: s![0, 0, 0, .., ..]
    index_h: s![0, 0, .., 0, ..]
    index_i: s![0, 0, .., .., 0]
    index_j: s![0, .., 0, 0, ..]
    index_k: s![0, .., 0, .., 0]
    index_l: s![0, .., .., 0, 0]
    index_m: s![0, 0, 0, 0, ..]
    index_n: s![0, 0, 0, .., 0]
    index_o: s![0, 0, .., 0, 0]
    index_p: s![0, .., 0, 0, 0]
    index_q: s![.., 0, 0, 0, 0]
    index_r: s![0, 0, 0, 0, 0]
}
#[test]
fn dyn_view() -> Result<(), Error> {
    // Projections and slicing must also work on a dynamic-dimensional view.
    let file = "YTL1841B2-2-1_1hr_DMSO_galinduction_1/Pos0/img_000000000_mScarlet_GFP-mSc-filter_004.tif";
    let reader = open(file)?;
    let a = reader.view().into_dyn();
    let b = a.max_proj(1)?;
    let c = b.slice(s![0, 0, .., ..])?;
    let d = c.as_array::<usize>()?;
    assert_eq!(d.shape(), [1024, 1024]);
    Ok(())
}
#[test]
fn item() -> Result<(), Error> {
    // A fully indexed view yields a single item.
    let file = "1xp53-01-AP1.czi";
    let reader = open(file)?;
    let view = reader.view();
    let a = view.slice(s![.., 0, 0, 0, 0])?;
    let b = a.slice(s![0])?;
    let item = b.item::<usize>()?;
    assert_eq!(item, 2);
    Ok(())
}
#[test]
fn slice_cztyx() -> Result<(), Error> {
    // slice_cztyx slices in fixed czt yx order even after a projection.
    let file = "1xp53-01-AP1.czi";
    let reader = open(file)?;
    let view = reader.view().max_proj(Axis::Z)?.into_dyn();
    println!("view.axes: {:?}", view.get_axes());
    println!("view.slice: {:?}", view.get_slice());
    let r = view.reset_axes()?;
    println!("r.axes: {:?}", r.get_axes());
    println!("r.slice: {:?}", r.get_slice());
    let a = view.slice_cztyx(s![0, 0, 0, .., ..])?;
    println!("a.axes: {:?}", a.get_axes());
    println!("a.slice: {:?}", a.get_slice());
    assert_eq!(a.axes(), [Axis::Y, Axis::X]);
    Ok(())
}
#[test]
fn reset_axes() -> Result<(), Error> {
    // reset_axes restores the canonical 5d axis layout after a projection.
    let file = "1xp53-01-AP1.czi";
    let reader = open(file)?;
    let view = reader.view().max_proj(Axis::Z)?;
    let view = view.reset_axes()?;
    assert_eq!(view.axes(), [Axis::C, Axis::New, Axis::T, Axis::Y, Axis::X]);
    let a = view.as_array::<f64>()?;
    assert_eq!(a.ndim(), 5);
    Ok(())
}
#[test]
fn reset_axes2() -> Result<(), Error> {
    // reset_axes restores the canonical axis layout after a squeeze.
    let file = "Experiment-2029.czi";
    let reader = open(file)?;
    let view = reader.view().squeeze()?;
    let a = view.reset_axes()?;
    assert_eq!(a.axes(), [Axis::C, Axis::Z, Axis::T, Axis::Y, Axis::X]);
    Ok(())
}
#[test]
fn reset_axes3() -> Result<(), Error> {
    // Combinations of squeeze, projections, slicing and reset_axes keep the
    // view shape and the realised array shape consistent.
    let file = "Experiment-2029.czi";
    let reader = open(file)?;
    let view4 = reader.view().squeeze()?;
    let view = view4.max_proj(Axis::Z)?.into_dyn();
    let slice = view.slice_cztyx(s![0, .., .., .., ..])?.into_dyn();
    let a = slice.as_array::<u16>()?;
    assert_eq!(slice.shape(), [1, 10, 1280, 1280]);
    assert_eq!(a.shape(), [1, 10, 1280, 1280]);
    let r = slice.reset_axes()?;
    let b = r.as_array::<u16>()?;
    assert_eq!(r.shape(), [1, 1, 10, 1280, 1280]);
    assert_eq!(b.shape(), [1, 1, 10, 1280, 1280]);
    let q = slice.max_proj(Axis::C)?.max_proj(Axis::T)?;
    let c = q.as_array::<f64>()?;
    assert_eq!(q.shape(), [1, 1280, 1280]);
    assert_eq!(c.shape(), [1, 1280, 1280]);
    let p = q.reset_axes()?;
    let d = p.as_array::<u16>()?;
    println!("axes: {:?}", p.get_axes());
    println!("operations: {:?}", p.get_operations());
    println!("slice: {:?}", p.get_slice());
    assert_eq!(p.shape(), [1, 1, 1, 1280, 1280]);
    assert_eq!(d.shape(), [1, 1, 1, 1280, 1280]);
    Ok(())
}
#[test]
fn max() -> Result<(), Error> {
    // Successive max projections reduce the shape one axis at a time.
    let file = "Experiment-2029.czi";
    let reader = open(file)?;
    let view = reader.view();
    let m = view.max_proj(Axis::T)?;
    let a = m.as_array::<u16>()?;
    assert_eq!(m.shape(), [2, 1, 1280, 1280]);
    assert_eq!(a.shape(), [2, 1, 1280, 1280]);
    let mc = view.max_proj(Axis::C)?;
    let a = mc.as_array::<u16>()?;
    assert_eq!(mc.shape(), [1, 10, 1280, 1280]);
    assert_eq!(a.shape(), [1, 10, 1280, 1280]);
    let mz = mc.max_proj(Axis::Z)?;
    let a = mz.as_array::<u16>()?;
    assert_eq!(mz.shape(), [10, 1280, 1280]);
    assert_eq!(a.shape(), [10, 1280, 1280]);
    let mt = mz.max_proj(Axis::T)?;
    let a = mt.as_array::<u16>()?;
    assert_eq!(mt.shape(), [1280, 1280]);
    assert_eq!(a.shape(), [1280, 1280]);
    Ok(())
}
}

194
src/main.rs Normal file
View File

@@ -0,0 +1,194 @@
use clap::{Parser, Subcommand};
#[cfg(feature = "movie")]
use ndarray::SliceInfoElem;
use ndbioimage::error::Error;
#[cfg(feature = "movie")]
use ndbioimage::movie::MovieOptions;
use ndbioimage::reader::{split_path_and_series, Reader};
#[cfg(feature = "tiff")]
use ndbioimage::tiff::TiffOptions;
use ndbioimage::view::View;
use std::path::PathBuf;
// Top-level command line parser; the actual work is dispatched on
// `Commands`.  (Comments are `//` on purpose: `///` would become clap help
// text and change the CLI output.)
#[derive(Parser)]
#[command(arg_required_else_help = true, version, about, long_about = None, propagate_version = true)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}
// Subcommands of the ndbioimage CLI; the `///` doc comments double as clap
// help strings and must not be changed lightly.
#[derive(Subcommand)]
enum Commands {
    /// Print some metadata
    Info {
        #[arg(value_name = "FILE", num_args(1..))]
        file: Vec<PathBuf>,
    },
    /// save ome metadata as xml
    ExtractOME {
        #[arg(value_name = "FILE", num_args(1..))]
        file: Vec<PathBuf>,
    },
    /// Save the image as tiff file
    #[cfg(feature = "tiff")]
    Tiff {
        #[arg(value_name = "FILE", num_args(1..))]
        file: Vec<PathBuf>,
        #[arg(short, long, value_name = "COLOR", num_args(1..))]
        colors: Vec<String>,
        #[arg(short, long, value_name = "OVERWRITE")]
        overwrite: bool,
    },
    /// Save the image as mp4 file
    #[cfg(feature = "movie")]
    Movie {
        #[arg(value_name = "FILE", num_args(1..))]
        file: Vec<PathBuf>,
        #[arg(short, long, value_name = "VELOCITY", default_value = "3.6")]
        velocity: f64,
        #[arg(short, long, value_name = "BRIGHTNESS", num_args(1..))]
        brightness: Vec<f64>,
        #[arg(short, long, value_name = "SCALE", default_value = "1.0")]
        scale: f64,
        #[arg(short = 'C', long, value_name = "COLOR", num_args(1..))]
        colors: Vec<String>,
        #[arg(short, long, value_name = "OVERWRITE")]
        overwrite: bool,
        #[arg(short, long, value_name = "REGISTER")]
        register: bool,
        #[arg(short, long, value_name = "CHANNEL")]
        channel: Option<isize>,
        // zslice and time are parsed by `parse_slice` below.
        #[arg(short, long, value_name = "ZSLICE")]
        zslice: Option<String>,
        #[arg(short, long, value_name = "TIME")]
        time: Option<String>,
        #[arg(short, long, value_name = "NO-SCALE-BRIGHTNESS")]
        no_scaling: bool,
    },
    /// Download the BioFormats jar into the correct folder
    DownloadBioFormats {
        #[arg(short, long, value_name = "GPL_FORMATS")]
        gpl_formats: bool,
    },
}
#[cfg(feature = "movie")]
/// Parse a python-style slice expression ("5", "1:10", "::2", also "1..10")
/// into a `SliceInfoElem`; both ":" and ".." are accepted as separators.
fn parse_slice(s: &str) -> Result<SliceInfoElem, Error> {
    let trimmed = s.trim().replace("..", ":");
    // BUG FIX: whether a separator was present must be remembered before
    // padding with `None`, otherwise "5:" (slice from 5 to end) collapses
    // into the same shape as "5" (a plain index).
    let has_separator = trimmed.contains(':');
    let mut parts = trimmed
        .split(':')
        .map(|part| {
            let part = part.trim();
            if part.is_empty() {
                // An omitted bound, e.g. the end in "5:".
                Ok(None)
            } else {
                // A non-empty segment that is not a number is an error
                // instead of being silently treated as an open bound.
                part.parse::<isize>()
                    .map(Some)
                    .map_err(|_| Error::Parse(s.to_string()))
            }
        })
        .collect::<Result<Vec<Option<isize>>, Error>>()?;
    if parts.len() > 3 {
        return Err(Error::Parse(s.to_string()));
    }
    while parts.len() < 3 {
        parts.push(None);
    }
    match parts[..] {
        // No separator at all: a plain index.
        [Some(index), None, None] if !has_separator => Ok(SliceInfoElem::Index(index)),
        [start, end, step] => Ok(SliceInfoElem::Slice {
            start: start.unwrap_or(0),
            end,
            step: step.unwrap_or(1),
        }),
        // parts always has exactly 3 elements here.
        _ => Err(Error::Parse(s.to_string())),
    }
}
/// Entry point of the ndbioimage command line tool: dispatches on the
/// parsed subcommand.
pub(crate) fn main() -> Result<(), Error> {
    let cli = Cli::parse();
    match &cli.command {
        Commands::Info { file } => {
            // Print a metadata summary of each file.
            for f in file {
                let (path, series) = split_path_and_series(f)?;
                let view = View::from_path(path, series.unwrap_or(0))?.squeeze()?;
                println!("{}", view.summary()?);
            }
        }
        Commands::ExtractOME { file } => {
            // Write the ome metadata of each file next to it as .xml.
            for f in file {
                let (path, series) = split_path_and_series(f)?;
                let reader = Reader::new(&path, series.unwrap_or(0))?;
                let xml = reader.get_ome_xml()?;
                std::fs::write(path.with_extension("xml"), xml)?;
            }
        }
        #[cfg(feature = "tiff")]
        Commands::Tiff {
            file,
            colors,
            overwrite,
        } => {
            let mut options = TiffOptions::new(true, None, colors.clone(), *overwrite)?;
            options.enable_bar()?;
            for f in file {
                let (path, series) = split_path_and_series(f)?;
                let view = View::from_path(path, series.unwrap_or(0))?;
                view.save_as_tiff(f.with_extension("tiff"), &options)?;
            }
        }
        #[cfg(feature = "movie")]
        Commands::Movie {
            file,
            velocity: speed,
            brightness,
            scale,
            colors,
            overwrite,
            register,
            channel,
            zslice,
            time,
            no_scaling,
        } => {
            let options = MovieOptions::new(
                *speed,
                brightness.to_vec(),
                *scale,
                colors.to_vec(),
                *overwrite,
                *register,
                *no_scaling,
            )?;
            for f in file {
                let (path, series) = split_path_and_series(f)?;
                let view = View::from_path(path, series.unwrap_or(0))?;
                // Start from the full range on all 5 axes, then narrow the
                // c, z and t axes with the command line options.
                let mut s = [SliceInfoElem::Slice {
                    start: 0,
                    end: None,
                    step: 1,
                }; 5];
                if let Some(channel) = channel {
                    s[0] = SliceInfoElem::Index(*channel);
                };
                if let Some(zslice) = zslice {
                    s[1] = parse_slice(zslice)?;
                }
                if let Some(time) = time {
                    s[2] = parse_slice(time)?;
                }
                view.into_dyn()
                    .slice(s.as_slice())?
                    .save_as_movie(f.with_extension("mp4"), &options)?;
            }
        }
        Commands::DownloadBioFormats { gpl_formats } => {
            ndbioimage::download_bioformats(*gpl_formats)?
        }
    }
    Ok(())
}

363
src/metadata.rs Normal file
View File

@@ -0,0 +1,363 @@
use crate::error::Error;
use itertools::Itertools;
use ome_metadata::Ome;
use ome_metadata::ome::{
BinningType, Convert, Image, Instrument, Objective, Pixels, UnitsLength, UnitsTime,
};
/// `Metadata` accessors for a top-level OME document.
impl Metadata for Ome {
    /// Find the instrument referenced by the first image, if any.
    fn get_instrument(&self) -> Option<&Instrument> {
        // borrow the id instead of cloning it; both borrows are immutable
        let instrument_id = &self.get_image()?.instrument_ref.as_ref()?.id;
        self.instrument.iter().find(|i| &i.id == instrument_id)
    }

    /// The first image in the document, if any.
    fn get_image(&self) -> Option<&Image> {
        // `first` already yields `Option<&Image>`; no need to match and re-wrap
        self.image.first()
    }
}
pub trait Metadata {
fn get_instrument(&self) -> Option<&Instrument>;
fn get_image(&self) -> Option<&Image>;
fn get_pixels(&self) -> Option<&Pixels> {
if let Some(image) = self.get_image() {
Some(&image.pixels)
} else {
None
}
}
fn get_objective(&self) -> Option<&Objective> {
let objective_id = self.get_image()?.objective_settings.as_ref()?.id.clone();
self.get_instrument()?
.objective
.iter()
.find(|o| o.id == objective_id)
}
fn get_tube_lens(&self) -> Option<Objective> {
Some(Objective {
manufacturer: None,
model: Some("Unknown".to_string()),
serial_number: None,
lot_number: None,
id: "TubeLens:1".to_string(),
correction: None,
immersion: None,
lens_na: None,
nominal_magnification: Some(1.0),
calibrated_magnification: None,
working_distance: None,
working_distance_unit: UnitsLength::um,
iris: None,
annotation_ref: vec![],
})
}
/// shape of the data along cztyx axes
fn shape(&self) -> Result<(usize, usize, usize, usize, usize), Error> {
if let Some(pixels) = self.get_pixels() {
Ok((
pixels.size_c as usize,
pixels.size_z as usize,
pixels.size_t as usize,
pixels.size_y as usize,
pixels.size_x as usize,
))
} else {
Err(Error::NoImageOrPixels)
}
}
/// pixel size in nm
fn pixel_size(&self) -> Result<Option<f64>, Error> {
if let Some(pixels) = self.get_pixels() {
match (pixels.physical_size_x, pixels.physical_size_y) {
(Some(x), Some(y)) => Ok(Some(
(pixels
.physical_size_x_unit
.convert(&UnitsLength::nm, x as f64)?
+ pixels
.physical_size_y_unit
.convert(&UnitsLength::nm, y as f64)?)
/ 2f64,
)),
(Some(x), None) => Ok(Some(
pixels
.physical_size_x_unit
.convert(&UnitsLength::nm, x as f64)?
.powi(2),
)),
(None, Some(y)) => Ok(Some(
pixels
.physical_size_y_unit
.convert(&UnitsLength::nm, y as f64)?
.powi(2),
)),
_ => Ok(None),
}
} else {
Ok(None)
}
}
/// distance between planes in z-stack in nm
fn delta_z(&self) -> Result<Option<f64>, Error> {
if let Some(pixels) = self.get_pixels() {
if let Some(z) = pixels.physical_size_z {
return Ok(Some(
pixels
.physical_size_z_unit
.convert(&UnitsLength::nm, z as f64)?,
));
}
}
Ok(None)
}
/// time interval in seconds for time-lapse images
fn time_interval(&self) -> Result<Option<f64>, Error> {
if let Some(pixels) = self.get_pixels() {
if let Some(t) = pixels.plane.iter().map(|p| p.the_t).max() {
if t > 0 {
let plane_a = pixels.plane
.iter()
.find(|p| (p.the_c == 0) && (p.the_z == 0) && (p.the_t == 0));
let plane_b = pixels.plane
.iter()
.find(|p| (p.the_c == 0) && (p.the_z == 0) && (p.the_t == t));
if let (Some(a), Some(b)) = (plane_a, plane_b) {
if let (Some(a_t), Some(b_t)) = (a.delta_t, b.delta_t) {
return Ok(Some(
(b.delta_t_unit.convert(&UnitsTime::s, b_t as f64)?
- a.delta_t_unit.convert(&UnitsTime::s, a_t as f64)?)
.abs()
/ (t as f64),
));
}
}
}
}
}
Ok(None)
}
/// exposure time for channel, z=0 and t=0
fn exposure_time(&self, channel: usize) -> Result<Option<f64>, Error> {
let c = channel as i32;
if let Some(pixels) = self.get_pixels() {
if let Some(p) = pixels.plane
.iter()
.find(|p| (p.the_c == c) && (p.the_z == 0) && (p.the_t == 0))
{
if let Some(t) = p.exposure_time {
return Ok(Some(p.exposure_time_unit.convert(&UnitsTime::s, t as f64)?));
}
}
}
Ok(None)
}
fn binning(&self, channel: usize) -> Option<usize> {
match self
.get_pixels()?
.channel
.get(channel)?
.detector_settings
.as_ref()?
.binning
.as_ref()?
{
BinningType::_1X1 => Some(1),
BinningType::_2X2 => Some(2),
BinningType::_4X4 => Some(4),
BinningType::_8X8 => Some(8),
BinningType::Other => None,
}
}
fn laser_wavelengths(&self, channel: usize) -> Result<Option<f64>, Error> {
if let Some(pixels) = self.get_pixels() {
if let Some(channel) = pixels.channel.get(channel) {
if let Some(w) = channel.excitation_wavelength {
return Ok(Some(
channel
.excitation_wavelength_unit
.convert(&UnitsLength::nm, w as f64)?,
));
}
}
}
Ok(None)
}
fn laser_powers(&self, channel: usize) -> Result<Option<f64>, Error> {
if let Some(pixels) = self.get_pixels() {
if let Some(channel) = pixels.channel.get(channel) {
if let Some(ls) = &channel.light_source_settings {
if let Some(a) = ls.attenuation {
return if (0. ..=1.).contains(&a) {
Ok(Some(1f64 - (a as f64)))
} else {
Err(Error::InvalidAttenuation(a.to_string()))
};
}
}
}
}
Ok(None)
}
fn objective_name(&self) -> Option<String> {
Some(self.get_objective()?.model.as_ref()?.clone())
}
fn magnification(&self) -> Option<f64> {
Some(
(self.get_objective()?.nominal_magnification? as f64)
* (self.get_tube_lens()?.nominal_magnification? as f64),
)
}
fn tube_lens_name(&self) -> Option<String> {
self.get_tube_lens()?.model.clone()
}
fn filter_set_name(&self, channel: usize) -> Option<String> {
let filter_set_id = self
.get_pixels()?
.channel
.get(channel)?
.filter_set_ref
.as_ref()?
.id
.clone();
self.get_instrument()
.as_ref()?
.filter_set
.iter()
.find(|f| f.id == filter_set_id)?
.model
.clone()
}
fn gain(&self, channel: usize) -> Option<f64> {
if let Some(pixels) = self.get_pixels() {
Some(
*pixels
.channel
.get(channel)?
.detector_settings
.as_ref()?
.gain
.as_ref()? as f64,
)
} else {
None
}
}
fn summary(&self) -> Result<String, Error> {
let size_c = if let Some(pixels) = self.get_pixels() {
pixels.channel.len()
} else {
0
};
let mut s = "".to_string();
if let Ok(Some(pixel_size)) = self.pixel_size() {
s.push_str(&format!("pixel size: {pixel_size:.2} nm\n"));
}
if let Ok(Some(delta_z)) = self.delta_z() {
s.push_str(&format!("z-interval: {delta_z:.2} nm\n"))
}
if let Ok(Some(time_interval)) = self.time_interval() {
s.push_str(&format!("time interval: {time_interval:.2} s\n"))
}
let exposure_time = (0..size_c)
.map(|c| self.exposure_time(c))
.collect::<Result<Vec<_>, Error>>()?
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !exposure_time.is_empty() {
s.push_str(&format!(
"exposure time: {}\n",
exposure_time.into_iter().join(" | ")
));
}
if let Some(magnification) = self.magnification() {
s.push_str(&format!("magnification: {magnification}x\n"))
}
if let Some(objective_name) = self.objective_name() {
s.push_str(&format!("objective: {objective_name}\n"))
}
if let Some(tube_lens_name) = self.tube_lens_name() {
s.push_str(&format!("tube lens: {tube_lens_name}\n"))
}
let filter_set_name = (0..size_c)
.map(|c| self.filter_set_name(c))
.collect::<Vec<_>>()
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !filter_set_name.is_empty() {
s.push_str(&format!(
"filter set: {}\n",
filter_set_name.into_iter().join(" | ")
));
}
let gain = (0..size_c)
.map(|c| self.gain(c))
.collect::<Vec<_>>()
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !gain.is_empty() {
s.push_str(&format!("gain: {}\n", gain.into_iter().join(" ")));
}
let laser_wavelengths = (0..size_c)
.map(|c| self.laser_wavelengths(c))
.collect::<Result<Vec<_>, Error>>()?
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !laser_wavelengths.is_empty() {
s.push_str(&format!(
"laser colors: {} nm\n",
laser_wavelengths.into_iter().join(" | ")
));
}
let laser_powers = (0..size_c)
.map(|c| self.laser_powers(c))
.collect::<Result<Vec<_>, Error>>()?
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !laser_powers.is_empty() {
s.push_str(&format!(
"laser powers: {}\n",
laser_powers.into_iter().join(" | ")
));
}
let binning = (0..size_c)
.map(|c| self.binning(c))
.collect::<Vec<_>>()
.into_iter()
.flatten()
.collect::<Vec<_>>();
if !binning.is_empty() {
s.push_str(&format!(
"binning: {}\n",
binning.into_iter().join(" | ")
));
}
Ok(s.to_string())
}
}

285
src/movie.rs Normal file
View File

@@ -0,0 +1,285 @@
use crate::axes::Axis;
use crate::colors::Color;
use crate::error::Error;
use crate::reader::PixelType;
use crate::view::View;
use ffmpeg_sidecar::command::FfmpegCommand;
use ffmpeg_sidecar::download::auto_download;
use ffmpeg_sidecar::event::{FfmpegEvent, LogLevel};
use itertools::Itertools;
use ndarray::{Array2, Array3, Dimension, IxDyn, s, stack};
use ordered_float::OrderedFloat;
use std::io::Write;
use std::path::Path;
use std::thread;
/// Options controlling how a view is rendered and encoded as a movie.
pub struct MovieOptions {
    velocity: f64,                // playback speed factor (fed to ffmpeg's setpts filter)
    brightness: Vec<f64>,         // per-channel brightness multipliers
    scale: f64,                   // spatial scaling factor for the output frames
    colors: Option<Vec<Vec<u8>>>, // per-channel RGB colors; None means default (white)
    overwrite: bool,              // overwrite an existing output file
    register: bool,               // image registration; currently not implemented
    no_scaling: bool,             // use the full pixel-type range instead of percentile scaling
}
impl Default for MovieOptions {
fn default() -> Self {
Self {
velocity: 3.6,
brightness: Vec::new(),
scale: 1.0,
colors: None,
overwrite: false,
register: false,
no_scaling: false,
}
}
}
impl MovieOptions {
pub fn new(
velocity: f64,
brightness: Vec<f64>,
scale: f64,
colors: Vec<String>,
overwrite: bool,
register: bool,
no_scaling: bool,
) -> Result<Self, Error> {
let colors = if colors.is_empty() {
None
} else {
let colors = colors
.iter()
.map(|c| c.parse::<Color>())
.collect::<Result<Vec<_>, Error>>()?;
Some(colors.into_iter().map(|c| c.to_rgb()).collect())
};
Ok(Self {
velocity,
brightness,
scale,
colors,
overwrite,
register,
no_scaling,
})
}
pub fn set_velocity(&mut self, velocity: f64) {
self.velocity = velocity;
}
pub fn set_brightness(&mut self, brightness: Vec<f64>) {
self.brightness = brightness;
}
pub fn set_scale(&mut self, scale: f64) {
self.scale = scale;
}
pub fn set_colors(&mut self, colors: &[String]) -> Result<(), Error> {
let colors = colors
.iter()
.map(|c| c.parse::<Color>())
.collect::<Result<Vec<_>, Error>>()?;
self.colors = Some(colors.into_iter().map(|c| c.to_rgb()).collect());
Ok(())
}
pub fn set_overwrite(&mut self, overwrite: bool) {
self.overwrite = overwrite;
}
}
/// Determine a display window (a, b) for one channel: the 1st and 99th
/// percentile of the finite, non-zero pixel values, widening to the full
/// range (and finally to the unit range) when the window is degenerate.
fn get_ab(tyx: View<IxDyn>) -> Result<(f64, f64), Error> {
    let s = tyx
        .as_array::<f64>()?
        .iter()
        .filter_map(|&i| {
            if i == 0.0 || !i.is_finite() {
                None
            } else {
                Some(OrderedFloat::from(i))
            }
        })
        .sorted_unstable()
        .map(f64::from)
        .collect::<Vec<_>>();
    let n = s.len();
    // every pixel is zero or non-finite: fall back to the unit range instead
    // of panicking on the out-of-bounds indexing below
    if n == 0 {
        return Ok((0.0, 1.0));
    }
    let mut a = s[n / 100];
    let mut b = s[n - n / 100 - 1];
    if a == b {
        a = s[0];
        b = s[n - 1];
    }
    if a == b {
        a = 0.0;
        b = 1.0;
    }
    Ok((a, b))
}
/// Normalize a grayscale frame to [0, 1] using the window [a, b] and tint it
/// with the given RGB color, producing a (y, x, 3) array.
fn cframe(frame: Array2<f64>, color: &[u8], a: f64, b: f64) -> Array3<f64> {
    // window and clamp every pixel into [0, 1]
    let scaled = frame.mapv(|v| ((v - a) / (b - a)).clamp(0.0, 1.0));
    // one tinted copy of the frame per color component
    let channels = color
        .iter()
        .map(|&component| &scaled * (component as f64 / 255.0))
        .collect::<Vec<Array2<f64>>>();
    let channel_views = channels.iter().map(|c| c.view()).collect::<Vec<_>>();
    // all channels share the frame's shape, so stacking cannot fail
    stack(ndarray::Axis(2), &channel_views).unwrap()
}
impl<D> View<D>
where
    D: Dimension,
{
    /// Render the view as an mp4 movie at `path`.
    ///
    /// The view is max-projected along z, every channel is windowed (either
    /// by percentile via `get_ab` or by the full pixel-type range when
    /// `no_scaling` is set), tinted with its color, and the summed RGB frames
    /// are piped as raw rgb24 into ffmpeg (x264, yuv420p).
    ///
    /// Fails if `register` is requested (not implemented), if the output
    /// exists and `overwrite` is not set, or if ffmpeg cannot be obtained.
    pub fn save_as_movie<P>(&self, path: P, options: &MovieOptions) -> Result<(), Error>
    where
        P: AsRef<Path>,
    {
        // registration is not implemented yet
        if options.register {
            return Err(Error::NotImplemented("register".to_string()));
        }
        let path = path.as_ref().to_path_buf();
        if path.exists() {
            if options.overwrite {
                std::fs::remove_file(&path)?;
            } else {
                return Err(Error::FileAlreadyExists(path.display().to_string()));
            }
        }
        // collapse z so each (c, t) yields a single 2D frame
        let view = self.max_proj(Axis::Z)?.reset_axes()?;
        let velocity = options.velocity;
        let mut brightness = options.brightness.clone();
        let scale = options.scale;
        let shape = view.shape();
        let size_c = shape[0];
        let size_t = shape[2];
        let size_y = shape[3];
        let size_x = shape[4];
        // yuv420p requires even dimensions: round the scaled size to even
        let shape_y = 2 * (((size_y as f64 * scale) / 2.).round() as usize);
        let shape_x = 2 * (((size_x as f64 * scale) / 2.).round() as usize);
        // pad per-channel settings with defaults (brightness 1.0, white)
        while brightness.len() < size_c {
            brightness.push(1.0);
        }
        let mut colors = if let Some(colors) = options.colors.as_ref() {
            colors.to_vec()
        } else {
            Vec::new()
        };
        while colors.len() < size_c {
            colors.push(vec![255, 255, 255]);
        }
        // make sure an ffmpeg binary is available (downloads one if needed)
        if let Err(e) = auto_download() {
            return Err(Error::Ffmpeg(e.to_string()));
        }
        let mut movie = FfmpegCommand::new()
            .args([
                "-f",
                "rawvideo",
                "-pix_fmt",
                "rgb24",
                "-s",
                &format!("{size_x}x{size_y}"),
            ])
            .input("-")
            .args([
                "-vcodec",
                "libx264",
                "-preset",
                "veryslow",
                "-pix_fmt",
                "yuv420p",
                "-r",
                "7",
                "-vf",
                &format!("setpts={velocity}*PTS,scale={shape_x}:{shape_y}:flags=neighbor"),
            ])
            .output(path.to_str().expect("path cannot be converted to string"))
            .spawn()?;
        let mut stdin = movie.take_stdin().unwrap();
        // per-channel display window (a, b)
        let ab = if options.no_scaling {
            vec![
                // use the full range of the pixel type
                match view.pixel_type {
                    PixelType::I8 => (i8::MIN as f64, i8::MAX as f64),
                    PixelType::U8 => (u8::MIN as f64, u8::MAX as f64),
                    PixelType::I16 => (i16::MIN as f64, i16::MAX as f64),
                    PixelType::U16 => (u16::MIN as f64, u16::MAX as f64),
                    PixelType::I32 => (i32::MIN as f64, i32::MAX as f64),
                    PixelType::U32 => (u32::MIN as f64, u32::MAX as f64),
                    PixelType::I64 => (i64::MIN as f64, i64::MAX as f64),
                    PixelType::U64 => (u64::MIN as f64, u64::MAX as f64),
                    // float types: assume data in the unit range
                    _ => (0.0, 1.0),
                };
                view.size_c
            ]
        } else {
            (0..size_c)
                .map(|c| match view.slice(s![c, .., .., .., ..]) {
                    Ok(slice) => get_ab(slice.into_dyn()),
                    Err(e) => Err(e),
                })
                .collect::<Result<Vec<_>, Error>>()?
        };
        // feed frames to ffmpeg from a separate thread so the writes do not
        // deadlock against the event loop below reading ffmpeg's output
        thread::spawn(move || {
            for t in 0..size_t {
                let mut frame = Array3::<f64>::zeros((size_y, size_x, 3));
                for c in 0..size_c {
                    frame = frame
                        + cframe(
                            view.get_frame(c, 0, t).unwrap(),
                            &colors[c],
                            ab[c].0,
                            // brightness scales the upper window bound down
                            ab[c].1 / brightness[c],
                        );
                }
                let frame = (frame.clamp(0.0, 1.0) * 255.0).round().mapv(|i| i as u8);
                let bytes: Vec<_> = frame.flatten().into_iter().collect();
                stdin.write_all(&bytes).unwrap();
            }
        });
        movie
            .iter()
            .map_err(|e| Error::Ffmpeg(e.to_string()))?
            .for_each(|e| match e {
                FfmpegEvent::Log(LogLevel::Error, e) => println!("Error: {}", e),
                // NOTE(review): "00:00:15" is a hard-coded total duration; it
                // does not reflect the actual movie length — confirm intent
                FfmpegEvent::Progress(p) => println!("Progress: {} / 00:00:15", p.time),
                _ => {}
            });
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::reader::Reader;

    /// Smoke test: open an example file and render it as a movie.
    #[test]
    fn movie() -> Result<(), Error> {
        let file = "1xp53-01-AP1.czi";
        let path = std::env::current_dir()?
            .join("tests")
            .join("files")
            .join(file);
        let reader = Reader::new(&path, 0)?;
        let view = reader.view();
        let mut options = MovieOptions::default();
        options.set_overwrite(true);
        // write to the system temp directory instead of a hard-coded,
        // user-specific absolute path that only exists on one machine
        view.save_as_movie(std::env::temp_dir().join("movie.mp4"), &options)?;
        Ok(())
    }
}

989
src/py.rs Normal file
View File

@@ -0,0 +1,989 @@
use crate::axes::Axis;
use crate::bioformats::download_bioformats;
use crate::error::Error;
use crate::metadata::Metadata;
use crate::reader::{PixelType, Reader};
use crate::view::{Item, View};
use itertools::Itertools;
use ndarray::{Ix0, IxDyn, SliceInfoElem};
use numpy::IntoPyArray;
use ome_metadata::Ome;
use pyo3::IntoPyObjectExt;
use pyo3::exceptions::{PyNotImplementedError, PyValueError};
use pyo3::prelude::*;
use pyo3::types::{PyEllipsis, PyInt, PyList, PySlice, PySliceMethods, PyString, PyTuple};
use serde::{Deserialize, Serialize};
use serde_json::{from_str, to_string};
use std::path::PathBuf;
use std::sync::Arc;
impl From<crate::error::Error> for PyErr {
fn from(err: crate::error::Error) -> PyErr {
PyErr::new::<PyValueError, _>(err.to_string())
}
}
/// Pickle helper: a stateless callable that reconstructs a `PyView` from its
/// JSON-serialized state (see `PyView::__reduce__` elsewhere in this module).
#[pyclass(module = "ndbioimage.ndbioimage_rs")]
struct ViewConstructor;
#[pymethods]
impl ViewConstructor {
#[new]
fn new() -> Self {
Self
}
fn __getnewargs__<'py>(&self, py: Python<'py>) -> Bound<'py, PyTuple> {
PyTuple::empty(py)
}
#[staticmethod]
fn __call__(state: String) -> PyResult<PyView> {
if let Ok(new) = from_str(&state) {
Ok(new)
} else {
Err(PyErr::new::<PyValueError, _>(
"cannot parse state".to_string(),
))
}
}
}
/// Python-facing wrapper around a `View`, together with the pixel type used
/// when converting data to Python/numpy and the cached OME metadata.
#[pyclass(subclass, module = "ndbioimage.ndbioimage_rs")]
#[pyo3(name = "View")]
#[derive(Clone, Debug, Serialize, Deserialize)]
struct PyView {
    view: View<IxDyn>, // the underlying n-dimensional view
    dtype: PixelType,  // data type frames are converted to on the Python side
    ome: Arc<Ome>,     // OME metadata, shared between cloned views
}
#[pymethods]
impl PyView {
    /// new view on a file at path, open series #, open as dtype: (u)int(8/16/32) or float(32/64)
    ///
    /// `path` may be anything convertible by Python's `str()` (e.g.
    /// `pathlib.Path`), an existing `View` (which is cloned), or a directory.
    #[new]
    #[pyo3(signature = (path, series = 0, dtype = "uint16", axes = "cztyx"))]
    fn new<'py>(
        py: Python<'py>,
        path: Bound<'py, PyAny>,
        series: usize,
        dtype: &str,
        axes: &str,
    ) -> PyResult<Self> {
        // passing an existing View returns a clone of it
        if path.is_instance_of::<Self>() {
            Ok(path.cast_into::<Self>()?.extract::<Self>()?)
        } else {
            // stringify path-like objects via Python's builtin str()
            let builtins = PyModule::import(py, "builtins")?;
            let mut path = PathBuf::from(
                builtins
                    .getattr("str")?
                    .call1((path,))?
                    .cast_into::<PyString>()?
                    .extract::<String>()?,
            );
            // a directory is opened through the first .tif file inside it
            // NOTE(review): read_dir iteration order is platform dependent —
            // confirm "first" is well-defined for image sequences
            if path.is_dir() {
                for file in path.read_dir()?.flatten() {
                    let p = file.path();
                    if file.path().is_file() && (p.extension() == Some("tif".as_ref())) {
                        path = p;
                        break;
                    }
                }
            }
            let axes = axes
                .chars()
                .map(|a| a.to_string().parse())
                .collect::<Result<Vec<Axis>, Error>>()?;
            let reader = Reader::new(&path, series)?;
            let view = View::new_with_axes(Arc::new(reader), axes)?;
            let dtype = dtype.parse()?;
            let ome = Arc::new(view.get_ome()?);
            Ok(Self { view, dtype, ome })
        }
    }
fn squeeze<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
let view = self.view.squeeze()?;
if view.ndim() == 0 {
Ok(match self.dtype {
PixelType::I8 => view
.into_dimensionality::<Ix0>()?
.item::<i8>()?
.into_pyobject(py)?
.into_any(),
PixelType::U8 => view
.into_dimensionality::<Ix0>()?
.item::<u8>()?
.into_pyobject(py)?
.into_any(),
PixelType::I16 => view
.into_dimensionality::<Ix0>()?
.item::<i16>()?
.into_pyobject(py)?
.into_any(),
PixelType::U16 => view
.into_dimensionality::<Ix0>()?
.item::<u16>()?
.into_pyobject(py)?
.into_any(),
PixelType::I32 => view
.into_dimensionality::<Ix0>()?
.item::<i32>()?
.into_pyobject(py)?
.into_any(),
PixelType::U32 => view
.into_dimensionality::<Ix0>()?
.item::<u32>()?
.into_pyobject(py)?
.into_any(),
PixelType::I64 => view
.into_dimensionality::<Ix0>()?
.item::<i64>()?
.into_pyobject(py)?
.into_any(),
PixelType::U64 => view
.into_dimensionality::<Ix0>()?
.item::<u64>()?
.into_pyobject(py)?
.into_any(),
PixelType::I128 => view
.into_dimensionality::<Ix0>()?
.item::<i64>()?
.into_pyobject(py)?
.into_any(),
PixelType::U128 => view
.into_dimensionality::<Ix0>()?
.item::<u64>()?
.into_pyobject(py)?
.into_any(),
PixelType::F32 => view
.into_dimensionality::<Ix0>()?
.item::<f32>()?
.into_pyobject(py)?
.into_any(),
PixelType::F64 => view
.into_dimensionality::<Ix0>()?
.item::<f64>()?
.into_pyobject(py)?
.into_any(),
PixelType::F128 => view
.into_dimensionality::<Ix0>()?
.item::<f64>()?
.into_pyobject(py)?
.into_any(),
})
} else {
PyView {
view,
dtype: self.dtype.clone(),
ome: self.ome.clone(),
}
.into_bound_py_any(py)
}
}
    /// close the file: does nothing as this is handled automatically
    /// (kept so Python code written against file-like APIs keeps working)
    fn close(&self) -> PyResult<()> {
        Ok(())
    }
    /// change the data type of the view: (u)int(8/16/32) or float(32/64);
    /// returns a new view, the data itself is converted lazily
    fn as_type(&self, dtype: &str) -> PyResult<PyView> {
        Ok(PyView {
            view: self.view.clone(),
            dtype: dtype.parse()?,
            ome: self.ome.clone(),
        })
    }
    /// change the data type of the view: (u)int(8/16/32) or float(32/64);
    /// numpy-style alias for `as_type`
    fn astype(&self, dtype: &str) -> PyResult<PyView> {
        self.as_type(dtype)
    }
    /// slice the view and return a new view or a single number
    ///
    /// Accepts an int, slice, ellipsis, or a tuple/list thereof. Missing
    /// trailing dimensions (or the position of a single ellipsis) are filled
    /// with full slices. A fully-indexed (0-dimensional) result is returned
    /// as a Python scalar of the view's dtype.
    fn __getitem__<'py>(
        &self,
        py: Python<'py>,
        n: Bound<'py, PyAny>,
    ) -> PyResult<Bound<'py, PyAny>> {
        // normalize the argument to a flat list of index objects
        let slice: Vec<_> = if n.is_instance_of::<PyTuple>() {
            n.cast_into::<PyTuple>()?.into_iter().collect()
        } else if n.is_instance_of::<PyList>() {
            n.cast_into::<PyList>()?.into_iter().collect()
        } else {
            vec![n]
        };
        let mut new_slice = Vec::new();
        let mut ellipsis = None;
        let shape = self.view.shape();
        // NOTE(review): indices are zipped positionally with the leading
        // dimensions; when an ellipsis precedes a slice, that slice's
        // `indices()` is resolved against the wrong dimension length —
        // confirm intended ellipsis semantics
        for (i, (s, t)) in slice.iter().zip(shape.iter()).enumerate() {
            if s.is_instance_of::<PyInt>() {
                new_slice.push(SliceInfoElem::Index(s.cast::<PyInt>()?.extract::<isize>()?));
            } else if s.is_instance_of::<PySlice>() {
                // resolve negative / open-ended bounds against the axis length
                let u = s.cast::<PySlice>()?.indices(*t as isize)?;
                new_slice.push(SliceInfoElem::Slice {
                    start: u.start,
                    end: Some(u.stop),
                    step: u.step,
                });
            } else if s.is_instance_of::<PyEllipsis>() {
                if ellipsis.is_some() {
                    return Err(PyErr::new::<PyValueError, _>(
                        "cannot have more than one ellipsis".to_string(),
                    ));
                }
                let _ = ellipsis.insert(i);
            } else {
                return Err(PyErr::new::<PyValueError, _>(format!(
                    "cannot convert {:?} to slice",
                    s
                )));
            }
        }
        if new_slice.len() > shape.len() {
            return Err(PyErr::new::<PyValueError, _>(format!(
                "got more indices ({}) than dimensions ({})",
                new_slice.len(),
                shape.len()
            )));
        }
        // pad to full dimensionality: expand the ellipsis in place, or
        // append full slices at the end
        while new_slice.len() < shape.len() {
            if let Some(i) = ellipsis {
                new_slice.insert(
                    i,
                    SliceInfoElem::Slice {
                        start: 0,
                        end: None,
                        step: 1,
                    },
                )
            } else {
                new_slice.push(SliceInfoElem::Slice {
                    start: 0,
                    end: None,
                    step: 1,
                })
            }
        }
        let view = self.view.slice(new_slice.as_slice())?;
        if view.ndim() == 0 {
            // fully indexed: return a scalar of the current dtype
            Ok(match self.dtype {
                PixelType::I8 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<i8>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::U8 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<u8>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::I16 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<i16>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::U16 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<u16>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::I32 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<i32>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::U32 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<u32>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::F32 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<f32>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::F64 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<f64>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::I64 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<i64>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::U64 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<u64>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::I128 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<i128>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::U128 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<u128>()?
                    .into_pyobject(py)?
                    .into_any(),
                PixelType::F128 => view
                    .into_dimensionality::<Ix0>()?
                    .item::<f64>()?
                    .into_pyobject(py)?
                    .into_any(),
            })
        } else {
            PyView {
                view,
                dtype: self.dtype.clone(),
                ome: self.ome.clone(),
            }
            .into_bound_py_any(py)
        }
    }
    /// numpy protocol: materialize the view as an array, optionally
    /// converting to the requested dtype first
    #[pyo3(signature = (dtype = None))]
    fn __array__<'py>(&self, py: Python<'py>, dtype: Option<&str>) -> PyResult<Bound<'py, PyAny>> {
        if let Some(dtype) = dtype {
            self.as_type(dtype)?.as_array(py)
        } else {
            self.as_array(py)
        }
    }
    /// membership testing is deliberately unsupported
    fn __contains__(&self, _item: Bound<PyAny>) -> PyResult<bool> {
        Err(PyNotImplementedError::new_err("contains not implemented"))
    }
    /// context manager support: entering returns the view itself
    fn __enter__<'py>(slf: PyRef<'py, Self>) -> PyResult<PyRef<'py, Self>> {
        Ok(slf)
    }
    /// context manager support: exiting closes the view (a no-op)
    #[allow(unused_variables)]
    #[pyo3(signature = (exc_type=None, exc_val=None, exc_tb=None))]
    fn __exit__(
        &self,
        exc_type: Option<Bound<PyAny>>,
        exc_val: Option<Bound<PyAny>>,
        exc_tb: Option<Bound<PyAny>>,
    ) -> PyResult<()> {
        self.close()
    }
fn __reduce__(&self) -> PyResult<(ViewConstructor, (String,))> {
if let Ok(s) = to_string(self) {
Ok((ViewConstructor, (s,)))
} else {
Err(PyErr::new::<PyValueError, _>(
"cannot get state".to_string(),
))
}
}
fn __copy__(&self) -> Self {
Self {
view: self.view.clone(),
dtype: self.dtype.clone(),
ome: self.ome.clone(),
}
}
fn copy(&self) -> Self {
Self {
view: self.view.clone(),
dtype: self.dtype.clone(),
ome: self.ome.clone(),
}
}
    /// length of the first axis, as reported by the underlying view
    fn __len__(&self) -> PyResult<usize> {
        Ok(self.view.len())
    }
    /// representation: the metadata summary of the view
    fn __repr__(&self) -> PyResult<String> {
        Ok(self.view.summary()?)
    }
    /// string form: the path of the underlying file
    fn __str__(&self) -> PyResult<String> {
        Ok(self.view.path.display().to_string())
    }
    /// retrieve a single frame at czt, sliced accordingly
    ///
    /// Returns a numpy array of the view's dtype. I128/U128 are read as
    /// 64-bit values here — presumably because numpy has no 128-bit integer
    /// dtypes; large values would be truncated (TODO confirm).
    fn get_frame<'py>(
        &self,
        py: Python<'py>,
        c: isize,
        z: isize,
        t: isize,
    ) -> PyResult<Bound<'py, PyAny>> {
        Ok(match self.dtype {
            PixelType::I8 => self
                .view
                .get_frame::<i8, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::U8 => self
                .view
                .get_frame::<u8, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::I16 => self
                .view
                .get_frame::<i16, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::U16 => self
                .view
                .get_frame::<u16, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::I32 => self
                .view
                .get_frame::<i32, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::U32 => self
                .view
                .get_frame::<u32, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::F32 => self
                .view
                .get_frame::<f32, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::F64 => self
                .view
                .get_frame::<f64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::I64 => self
                .view
                .get_frame::<i64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::U64 => self
                .view
                .get_frame::<u64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::I128 => self
                .view
                .get_frame::<i64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::U128 => self
                .view
                .get_frame::<u64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
            PixelType::F128 => self
                .view
                .get_frame::<f64, _>(c, z, t)?
                .into_pyarray(py)
                .into_any(),
        })
    }
    /// collect all data into a flat 1-dimensional numpy array of the
    /// view's dtype (128-bit types are mapped to their 64-bit counterparts)
    fn flatten<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
        Ok(match self.dtype {
            PixelType::I8 => self.view.flatten::<i8>()?.into_pyarray(py).into_any(),
            PixelType::U8 => self.view.flatten::<u8>()?.into_pyarray(py).into_any(),
            PixelType::I16 => self.view.flatten::<i16>()?.into_pyarray(py).into_any(),
            PixelType::U16 => self.view.flatten::<u16>()?.into_pyarray(py).into_any(),
            PixelType::I32 => self.view.flatten::<i32>()?.into_pyarray(py).into_any(),
            PixelType::U32 => self.view.flatten::<u32>()?.into_pyarray(py).into_any(),
            PixelType::F32 => self.view.flatten::<f32>()?.into_pyarray(py).into_any(),
            PixelType::F64 => self.view.flatten::<f64>()?.into_pyarray(py).into_any(),
            PixelType::I64 => self.view.flatten::<i64>()?.into_pyarray(py).into_any(),
            PixelType::U64 => self.view.flatten::<u64>()?.into_pyarray(py).into_any(),
            PixelType::I128 => self.view.flatten::<i64>()?.into_pyarray(py).into_any(),
            PixelType::U128 => self.view.flatten::<u64>()?.into_pyarray(py).into_any(),
            PixelType::F128 => self.view.flatten::<f64>()?.into_pyarray(py).into_any(),
        })
    }
    /// serialize the view's data to raw bytes in the view's dtype
    /// (128-bit types are mapped to their 64-bit counterparts)
    fn to_bytes(&self) -> PyResult<Vec<u8>> {
        Ok(match self.dtype {
            PixelType::I8 => self.view.to_bytes::<i8>()?,
            PixelType::U8 => self.view.to_bytes::<u8>()?,
            PixelType::I16 => self.view.to_bytes::<i16>()?,
            PixelType::U16 => self.view.to_bytes::<u16>()?,
            PixelType::I32 => self.view.to_bytes::<i32>()?,
            PixelType::U32 => self.view.to_bytes::<u32>()?,
            PixelType::F32 => self.view.to_bytes::<f32>()?,
            PixelType::F64 => self.view.to_bytes::<f64>()?,
            PixelType::I64 => self.view.to_bytes::<i64>()?,
            PixelType::U64 => self.view.to_bytes::<u64>()?,
            PixelType::I128 => self.view.to_bytes::<i64>()?,
            PixelType::U128 => self.view.to_bytes::<u64>()?,
            PixelType::F128 => self.view.to_bytes::<f64>()?,
        })
    }
    /// numpy-style alias for `to_bytes`
    fn tobytes(&self) -> PyResult<Vec<u8>> {
        self.to_bytes()
    }
    /// retrieve the ome metadata as an XML string
    fn get_ome_xml(&self) -> PyResult<String> {
        Ok(self.view.get_ome_xml()?)
    }
    /// the file path, rendered as a string
    #[getter]
    fn path(&self) -> PyResult<String> {
        Ok(self.view.path.display().to_string())
    }
    /// the series in the file
    #[getter]
    fn series(&self) -> PyResult<usize> {
        Ok(self.view.series)
    }
    /// the axes in the view, concatenated into a single string (e.g. "cztyx")
    #[getter]
    fn axes(&self) -> String {
        self.view.axes().iter().map(|a| format!("{:?}", a)).join("")
    }
    /// the shape of the view, one length per axis
    #[getter]
    fn shape(&self) -> Vec<usize> {
        self.view.shape()
    }
    /// debug representation of the current slice along each axis
    #[getter]
    fn slice(&self) -> PyResult<Vec<String>> {
        Ok(self
            .view
            .get_slice()
            .iter()
            .map(|s| format!("{:#?}", s))
            .collect())
    }
    /// the number of pixels in the view
    #[getter]
    fn size(&self) -> usize {
        self.view.size()
    }
    /// the number of dimensions in the view
    #[getter]
    fn ndim(&self) -> usize {
        self.view.ndim()
    }
    /// find the position of an axis, given either by name (e.g. "z") or
    /// directly as an integer position
    #[pyo3(text_signature = "axis: str | int")]
    fn get_ax(&self, axis: Bound<PyAny>) -> PyResult<usize> {
        if axis.is_instance_of::<PyString>() {
            let axis = axis
                .cast_into::<PyString>()?
                .extract::<String>()?
                .parse::<Axis>()?;
            self.view
                .axes()
                .iter()
                .position(|a| *a == axis)
                .ok_or_else(|| {
                    PyErr::new::<PyValueError, _>(format!("cannot find axis {:?}", axis))
                })
        } else if axis.is_instance_of::<PyInt>() {
            // integer positions are passed through unchecked
            Ok(axis.cast_into::<PyInt>()?.extract::<usize>()?)
        } else {
            Err(PyErr::new::<PyValueError, _>(
                "cannot convert to axis".to_string(),
            ))
        }
    }
    /// swap two axes, each given by name or position
    #[pyo3(text_signature = "ax0: str | int, ax1: str | int")]
    fn swap_axes(&self, ax0: Bound<PyAny>, ax1: Bound<PyAny>) -> PyResult<Self> {
        let ax0 = self.get_ax(ax0)?;
        let ax1 = self.get_ax(ax1)?;
        let view = self.view.swap_axes(ax0, ax1)?;
        Ok(PyView {
            view,
            dtype: self.dtype.clone(),
            ome: self.ome.clone(),
        })
    }
    /// permute the order of the axes; without an argument the axis order is
    /// reversed, like numpy's transpose
    #[pyo3(signature = (axes = None), text_signature = "axes: list[str | int] = None")]
    fn transpose(&self, axes: Option<Vec<Bound<PyAny>>>) -> PyResult<Self> {
        let view = if let Some(axes) = axes {
            let ax = axes
                .into_iter()
                .map(|a| self.get_ax(a))
                .collect::<Result<Vec<_>, _>>()?;
            self.view.permute_axes(&ax)?
        } else {
            self.view.transpose()?
        };
        Ok(PyView {
            view,
            dtype: self.dtype.clone(),
            ome: self.ome.clone(),
        })
    }
    /// numpy-style transpose property
    #[allow(non_snake_case)]
    #[getter]
    fn T(&self) -> PyResult<Self> {
        self.transpose(None)
    }
    /// collect data into a numpy array of the view's dtype
    /// (128-bit types are mapped to their 64-bit counterparts)
    fn as_array<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyAny>> {
        Ok(match self.dtype {
            PixelType::I8 => self.view.as_array_dyn::<i8>()?.into_pyarray(py).into_any(),
            PixelType::U8 => self.view.as_array_dyn::<u8>()?.into_pyarray(py).into_any(),
            PixelType::I16 => self.view.as_array_dyn::<i16>()?.into_pyarray(py).into_any(),
            PixelType::U16 => self.view.as_array_dyn::<u16>()?.into_pyarray(py).into_any(),
            PixelType::I32 => self.view.as_array_dyn::<i32>()?.into_pyarray(py).into_any(),
            PixelType::U32 => self.view.as_array_dyn::<u32>()?.into_pyarray(py).into_any(),
            PixelType::F32 => self.view.as_array_dyn::<f32>()?.into_pyarray(py).into_any(),
            PixelType::F64 => self.view.as_array_dyn::<f64>()?.into_pyarray(py).into_any(),
            PixelType::I64 => self.view.as_array_dyn::<i64>()?.into_pyarray(py).into_any(),
            PixelType::U64 => self.view.as_array_dyn::<u64>()?.into_pyarray(py).into_any(),
            PixelType::I128 => self.view.as_array_dyn::<i64>()?.into_pyarray(py).into_any(),
            PixelType::U128 => self.view.as_array_dyn::<u64>()?.into_pyarray(py).into_any(),
            PixelType::F128 => self.view.as_array_dyn::<f64>()?.into_pyarray(py).into_any(),
        })
    }
    /// the view's data type as a numpy-style dtype name
    #[getter]
    fn get_dtype(&self) -> PyResult<&str> {
        Ok(match self.dtype {
            PixelType::I8 => "int8",
            PixelType::U8 => "uint8",
            PixelType::I16 => "int16",
            PixelType::U16 => "uint16",
            PixelType::I32 => "int32",
            PixelType::U32 => "uint32",
            PixelType::F32 => "float32",
            PixelType::F64 => "float64",
            PixelType::I64 => "int64",
            PixelType::U64 => "uint64",
            PixelType::I128 => "int128",
            PixelType::U128 => "uint128",
            PixelType::F128 => "float128",
        })
    }
    /// set the view's data type from a numpy-style dtype name
    #[setter]
    fn set_dtype(&mut self, dtype: String) -> PyResult<()> {
        self.dtype = dtype.parse()?;
        Ok(())
    }
    /// get the maximum overall or along a given axis
    ///
    /// Mirrors numpy's signature, but only `axis` is supported; passing any
    /// other non-default argument raises NotImplemented.
    #[allow(clippy::too_many_arguments)]
    #[pyo3(signature = (axis=None, dtype=None, out=None, keepdims=false, initial=0, r#where=true), text_signature = "axis: str | int")]
    fn max<'py>(
        &self,
        py: Python<'py>,
        axis: Option<Bound<'py, PyAny>>,
        dtype: Option<Bound<'py, PyAny>>,
        out: Option<Bound<'py, PyAny>>,
        keepdims: bool,
        initial: Option<usize>,
        r#where: bool,
    ) -> PyResult<Bound<'py, PyAny>> {
        // only the default initial value (0) is accepted
        if let Some(i) = initial
            && i != 0
        {
            Err(Error::NotImplemented(
                "arguments beyond axis are not implemented".to_string(),
            ))?;
        }
        if dtype.is_some() || out.is_some() || keepdims || !r#where {
            Err(Error::NotImplemented(
                "arguments beyond axis are not implemented".to_string(),
            ))?;
        }
        if let Some(axis) = axis {
            // along an axis: lazy max projection, returned as a new view
            PyView {
                dtype: self.dtype.clone(),
                view: self.view.max_proj(self.get_ax(axis)?)?,
                ome: self.ome.clone(),
            }
            .into_bound_py_any(py)
        } else {
            // overall: a single scalar of the view's dtype
            Ok(match self.dtype {
                PixelType::I8 => self.view.max::<i8>()?.into_pyobject(py)?.into_any(),
                PixelType::U8 => self.view.max::<u8>()?.into_pyobject(py)?.into_any(),
                PixelType::I16 => self.view.max::<i16>()?.into_pyobject(py)?.into_any(),
                PixelType::U16 => self.view.max::<u16>()?.into_pyobject(py)?.into_any(),
                PixelType::I32 => self.view.max::<i32>()?.into_pyobject(py)?.into_any(),
                PixelType::U32 => self.view.max::<u32>()?.into_pyobject(py)?.into_any(),
                PixelType::F32 => self.view.max::<f32>()?.into_pyobject(py)?.into_any(),
                PixelType::F64 => self.view.max::<f64>()?.into_pyobject(py)?.into_any(),
                PixelType::I64 => self.view.max::<i64>()?.into_pyobject(py)?.into_any(),
                PixelType::U64 => self.view.max::<u64>()?.into_pyobject(py)?.into_any(),
                PixelType::I128 => self.view.max::<i64>()?.into_pyobject(py)?.into_any(),
                PixelType::U128 => self.view.max::<u64>()?.into_pyobject(py)?.into_any(),
                PixelType::F128 => self.view.max::<f64>()?.into_pyobject(py)?.into_any(),
            })
        }
    }
    /// get the minimum overall or along a given axis
    ///
    /// Mirrors numpy's signature, but only `axis` is supported; passing any
    /// other non-default argument raises NotImplemented.
    #[allow(clippy::too_many_arguments)]
    #[pyo3(signature = (axis=None, dtype=None, out=None, keepdims=false, initial=0, r#where=true), text_signature = "axis: str | int")]
    fn min<'py>(
        &self,
        py: Python<'py>,
        axis: Option<Bound<'py, PyAny>>,
        dtype: Option<Bound<'py, PyAny>>,
        out: Option<Bound<'py, PyAny>>,
        keepdims: bool,
        initial: Option<usize>,
        r#where: bool,
    ) -> PyResult<Bound<'py, PyAny>> {
        // only the default initial value (0) is accepted
        if let Some(i) = initial
            && i != 0
        {
            Err(Error::NotImplemented(
                "arguments beyond axis are not implemented".to_string(),
            ))?;
        }
        if dtype.is_some() || out.is_some() || keepdims || !r#where {
            Err(Error::NotImplemented(
                "arguments beyond axis are not implemented".to_string(),
            ))?;
        }
        if let Some(axis) = axis {
            // along an axis: lazy min projection, returned as a new view
            PyView {
                dtype: self.dtype.clone(),
                view: self.view.min_proj(self.get_ax(axis)?)?,
                ome: self.ome.clone(),
            }
            .into_bound_py_any(py)
        } else {
            // overall: a single scalar of the view's dtype
            Ok(match self.dtype {
                PixelType::I8 => self.view.min::<i8>()?.into_pyobject(py)?.into_any(),
                PixelType::U8 => self.view.min::<u8>()?.into_pyobject(py)?.into_any(),
                PixelType::I16 => self.view.min::<i16>()?.into_pyobject(py)?.into_any(),
                PixelType::U16 => self.view.min::<u16>()?.into_pyobject(py)?.into_any(),
                PixelType::I32 => self.view.min::<i32>()?.into_pyobject(py)?.into_any(),
                PixelType::U32 => self.view.min::<u32>()?.into_pyobject(py)?.into_any(),
                PixelType::F32 => self.view.min::<f32>()?.into_pyobject(py)?.into_any(),
                PixelType::F64 => self.view.min::<f64>()?.into_pyobject(py)?.into_any(),
                PixelType::I64 => self.view.min::<i64>()?.into_pyobject(py)?.into_any(),
                PixelType::U64 => self.view.min::<u64>()?.into_pyobject(py)?.into_any(),
                PixelType::I128 => self.view.min::<i64>()?.into_pyobject(py)?.into_any(),
                PixelType::U128 => self.view.min::<u64>()?.into_pyobject(py)?.into_any(),
                PixelType::F128 => self.view.min::<f64>()?.into_pyobject(py)?.into_any(),
            })
        }
    }
#[pyo3(signature = (axis=None, dtype=None, out=None, keepdims=false, *, r#where=true), text_signature = "axis: str | int")]
fn mean<'py>(
&self,
py: Python<'py>,
axis: Option<Bound<'py, PyAny>>,
dtype: Option<Bound<'py, PyAny>>,
out: Option<Bound<'py, PyAny>>,
keepdims: bool,
r#where: bool,
) -> PyResult<Bound<'py, PyAny>> {
if dtype.is_some() || out.is_some() || keepdims || !r#where {
Err(Error::NotImplemented(
"arguments beyond axis are not implemented".to_string(),
))?;
}
if let Some(axis) = axis {
let dtype = if let PixelType::F32 = self.dtype {
PixelType::F32
} else {
PixelType::F64
};
PyView {
dtype,
view: self.view.mean_proj(self.get_ax(axis)?)?,
ome: self.ome.clone(),
}
.into_bound_py_any(py)
} else {
Ok(match self.dtype {
PixelType::F32 => self.view.mean::<f32>()?.into_pyobject(py)?.into_any(),
_ => self.view.mean::<f64>()?.into_pyobject(py)?.into_any(),
})
}
}
/// get the sum overall or along a given axis
#[allow(clippy::too_many_arguments)]
#[pyo3(signature = (axis=None, dtype=None, out=None, keepdims=false, initial=0, r#where=true), text_signature = "axis: str | int")]
fn sum<'py>(
&self,
py: Python<'py>,
axis: Option<Bound<'py, PyAny>>,
dtype: Option<Bound<'py, PyAny>>,
out: Option<Bound<'py, PyAny>>,
keepdims: bool,
initial: Option<usize>,
r#where: bool,
) -> PyResult<Bound<'py, PyAny>> {
if let Some(i) = initial
&& i != 0
{
Err(Error::NotImplemented(
"arguments beyond axis are not implemented".to_string(),
))?;
}
if dtype.is_some() || out.is_some() || keepdims || !r#where {
Err(Error::NotImplemented(
"arguments beyond axis are not implemented".to_string(),
))?;
}
let dtype = match self.dtype {
PixelType::I8 => PixelType::I16,
PixelType::U8 => PixelType::U16,
PixelType::I16 => PixelType::I32,
PixelType::U16 => PixelType::U32,
PixelType::I32 => PixelType::I64,
PixelType::U32 => PixelType::U64,
PixelType::F32 => PixelType::F32,
PixelType::F64 => PixelType::F64,
PixelType::I64 => PixelType::I128,
PixelType::U64 => PixelType::U128,
PixelType::I128 => PixelType::I128,
PixelType::U128 => PixelType::U128,
PixelType::F128 => PixelType::F128,
};
if let Some(axis) = axis {
PyView {
dtype,
view: self.view.sum_proj(self.get_ax(axis)?)?,
ome: self.ome.clone(),
}
.into_bound_py_any(py)
} else {
Ok(match self.dtype {
PixelType::F32 => self.view.sum::<f32>()?.into_pyobject(py)?.into_any(),
PixelType::F64 => self.view.sum::<f64>()?.into_pyobject(py)?.into_any(),
PixelType::I64 => self.view.sum::<i64>()?.into_pyobject(py)?.into_any(),
PixelType::U64 => self.view.sum::<u64>()?.into_pyobject(py)?.into_any(),
PixelType::I128 => self.view.sum::<i64>()?.into_pyobject(py)?.into_any(),
PixelType::U128 => self.view.sum::<u64>()?.into_pyobject(py)?.into_any(),
PixelType::F128 => self.view.sum::<f64>()?.into_pyobject(py)?.into_any(),
_ => self.view.sum::<i64>()?.into_pyobject(py)?.into_any(),
})
}
}
/// true when the view spans more than one z slice
#[getter]
fn z_stack(&self) -> PyResult<bool> {
    Ok(self.view.size_ax(Axis::Z).is_some_and(|s| s > 1))
}
/// true when the view spans more than one time point
#[getter]
fn time_series(&self) -> PyResult<bool> {
    Ok(self.view.size_ax(Axis::T).is_some_and(|s| s > 1))
}
/// pixel size from the ome metadata, if recorded
#[getter]
fn pixel_size(&self) -> PyResult<Option<f64>> {
    Ok(self.ome.pixel_size()?)
}
/// distance between z slices from the ome metadata, if recorded
#[getter]
fn delta_z(&self) -> PyResult<Option<f64>> {
    Ok(self.ome.delta_z()?)
}
/// interval between time points from the ome metadata, if recorded
#[getter]
fn time_interval(&self) -> PyResult<Option<f64>> {
    Ok(self.ome.time_interval()?)
}
/// exposure time of the given channel from the ome metadata, if recorded
fn exposure_time(&self, channel: usize) -> PyResult<Option<f64>> {
    Ok(self.ome.exposure_time(channel)?)
}
/// camera binning factor of the given channel from the ome metadata, if recorded
fn binning(&self, channel: usize) -> Option<usize> {
    self.ome.binning(channel)
}
/// laser wavelength for the given channel from the ome metadata, if recorded
// NOTE(review): despite the plural name, this returns a single value per channel
fn laser_wavelengths(&self, channel: usize) -> PyResult<Option<f64>> {
    Ok(self.ome.laser_wavelengths(channel)?)
}
/// laser power for the given channel from the ome metadata, if recorded
fn laser_power(&self, channel: usize) -> PyResult<Option<f64>> {
    Ok(self.ome.laser_powers(channel)?)
}
/// name of the objective from the ome metadata, if recorded
#[getter]
fn objective_name(&self) -> Option<String> {
    self.ome.objective_name()
}
/// magnification of the objective from the ome metadata, if recorded
#[getter]
fn magnification(&self) -> Option<f64> {
    self.ome.magnification()
}
/// name of the tube lens from the ome metadata, if recorded
#[getter]
fn tube_lens_name(&self) -> Option<String> {
    self.ome.tube_lens_name()
}
/// name of the filter set used for the given channel, if recorded
fn filter_set_name(&self, channel: usize) -> Option<String> {
    self.ome.filter_set_name(channel)
}
/// detector gain used for the given channel, if recorded
fn gain(&self, channel: usize) -> Option<f64> {
    self.ome.gain(channel)
}
/// gives a helpful summary of the recorded experiment as a multi-line string
fn summary(&self) -> PyResult<String> {
    Ok(self.view.summary()?)
}
}
/// Locate the file of the installed `ndbioimage` python module.
///
/// Panics if the python interpreter cannot import `ndbioimage`.
pub(crate) fn ndbioimage_file() -> PathBuf {
    Python::attach(|py| {
        let module = py.import("ndbioimage").unwrap();
        PathBuf::from(module.filename().unwrap().to_string())
    })
}
/// Python-facing wrapper that downloads the bioformats jar, optionally the
/// GPL-licensed formats package as well.
#[pyfunction]
#[pyo3(name = "download_bioformats")]
fn py_download_bioformats(gpl_formats: bool) -> PyResult<()> {
    Ok(download_bioformats(gpl_formats)?)
}
/// Python module definition: registers the classes and functions exported to python.
#[pymodule]
#[pyo3(name = "ndbioimage_rs")]
fn ndbioimage_rs(m: &Bound<PyModule>) -> PyResult<()> {
    m.add_class::<PyView>()?;
    m.add_class::<ViewConstructor>()?;
    m.add_function(wrap_pyfunction!(py_download_bioformats, m)?)?;
    Ok(())
}

487
src/reader.rs Normal file
View File

@@ -0,0 +1,487 @@
use crate::axes::Axis;
use crate::bioformats;
use crate::bioformats::{DebugTools, ImageReader, MetadataTools};
use crate::error::Error;
use crate::view::View;
use ndarray::{Array2, Ix5, s};
use num::{FromPrimitive, Zero};
use ome_metadata::Ome;
use serde::{Deserialize, Serialize};
use std::any::type_name;
use std::fmt::Debug;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::Arc;
use thread_local::ThreadLocal;
/// Split a path into the path itself and an optional series number: a file
/// name of the form `pos<n>` (case-insensitive) is interpreted as series `n`.
///
/// Errors with [`Error::InvalidFileName`] if the path has no file name or the
/// file name is not valid unicode.
pub fn split_path_and_series<P>(path: P) -> Result<(PathBuf, Option<usize>), Error>
where
    P: Into<PathBuf>,
{
    let path = path.into();
    let name = path
        .file_name()
        .and_then(|n| n.to_str())
        .ok_or(Error::InvalidFileName)?;
    let series = if name.to_lowercase().starts_with("pos") {
        // everything after "pos" must parse as a number, otherwise no series
        name.get(3..).and_then(|s| s.parse::<usize>().ok())
    } else {
        None
    };
    Ok((path, series))
}
/// Pixel types (u)int(8/16/32) or float(32/64), (u/i)(64/128) are not included in bioformats
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PixelType {
    I8,
    U8,
    I16,
    U16,
    I32,
    U32,
    F32,
    F64,
    // the types below are not produced by bioformats, but can arise from
    // rust-side operations (e.g. sum promotion)
    I64,
    U64,
    I128,
    U128,
    F128,
}
impl TryFrom<i32> for PixelType {
    type Error = Error;
    /// Convert from the numeric pixel-type code as returned by the bioformats
    /// reader (`Reader::get_pixel_type`); codes 8..=12 are this crate's own
    /// extension beyond the bioformats types.
    fn try_from(value: i32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(PixelType::I8),
            1 => Ok(PixelType::U8),
            2 => Ok(PixelType::I16),
            3 => Ok(PixelType::U16),
            4 => Ok(PixelType::I32),
            5 => Ok(PixelType::U32),
            6 => Ok(PixelType::F32),
            7 => Ok(PixelType::F64),
            8 => Ok(PixelType::I64),
            9 => Ok(PixelType::U64),
            10 => Ok(PixelType::I128),
            11 => Ok(PixelType::U128),
            12 => Ok(PixelType::F128),
            _ => Err(Error::UnknownPixelType(value.to_string())),
        }
    }
}
impl FromStr for PixelType {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"int8" | "i8" => Ok(PixelType::I8),
"uint8" | "u8" => Ok(PixelType::U8),
"int16" | "i16" => Ok(PixelType::I16),
"uint16" | "u16" => Ok(PixelType::U16),
"int32" | "i32" => Ok(PixelType::I32),
"uint32" | "u32" => Ok(PixelType::U32),
"float" | "f32" | "float32" => Ok(PixelType::F32),
"double" | "f64" | "float64" => Ok(PixelType::F64),
"int64" | "i64" => Ok(PixelType::I64),
"uint64" | "u64" => Ok(PixelType::U64),
"int128" | "i128" => Ok(PixelType::I128),
"uint128" | "u128" => Ok(PixelType::U128),
"extended" | "f128" => Ok(PixelType::F128),
_ => Err(Error::UnknownPixelType(s.to_string())),
}
}
}
/// Struct containing frame data in one of eight pixel types. Cast to `Array2<T>` using try_into.
#[allow(clippy::upper_case_acronyms)]
#[derive(Clone, Debug)]
pub enum Frame {
    I8(Array2<i8>),
    U8(Array2<u8>),
    I16(Array2<i16>),
    U16(Array2<u16>),
    I32(Array2<i32>),
    U32(Array2<u32>),
    F32(Array2<f32>),
    F64(Array2<f64>),
    I64(Array2<i64>),
    U64(Array2<u64>),
    I128(Array2<i128>),
    U128(Array2<u128>),
    F128(Array2<f64>), // f128 is nightly
}
/// Implement `From<Array2<T>> for Frame` for every directly supported element type.
macro_rules! impl_frame_cast {
    ($($t:tt: $s:ident $(,)?)*) => {
        $(
            impl From<Array2<$t>> for Frame {
                fn from(value: Array2<$t>) -> Self {
                    Frame::$s(value)
                }
            }
        )*
    };
}
impl_frame_cast! {
    u8: U8
    i8: I8
    i16: I16
    u16: U16
    i32: I32
    u32: U32
    f32: F32
    f64: F64
    i64: I64
    u64: U64
    i128: I128
    u128: U128
}
// On 32-bit targets usize/isize are 32 bits wide, but they are still distinct
// types from u32/i32, so the arrays have to be converted element-wise. The
// previous `impl_frame_cast! { usize: UINT32 isize: INT32 }` referenced
// variants (`Frame::UINT32`/`Frame::INT32`) that do not exist and would not
// have compiled on those targets.
#[cfg(target_pointer_width = "32")]
impl From<Array2<usize>> for Frame {
    fn from(value: Array2<usize>) -> Self {
        Frame::U32(value.mapv(|x| x as u32))
    }
}
#[cfg(target_pointer_width = "32")]
impl From<Array2<isize>> for Frame {
    fn from(value: Array2<isize>) -> Self {
        Frame::I32(value.mapv(|x| x as i32))
    }
}
impl<T> TryInto<Array2<T>> for Frame
where
T: FromPrimitive + Zero + 'static,
{
type Error = Error;
fn try_into(self) -> Result<Array2<T>, Self::Error> {
let mut err = Ok(());
let arr = match self {
Frame::I8(v) => v.mapv_into_any(|x| {
T::from_i8(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::U8(v) => v.mapv_into_any(|x| {
T::from_u8(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::I16(v) => v.mapv_into_any(|x| {
T::from_i16(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::U16(v) => v.mapv_into_any(|x| {
T::from_u16(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::I32(v) => v.mapv_into_any(|x| {
T::from_i32(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::U32(v) => v.mapv_into_any(|x| {
T::from_u32(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::F32(v) => v.mapv_into_any(|x| {
T::from_f32(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::F64(v) | Frame::F128(v) => v.mapv_into_any(|x| {
T::from_f64(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::I64(v) => v.mapv_into_any(|x| {
T::from_i64(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::U64(v) => v.mapv_into_any(|x| {
T::from_u64(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::I128(v) => v.mapv_into_any(|x| {
T::from_i128(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
Frame::U128(v) => v.mapv_into_any(|x| {
T::from_u128(x).unwrap_or_else(|| {
err = Err(Error::Cast(x.to_string(), type_name::<T>().to_string()));
T::zero()
})
}),
};
match err {
Err(err) => Err(err),
Ok(()) => Ok(arr),
}
}
}
/// Reader interface to file. Use get_frame to get data.
#[derive(Serialize, Deserialize)]
pub struct Reader {
    // per-thread bioformats reader handle; skipped by serde and recreated
    // lazily (see get_reader) after deserialization
    #[serde(skip)]
    image_reader: ThreadLocal<ImageReader>,
    /// path to file
    pub path: PathBuf,
    /// which (if more than 1) of the series in the file to open
    pub series: usize,
    /// size x (horizontal)
    pub size_x: usize,
    /// size y (vertical)
    pub size_y: usize,
    /// size c (# channels)
    pub size_c: usize,
    /// size z (# slices)
    pub size_z: usize,
    /// size t (# time/frames)
    pub size_t: usize,
    /// pixel type ((u)int(8/16/32) or float(32/64))
    pub pixel_type: PixelType,
    // whether pixel data in the file is stored little-endian
    little_endian: bool,
}
impl Deref for Reader {
    type Target = ImageReader;
    /// Deref to the thread-local bioformats reader.
    ///
    /// NOTE(review): panics if the reader cannot be (re)created for this thread.
    fn deref(&self) -> &Self::Target {
        self.get_reader().unwrap()
    }
}
impl Clone for Reader {
    /// Clone by reopening the same file and series.
    ///
    /// NOTE(review): panics if the file can no longer be opened.
    fn clone(&self) -> Self {
        Reader::new(&self.path, self.series).unwrap()
    }
}
impl Debug for Reader {
    // manual impl: the thread-local image_reader handle is not Debug and is omitted
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Reader")
            .field("path", &self.path)
            .field("series", &self.series)
            .field("size_x", &self.size_x)
            .field("size_y", &self.size_y)
            .field("size_c", &self.size_c)
            .field("size_z", &self.size_z)
            .field("size_t", &self.size_t)
            .field("pixel_type", &self.pixel_type)
            .field("little_endian", &self.little_endian)
            .finish()
    }
}
impl Reader {
    /// Create a new reader for the image file at a path, and open series #.
    ///
    /// Queries bioformats once for the dimensions, pixel type and byte order
    /// and caches them in the struct fields.
    pub fn new<P>(path: P, series: usize) -> Result<Self, Error>
    where
        P: AsRef<Path>,
    {
        // quiet the java side: only log errors
        DebugTools::set_root_level("ERROR")?;
        let mut reader = Reader {
            image_reader: ThreadLocal::default(),
            path: path.as_ref().to_path_buf(),
            series,
            // placeholder values, filled in from bioformats below
            size_x: 0,
            size_y: 0,
            size_c: 0,
            size_z: 0,
            size_t: 0,
            pixel_type: PixelType::I8,
            little_endian: false,
        };
        // open the file now so that an unreadable file is an Err here instead
        // of a panic later
        reader.set_reader()?;
        reader.size_x = reader.get_size_x()? as usize;
        reader.size_y = reader.get_size_y()? as usize;
        reader.size_c = reader.get_size_c()? as usize;
        reader.size_z = reader.get_size_z()? as usize;
        reader.size_t = reader.get_size_t()? as usize;
        reader.pixel_type = PixelType::try_from(reader.get_pixel_type()?)?;
        reader.little_endian = reader.is_little_endian()?;
        Ok(reader)
    }
    /// Get (or lazily create) the bioformats ImageReader for the current
    /// thread; reader instances are kept per thread via `ThreadLocal`.
    fn get_reader(&self) -> Result<&ImageReader, Error> {
        self.image_reader.get_or_try(|| {
            let reader = ImageReader::new()?;
            let meta_data_tools = MetadataTools::new()?;
            let ome_meta = meta_data_tools.create_ome_xml_metadata()?;
            reader.set_metadata_store(ome_meta)?;
            reader.set_id(self.path.to_str().ok_or(Error::InvalidFileName)?)?;
            reader.set_series(self.series as i32)?;
            Ok(reader)
        })
    }
    /// Ensure a reader exists for the current thread, discarding the reference.
    pub fn set_reader(&self) -> Result<(), Error> {
        self.get_reader().map(|_| ())
    }
    /// Get ome metadata as ome structure
    ///
    /// When the file contains several images, only the one belonging to the
    /// opened series is kept.
    pub fn get_ome(&self) -> Result<Ome, Error> {
        let mut ome = self.ome_xml()?.parse::<Ome>()?;
        if ome.image.len() > 1 {
            ome.image = vec![ome.image[self.series].clone()];
        }
        Ok(ome)
    }
    /// Get ome metadata as xml string
    pub fn get_ome_xml(&self) -> Result<String, Error> {
        self.ome_xml()
    }
    /// Extract one channel from an interleaved (e.g. RGBRGB...) byte buffer by
    /// taking every `size_c`-th pixel starting at `channel`; the chunk size is
    /// the pixel size in bytes for the current pixel type.
    fn deinterleave(&self, bytes: Vec<u8>, channel: usize) -> Result<Vec<u8>, Error> {
        let chunk_size = match self.pixel_type {
            PixelType::I8 => 1,
            PixelType::U8 => 1,
            PixelType::I16 => 2,
            PixelType::U16 => 2,
            PixelType::I32 => 4,
            PixelType::U32 => 4,
            PixelType::F32 => 4,
            PixelType::F64 => 8,
            PixelType::I64 => 8,
            PixelType::U64 => 8,
            PixelType::I128 => 16,
            PixelType::U128 => 16,
            // f128 data is handled as f64 (f128 is nightly-only)
            PixelType::F128 => 8,
        };
        Ok(bytes
            .chunks(chunk_size)
            .skip(channel)
            .step_by(self.size_c)
            .flat_map(|a| a.to_vec())
            .collect())
    }
    /// Retrieve the frame at channel c, slice z and time t.
    #[allow(clippy::if_same_then_else)]
    pub fn get_frame(&self, c: usize, z: usize, t: usize) -> Result<Frame, Error> {
        let bytes = if self.is_rgb()? && self.is_interleaved()? {
            // interleaved rgb: one plane holds all channels, pick ours out
            let index = self.get_index(z as i32, 0, t as i32)?;
            self.deinterleave(self.open_bytes(index)?, c)?
        } else if self.get_rgb_channel_count()? > 1 {
            // non-interleaved rgb: let bioformats separate the channels
            let channel_separator = bioformats::ChannelSeparator::new(self)?;
            let index = channel_separator.get_index(z as i32, c as i32, t as i32)?;
            channel_separator.open_bytes(index)?
        } else if self.is_indexed()? {
            // indexed color: raw index values are returned as-is for now
            let index = self.get_index(z as i32, c as i32, t as i32)?;
            self.open_bytes(index)?
            // TODO: apply LUT
            // let _bytes_lut = match self.pixel_type {
            // PixelType::INT8 | PixelType::UINT8 => {
            // let _lut = self.image_reader.get_8bit_lookup_table()?;
            // }
            // PixelType::INT16 | PixelType::UINT16 => {
            // let _lut = self.image_reader.get_16bit_lookup_table()?;
            // }
            // _ => {}
            // };
        } else {
            let index = self.get_index(z as i32, c as i32, t as i32)?;
            self.open_bytes(index)?
        };
        self.bytes_to_frame(bytes)
    }
    /// Decode a raw byte plane into a typed `Frame`, honouring the file's
    /// pixel type and byte order.
    fn bytes_to_frame(&self, bytes: Vec<u8>) -> Result<Frame, Error> {
        // `$t, <$n` decodes little-endian, `$t, >$n` big-endian, n bytes/pixel
        macro_rules! get_frame {
            ($t:tt, <$n:expr) => {
                Ok(Frame::from(Array2::from_shape_vec(
                    (self.size_y, self.size_x),
                    bytes
                        .chunks($n)
                        .map(|x| $t::from_le_bytes(x.try_into().unwrap()))
                        .collect(),
                )?))
            };
            ($t:tt, >$n:expr) => {
                Ok(Frame::from(Array2::from_shape_vec(
                    (self.size_y, self.size_x),
                    bytes
                        .chunks($n)
                        .map(|x| $t::from_be_bytes(x.try_into().unwrap()))
                        .collect(),
                )?))
            };
        }
        match (&self.pixel_type, self.little_endian) {
            (PixelType::I8, true) => get_frame!(i8, <1),
            (PixelType::U8, true) => get_frame!(u8, <1),
            (PixelType::I16, true) => get_frame!(i16, <2),
            (PixelType::U16, true) => get_frame!(u16, <2),
            (PixelType::I32, true) => get_frame!(i32, <4),
            (PixelType::U32, true) => get_frame!(u32, <4),
            (PixelType::F32, true) => get_frame!(f32, <4),
            (PixelType::F64, true) => get_frame!(f64, <8),
            (PixelType::I64, true) => get_frame!(i64, <8),
            (PixelType::U64, true) => get_frame!(u64, <8),
            (PixelType::I128, true) => get_frame!(i128, <16),
            (PixelType::U128, true) => get_frame!(u128, <16),
            (PixelType::F128, true) => get_frame!(f64, <8),
            (PixelType::I8, false) => get_frame!(i8, >1),
            (PixelType::U8, false) => get_frame!(u8, >1),
            (PixelType::I16, false) => get_frame!(i16, >2),
            (PixelType::U16, false) => get_frame!(u16, >2),
            (PixelType::I32, false) => get_frame!(i32, >4),
            (PixelType::U32, false) => get_frame!(u32, >4),
            (PixelType::F32, false) => get_frame!(f32, >4),
            (PixelType::F64, false) => get_frame!(f64, >8),
            (PixelType::I64, false) => get_frame!(i64, >8),
            (PixelType::U64, false) => get_frame!(u64, >8),
            (PixelType::I128, false) => get_frame!(i128, >16),
            (PixelType::U128, false) => get_frame!(u128, >16),
            (PixelType::F128, false) => get_frame!(f64, >8),
        }
    }
    /// get a sliceable view on the image file
    pub fn view(&self) -> View<Ix5> {
        // the full extent of the file in czt-yx order
        let slice = s![
            0..self.size_c,
            0..self.size_z,
            0..self.size_t,
            0..self.size_y,
            0..self.size_x
        ];
        View::new(
            Arc::new(self.clone()),
            slice.as_ref().to_vec(),
            vec![Axis::C, Axis::Z, Axis::T, Axis::Y, Axis::X],
        )
    }
}
impl Drop for Reader {
    /// Close the underlying bioformats reader for this thread when dropped.
    fn drop(&mut self) {
        if let Ok(reader) = self.get_reader() {
            // Ignore close errors: the previous `.unwrap()` could panic inside
            // drop, and a panic while already unwinding aborts the process.
            let _ = reader.close();
        }
    }
}

205
src/stats.rs Normal file
View File

@@ -0,0 +1,205 @@
use crate::error::Error;
use ndarray::{Array, ArrayD, ArrayView, Axis, Dimension, RemoveAxis};
/// a trait to define the min, max, sum and mean operations along an axis
pub trait MinMax {
    /// the array type produced by reducing along one axis
    type Output;
    /// maximum along `axis`
    fn max(self, axis: usize) -> Result<Self::Output, Error>;
    /// minimum along `axis`
    fn min(self, axis: usize) -> Result<Self::Output, Error>;
    /// sum along `axis`
    fn sum(self, axis: usize) -> Result<Self::Output, Error>;
    /// arithmetic mean along `axis`
    fn mean(self, axis: usize) -> Result<Self::Output, Error>;
}
/// Implement [`MinMax`] for float `ArrayView`s; min/max fold each lane from
/// +/- infinity with `f32::max`/`f64::max` (NaN values are ignored).
macro_rules! impl_frame_stats_float_view {
    ($($t:tt),+ $(,)?) => {
        $(
            impl<D> MinMax for ArrayView<'_, $t, D>
            where
                D: Dimension + RemoveAxis,
            {
                type Output = Array<$t, D::Smaller>;
                fn max(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| {
                        lane.fold($t::NEG_INFINITY, |acc, &v| acc.max(v))
                    }))
                }
                fn min(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| {
                        lane.fold($t::INFINITY, |acc, &v| acc.min(v))
                    }))
                }
                fn sum(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.sum_axis(Axis(axis)))
                }
                fn mean(self, axis: usize) -> Result<Self::Output, Error> {
                    self.mean_axis(Axis(axis)).ok_or(Error::NoMean)
                }
            }
        )*
    };
}
/// Implement [`MinMax`] for integer `ArrayView`s; min/max use `Iterator`'s
/// total ordering and panic if a lane along `axis` is empty.
macro_rules! impl_frame_stats_int_view {
    ($($t:tt),+ $(,)?) => {
        $(
            impl<D> MinMax for ArrayView<'_, $t, D>
            where
                D: Dimension + RemoveAxis,
            {
                type Output = Array<$t, D::Smaller>;
                fn max(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| *lane.iter().max().unwrap()))
                }
                fn min(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| *lane.iter().min().unwrap()))
                }
                fn sum(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.sum_axis(Axis(axis)))
                }
                fn mean(self, axis: usize) -> Result<Self::Output, Error> {
                    self.mean_axis(Axis(axis)).ok_or(Error::NoMean)
                }
            }
        )*
    };
}
/// Implement [`MinMax`] for owned float `Array`s; same semantics as the view
/// variant above.
macro_rules! impl_frame_stats_float {
    ($($t:tt),+ $(,)?) => {
        $(
            impl<D> MinMax for Array<$t, D>
            where
                D: Dimension + RemoveAxis,
            {
                type Output = Array<$t, D::Smaller>;
                fn max(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| {
                        lane.fold($t::NEG_INFINITY, |acc, &v| acc.max(v))
                    }))
                }
                fn min(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| {
                        lane.fold($t::INFINITY, |acc, &v| acc.min(v))
                    }))
                }
                fn sum(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.sum_axis(Axis(axis)))
                }
                fn mean(self, axis: usize) -> Result<Self::Output, Error> {
                    self.mean_axis(Axis(axis)).ok_or(Error::NoMean)
                }
            }
        )*
    };
}
/// Implement [`MinMax`] for owned integer `Array`s; same semantics as the view
/// variant above (panics on an empty lane).
macro_rules! impl_frame_stats_int {
    ($($t:tt),+ $(,)?) => {
        $(
            impl<D> MinMax for Array<$t, D>
            where
                D: Dimension + RemoveAxis,
            {
                type Output = Array<$t, D::Smaller>;
                fn max(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| *lane.iter().max().unwrap()))
                }
                fn min(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.map_axis(Axis(axis), |lane| *lane.iter().min().unwrap()))
                }
                fn sum(self, axis: usize) -> Result<Self::Output, Error> {
                    Ok(self.sum_axis(Axis(axis)))
                }
                fn mean(self, axis: usize) -> Result<Self::Output, Error> {
                    self.mean_axis(Axis(axis)).ok_or(Error::NoMean)
                }
            }
        )*
    };
}
// concrete MinMax implementations for all supported element types, for both
// borrowed views and owned arrays
impl_frame_stats_float_view!(f32, f64);
impl_frame_stats_int_view!(
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize
);
impl_frame_stats_float!(f32, f64);
impl_frame_stats_int!(
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize
);

193
src/tiff.rs Normal file
View File

@@ -0,0 +1,193 @@
use crate::colors::Color;
use crate::error::Error;
use crate::metadata::Metadata;
use crate::reader::PixelType;
use crate::stats::MinMax;
use crate::view::{Number, View};
use indicatif::{ProgressBar, ProgressStyle};
use itertools::iproduct;
use ndarray::{Array0, Array1, Array2, ArrayD, Dimension};
use rayon::prelude::*;
use std::path::Path;
use std::sync::{Arc, Mutex};
use tiffwrite::{Bytes, Colors, Compression, IJTiffFile};
/// Options controlling how a view is written to an ImageJ tiff file.
#[derive(Clone)]
pub struct TiffOptions {
    // progress-bar style; a bar is shown while saving when this is Some
    bar: Option<ProgressStyle>,
    // tiff compression, zstd level 10 by default
    compression: Compression,
    // per-channel RGB colors; padded with white when fewer than size_c
    colors: Option<Vec<Vec<u8>>>,
    // whether an existing destination file may be deleted first
    overwrite: bool,
}
impl Default for TiffOptions {
    /// Defaults: no progress bar, zstd level 10 compression, no explicit
    /// colors, never overwrite an existing file.
    fn default() -> Self {
        Self {
            bar: None,
            compression: Compression::Zstd(10),
            colors: None,
            overwrite: false,
        }
    }
}
impl TiffOptions {
    /// Build options from cli-style arguments; an empty `colors` list leaves
    /// the colors unset, a missing compression falls back to zstd level 10.
    pub fn new(
        bar: bool,
        compression: Option<Compression>,
        colors: Vec<String>,
        overwrite: bool,
    ) -> Result<Self, Error> {
        let mut options = Self {
            bar: None,
            compression: compression.unwrap_or(Compression::Zstd(10)),
            colors: None,
            overwrite,
        };
        if bar {
            options.enable_bar()?;
        }
        if !colors.is_empty() {
            options.set_colors(&colors)?;
        }
        Ok(options)
    }
    /// show a progress bar while saving tiff
    pub fn enable_bar(&mut self) -> Result<(), Error> {
        self.bar = Some(ProgressStyle::with_template(
            "{spinner:.green} [{elapsed_precise}, {percent}%] [{wide_bar:.green/lime}] {pos:>7}/{len:7} ({eta_precise}, {per_sec:<5})",
        )?.progress_chars("▰▱▱"));
        Ok(())
    }
    /// do not show a progress bar while saving tiff
    pub fn disable_bar(&mut self) {
        self.bar = None;
    }
    /// save tiff with zstd compression (default)
    pub fn set_zstd_compression(&mut self) {
        self.compression = Compression::Zstd(10)
    }
    /// save tiff with zstd compression, choose a level between 7..=22
    pub fn set_zstd_compression_level(&mut self, level: i32) {
        self.compression = Compression::Zstd(level)
    }
    /// save tiff with deflate compression
    pub fn set_deflate_compression(&mut self) {
        self.compression = Compression::Deflate
    }
    /// set the per-channel display colors; accepts any string that parses as a `Color`
    pub fn set_colors(&mut self, colors: &[String]) -> Result<(), Error> {
        let colors = colors
            .iter()
            .map(|c| c.parse::<Color>())
            .collect::<Result<Vec<_>, Error>>()?;
        self.colors = Some(colors.into_iter().map(|c| c.to_rgb()).collect());
        Ok(())
    }
    /// set whether an existing destination file may be deleted before saving
    pub fn set_overwrite(&mut self, overwrite: bool) {
        self.overwrite = overwrite;
    }
}
impl<D> View<D>
where
    D: Dimension,
{
    /// save as tiff with a certain type
    ///
    /// Frames are fetched in parallel (rayon) while writes to the tiff file
    /// are serialized through a mutex. Errors with
    /// [`Error::FileAlreadyExists`] when the destination exists and
    /// `options.overwrite` is false.
    pub fn save_as_tiff_with_type<T, P>(&self, path: P, options: &TiffOptions) -> Result<(), Error>
    where
        P: AsRef<Path>,
        T: Bytes + Number + Send + Sync,
        ArrayD<T>: MinMax<Output = ArrayD<T>>,
        Array1<T>: MinMax<Output = Array0<T>>,
        Array2<T>: MinMax<Output = Array1<T>>,
    {
        let path = path.as_ref().to_path_buf();
        if path.exists() {
            if options.overwrite {
                std::fs::remove_file(&path)?;
            } else {
                return Err(Error::FileAlreadyExists(path.display().to_string()));
            }
        }
        let size_c = self.size_c();
        let size_z = self.size_z();
        let size_t = self.size_t();
        let mut tiff = IJTiffFile::new(path)?;
        tiff.set_compression(options.compression.clone());
        // propagate calibration from the ome metadata (scaled by 1e3) and
        // embed the full ome xml as the tiff comment
        let ome = self.get_ome()?;
        tiff.px_size = ome.pixel_size()?.map(|i| i / 1e3);
        tiff.time_interval = ome.time_interval()?.map(|i| i / 1e3);
        tiff.delta_z = ome.delta_z()?.map(|i| i / 1e3);
        tiff.comment = Some(self.ome_xml()?);
        if let Some(mut colors) = options.colors.clone() {
            // pad with white so every channel has a color
            while colors.len() < self.size_c {
                colors.push(vec![255, 255, 255]);
            }
            tiff.colors = Colors::Colors(colors);
        }
        let tiff = Arc::new(Mutex::new(tiff));
        // Sharing an optional progress bar removes the previous duplication of
        // the whole parallel loop into with-bar and without-bar variants.
        let bar = options.bar.as_ref().map(|style| {
            ProgressBar::new((size_c as u64) * (size_z as u64) * (size_t as u64))
                .with_style(style.clone())
        });
        iproduct!(0..size_c, 0..size_z, 0..size_t)
            .collect::<Vec<_>>()
            .into_par_iter()
            .map(|(c, z, t)| {
                if let Ok(mut tiff) = tiff.lock() {
                    tiff.save(&self.get_frame::<T, _>(c, z, t)?, c, z, t)?;
                    if let Some(bar) = &bar {
                        bar.inc(1);
                    }
                    Ok(())
                } else {
                    Err(Error::TiffLock)
                }
            })
            .collect::<Result<(), Error>>()?;
        if let Some(bar) = bar {
            bar.finish();
        }
        Ok(())
    }
    /// save as tiff with whatever pixel type the view has
    ///
    /// 128-bit integer types are narrowed to 64 bits and f128 to f64 before
    /// writing.
    pub fn save_as_tiff<P>(&self, path: P, options: &TiffOptions) -> Result<(), Error>
    where
        P: AsRef<Path>,
    {
        match self.pixel_type {
            PixelType::I8 => self.save_as_tiff_with_type::<i8, P>(path, options)?,
            PixelType::U8 => self.save_as_tiff_with_type::<u8, P>(path, options)?,
            PixelType::I16 => self.save_as_tiff_with_type::<i16, P>(path, options)?,
            PixelType::U16 => self.save_as_tiff_with_type::<u16, P>(path, options)?,
            PixelType::I32 => self.save_as_tiff_with_type::<i32, P>(path, options)?,
            PixelType::U32 => self.save_as_tiff_with_type::<u32, P>(path, options)?,
            PixelType::F32 => self.save_as_tiff_with_type::<f32, P>(path, options)?,
            PixelType::F64 => self.save_as_tiff_with_type::<f64, P>(path, options)?,
            PixelType::I64 => self.save_as_tiff_with_type::<i64, P>(path, options)?,
            PixelType::U64 => self.save_as_tiff_with_type::<u64, P>(path, options)?,
            PixelType::I128 => self.save_as_tiff_with_type::<i64, P>(path, options)?,
            PixelType::U128 => self.save_as_tiff_with_type::<u64, P>(path, options)?,
            PixelType::F128 => self.save_as_tiff_with_type::<f64, P>(path, options)?,
        }
        Ok(())
    }
}

1133
src/view.rs Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

BIN
tests/files/test.tif Normal file

Binary file not shown.

View File

@@ -1,27 +1,25 @@
import pickle
from multiprocessing import active_children
from pathlib import Path
import pytest
from ndbioimage import Imread, ReaderNotFoundError
from ndbioimage import Imread
@pytest.mark.parametrize('file', (Path(__file__).parent / 'files').iterdir())
@pytest.mark.parametrize(
"file",
[
file
for file in (Path(__file__).parent / "files").iterdir()
if not file.suffix == ".pzl"
],
)
def test_open(file):
try:
with Imread(file) as im:
mean = im[dict(c=0, z=0, t=0)].mean()
b = pickle.dumps(im)
jm = pickle.loads(b)
assert jm[dict(c=0, z=0, t=0)].mean() == mean
v = im.view()
assert v[dict(c=0, z=0, t=0)].mean() == mean
b = pickle.dumps(v)
w = pickle.loads(b)
assert w[dict(c=0, z=0, t=0)].mean() == mean
except ReaderNotFoundError:
assert len(Imread.__subclasses__()), 'No subclasses for Imread found.'
for child in active_children():
child.kill()
with Imread(file, axes="cztyx") as im:
mean = im[0, 0, 0].mean()
b = pickle.dumps(im)
jm = pickle.loads(b)
assert jm.get_frame(0, 0, 0).mean() == mean
b = pickle.dumps(im)
jm = pickle.loads(b)
assert jm[0, 0, 0].mean() == mean

View File

@@ -1,24 +1,41 @@
import tempfile
from itertools import combinations_with_replacement
from numbers import Number
from pathlib import Path
import numpy as np
import pytest
from tiffwrite import tiffwrite
from ndbioimage import Imread
r = np.random.randint(0, 255, (64, 64, 2, 3, 4))
im = Imread(r)
a = np.array(im)
@pytest.fixture
def array():
return np.random.randint(0, 255, (64, 64, 2, 3, 4), "uint8")
@pytest.mark.parametrize('s', combinations_with_replacement(
(0, -1, 1, slice(None), slice(0, 1), slice(-1, 0), slice(1, 1)), 5))
def test_slicing(s):
s_im, s_a = im[s], a[s]
@pytest.fixture()
def image(array):
with tempfile.TemporaryDirectory() as folder:
file = Path(folder) / "test.tif"
tiffwrite(file, array, "yxczt")
with Imread(file, axes="yxczt") as im:
yield im
@pytest.mark.parametrize(
"s",
combinations_with_replacement(
(0, -1, 1, slice(None), slice(0, 1), slice(-1, 0), slice(1, 1)), 5
),
)
def test_slicing(s, image, array):
s_im, s_a = image[s], array[s]
if isinstance(s_a, Number):
assert isinstance(s_im, Number)
assert s_im == s_a
else:
assert isinstance(s_im, Imread)
assert s_im.shape == s_a.shape
assert tuple(s_im.shape) == s_a.shape
assert np.all(s_im == s_a)

View File

@@ -1,19 +1,52 @@
import tempfile
from itertools import product
from pathlib import Path
import numpy as np
import pytest
from tiffwrite import tiffwrite
from ndbioimage import Imread
r = np.random.randint(0, 255, (64, 64, 2, 3, 4))
im = Imread(r)
a = np.array(im)
@pytest.fixture
def array():
return np.random.randint(0, 255, (64, 64, 2, 3, 4), "uint16")
@pytest.mark.parametrize('fun_and_axis', product(
(np.sum, np.nansum, np.min, np.nanmin, np.max, np.nanmax, np.argmin, np.argmax,
np.mean, np.nanmean, np.var, np.nanvar, np.std, np.nanstd), (None, 0, 1, 2, 3, 4)))
def test_ufuncs(fun_and_axis):
@pytest.fixture()
def image(array):
with tempfile.TemporaryDirectory() as folder:
file = Path(folder) / "test.tif"
tiffwrite(file, array, "yxczt")
with Imread(file, axes="yxczt") as im:
yield im
@pytest.mark.parametrize(
"fun_and_axis",
product(
(
np.sum,
np.nansum,
np.min,
np.nanmin,
np.max,
np.nanmax,
np.argmin,
np.argmax,
np.mean,
np.nanmean,
np.var,
np.nanvar,
np.std,
np.nanstd,
),
(None, 0, 1, 2, 3, 4),
),
)
def test_ufuncs(fun_and_axis, image, array):
fun, axis = fun_and_axis
assert np.all(np.isclose(fun(im, axis), fun(a, axis))), \
f'function {fun.__name__} over axis {axis} does not give the correct result'
assert np.all(np.isclose(np.asarray(fun(image, axis)), fun(array, axis))), (
f"function {fun.__name__} over axis {axis} does not give the correct result"
)