diff --git a/.gitignore b/.gitignore index 54248cb..69ded17 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ /tests/files/* /poetry.lock /dist/ +/uv.lock diff --git a/README.md b/README.md index b037443..dd0057b 100644 --- a/README.md +++ b/README.md @@ -13,17 +13,19 @@ Currently, it supports imagej tif files, czi files, micromanager tif sequences a ## Installation +One of: + ``` pip install ndbioimage -``` - -### Installation with option to write mp4 or mkv: -Work in progress! Make sure ffmpeg is installed. - -``` +pip install ndbioimage[bioformats] pip install ndbioimage[write] +pip install ndbioimage[bioformats, write] ``` +- bioformats: use [bio-formats](https://www.openmicroscopy.org/bio-formats/) +as a fallback when other readers cannot open a file. +- write: write an image to an mp4 or mkv file. Work in progress! Make sure ffmpeg is installed. + ## Usage ### Python diff --git a/ndbioimage/__init__.py b/ndbioimage/__init__.py index 63ea13d..47cdf77 100755 --- a/ndbioimage/__init__.py +++ b/ndbioimage/__init__.py @@ -30,19 +30,19 @@ from .transforms import Transform, Transforms # noqa: F401 try: __version__ = version(Path(__file__).parent.name) except Exception: # noqa - __version__ = 'unknown' + __version__ = "unknown" try: - with open(Path(__file__).parent.parent / '.git' / 'HEAD') as g: - head = g.read().split(':')[1].strip() - with open(Path(__file__).parent.parent / '.git' / head) as h: - __git_commit_hash__ = h.read().rstrip('\n') + with open(Path(__file__).parent.parent / ".git" / "HEAD") as g: + head = g.read().split(":")[1].strip() + with open(Path(__file__).parent.parent / ".git" / head) as h: + __git_commit_hash__ = h.read().rstrip("\n") except Exception: # noqa - __git_commit_hash__ = 'unknown' + __git_commit_hash__ = "unknown" -ureg.default_format = '~P' +ureg.default_format = "~P" set_application_registry(ureg) -warnings.filterwarnings('ignore', 'Reference to unknown ID') +warnings.filterwarnings("ignore", "Reference to unknown ID") Number = int | float | np.integer | np.floating @@ -51,13 +51,14 @@ class ReaderNotFoundError(Exception): class TransformTiff(IJTiffParallel): - """ transform frames in a parallel process to speed up saving """ + """transform frames in a parallel process to speed up saving""" + def __init__(self, image: Imread, *args: Any, **kwargs: Any) -> None: self.image = image super().__init__(*args, **kwargs) def parallel(self, frame: tuple[int, int, int]) -> tuple[FrameInfo]: - return (np.asarray(self.image(*frame)), 0, 0, 0), + return ((np.asarray(self.image(*frame)), 0, 0, 0),) class DequeDict(OrderedDict): @@ -88,7 +89,7 @@ def find(obj: Sequence[Any], **kwargs: Any) -> Any: pass -R = TypeVar('R') +R = TypeVar("R") def try_default(fun: Callable[..., R], default: Any, *args: Any, **kwargs: Any) -> R: @@ -100,13 +101,14 @@ def try_default(fun: Callable[..., R], default: Any, *args: Any, **kwargs: Any) def bioformats_ome(path: str | Path) -> OME: from .readers.bfread import jars + try: jvm = JVM(jars) # noqa ome_meta = jvm.metadata_tools.createOMEXMLMetadata() reader = jvm.image_reader() reader.setMetadataStore(ome_meta) reader.setId(str(path)) - ome = from_xml(str(ome_meta.dumpXML()), parser='lxml') + ome = from_xml(str(ome_meta.dumpXML()), parser="lxml") except Exception: # noqa print_exc() ome = model.OME() @@ -116,7 +118,7 @@ def bioformats_ome(path: str | Path) -> OME: class Shape(tuple): - def __new__(cls, shape: Sequence[int] | Shape, axes: str = 'yxczt') -> Shape: + def __new__(cls, shape: Sequence[int] | Shape, axes: str =
"yxczt") -> Shape: if isinstance(shape, Shape): axes = shape.axes # type: ignore new = super().__new__(cls, shape) @@ -133,11 +135,11 @@ class Shape(tuple): @cached_property def yxczt(self) -> tuple[int, int, int, int, int]: - return tuple(self[i] for i in 'yxczt') # type: ignore + return tuple(self[i] for i in "yxczt") # type: ignore class OmeCache(DequeDict): - """ prevent (potentially expensive) rereading of ome data by caching """ + """prevent (potentially expensive) rereading of ome data by caching""" instance = None @@ -173,8 +175,11 @@ class OmeCache(DequeDict): @staticmethod def path_and_lstat(path: str | Path) -> tuple[Path, Optional[os.stat_result], Optional[os.stat_result]]: path = Path(path) - return (path, (path.lstat() if path.exists() else None), - (path.with_suffix('.ome.xml').lstat() if path.with_suffix('.ome.xml').exists() else None)) + return ( + path, + (path.lstat() if path.exists() else None), + (path.with_suffix(".ome.xml").lstat() if path.with_suffix(".ome.xml").exists() else None), + ) def get_positions(path: str | Path) -> Optional[list[int]]: @@ -183,51 +188,51 @@ def get_positions(path: str | Path) -> Optional[list[int]]: class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): - """ class to read image files, while taking good care of important metadata, - currently optimized for .czi files, but can open anything that bioformats can handle - path: path to the image file - optional: - axes: order of axes, default: cztyx, but omitting any axes with lenght 1 - dtype: datatype to be used when returning frames + """class to read image files, while taking good care of important metadata, + currently optimized for .czi files, but can open anything that bioformats can handle + path: path to the image file + optional: + axes: order of axes, default: cztyx, but omitting any axes with length 1 + dtype: datatype to be used when returning frames - modify images on the fly with a decorator function: - define a function which takes an instance of this object, one image frame, - and the coordinates c, z, t as arguments, and one image frame as return - >> imread.frame_decorator = fun - then use imread as usually + modify images on the fly with a decorator function: + define a function which takes an instance of this object, one image frame, + and the coordinates c, z, t as arguments, and returns one image frame + >> imread.frame_decorator = fun + then use imread as usual - Examples: - >> im = Imread('/path/to/file.image', axes='czt) - >> im - << shows summary - >> im.shape - << (15, 26, 1000, 1000) - >> im.axes - << 'ztyx' - >> plt.imshow(im[1, 0]) - << plots frame at position z=1, t=0 (python type indexing) - >> plt.imshow(im[:, 0].max('z')) - << plots max-z projection at t=0 - >> im.pxsize_um - << 0.09708737864077668 image-plane pixel size in um - >> im.laserwavelengths - << [642, 488] - >> im.laserpowers - << [0.02, 0.0005] in % + Examples: + >> im = Imread('/path/to/file.image', axes='czt') + >> im + << shows summary + >> im.shape + << (15, 26, 1000, 1000) + >> im.axes + << 'ztyx' + >> plt.imshow(im[1, 0]) + << plots frame at position z=1, t=0 (python type indexing) + >> plt.imshow(im[:, 0].max('z')) + << plots max-z projection at t=0 + >> im.pxsize_um + << 0.09708737864077668 image-plane pixel size in um + >> im.laserwavelengths + << [642, 488] + >> im.laserpowers + << [0.02, 0.0005] in % - See __init__ and other functions for more ideas. + See __init__ and other functions for more ideas. - Subclassing: - Subclass AbstractReader to add more file types. 
A subclass should always have at least the following - methods: - staticmethod _can_open(path): returns True when the subclass can open the image in path - __frame__(self, c, z, t): this should return a single frame at channel c, slice z and time t - optional open(self): code to be run during initialization, e.g. to open a file handle - optional close(self): close the file in a proper way - optional class field priority: subclasses with lower priority will be tried first, default = 99 - optional get_ome(self) -> OME: return an OME structure with metadata, - if not present bioformats will be used to generate an OME - Any other method can be overridden as needed + Subclassing: + Subclass AbstractReader to add more file types. A subclass should always have at least the following + methods: + staticmethod _can_open(path): returns True when the subclass can open the image in path + __frame__(self, c, z, t): this should return a single frame at channel c, slice z and time t + optional open(self): code to be run during initialization, e.g. to open a file handle + optional close(self): close the file in a proper way + optional class field priority: subclasses with lower priority will be tried first, default = 99 + optional get_ome(self) -> OME: return an OME structure with metadata, + if not present bioformats will be used to generate an OME + Any other method can be overridden as needed """ isclosed: Optional[bool] @@ -255,18 +260,25 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): @staticmethod def get_subclass(path: Path | str | Any): if len(AbstractReader.__subclasses__()) == 0: - raise Exception('Restart python kernel please!') + raise Exception("Restart python kernel please!") path, _ = AbstractReader.split_path_series(path) for subclass in sorted(AbstractReader.__subclasses__(), key=lambda subclass_: subclass_.priority): if subclass._can_open(path): # noqa - do_not_pickle = (AbstractReader.do_not_pickle,) if isinstance(AbstractReader.do_not_pickle, str) \ + do_not_pickle = ( + (AbstractReader.do_not_pickle,) + if isinstance(AbstractReader.do_not_pickle, str) else AbstractReader.do_not_pickle - subclass_do_not_pickle = (subclass.do_not_pickle,) if isinstance(subclass.do_not_pickle, str) \ - else subclass.do_not_pickle if hasattr(subclass, 'do_not_pickle') else () + ) + subclass_do_not_pickle = ( + (subclass.do_not_pickle,) + if isinstance(subclass.do_not_pickle, str) + else subclass.do_not_pickle + if hasattr(subclass, "do_not_pickle") + else () + ) subclass.do_not_pickle = set(do_not_pickle).union(set(subclass_do_not_pickle)) return subclass - raise ReaderNotFoundError(f'No reader found for {path}.') - + raise ReaderNotFoundError(f"No reader found for {path}.") def __new__(cls, path: Path | str | Imread | Any = None, dtype: DTypeLike = None, axes: str = None) -> Imread: if cls is not Imread: @@ -274,20 +286,29 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): if isinstance(path, Imread): return path subclass = cls.get_subclass(path) - do_not_pickle = (AbstractReader.do_not_pickle,) if isinstance(AbstractReader.do_not_pickle, str) \ + do_not_pickle = ( + (AbstractReader.do_not_pickle,) + if isinstance(AbstractReader.do_not_pickle, str) else AbstractReader.do_not_pickle - subclass_do_not_pickle = (subclass.do_not_pickle,) if isinstance(subclass.do_not_pickle, str) \ - else subclass.do_not_pickle if hasattr(subclass, 'do_not_pickle') else () + ) + subclass_do_not_pickle = ( + (subclass.do_not_pickle,) + if isinstance(subclass.do_not_pickle, str) + else subclass.do_not_pickle 
+ if hasattr(subclass, "do_not_pickle") + else () + ) subclass.do_not_pickle = set(do_not_pickle).union(set(subclass_do_not_pickle)) return super().__new__(subclass) def __init__(self, *args: Any, **kwargs: Any): - def parse(base: Imread = None, # noqa - slice: tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] = None, # noqa - shape: tuple[int, ...] = (0, 0, 0, 0, 0), # noqa - dtype: DTypeLike = None, # noqa - frame_decorator: Callable[[Imread, np.ndarray, int, int, int], np.ndarray] = None # noqa - ) -> tuple[Any, ...]: + def parse( + base: Imread = None, # noqa + slice: tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] = None, # noqa + shape: tuple[int, ...] = (0, 0, 0, 0, 0), # noqa + dtype: DTypeLike = None, # noqa + frame_decorator: Callable[[Imread, np.ndarray, int, int, int], np.ndarray] = None, # noqa + ) -> tuple[Any, ...]: return base, slice, shape, dtype, frame_decorator base, slice, shape, dtype, frame_decorator = parse(*args, **kwargs) # noqa @@ -297,11 +318,18 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): self.dtype = dtype self.frame_decorator = frame_decorator self.transform = Transforms() - self.flags = dict(C_CONTIGUOUS=False, F_CONTIGUOUS=False, OWNDATA=False, WRITEABLE=False, - ALIGNED=False, WRITEBACKIFCOPY=False, UPDATEIFCOPY=False) + self.flags = dict( + C_CONTIGUOUS=False, + F_CONTIGUOUS=False, + OWNDATA=False, + WRITEABLE=False, + ALIGNED=False, + WRITEBACKIFCOPY=False, + UPDATEIFCOPY=False, + ) def __call__(self, c: int = None, z: int = None, t: int = None, x: int = None, y: int = None) -> np.ndarray: - """ same as im[] but allowing keyword axes, but slices need to made with slice() or np.s_ """ + """same as im[] but allowing keyword axes, but slices need to be made with slice() or np.s_""" return self[{k: slice(v) if v is None else v for k, v in dict(c=c, z=z, t=t, x=x, y=y).items()}] def __copy__(self) -> Imread: @@ -315,10 +343,12 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): if k not in a: yield k - for idx in unique_yield([key[:3] for key in self.cache.keys()], - product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t']))): + for idx in unique_yield( + [key[:3] for key in self.cache.keys()], + product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])), + ): yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) if item in np.asarray(self[in_idx]): return True return False @@ -329,15 +359,21 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): def __exit__(self, *args: Any, **kwargs: Any) -> None: if not self.isclosed: self.isclosed = True - if hasattr(self, 'close'): + if hasattr(self, "close"): self.close() - def __getitem__(self, n: int | Sequence[int] | Sequence[slice] | slice | type(Ellipsis) | - dict[str, int | Sequence[int] | Sequence[slice] | slice | type(Ellipsis)] - ) -> Number | Imread | np.ndarray: - """ slice like a numpy array but return an Imread instance """ + def __getitem__( + self, + n: int + | Sequence[int] + | Sequence[slice] + | slice + | type(Ellipsis) + | dict[str, int | Sequence[int] | Sequence[slice] | slice | type(Ellipsis)], + ) -> Number | Imread | np.ndarray: + """slice like a numpy array but return an Imread instance""" if self.isclosed: - raise OSError('file is closed') + raise OSError("file is closed") if isinstance(n, (slice, Number)): # None = : n = (n,) elif isinstance(n, type(Ellipsis)): @@ -349,7 +385,7 @@ class 
Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): # deal with ... ell = [i for i, e in enumerate(n) if isinstance(e, type(Ellipsis))] if len(ell) > 1: - raise IndexError('an index can only have a single ellipsis (...)') + raise IndexError("an index can only have a single ellipsis (...)") if len(ell): if len(n) > self.ndim: n.remove(Ellipsis) @@ -360,7 +396,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): while len(n) < self.ndim: n.append(None) - axes_idx = [self.shape.axes.find(i) for i in 'yxczt'] + axes_idx = [self.shape.axes.find(i) for i in "yxczt"] n = [n[j] if 0 <= j < len(n) else None for j in axes_idx] # reorder n new_slice = [] @@ -377,13 +413,15 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): new = View(self) new.slice = new_slice new._shape = Shape([1 if isinstance(s, Number) else len(s) for s in new_slice]) - new.axes = ''.join(j for j in self.axes if j in [i for i, s in zip('yxczt', new_slice) - if not isinstance(s, Number)]) + new.axes = "".join( + j for j in self.axes if j in [i for i, s in zip("yxczt", new_slice) if not isinstance(s, Number)] + ) return new - def __getstate__(self) -> dict[str: Any]: - return ({key: value for key, value in self.__dict__.items() if key not in self.do_not_pickle} | - {'cache_size': self.cache.maxlen}) + def __getstate__(self) -> dict[str:Any]: + return {key: value for key, value in self.__dict__.items() if key not in self.do_not_pickle} | { + "cache_size": self.cache.maxlen + } def __len__(self) -> int: return self.shape[0] @@ -392,11 +430,11 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return self.summary def __setstate__(self, state: dict[str, Any]) -> None: - """ What happens during unpickling """ - self.__dict__.update({key: value for key, value in state.items() if key != 'cache_size'}) + """What happens during unpickling""" + self.__dict__.update({key: value for key, value in state.items() if key != "cache_size"}) if isinstance(self, AbstractReader): self.open() - self.cache = DequeDict(state.get('cache_size', 16)) + self.cache = DequeDict(state.get("cache_size", 16)) def __str__(self) -> str: return str(self.path) @@ -412,25 +450,32 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): if copy is False: raise ValueError("`copy=False` isn't supported. 
A copy is always created.") block = self.block(*self.slice) - axes_idx = [self.shape.axes.find(i) for i in 'yxczt'] - axes_squeeze = tuple({i for i, j in enumerate(axes_idx) if j == -1}.union( - {i for i, j in enumerate(self.slice) if isinstance(j, Number)})) + axes_idx = [self.shape.axes.find(i) for i in "yxczt"] + axes_squeeze = tuple( + {i for i, j in enumerate(axes_idx) if j == -1}.union( + {i for i, j in enumerate(self.slice) if isinstance(j, Number)} + ) + ) block = block.squeeze(axes_squeeze) if dtype is not None: block = block.astype(dtype) if block.ndim == 0: return block.item() - axes = ''.join(j for i, j in enumerate('yxczt') if i not in axes_squeeze) + axes = "".join(j for i, j in enumerate("yxczt") if i not in axes_squeeze) return block.transpose([axes.find(i) for i in self.shape.axes if i in axes]) - def __array_arg_fun__(self, fun: Callable[[ArrayLike, Optional[int]], Number | np.ndarray], - axis: int | str = None, out: np.ndarray = None) -> Number | np.ndarray: - """ frame-wise application of np.argmin and np.argmax """ + def __array_arg_fun__( + self, + fun: Callable[[ArrayLike, Optional[int]], Number | np.ndarray], + axis: int | str = None, + out: np.ndarray = None, + ) -> Number | np.ndarray: + """frame-wise application of np.argmin and np.argmax""" if axis is None: value = arg = None - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) new = np.asarray(self[in_idx]) new_arg = np.unravel_index(fun(new), new.shape) # type: ignore new_value = new[new_arg] @@ -441,7 +486,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): i = fun((value, new_value)) # type: ignore arg = (arg, new_arg + idx)[i] value = (value, new_value)[i] - axes = ''.join(i for i in self.axes if i in 'yx') + 'czt' + axes = "".join(i for i in self.axes if i in "yx") + "czt" arg = np.ravel_multi_index([arg[axes.find(i)] for i in self.axes], self.shape) if out is None: return arg @@ -454,29 +499,29 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): else: axis_str, axis_idx = self.axes[axis], axis if axis_str not in self.axes: - raise IndexError(f'Axis {axis_str} not in {self.axes}.') + raise IndexError(f"Axis {axis_str} not in {self.axes}.") out_shape = list(self.shape) out_axes = list(self.axes) out_shape.pop(axis_idx) out_axes.pop(axis_idx) if out is None: out = np.zeros(out_shape, int) - if axis_str in 'yx': - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + if axis_str in "yx": + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + out_idx = tuple(yxczt["yxczt".find(i)] for i in out_axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) new = self[in_idx] out[out_idx] = fun(np.asarray(new), new.axes.find(axis_str)) else: value = np.zeros(out.shape, self.dtype) - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - 
in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + out_idx = tuple(yxczt["yxczt".find(i)] for i in out_axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) new_value = self[in_idx] - new_arg = np.full_like(new_value, idx['czt'.find(axis_str)]) - if idx['czt'.find(axis_str)] == 0: + new_arg = np.full_like(new_value, idx["czt".find(axis_str)]) + if idx["czt".find(axis_str)] == 0: value[out_idx] = new_value out[out_idx] = new_arg else: @@ -486,13 +531,20 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): out[out_idx] = np.where(i, new_arg, out[out_idx]) return out - def __array_fun__(self, funs: Sequence[Callable[[ArrayLike], Number | np.ndarray]], axis: int | str = None, - dtype: DTypeLike = None, out: np.ndarray = None, keepdims: bool = False, - initials: list[Number | np.ndarray] = None, where: bool | int | np.ndarray = True, - ffuns: Sequence[Callable[[ArrayLike], np.ndarray]] = None, - cfun: Callable[..., np.ndarray] = None) -> Number | np.ndarray: - """ frame-wise application of np.min, np.max, np.sum, np.mean and their nan equivalents """ - p = re.compile(r'\d') + def __array_fun__( + self, + funs: Sequence[Callable[[ArrayLike], Number | np.ndarray]], + axis: int | str = None, + dtype: DTypeLike = None, + out: np.ndarray = None, + keepdims: bool = False, + initials: list[Number | np.ndarray] = None, + where: bool | int | np.ndarray = True, + ffuns: Sequence[Callable[[ArrayLike], np.ndarray]] = None, + cfun: Callable[..., np.ndarray] = None, + ) -> Number | np.ndarray: + """frame-wise application of np.min, np.max, np.sum, np.mean and their nan equivalents""" + p = re.compile(r"\d") dtype = self.dtype if dtype is None else np.dtype(dtype) if initials is None: initials = [None for _ in funs] @@ -501,21 +553,25 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): def ffun_(frame: ArrayLike) -> np.ndarray: return np.asarray(frame) + ffuns = [ffun_ if ffun is None else ffun for ffun in ffuns] if cfun is None: + def cfun(*res): # noqa return res[0] # TODO: smarter transforms if axis is None: - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) w = where if where is None or isinstance(where, bool) else where[in_idx] - initials = [fun(np.asarray(ffun(self[in_idx])), initial=initial, where=w) # type: ignore - for fun, ffun, initial in zip(funs, ffuns, initials)] + initials = [ + fun(np.asarray(ffun(self[in_idx])), initial=initial, where=w) # type: ignore + for fun, ffun, initial in zip(funs, ffuns, initials) + ] res = cfun(*initials) - res = (np.round(res) if dtype.kind in 'ui' else res).astype(p.sub('', dtype.name)) + res = (np.round(res) if dtype.kind in "ui" else res).astype(p.sub("", dtype.name)) if keepdims: res = np.array(res, dtype, ndmin=self.ndim) if out is None: @@ -530,7 +586,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): axis_idx = axis % self.ndim axis_str = self.axes[axis_idx] if axis_str not in self.axes: - raise IndexError(f'Axis {axis_str} not in {self.axes}.') + raise IndexError(f"Axis {axis_str} not in {self.axes}.") out_shape = list(self.shape) out_axes = list(self.axes) if not keepdims: @@ -538,35 +594,43 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): out_axes.pop(axis_idx) if out is None: out = np.zeros(out_shape, dtype) 
- if axis_str in 'yx': - yx = 'yx' if self.axes.find('x') > self.axes.find('y') else 'yx' + if axis_str in "yx": + yx = "yx" if self.axes.find("x") > self.axes.find("y") else "yx" frame_ax = yx.find(axis_str) - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + out_idx = tuple(yxczt["yxczt".find(i)] for i in out_axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) w = where if where is None or isinstance(where, bool) else where[in_idx] - res = cfun(*[fun(ffun(self[in_idx]), frame_ax, initial=initial, where=w) # type: ignore - for fun, ffun, initial in zip(funs, ffuns, initials)]) - out[out_idx] = (np.round(res) if out.dtype.kind in 'ui' else res).astype(p.sub('', dtype.name)) + res = cfun( + *[ + fun(ffun(self[in_idx]), frame_ax, initial=initial, where=w) # type: ignore + for fun, ffun, initial in zip(funs, ffuns, initials) + ] + ) + out[out_idx] = (np.round(res) if out.dtype.kind in "ui" else res).astype(p.sub("", dtype.name)) else: tmps = [np.zeros(out_shape) for _ in ffuns] - for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + for idx in product(range(self.shape["c"]), range(self.shape["z"]), range(self.shape["t"])): yxczt = (slice(None), slice(None)) + idx - out_idx = tuple(yxczt['yxczt'.find(i)] for i in out_axes) - in_idx = tuple(yxczt['yxczt'.find(i)] for i in self.axes) + out_idx = tuple(yxczt["yxczt".find(i)] for i in out_axes) + in_idx = tuple(yxczt["yxczt".find(i)] for i in self.axes) - if idx['czt'.find(axis_str)] == 0: + if idx["czt".find(axis_str)] == 0: w = where if where is None or isinstance(where, bool) else (where[in_idx],) for tmp, fun, ffun, initial in zip(tmps, funs, ffuns, initials): tmp[out_idx] = fun((ffun(self[in_idx]),), 0, initial=initial, where=w) # type: ignore else: - w = where if where is None or isinstance(where, bool) else \ - (np.ones_like(where[in_idx]), where[in_idx]) + w = ( + where + if where is None or isinstance(where, bool) + else (np.ones_like(where[in_idx]), where[in_idx]) + ) for tmp, fun, ffun in zip(tmps, funs, ffuns): tmp[out_idx] = fun((tmp[out_idx], ffun(self[in_idx])), 0, where=w) # type: ignore - out[...] = (np.round(cfun(*tmps)) if out.dtype.kind in 'ui' else - cfun(*tmps)).astype(p.sub('', dtype.name)) + out[...] 
= (np.round(cfun(*tmps)) if out.dtype.kind in "ui" else cfun(*tmps)).astype( + p.sub("", dtype.name) + ) return out @property @@ -591,10 +655,10 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): @cached_property def extrametadata(self) -> Optional[Any]: if isinstance(self.path, Path): - if self.path.with_suffix('.pzl2').exists(): - pname = self.path.with_suffix('.pzl2') - elif self.path.with_suffix('.pzl').exists(): - pname = self.path.with_suffix('.pzl') + if self.path.with_suffix(".pzl2").exists(): + pname = self.path.with_suffix(".pzl2") + elif self.path.with_suffix(".pzl").exists(): + pname = self.path.with_suffix(".pzl") else: return None try: @@ -620,51 +684,65 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): if isinstance(value, Shape): self._shape = value else: - self._shape = Shape([value['yxczt'.find(i.lower())] for i in self.axes], self.axes) + self._shape = Shape([value["yxczt".find(i.lower())] for i in self.axes], self.axes) @property def summary(self) -> str: - """ gives a helpful summary of the recorded experiment """ - s = [f'path/filename: {self.path}', - f'series/pos: {self.series}', - f"reader: {self.base.__class__.__module__.split('.')[-1]}"] - s.extend((f'dtype: {self.dtype}', - f'shape ({self.axes}):'.ljust(15) + f"{' x '.join(str(i) for i in self.shape)}")) + """gives a helpful summary of the recorded experiment""" + s = [ + f"path/filename: {self.path}", + f"series/pos: {self.series}", + f"reader: {self.base.__class__.__module__.split('.')[-1]}", + ] + s.extend( + ( + f"dtype: {self.dtype}", + f"shape ({self.axes}):".ljust(15) + f"{' x '.join(str(i) for i in self.shape)}", + ) + ) if self.pxsize_um: - s.append(f'pixel size: {1000 * self.pxsize_um:.2f} nm') + s.append(f"pixel size: {1000 * self.pxsize_um:.2f} nm") if self.zstack and self.deltaz_um: - s.append(f'z-interval: {1000 * self.deltaz_um:.2f} nm') + s.append(f"z-interval: {1000 * self.deltaz_um:.2f} nm") if self.exposuretime_s and not all(e is None for e in self.exposuretime_s): - s.append(f'exposuretime: {self.exposuretime_s[0]:.2f} s') + s.append(f"exposuretime: {self.exposuretime_s[0]:.2f} s") if self.timeseries and self.timeinterval: - s.append(f'time interval: {self.timeinterval:.3f} s') + s.append(f"time interval: {self.timeinterval:.3f} s") if self.binning: - s.append('binning: {}x{}'.format(*self.binning)) + s.append("binning: {}x{}".format(*self.binning)) if self.laserwavelengths: - s.append('laser colors: ' + ' | '.join([' & '.join(len(w) * ('{:.0f}',)).format(*w) - for w in self.laserwavelengths]) + ' nm') + s.append( + "laser colors: " + + " | ".join([" & ".join(len(w) * ("{:.0f}",)).format(*w) for w in self.laserwavelengths]) + + " nm" + ) if self.laserpowers: - s.append('laser powers: ' + ' | '.join([' & '.join(len(p) * ('{:.3g}',)).format(*[100 * i for i in p]) - for p in self.laserpowers]) + ' %') + s.append( + "laser powers: " + + " | ".join( + [" & ".join(len(p) * ("{:.3g}",)).format(*[100 * i for i in p]) for p in self.laserpowers] + ) + + " %" + ) if self.objective and self.objective.model: - s.append(f'objective: {self.objective.model}') + s.append(f"objective: {self.objective.model}") if self.magnification: - s.append(f'magnification: {self.magnification}x') + s.append(f"magnification: {self.magnification}x") if self.tubelens and self.tubelens.model: - s.append(f'tubelens: {self.tubelens.model}') + s.append(f"tubelens: {self.tubelens.model}") if self.filter: - s.append(f'filterset: {self.filter}') + s.append(f"filterset: {self.filter}") if self.powermode: - 
s.append(f'powermode: {self.powermode}') + s.append(f"powermode: {self.powermode}") if self.collimator: - s.append('collimator: ' + (' {}' * len(self.collimator)).format(*self.collimator)) + s.append("collimator: " + (" {}" * len(self.collimator)).format(*self.collimator)) if self.tirfangle: - s.append('TIRF angle: ' + (' {:.2f}°' * len(self.tirfangle)).format(*self.tirfangle)) + s.append("TIRF angle: " + (" {:.2f}°" * len(self.tirfangle)).format(*self.tirfangle)) if self.gain: - s.append('gain: ' + (' {:.0f}' * len(self.gain)).format(*self.gain)) + s.append("gain: " + (" {:.0f}" * len(self.gain)).format(*self.gain)) if self.pcf: - s.append('pcf: ' + (' {:.2f}' * len(self.pcf)).format(*self.pcf)) - return '\n'.join(s) + s.append("pcf: " + (" {:.2f}" * len(self.pcf)).format(*self.pcf)) + return "\n".join(s) @property def T(self) -> Imread: # noqa @@ -672,11 +750,11 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): @cached_property def timeseries(self) -> bool: - return self.shape['t'] > 1 + return self.shape["t"] > 1 @cached_property def zstack(self) -> bool: - return self.shape['z'] > 1 + return self.shape["z"] > 1 @wraps(np.ndarray.argmax) def argmax(self, *args, **kwargs): @@ -709,7 +787,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): @wraps(np.moveaxis) def moveaxis(self, source, destination): - raise NotImplementedError('moveaxis is not implemented') + raise NotImplementedError("moveaxis is not implemented") @wraps(np.nanmax) def nanmax(self, axis=None, out=None, keepdims=False, initial=None, where=True, **_): @@ -753,13 +831,17 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return np.invert(np.isnan(frame)) if std: + def cfun(s, s2, n): - return np.sqrt((s2 - s ** 2 / n) / (n - ddof)) + return np.sqrt((s2 - s**2 / n) / (n - ddof)) else: + def cfun(s, s2, n): - return (s2 - s ** 2 / n) / (n - ddof) - return self.__array_fun__([np.nansum, np.nansum, np.sum], axis, dtype, out, keepdims, None, where, - (sfun, s2fun, nfun), cfun) + return (s2 - s**2 / n) / (n - ddof) + + return self.__array_fun__( + [np.nansum, np.nansum, np.sum], axis, dtype, out, keepdims, None, where, (sfun, s2fun, nfun), cfun + ) @wraps(np.ndarray.flatten) def flatten(self, *args, **kwargs): @@ -779,8 +861,8 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): else: axes = tuple(new.axes.find(ax) if isinstance(ax, str) else ax for ax in axes) if any([new.shape[ax] != 1 for ax in axes]): - raise ValueError('cannot select an axis to squeeze out which has size not equal to one') - new.axes = ''.join(j for i, j in enumerate(new.axes) if i not in axes) + raise ValueError("cannot select an axis to squeeze out which has size not equal to one") + new.axes = "".join(j for i, j in enumerate(new.axes) if i not in axes) return new @wraps(np.ndarray.std) @@ -809,7 +891,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): if not axes: new.axes = new.axes[::-1] else: - new.axes = ''.join(ax if isinstance(ax, str) else new.axes[ax] for ax in axes) + new.axes = "".join(ax if isinstance(ax, str) else new.axes[ax] for ax in axes) return new @wraps(np.ndarray.var) @@ -824,11 +906,14 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return np.asarray(frame).astype(float) ** 2 if std: + def cfun(s, s2): - return np.sqrt((s2 - s ** 2 / n) / (n - ddof)) + return np.sqrt((s2 - s**2 / n) / (n - ddof)) else: + def cfun(s, s2): - return (s2 - s ** 2 / n) / (n - ddof) + return (s2 - s**2 / n) / (n - ddof) + return self.__array_fun__([np.sum, np.sum], axis, dtype, out, keepdims, None, where, 
(sfun, s2fun), cfun) def asarray(self) -> np.ndarray: @@ -840,12 +925,18 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): new.dtype = dtype return new - def block(self, y: int | Sequence[int] = None, x: int | Sequence[int] = None, - c: int | Sequence[int] = None, z: int | Sequence[int] = None, - t: int | Sequence[int] = None) -> np.ndarray: - """ returns 5D block of frames """ - y, x, c, z, t = (np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) - for i, e in zip('yxczt', (y, x, c, z, t))) + def block( + self, + y: int | Sequence[int] = None, + x: int | Sequence[int] = None, + c: int | Sequence[int] = None, + z: int | Sequence[int] = None, + t: int | Sequence[int] = None, + ) -> np.ndarray: + """returns 5D block of frames""" + y, x, c, z, t = ( + np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) for i, e in zip("yxczt", (y, x, c, z, t)) + ) d = np.empty((len(y), len(x), len(c), len(z), len(t)), self.dtype) for (ci, cj), (zi, zj), (ti, tj) in product(enumerate(c), enumerate(z), enumerate(t)): d[:, :, ci, zi, ti] = self.frame(cj, zj, tj)[y][:, x] # type: ignore @@ -855,16 +946,16 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return View(self) def data(self, c: int | Sequence[int] = 0, z: int | Sequence[int] = 0, t: int | Sequence[int] = 0) -> np.ndarray: - """ returns 3D stack of frames """ - c, z, t = (np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) for i, e in zip('czt', (c, z, t))) + """returns 3D stack of frames""" + c, z, t = (np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) for i, e in zip("czt", (c, z, t))) return np.dstack([self.frame(ci, zi, ti) for ci, zi, ti in product(c, z, t)]) def frame(self, c: int = 0, z: int = 0, t: int = 0) -> np.ndarray: - """ returns single 2D frame """ + """returns single 2D frame""" c = self.get_channel(c) - c %= self.base.shape['c'] - z %= self.base.shape['z'] - t %= self.base.shape['t'] + c %= self.base.shape["c"] + z %= self.base.shape["z"] + t %= self.base.shape["t"] # cache last n (default 16) frames in memory for speed (~250x faster) key = (c, z, t, self.transform, self.frame_decorator) @@ -887,31 +978,36 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return channel_name else: c = [i for i, c in enumerate(self.channel_names) if c.lower().startswith(channel_name.lower())] - assert len(c) > 0, f'Channel {c} not found in {self.channel_names}' - assert len(c) < 2, f'Channel {c} not unique in {self.channel_names}' + assert len(c) > 0, f"Channel {c} not found in {self.channel_names}" + assert len(c) < 2, f"Channel {c} not unique in {self.channel_names}" return c[0] @staticmethod def get_config(file: Path | str) -> Any: - """ Open a yml config file """ + """Open a yml config file""" loader = yaml.SafeLoader loader.add_implicit_resolver( - r'tag:yaml.org,2002:float', - re.compile(r'''^(?: + r"tag:yaml.org,2002:float", + re.compile( + r"""^(?: [-+]?([0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)? |[-+]?([0-9][0-9_]*)([eE][-+]?[0-9]+) |\.[0-9_]+(?:[eE][-+][0-9]+)? 
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* |[-+]?\\.(?:inf|Inf|INF) - |\.(?:nan|NaN|NAN))$''', re.X), - list(r'-+0123456789.')) + |\.(?:nan|NaN|NAN))$""", + re.X, + ), + list(r"-+0123456789."), + ) with open(file) as f: return yaml.load(f, loader) - def get_czt(self, c: int | Sequence[int], z: int | Sequence[int], - t: int | Sequence[int]) -> tuple[list[int], list[int], list[int]]: + def get_czt( + self, c: int | Sequence[int], z: int | Sequence[int], t: int | Sequence[int] + ) -> tuple[list[int], list[int], list[int]]: czt = [] - for i, n in zip('czt', (c, z, t)): + for i, n in zip("czt", (c, z, t)): if n is None: czt.append(list(range(self.shape[i]))) elif isinstance(n, range): @@ -930,8 +1026,8 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): @staticmethod def bioformats_ome(path: [str, Path]) -> OME: - """ Use java BioFormats to make an ome metadata structure. """ - with multiprocessing.get_context('spawn').Pool(1) as pool: + """Use java BioFormats to make an ome metadata structure.""" + with multiprocessing.get_context("spawn").Pool(1) as pool: return pool.map(bioformats_ome, (path,))[0] @staticmethod @@ -939,26 +1035,35 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): # fix ome if necessary for image in ome.images: try: - if image.pixels.physical_size_z is None and len(set([plane.the_z - for plane in image.pixels.planes])) > 1: - z = np.array([(plane.position_z * ureg.Quantity(plane.position_z_unit.value).to(ureg.m).magnitude, - plane.the_z) - for plane in image.pixels.planes if plane.the_c == 0 and plane.the_t == 0]) + if ( + image.pixels.physical_size_z is None + and len(set([plane.the_z for plane in image.pixels.planes])) > 1 + ): + z = np.array( + [ + ( + plane.position_z * ureg.Quantity(plane.position_z_unit.value).to(ureg.m).magnitude, + plane.the_z, + ) + for plane in image.pixels.planes + if plane.the_c == 0 and plane.the_t == 0 + ] + ) i = np.argsort(z[:, 1]) image.pixels.physical_size_z = np.nanmean(np.true_divide(*np.diff(z[i], axis=0).T)) * 1e6 - image.pixels.physical_size_z_unit = 'µm' # type: ignore - except Exception: # noqa + image.pixels.physical_size_z_unit = "µm" # type: ignore + except Exception: # noqa pass return ome @staticmethod def read_ome(path: [str, Path]) -> Optional[OME]: path = Path(path) - if path.with_suffix('.ome.xml').exists(): - return OME.from_xml(path.with_suffix('.ome.xml')) + if path.with_suffix(".ome.xml").exists(): + return OME.from_xml(path.with_suffix(".ome.xml")) def get_ome(self) -> OME: - """ overload this """ + """overload this""" return self.bioformats_ome(self.path) @cached_property @@ -972,11 +1077,11 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): return cache[self.path] def is_noise(self, volume: ArrayLike = None) -> bool: - """ True if volume only has noise """ + """True if volume only has noise""" if volume is None: volume = self fft = np.fft.fftn(volume) - corr = np.fft.fftshift(np.fft.ifftn(fft * fft.conj()).real / np.sum(volume ** 2)) + corr = np.fft.fftshift(np.fft.ifftn(fft * fft.conj()).real / np.sum(volume**2)) return 1 - corr[tuple([0] * corr.ndim)] < 0.0067 @staticmethod @@ -984,24 +1089,33 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): JVM().kill_vm() def new(self, *args: Any, **kwargs: Any) -> View: - warnings.warn('Imread.new has been deprecated, use Imread.view instead.', DeprecationWarning, 2) + warnings.warn("Imread.new has been deprecated, use Imread.view instead.", DeprecationWarning, 2) return self.view(*args, **kwargs) - def save_as_movie(self, fname: Path | str = 
None, - c: int | Sequence[int] = None, z: int | Sequence[int] = None, # noqa - t: str | int | Sequence[int] = None, # noqa - colors: tuple[str] = None, brightnesses: tuple[float] = None, - scale: int = None, bar: bool = True) -> None: - """ saves the image as a mp4 or mkv file """ + def save_as_movie( + self, + fname: Path | str = None, + c: int | Sequence[int] = None, + z: int | Sequence[int] = None, # noqa + t: str | int | Sequence[int] = None, # noqa + colors: tuple[str] = None, + brightnesses: tuple[float] = None, + scale: int = None, + bar: bool = True, + speed: float = None, + ) -> None: + """saves the image as a mp4 or mkv file""" from matplotlib.colors import to_rgb from skvideo.io import FFmpegWriter if t is None: - t = np.arange(self.shape['t']) + t = np.arange(self.shape["t"]) elif isinstance(t, str): t = eval(f"np.arange(self.shape['t'])[{t}]") elif np.isscalar(t): t = (t,) + if speed is None: + speed = 25 / 7 def get_ab(tyx: Imread, p: tuple[float, float] = (1, 99)) -> tuple[float, float]: s = tyx.flatten() @@ -1017,72 +1131,127 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): color = to_rgb(color) frame = (frame - a) / (b - a) frame = np.dstack([255 * frame * i for i in color]) - return np.clip(np.round(frame), 0, 255).astype('uint8') + return np.clip(np.round(frame), 0, 255).astype("uint8") - ab = list(zip(*[get_ab(i) for i in self.transpose('cztyx')])) # type: ignore - colors = colors or ('r', 'g', 'b')[:self.shape['c']] + max(0, self.shape['c'] - 3) * ('w',) - brightnesses = brightnesses or (1,) * self.shape['c'] + ab = list(zip(*[get_ab(i) for i in self.transpose("cztyx")])) # type: ignore + colors = colors or ("r", "g", "b")[: self.shape["c"]] + max(0, self.shape["c"] - 3) * ("w",) + brightnesses = brightnesses or (1,) * self.shape["c"] scale = scale or 1 - shape_x = 2 * ((self.shape['x'] * scale + 1) // 2) - shape_y = 2 * ((self.shape['y'] * scale + 1) // 2) + shape_x = 2 * ((self.shape["x"] * scale + 1) // 2) + shape_y = 2 * ((self.shape["y"] * scale + 1) // 2) with FFmpegWriter( - str(fname).format(name=self.path.stem, path=str(self.path.parent)), - outputdict={'-vcodec': 'libx264', '-preset': 'veryslow', '-pix_fmt': 'yuv420p', '-r': '7', - '-vf': f'setpts={25 / 7}*PTS,scale={shape_x}:{shape_y}:flags=neighbor'} + str(fname).format(name=self.path.stem, path=str(self.path.parent)), + outputdict={ + "-vcodec": "libx264", + "-preset": "veryslow", + "-pix_fmt": "yuv420p", + "-r": "7", + "-vf": f"setpts={speed}*PTS,scale={shape_x}:{shape_y}:flags=neighbor", + }, ) as movie: - im = self.transpose('tzcyx') # type: ignore - for ti in tqdm(t, desc='Saving movie', disable=not bar): - movie.writeFrame(np.max([cframe(yx, c, a, b / s, scale) - for yx, a, b, c, s in zip(im[ti].max('z'), *ab, colors, brightnesses)], 0)) + im = self.transpose("tzcyx") # type: ignore + for ti in tqdm(t, desc="Saving movie", disable=not bar): + movie.writeFrame( + np.max( + [ + cframe(yx, c, a, b / s, scale) + for yx, a, b, c, s in zip(im[ti].max("z"), *ab, colors, brightnesses) + ], + 0, + ) + ) - def save_as_tiff(self, fname: Path | str = None, c: int | Sequence[int] = None, z: int | Sequence[int] = None, - t: int | Sequence[int] = None, split: bool = False, bar: bool = True, pixel_type: str = 'uint16', - **kwargs: Any) -> None: - """ saves the image as a tif file - split: split channels into different files """ + def save_as_tiff( + self, + fname: Path | str = None, + c: int | Sequence[int] = None, + z: int | Sequence[int] = None, + t: int | Sequence[int] = None, + split: bool = False, + 
bar: bool = True, + pixel_type: str = "uint16", + compression: str = None, + **kwargs: Any, + ) -> None: + """saves the image as a tif file + split: split channels into different files""" fname = Path(str(fname).format(name=self.path.stem, path=str(self.path.parent))) if fname is None: - fname = self.path.with_suffix('.tif') + fname = self.path.with_suffix(".tif") if fname == self.path: - raise FileExistsError(f'File {fname} exists already.') + raise FileExistsError(f"File {fname} exists already.") if not isinstance(fname, Path): fname = Path(fname) if split: - for i in range(self.shape['c']): + for i in range(self.shape["c"]): if self.timeseries: - self.save_as_tiff(fname.with_name(f'{fname.stem}_C{i:01d}').with_suffix('.tif'), i, 0, None, False, - bar, pixel_type) + self.save_as_tiff( + fname.with_name(f"{fname.stem}_C{i:01d}").with_suffix(".tif"), + i, + 0, + None, + False, + bar, + pixel_type, + compression, + **kwargs, + ) else: - self.save_as_tiff(fname.with_name(f'{fname.stem}_C{i:01d}').with_suffix('.tif'), i, None, 0, False, - bar, pixel_type) + self.save_as_tiff( + fname.with_name(f"{fname.stem}_C{i:01d}").with_suffix(".tif"), + i, + None, + 0, + False, + bar, + pixel_type, + compression, + **kwargs, + ) else: n = [c, z, t] - for i, ax in enumerate('czt'): + for i, ax in enumerate("czt"): if n[i] is None: n[i] = range(self.shape[ax]) elif not isinstance(n[i], (tuple, list)): n[i] = (n[i],) shape = [len(i) for i in n] - with TransformTiff(self, fname.with_suffix('.tif'), dtype=pixel_type, - pxsize=self.pxsize_um, deltaz=self.deltaz_um, **kwargs) as tif: - for i, m in tqdm(zip(product(*[range(s) for s in shape]), product(*n)), # noqa - total=np.prod(shape), desc='Saving tiff', disable=not bar): + with TransformTiff( + self, + fname.with_suffix(".tif"), + dtype=pixel_type, + pxsize=self.pxsize_um, + deltaz=self.deltaz_um, + compression=compression, + **kwargs, + ) as tif: + for i, m in tqdm( + zip(product(*[range(s) for s in shape]), product(*n)), # noqa + total=np.prod(shape), + desc="Saving tiff", + disable=not bar, + ): tif.save(m, *i) - def with_transform(self, channels: bool = True, drift: bool = False, file: Path | str = None, - bead_files: Sequence[Path | str] = ()) -> View: - """ returns a view where channels and/or frames are registered with an affine transformation - channels: True/False register channels using bead_files - drift: True/False register frames to correct drift - file: load registration from file with name file, default: transform.yml in self.path.parent - bead_files: files used to register channels, default: files in self.path.parent, - with names starting with 'beads' - """ + def with_transform( + self, + channels: bool = True, + drift: bool = False, + file: Path | str = None, + bead_files: Sequence[Path | str] = (), + ) -> View: + """returns a view where channels and/or frames are registered with an affine transformation + channels: True/False register channels using bead_files + drift: True/False register frames to correct drift + file: load registration from file with name file, default: transform.yml in self.path.parent + bead_files: files used to register channels, default: files in self.path.parent, + with names starting with 'beads' + """ view = self.view() if file is None: - file = Path(view.path.parent) / 'transform.yml' + file = Path(view.path.parent) / "transform.yml" else: file = Path(file) if not bead_files: @@ -1090,7 +1259,7 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): bead_files = Transforms.get_bead_files(view.path.parent) 
except Exception: # noqa if not file.exists(): - raise Exception('No transform file and no bead file found.') + raise Exception("No transform file and no bead file found.") bead_files = () if channels: @@ -1100,8 +1269,8 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): view.transform = Transforms().with_beads(view.cyllens, bead_files) if drift: view.transform = view.transform.with_drift(view) - view.transform.save(file.with_suffix('.yml')) - view.transform.save_channel_transform_tiff(bead_files, file.with_suffix('.tif')) + view.transform.save(file.with_suffix(".yml")) + view.transform.save_channel_transform_tiff(bead_files, file.with_suffix(".tif")) elif drift: try: view.transform = Transforms.from_file(file, C=False) @@ -1119,8 +1288,8 @@ class Imread(np.lib.mixins.NDArrayOperatorsMixin, ABC): def split_path_series(path: Path | str) -> tuple[Path, int]: if isinstance(path, str): path = Path(path) - if isinstance(path, Path) and path.name.startswith('Pos') and path.name.lstrip('Pos').isdigit(): - return path.parent, int(path.name.lstrip('Pos')) + if isinstance(path, Path) and path.name.startswith("Pos") and path.name.lstrip("Pos").isdigit(): + return path.parent, int(path.name.lstrip("Pos")) return path, 0 def view(self, *args: Any, **kwargs: Any) -> View: @@ -1134,19 +1303,19 @@ class View(Imread, ABC): def __getattr__(self, item: str) -> Any: if not hasattr(self.base, item): - raise AttributeError(f'{self.__class__} object has no attribute {item}') + raise AttributeError(f"{self.__class__} object has no attribute {item}") return self.base.__getattribute__(item) class AbstractReader(Imread, metaclass=ABCMeta): priority = 99 - do_not_pickle = 'cache' + do_not_pickle = "cache" ureg = ureg @staticmethod @abstractmethod def _can_open(path: Path | str) -> bool: - """ Override this method, and return true when the subclass can open the file """ + """Override this method, and return true when the subclass can open the file""" return False @staticmethod @@ -1155,16 +1324,16 @@ class AbstractReader(Imread, metaclass=ABCMeta): @abstractmethod def __frame__(self, c: int, z: int, t: int) -> np.ndarray: - """ Override this, return the frame at c, z, t """ - return np.random.randint(0, 255, self.shape['yx']) + """Override this, return the frame at c, z, t""" + return np.random.randint(0, 255, self.shape["yx"]) def open(self) -> None: - """ Optionally override this, open file handles etc. - filehandles cannot be pickled and should be marked such by setting do_not_pickle = 'file_handle_name' """ + """Optionally override this, open file handles etc. + filehandles cannot be pickled and should be marked such by setting do_not_pickle = 'file_handle_name'""" return def close(self) -> None: - """ Optionally override this, close file handles etc. 
""" + """Optionally override this, close file handles etc.""" return def __init__(self, path: Path | str | Imread | Any = None, dtype: DTypeLike = None, axes: str = None) -> None: @@ -1177,18 +1346,18 @@ class AbstractReader(Imread, metaclass=ABCMeta): self.path, self.series = self.split_path_series(path) if isinstance(path, Path) and path.exists(): self.title = self.path.name - self.acquisitiondate = datetime.fromtimestamp(self.path.stat().st_mtime).strftime('%y-%m-%dT%H:%M:%S') + self.acquisitiondate = datetime.fromtimestamp(self.path.stat().st_mtime).strftime("%y-%m-%dT%H:%M:%S") else: # ndarray self.title = self.__class__.__name__ - self.acquisitiondate = 'now' + self.acquisitiondate = "now" self.reader = None self.pcf = None self.powermode = None self.collimator = None self.tirfangle = None - self.cyllens = ['None', 'None'] - self.duolink = 'None' + self.cyllens = ["None", "None"] + self.duolink = "None" self.detector = [0, 1] self.track = [0] self.cache = DequeDict(16) @@ -1200,12 +1369,13 @@ class AbstractReader(Imread, metaclass=ABCMeta): image = self.ome.images[self.series if len(self.ome.images) > 1 else 0] pixels = image.pixels self.shape = pixels.size_y, pixels.size_x, pixels.size_c, pixels.size_z, pixels.size_t - self.base_shape = Shape((pixels.size_y, pixels.size_x, pixels.size_c, pixels.size_z, pixels.size_t), 'yxczt') + self.base_shape = Shape((pixels.size_y, pixels.size_x, pixels.size_c, pixels.size_z, pixels.size_t), "yxczt") self.dtype = pixels.type.value if dtype is None else dtype self.pxsize = pixels.physical_size_x_quantity try: - self.exposuretime = tuple(find(image.pixels.planes, the_c=c).exposure_time_quantity - for c in range(self.shape['c'])) + self.exposuretime = tuple( + find(image.pixels.planes, the_c=c).exposure_time_quantity for c in range(self.shape["c"]) + ) except AttributeError: self.exposuretime = () @@ -1220,21 +1390,21 @@ class AbstractReader(Imread, metaclass=ABCMeta): self.objective = None try: t0 = find(image.pixels.planes, the_c=0, the_t=0, the_z=0).delta_t - t1 = find(image.pixels.planes, the_c=0, the_t=self.shape['t'] - 1, the_z=0).delta_t - self.timeinterval = (t1 - t0) / (self.shape['t'] - 1) if self.shape['t'] > 1 and t1 > t0 else None + t1 = find(image.pixels.planes, the_c=0, the_t=self.shape["t"] - 1, the_z=0).delta_t + self.timeinterval = (t1 - t0) / (self.shape["t"] - 1) if self.shape["t"] > 1 and t1 > t0 else None except AttributeError: self.timeinterval = None try: - self.binning = [int(i) for i in image.pixels.channels[0].detector_settings.binning.value.split('x')] + self.binning = [int(i) for i in image.pixels.channels[0].detector_settings.binning.value.split("x")] if self.pxsize is not None: self.pxsize *= self.binning[0] except (AttributeError, IndexError, ValueError): self.binning = None self.channel_names = [channel.name for channel in image.pixels.channels] - self.channel_names += [chr(97 + i) for i in range(len(self.channel_names), self.shape['c'])] + self.channel_names += [chr(97 + i) for i in range(len(self.channel_names), self.shape["c"])] self.cnamelist = self.channel_names try: - optovars = [objective for objective in instrument.objectives if 'tubelens' in objective.id.lower()] + optovars = [objective for objective in instrument.objectives if "tubelens" in objective.id.lower()] except AttributeError: optovars = [] if len(optovars) == 0: @@ -1251,24 +1421,33 @@ class AbstractReader(Imread, metaclass=ABCMeta): self.magnification = None self.NA = None - self.gain = [find(instrument.detectors, 
id=channel.detector_settings.id).amplification_gain - for channel in image.pixels.channels - if channel.detector_settings - and find(instrument.detectors, id=channel.detector_settings.id).amplification_gain] - self.laserwavelengths = [(channel.excitation_wavelength_quantity.to(self.ureg.nm).m,) - for channel in pixels.channels if channel.excitation_wavelength_quantity] - self.laserpowers = try_default(lambda: [(1 - channel.light_source_settings.attenuation,) - for channel in pixels.channels], []) + self.gain = [ + find(instrument.detectors, id=channel.detector_settings.id).amplification_gain + for channel in image.pixels.channels + if channel.detector_settings + and find(instrument.detectors, id=channel.detector_settings.id).amplification_gain + ] + self.laserwavelengths = [ + (channel.excitation_wavelength_quantity.to(self.ureg.nm).m,) + for channel in pixels.channels + if channel.excitation_wavelength_quantity + ] + self.laserpowers = try_default( + lambda: [(1 - channel.light_source_settings.attenuation,) for channel in pixels.channels], [] + ) self.filter = try_default( # type: ignore - lambda: [find(instrument.filter_sets, id=channel.filter_set_ref.id).model - for channel in image.pixels.channels], None) + lambda: [ + find(instrument.filter_sets, id=channel.filter_set_ref.id).model for channel in image.pixels.channels + ], + None, + ) self.pxsize_um = None if self.pxsize is None else self.pxsize.to(self.ureg.um).m self.exposuretime_s = [None if i is None else i.to(self.ureg.s).m for i in self.exposuretime] if axes is None: - self.axes = ''.join(i for i in 'cztyx' if self.shape[i] > 1) - elif axes.lower() == 'full': - self.axes = 'cztyx' + self.axes = "".join(i for i in "cztyx" if self.shape[i] > 1) + elif axes.lower() == "full": + self.axes = "cztyx" else: self.axes = axes self.slice = [np.arange(s, dtype=int) for s in self.shape.yxczt] @@ -1276,23 +1455,24 @@ class AbstractReader(Imread, metaclass=ABCMeta): m = self.extrametadata if m is not None: try: - self.cyllens = m['CylLens'] - self.duolink = m['DLFilterSet'].split(' & ')[m['DLFilterChannel']] - if 'FeedbackChannels' in m: - self.feedback = m['FeedbackChannels'] + self.cyllens = m["CylLens"] + self.duolink = m["DLFilterSet"].split(" & ")[m["DLFilterChannel"]] + if "FeedbackChannels" in m: + self.feedback = m["FeedbackChannels"] else: - self.feedback = m['FeedbackChannel'] + self.feedback = m["FeedbackChannel"] except Exception: # noqa - self.cyllens = ['None', 'None'] - self.duolink = 'None' + self.cyllens = ["None", "None"] + self.duolink = "None" self.feedback = [] try: - self.cyllenschannels = np.where([self.cyllens[self.detector[c]].lower() != 'none' - for c in range(self.shape['c'])])[0].tolist() + self.cyllenschannels = np.where( + [self.cyllens[self.detector[c]].lower() != "none" for c in range(self.shape["c"])] + )[0].tolist() except Exception: # noqa pass try: - s = int(re.findall(r'_(\d{3})_', self.duolink)[0]) * ureg.nm + s = int(re.findall(r"_(\d{3})_", self.duolink)[0]) * ureg.nm except Exception: # noqa s = 561 * ureg.nm try: @@ -1305,7 +1485,7 @@ class AbstractReader(Imread, metaclass=ABCMeta): sigma /= 2 * self.NA * self.pxsize self.sigma = sigma.magnitude.tolist() # type: ignore except Exception: # noqa - self.sigma = [2] * self.shape['c'] + self.sigma = [2] * self.shape["c"] if not self.NA: self.immersionN = 1 elif 1.5 < self.NA: @@ -1317,35 +1497,51 @@ class AbstractReader(Imread, metaclass=ABCMeta): else: self.immersionN = 1 - p = re.compile(r'(\d+):(\d+)$') + p = re.compile(r"(\d+):(\d+)$") try: - 
self.track, self.detector = zip(*[[int(i) for i in p.findall(find( - self.ome.images[self.series].pixels.channels, id=f'Channel:{c}').detector_settings.id)[0]] - for c in range(self.shape['c'])]) + self.track, self.detector = zip( + *[ + [ + int(i) + for i in p.findall( + find(self.ome.images[self.series].pixels.channels, id=f"Channel:{c}").detector_settings.id + )[0] + ] + for c in range(self.shape["c"]) + ] + ) except Exception: # noqa pass def main() -> None: - parser = ArgumentParser(description='Display info and save as tif') - parser.add_argument('-v', '--version', action='version', version=__version__) - parser.add_argument('file', help='image_file', type=str, nargs='*') - parser.add_argument('-w', '--write', help='path to tif/movie out, {folder}, {name} and {ext} take this from file in', - type=str, default=None) - parser.add_argument('-o', '--extract_ome', help='extract ome to xml file', action='store_true') - parser.add_argument('-r', '--register', help='register channels', action='store_true') - parser.add_argument('-c', '--channel', help='channel', type=int, default=None) - parser.add_argument('-z', '--zslice', help='z-slice', type=int, default=None) - parser.add_argument('-t', '--time', help='time (frames) in python slicing notation', type=str, default=None) - parser.add_argument('-s', '--split', help='split channels', action='store_true') - parser.add_argument('-f', '--force', help='force overwrite', action='store_true') - parser.add_argument('-C', '--movie-colors', help='colors for channels in movie', type=str, nargs='*') - parser.add_argument('-B', '--movie-brightnesses', help='scale brightness of each channel', - type=float, nargs='*') - parser.add_argument('-S', '--movie-scale', help='upscale movie xy size, int', type=float) + parser = ArgumentParser(description="Display info and save as tif") + parser.add_argument("-v", "--version", action="version", version=__version__) + parser.add_argument("file", help="image_file", type=str, nargs="*") + parser.add_argument( + "-w", + "--write", + help="path to tif/movie out, {folder}, {name} and {ext} take this from file in", + type=str, + default=None, + ) + parser.add_argument("-o", "--extract_ome", help="extract ome to xml file", action="store_true") + parser.add_argument("-r", "--register", help="register channels", action="store_true") + parser.add_argument("-c", "--channel", help="channel", type=int, default=None) + parser.add_argument("-z", "--zslice", help="z-slice", type=int, default=None) + parser.add_argument("-t", "--time", help="time (frames) in python slicing notation", type=str, default=None) + parser.add_argument("-s", "--split", help="split channels", action="store_true") + parser.add_argument("-f", "--force", help="force overwrite", action="store_true") + parser.add_argument( + "--compression", help="compression when writing tiffs, zstd or deflate", type=str, default=None + ) + parser.add_argument("-C", "--movie-colors", help="colors for channels in movie", type=str, nargs="*") + parser.add_argument("-V", "--movie_speed", help="speed of move, default = 25 / 7", type=float, default=None) + parser.add_argument("-B", "--movie-brightnesses", help="scale brightness of each channel", type=float, nargs="*") + parser.add_argument("-S", "--movie-scale", help="upscale movie xy size, int", type=float) args = parser.parse_args() - for file in tqdm(args.file, desc='operating on files', disable=len(args.file) == 1): + for file in tqdm(args.file, desc="operating on files", disable=len(args.file) == 1): file = Path(file) with 
Imread(file) as im: # noqa if args.register: @@ -1354,14 +1550,31 @@ def main() -> None: write = Path(args.write.format(folder=str(file.parent), name=file.stem, ext=file.suffix)).absolute() # noqa write.parent.mkdir(parents=True, exist_ok=True) if write.exists() and not args.force: - print(f'File {args.write} exists already, add the -f flag if you want to overwrite it.') - elif write.suffix in ('.mkv', '.mp4'): - im.save_as_movie(write, args.channel, args.zslice, args.time, args.movie_colors, - args.movie_brightnesses, args.movie_scale, bar=len(args.file) == 1) + print(f"File {args.write} exists already, add the -f flag if you want to overwrite it.") + elif write.suffix in (".mkv", ".mp4", ".mov"): + im.save_as_movie( + write, + args.channel, + args.zslice, + args.time, + args.movie_colors, + args.movie_brightnesses, + args.movie_scale, + bar=len(args.file) == 1, + speed=args.movie_speed, + ) else: - im.save_as_tiff(write, args.channel, args.zslice, args.time, args.split, bar=len(args.file) == 1) + im.save_as_tiff( + write, + args.channel, + args.zslice, + args.time, + args.split, + bar=len(args.file) == 1, + compression=args.compression, + ) if args.extract_ome: - with open(im.path.with_suffix('.ome.xml'), 'w') as f: + with open(im.path.with_suffix(".ome.xml"), "w") as f: f.write(im.ome.to_xml()) if len(args.file) == 1: print(im.summary) diff --git a/ndbioimage/jvm.py b/ndbioimage/jvm.py index 777f324..20a09e8 100644 --- a/ndbioimage/jvm.py +++ b/ndbioimage/jvm.py @@ -7,10 +7,12 @@ class JVMException(Exception): try: + class JVM: - """ There can be only one java virtual machine per python process, - so this is a singleton class to manage the jvm. + """There can be only one java virtual machine per python process, + so this is a singleton class to manage the jvm. """ + _instance = None vm_started = False vm_killed = False @@ -24,7 +26,7 @@ try: def __init__(self, jars=None): if not self.vm_started and not self.vm_killed: try: - jar_path = Path(__file__).parent / 'jars' + jar_path = Path(__file__).parent / "jars" if jars is None: jars = {} for jar, src in jars.items(): @@ -33,6 +35,7 @@ try: classpath = [str(jar_path / jar) for jar in jars.keys()] import jpype + jpype.startJVM(classpath=classpath) except Exception: # noqa self.vm_started = False @@ -56,11 +59,11 @@ try: pass if self.vm_killed: - raise Exception('The JVM was killed before, and cannot be restarted in this Python process.') + raise Exception("The JVM was killed before, and cannot be restarted in this Python process.") @staticmethod def download(src, dest): - print(f'Downloading {dest.name} to {dest}.') + print(f"Downloading {dest.name} to {dest}.") dest.parent.mkdir(exist_ok=True) dest.write_bytes(request.urlopen(src).read()) @@ -69,6 +72,7 @@ try: self = cls._instance if self is not None and self.vm_started and not self.vm_killed: import jpype + jpype.shutdownJVM() # noqa self.vm_started = False self.vm_killed = True diff --git a/ndbioimage/readers/__init__.py b/ndbioimage/readers/__init__.py index d4ca3bc..cad02aa 100644 --- a/ndbioimage/readers/__init__.py +++ b/ndbioimage/readers/__init__.py @@ -1 +1,6 @@ -__all__ = 'bfread', 'cziread', 'fijiread', 'ndread', 'seqread', 'tifread', 'metaseriesread' +from .. 
import JVM + +if JVM is None: + __all__ = "cziread", "fijiread", "ndread", "seqread", "tifread", "metaseriesread" +else: + __all__ = "bfread", "cziread", "fijiread", "ndread", "seqread", "tifread", "metaseriesread" diff --git a/ndbioimage/readers/bfread.py b/ndbioimage/readers/bfread.py index 1bf6188..b8bcca7 100644 --- a/ndbioimage/readers/bfread.py +++ b/ndbioimage/readers/bfread.py @@ -8,13 +8,15 @@ import numpy as np from .. import JVM, AbstractReader, JVMException -jars = {'bioformats_package.jar': 'https://downloads.openmicroscopy.org/bio-formats/latest/artifacts/' - 'bioformats_package.jar'} +jars = { + "bioformats_package.jar": "https://downloads.openmicroscopy.org/bio-formats/latest/artifacts/" + "bioformats_package.jar" +} class JVMReader: def __init__(self, path: Path, series: int) -> None: - mp = multiprocessing.get_context('spawn') + mp = multiprocessing.get_context("spawn") self.path = path self.series = series self.queue_in = mp.Queue() @@ -23,7 +25,7 @@ class JVMReader: self.process = mp.Process(target=self.run) self.process.start() status, message = self.queue_out.get() - if status == 'status' and message == 'started': + if status == "status" and message == "started": self.is_alive = True else: raise JVMException(message) @@ -45,14 +47,14 @@ class JVMReader: def frame(self, c: int, z: int, t: int) -> np.ndarray: self.queue_in.put((c, z, t)) status, message = self.queue_out.get() - if status == 'frame': + if status == "frame": return message else: raise JVMException(message) def run(self) -> None: - """ Read planes from the image reader file. - adapted from python-bioformats/bioformats/formatreader.py + """Read planes from the image reader file. + adapted from python-bioformats/bioformats/formatreader.py """ jvm = None try: @@ -74,20 +76,20 @@ class JVMReader: elif pixel_type == jvm.format_tools.UINT8: dtype = np.uint8 elif pixel_type == jvm.format_tools.UINT16: - dtype = 'u2' + dtype = "u2" elif pixel_type == jvm.format_tools.INT16: - dtype = 'i2' + dtype = "i2" elif pixel_type == jvm.format_tools.UINT32: - dtype = 'u4' + dtype = "u4" elif pixel_type == jvm.format_tools.INT32: - dtype = 'i4' + dtype = "i4" elif pixel_type == jvm.format_tools.FLOAT: - dtype = 'f4' + dtype = "f4" elif pixel_type == jvm.format_tools.DOUBLE: - dtype = 'f8' + dtype = "f8" else: dtype = None - self.queue_out.put(('status', 'started')) + self.queue_out.put(("status", "started")) while not self.done.is_set(): try: @@ -116,8 +118,10 @@ class JVMReader: image.shape = (height, width, 3) del rdr elif reader.getSizeC() > 1: - images = [np.frombuffer(open_bytes_func(reader.getIndex(z, i, t)), dtype) - for i in range(reader.getSizeC())] + images = [ + np.frombuffer(open_bytes_func(reader.getIndex(z, i, t)), dtype) + for i in range(reader.getSizeC()) + ] image = np.dstack(images) image.shape = (height, width, reader.getSizeC()) # if not channel_names is None: @@ -161,13 +165,13 @@ class JVMReader: image.shape = (height, width) if image.ndim == 3: - self.queue_out.put(('frame', image[..., c])) + self.queue_out.put(("frame", image[..., c])) else: - self.queue_out.put(('frame', image)) + self.queue_out.put(("frame", image)) except queues.Empty: # noqa continue except (Exception,): - self.queue_out.put(('error', format_exc())) + self.queue_out.put(("error", format_exc())) finally: if jvm is not None: jvm.kill_vm() @@ -186,16 +190,17 @@ def can_open(path: Path) -> bool: class Reader(AbstractReader, ABC): - """ This class is used as a last resort, when we don't have another way to open the file. 
We don't like it - because it requires the java vm. + """This class is used as a last resort, when we don't have another way to open the file. We don't like it + because it requires the java vm. """ + priority = 99 # panic and open with BioFormats - do_not_pickle = 'reader', 'key', 'jvm' + do_not_pickle = "reader", "key", "jvm" @staticmethod def _can_open(path: Path) -> bool: - """ Use java BioFormats to make an ome metadata structure. """ - with multiprocessing.get_context('spawn').Pool(1) as pool: + """Use java BioFormats to make an ome metadata structure.""" + with multiprocessing.get_context("spawn").Pool(1) as pool: return pool.map(can_open, (path,))[0] def open(self) -> None: diff --git a/ndbioimage/readers/cziread.py b/ndbioimage/readers/cziread.py index c345d7b..97c7889 100644 --- a/ndbioimage/readers/cziread.py +++ b/ndbioimage/readers/cziread.py @@ -26,11 +26,12 @@ except ImportError: zoom = None -Element = TypeVar('Element') +Element = TypeVar("Element") def zstd_decode(data: bytes) -> bytes: # noqa - """ decode zstd bytes, copied from BioFormats ZeissCZIReader """ + """decode zstd bytes, copied from BioFormats ZeissCZIReader""" + def read_var_int(stream: BytesIO) -> int: # noqa a = stream.read(1)[0] if a & 128: @@ -51,7 +52,7 @@ def zstd_decode(data: bytes) -> bytes: # noqa if chunk_id == 1: high_low_unpacking = (stream.read(1)[0] & 1) == 1 else: - raise ValueError(f'Invalid chunk id: {chunk_id}') + raise ValueError(f"Invalid chunk id: {chunk_id}") pointer = stream.tell() except Exception: # noqa high_low_unpacking = False @@ -112,8 +113,7 @@ def data(self, raw: bool = False, resize: bool = True, order: int = 0) -> np.nda # sub / supersampling factors = [j / i for i, j in zip(de.stored_shape, de.shape)] - factors = [(int(round(f)) if abs(f - round(f)) < 0.0001 else f) - for f in factors] + factors = [(int(round(f)) if abs(f - round(f)) < 0.0001 else f) for f in factors] # use repeat if possible if order == 0 and all(isinstance(f, int) for f in factors): @@ -154,27 +154,27 @@ czifile.czifile.SubBlockSegment.data = data class Reader(AbstractReader, ABC): priority = 0 - do_not_pickle = 'reader', 'filedict' + do_not_pickle = "reader", "filedict" @staticmethod def _can_open(path: Path) -> bool: - return isinstance(path, Path) and path.suffix == '.czi' + return isinstance(path, Path) and path.suffix == ".czi" def open(self) -> None: self.reader = czifile.CziFile(self.path) filedict = {} for directory_entry in self.reader.filtered_subblock_directory: idx = self.get_index(directory_entry, self.reader.start) - if 'S' not in self.reader.axes or self.series in range(*idx[self.reader.axes.index('S')]): - for c in range(*idx[self.reader.axes.index('C')]): - for z in range(*idx[self.reader.axes.index('Z')]): - for t in range(*idx[self.reader.axes.index('T')]): + if "S" not in self.reader.axes or self.series in range(*idx[self.reader.axes.index("S")]): + for c in range(*idx[self.reader.axes.index("C")]): + for z in range(*idx[self.reader.axes.index("Z")]): + for t in range(*idx[self.reader.axes.index("T")]): if (c, z, t) in filedict: filedict[c, z, t].append(directory_entry) else: filedict[c, z, t] = [directory_entry] if len(filedict) == 0: - raise FileNotFoundError(f'Series {self.series} not found in {self.path}.') + raise FileNotFoundError(f"Series {self.series} not found in {self.path}.") self.filedict = filedict # noqa def close(self) -> None: @@ -184,19 +184,21 @@ class Reader(AbstractReader, ABC): return OmeParse.get_ome(self.reader, self.filedict) def __frame__(self, c: int = 0, z: int 
= 0, t: int = 0) -> np.ndarray: - f = np.zeros(self.base_shape['yx'], self.dtype) + f = np.zeros(self.base_shape["yx"], self.dtype) if (c, z, t) in self.filedict: directory_entries = self.filedict[c, z, t] - x_min = min([f.start[f.axes.index('X')] for f in directory_entries]) - y_min = min([f.start[f.axes.index('Y')] for f in directory_entries]) - xy_min = {'X': x_min, 'Y': y_min} + x_min = min([f.start[f.axes.index("X")] for f in directory_entries]) + y_min = min([f.start[f.axes.index("Y")] for f in directory_entries]) + xy_min = {"X": x_min, "Y": y_min} for directory_entry in directory_entries: subblock = directory_entry.data_segment() tile = subblock.data(resize=True, order=0) axes_min = [xy_min.get(ax, 0) for ax in directory_entry.axes] - index = [slice(i - j - m, i - j + k) - for i, j, k, m in zip(directory_entry.start, self.reader.start, tile.shape, axes_min)] - index = tuple(index[self.reader.axes.index(i)] for i in 'YX') + index = [ + slice(i - j - m, i - j + k) + for i, j, k, m in zip(directory_entry.start, self.reader.start, tile.shape, axes_min) + ] + index = tuple(index[self.reader.axes.index(i)] for i in "YX") f[index] = tile.squeeze() return f @@ -225,28 +227,27 @@ class OmeParse: self.reader = reader self.filedict = filedict xml = reader.metadata() - self.attachments = {i.attachment_entry.name: i.attachment_entry.data_segment() - for i in reader.attachments()} + self.attachments = {i.attachment_entry.name: i.attachment_entry.data_segment() for i in reader.attachments()} self.tree = etree.fromstring(xml) - self.metadata = self.tree.find('Metadata') - version = self.metadata.find('Version') + self.metadata = self.tree.find("Metadata") + version = self.metadata.find("Version") if version is not None: self.version = version.text else: - self.version = self.metadata.find('Experiment').attrib['Version'] + self.version = self.metadata.find("Experiment").attrib["Version"] self.ome = OME() - self.information = self.metadata.find('Information') - self.display_setting = self.metadata.find('DisplaySetting') - self.experiment = self.metadata.find('Experiment') - self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock') - self.instrument = self.information.find('Instrument') - self.image = self.information.find('Image') + self.information = self.metadata.find("Information") + self.display_setting = self.metadata.find("DisplaySetting") + self.experiment = self.metadata.find("Experiment") + self.acquisition_block = self.experiment.find("ExperimentBlocks").find("AcquisitionBlock") + self.instrument = self.information.find("Instrument") + self.image = self.information.find("Image") - if self.version == '1.0': - self.experiment = self.metadata.find('Experiment') - self.acquisition_block = self.experiment.find('ExperimentBlocks').find('AcquisitionBlock') - self.multi_track_setup = self.acquisition_block.find('MultiTrackSetup') + if self.version == "1.0": + self.experiment = self.metadata.find("Experiment") + self.acquisition_block = self.experiment.find("ExperimentBlocks").find("AcquisitionBlock") + self.multi_track_setup = self.acquisition_block.find("MultiTrackSetup") else: self.experiment = None self.acquisition_block = None @@ -281,326 +282,396 @@ class OmeParse: return default def get_experimenters(self) -> None: - if self.version == '1.0': + if self.version == "1.0": self.ome.experimenters = [ - model.Experimenter(id='Experimenter:0', - user_name=self.information.find('User').find('DisplayName').text)] - elif self.version in ('1.1', '1.2'): + 
model.Experimenter( + id="Experimenter:0", user_name=self.information.find("User").find("DisplayName").text + ) + ] + elif self.version in ("1.1", "1.2"): self.ome.experimenters = [ - model.Experimenter(id='Experimenter:0', - user_name=self.information.find('Document').find('UserName').text)] + model.Experimenter( + id="Experimenter:0", user_name=self.information.find("Document").find("UserName").text + ) + ] def get_instruments(self) -> None: - if self.version == '1.0': - self.ome.instruments.append(model.Instrument(id=self.instrument.attrib['Id'])) - elif self.version in ('1.1', '1.2'): - for _ in self.instrument.find('Microscopes'): - self.ome.instruments.append(model.Instrument(id='Instrument:0')) + if self.version == "1.0": + self.ome.instruments.append(model.Instrument(id=self.instrument.attrib["Id"])) + elif self.version in ("1.1", "1.2"): + for _ in self.instrument.find("Microscopes"): + self.ome.instruments.append(model.Instrument(id="Instrument:0")) def get_detectors(self) -> None: - if self.version == '1.0': - for detector in self.instrument.find('Detectors'): + if self.version == "1.0": + for detector in self.instrument.find("Detectors"): try: - detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "") + detector_type = model.Detector_Type(self.text(detector.find("Type")).upper() or "") except ValueError: detector_type = model.Detector_Type.OTHER self.ome.instruments[0].detectors.append( model.Detector( - id=detector.attrib['Id'], model=self.text(detector.find('Manufacturer').find('Model')), - amplification_gain=float(self.text(detector.find('AmplificationGain'))), - gain=float(self.text(detector.find('Gain'))), zoom=float(self.text(detector.find('Zoom'))), - type=detector_type - )) - elif self.version in ('1.1', '1.2'): - for detector in self.instrument.find('Detectors'): + id=detector.attrib["Id"], + model=self.text(detector.find("Manufacturer").find("Model")), + amplification_gain=float(self.text(detector.find("AmplificationGain"))), + gain=float(self.text(detector.find("Gain"))), + zoom=float(self.text(detector.find("Zoom"))), + type=detector_type, + ) + ) + elif self.version in ("1.1", "1.2"): + for detector in self.instrument.find("Detectors"): try: - detector_type = model.Detector_Type(self.text(detector.find('Type')).upper() or "") + detector_type = model.Detector_Type(self.text(detector.find("Type")).upper() or "") except ValueError: detector_type = model.Detector_Type.OTHER self.ome.instruments[0].detectors.append( model.Detector( - id=detector.attrib['Id'].replace(' ', ''), - model=self.text(detector.find('Manufacturer').find('Model')), - type=detector_type - )) + id=detector.attrib["Id"].replace(" ", ""), + model=self.text(detector.find("Manufacturer").find("Model")), + type=detector_type, + ) + ) def get_objectives(self) -> None: - for objective in self.instrument.find('Objectives'): + for objective in self.instrument.find("Objectives"): self.ome.instruments[0].objectives.append( model.Objective( - id=objective.attrib['Id'], - model=self.text(objective.find('Manufacturer').find('Model')), - immersion=self.text(objective.find('Immersion')), # type: ignore - lens_na=float(self.text(objective.find('LensNA'))), - nominal_magnification=float(self.text(objective.find('NominalMagnification'))))) + id=objective.attrib["Id"], + model=self.text(objective.find("Manufacturer").find("Model")), + immersion=self.text(objective.find("Immersion")), # type: ignore + lens_na=float(self.text(objective.find("LensNA"))), + 
nominal_magnification=float(self.text(objective.find("NominalMagnification"))), + ) + ) def get_tubelenses(self) -> None: - if self.version == '1.0': - for idx, tube_lens in enumerate({self.text(track_setup.find('TubeLensPosition')) - for track_setup in self.multi_track_setup}): + if self.version == "1.0": + for idx, tube_lens in enumerate( + {self.text(track_setup.find("TubeLensPosition")) for track_setup in self.multi_track_setup} + ): try: - nominal_magnification = float(re.findall(r'\d+[,.]\d*', tube_lens)[0].replace(',', '.')) + nominal_magnification = float(re.findall(r"\d+[,.]\d*", tube_lens)[0].replace(",", ".")) except Exception: # noqa nominal_magnification = 1.0 self.ome.instruments[0].objectives.append( - model.Objective(id=f'Objective:Tubelens:{idx}', model=tube_lens, - nominal_magnification=nominal_magnification)) - elif self.version in ('1.1', '1.2'): - for tubelens in self.def_list(self.instrument.find('TubeLenses')): + model.Objective( + id=f"Objective:Tubelens:{idx}", model=tube_lens, nominal_magnification=nominal_magnification + ) + ) + elif self.version in ("1.1", "1.2"): + for tubelens in self.def_list(self.instrument.find("TubeLenses")): try: - nominal_magnification = float(re.findall(r'\d+(?:[,.]\d*)?', - tubelens.attrib['Name'])[0].replace(',', '.')) + nominal_magnification = float( + re.findall(r"\d+(?:[,.]\d*)?", tubelens.attrib["Name"])[0].replace(",", ".") + ) except Exception: # noqa nominal_magnification = 1.0 self.ome.instruments[0].objectives.append( model.Objective( id=f"Objective:{tubelens.attrib['Id']}", - model=tubelens.attrib['Name'], - nominal_magnification=nominal_magnification)) + model=tubelens.attrib["Name"], + nominal_magnification=nominal_magnification, + ) + ) def get_light_sources(self) -> None: - if self.version == '1.0': - for light_source in self.def_list(self.instrument.find('LightSources')): + if self.version == "1.0": + for light_source in self.def_list(self.instrument.find("LightSources")): try: - if light_source.find('LightSourceType').find('Laser') is not None: + if light_source.find("LightSourceType").find("Laser") is not None: self.ome.instruments[0].lasers.append( model.Laser( - id=light_source.attrib['Id'], - model=self.text(light_source.find('Manufacturer').find('Model')), - power=float(self.text(light_source.find('Power'))), + id=light_source.attrib["Id"], + model=self.text(light_source.find("Manufacturer").find("Model")), + power=float(self.text(light_source.find("Power"))), wavelength=float( - self.text(light_source.find('LightSourceType').find('Laser').find('Wavelength'))))) + self.text(light_source.find("LightSourceType").find("Laser").find("Wavelength")) + ), + ) + ) except AttributeError: pass - elif self.version in ('1.1', '1.2'): - for light_source in self.def_list(self.instrument.find('LightSources')): + elif self.version in ("1.1", "1.2"): + for light_source in self.def_list(self.instrument.find("LightSources")): try: - if light_source.find('LightSourceType').find('Laser') is not None: + if light_source.find("LightSourceType").find("Laser") is not None: self.ome.instruments[0].lasers.append( model.Laser( id=f"LightSource:{light_source.attrib['Id']}", - power=float(self.text(light_source.find('Power'))), - wavelength=float(light_source.attrib['Id'][-3:]))) # TODO: follow Id reference + power=float(self.text(light_source.find("Power"))), + wavelength=float(light_source.attrib["Id"][-3:]), + ) + ) # TODO: follow Id reference except (AttributeError, ValueError): pass def get_filters(self) -> None: - if self.version == 
'1.0': - for idx, filter_ in enumerate({self.text(beam_splitter.find('Filter')) - for track_setup in self.multi_track_setup - for beam_splitter in track_setup.find('BeamSplitters')}): - self.ome.instruments[0].filter_sets.append( - model.FilterSet(id=f'FilterSet:{idx}', model=filter_) - ) + if self.version == "1.0": + for idx, filter_ in enumerate( + { + self.text(beam_splitter.find("Filter")) + for track_setup in self.multi_track_setup + for beam_splitter in track_setup.find("BeamSplitters") + } + ): + self.ome.instruments[0].filter_sets.append(model.FilterSet(id=f"FilterSet:{idx}", model=filter_)) def get_pixels(self) -> None: - x_min = min([f.start[f.axes.index('X')] for f in self.filedict[0, 0, 0]]) - y_min = min([f.start[f.axes.index('Y')] for f in self.filedict[0, 0, 0]]) - x_max = max([f.start[f.axes.index('X')] + f.shape[f.axes.index('X')] for f in self.filedict[0, 0, 0]]) - y_max = max([f.start[f.axes.index('Y')] + f.shape[f.axes.index('Y')] for f in self.filedict[0, 0, 0]]) + x_min = min([f.start[f.axes.index("X")] for f in self.filedict[0, 0, 0]]) + y_min = min([f.start[f.axes.index("Y")] for f in self.filedict[0, 0, 0]]) + x_max = max([f.start[f.axes.index("X")] + f.shape[f.axes.index("X")] for f in self.filedict[0, 0, 0]]) + y_max = max([f.start[f.axes.index("Y")] + f.shape[f.axes.index("Y")] for f in self.filedict[0, 0, 0]]) self.size_x = x_max - x_min self.size_y = y_max - y_min - self.size_c, self.size_z, self.size_t = (self.reader.shape[self.reader.axes.index(directory_entry)] - for directory_entry in 'CZT') - image = self.information.find('Image') - pixel_type = self.text(image.find('PixelType'), 'Gray16') - if pixel_type.startswith('Gray'): - pixel_type = 'uint' + pixel_type[4:] - objective_settings = image.find('ObjectiveSettings') + self.size_c, self.size_z, self.size_t = ( + self.reader.shape[self.reader.axes.index(directory_entry)] for directory_entry in "CZT" + ) + image = self.information.find("Image") + pixel_type = self.text(image.find("PixelType"), "Gray16") + if pixel_type.startswith("Gray"): + pixel_type = "uint" + pixel_type[4:] + objective_settings = image.find("ObjectiveSettings") self.ome.images.append( model.Image( - id='Image:0', + id="Image:0", name=f"{self.text(self.information.find('Document').find('Name'))} #1", pixels=model.Pixels( - id='Pixels:0', size_x=self.size_x, size_y=self.size_y, - size_c=self.size_c, size_z=self.size_z, size_t=self.size_t, - dimension_order='XYCZT', type=pixel_type, # type: ignore - significant_bits=int(self.text(image.find('ComponentBitCount'))), - big_endian=False, interleaved=False, metadata_only=True), # type: ignore - experimenter_ref=model.ExperimenterRef(id='Experimenter:0'), - instrument_ref=model.InstrumentRef(id='Instrument:0'), + id="Pixels:0", + size_x=self.size_x, + size_y=self.size_y, + size_c=self.size_c, + size_z=self.size_z, + size_t=self.size_t, + dimension_order="XYCZT", + type=pixel_type, # type: ignore + significant_bits=int(self.text(image.find("ComponentBitCount"))), + big_endian=False, + interleaved=False, + metadata_only=True, + ), # type: ignore + experimenter_ref=model.ExperimenterRef(id="Experimenter:0"), + instrument_ref=model.InstrumentRef(id="Instrument:0"), objective_settings=model.ObjectiveSettings( - id=objective_settings.find('ObjectiveRef').attrib['Id'], - medium=self.text(objective_settings.find('Medium')), # type: ignore - refractive_index=float(self.text(objective_settings.find('RefractiveIndex')))), + id=objective_settings.find("ObjectiveRef").attrib["Id"], + 
medium=self.text(objective_settings.find("Medium")), # type: ignore + refractive_index=float(self.text(objective_settings.find("RefractiveIndex"))), + ), stage_label=model.StageLabel( - name=f'Scene position #0', - x=self.positions[0], x_unit=self.um, - y=self.positions[1], y_unit=self.um, - z=self.positions[2], z_unit=self.um))) + name=f"Scene position #0", + x=self.positions[0], + x_unit=self.um, + y=self.positions[1], + y_unit=self.um, + z=self.positions[2], + z_unit=self.um, + ), + ) + ) - for distance in self.metadata.find('Scaling').find('Items'): - if distance.attrib['Id'] == 'X': - self.ome.images[0].pixels.physical_size_x = float(self.text(distance.find('Value'))) * 1e6 - elif distance.attrib['Id'] == 'Y': - self.ome.images[0].pixels.physical_size_y = float(self.text(distance.find('Value'))) * 1e6 - elif self.size_z > 1 and distance.attrib['Id'] == 'Z': - self.ome.images[0].pixels.physical_size_z = float(self.text(distance.find('Value'))) * 1e6 + for distance in self.metadata.find("Scaling").find("Items"): + if distance.attrib["Id"] == "X": + self.ome.images[0].pixels.physical_size_x = float(self.text(distance.find("Value"))) * 1e6 + elif distance.attrib["Id"] == "Y": + self.ome.images[0].pixels.physical_size_y = float(self.text(distance.find("Value"))) * 1e6 + elif self.size_z > 1 and distance.attrib["Id"] == "Z": + self.ome.images[0].pixels.physical_size_z = float(self.text(distance.find("Value"))) * 1e6 @cached_property def positions(self) -> tuple[float, float, Optional[float]]: - if self.version == '1.0': - scenes = self.image.find('Dimensions').find('S').find('Scenes') - positions = scenes[0].find('Positions')[0] - return float(positions.attrib['X']), float(positions.attrib['Y']), float(positions.attrib['Z']) - elif self.version in ('1.1', '1.2'): + if self.version == "1.0": + scenes = self.image.find("Dimensions").find("S").find("Scenes") + positions = scenes[0].find("Positions")[0] + return float(positions.attrib["X"]), float(positions.attrib["Y"]), float(positions.attrib["Z"]) + elif self.version in ("1.1", "1.2"): try: # TODO - scenes = self.image.find('Dimensions').find('S').find('Scenes') - center_position = [float(pos) for pos in self.text(scenes[0].find('CenterPosition')).split(',')] + scenes = self.image.find("Dimensions").find("S").find("Scenes") + center_position = [float(pos) for pos in self.text(scenes[0].find("CenterPosition")).split(",")] except AttributeError: center_position = [0, 0] return center_position[0], center_position[1], None @cached_property def channels_im(self) -> dict: - return {channel.attrib['Id']: channel for channel in self.image.find('Dimensions').find('Channels')} + return {channel.attrib["Id"]: channel for channel in self.image.find("Dimensions").find("Channels")} @cached_property def channels_ds(self) -> dict: - return {channel.attrib['Id']: channel for channel in self.display_setting.find('Channels')} + return {channel.attrib["Id"]: channel for channel in self.display_setting.find("Channels")} @cached_property def channels_ts(self) -> dict: - return {detector.attrib['Id']: track_setup - for track_setup in - self.experiment.find('ExperimentBlocks').find('AcquisitionBlock').find('MultiTrackSetup') - for detector in track_setup.find('Detectors')} + return { + detector.attrib["Id"]: track_setup + for track_setup in self.experiment.find("ExperimentBlocks") + .find("AcquisitionBlock") + .find("MultiTrackSetup") + for detector in track_setup.find("Detectors") + } def get_channels(self) -> None: - if self.version == '1.0': + if self.version == 
"1.0": for idx, (key, channel) in enumerate(self.channels_im.items()): - detector_settings = channel.find('DetectorSettings') - laser_scan_info = channel.find('LaserScanInfo') - detector = detector_settings.find('Detector') + detector_settings = channel.find("DetectorSettings") + laser_scan_info = channel.find("LaserScanInfo") + detector = detector_settings.find("Detector") try: - binning = model.Binning(self.text(detector_settings.find('Binning'))) + binning = model.Binning(self.text(detector_settings.find("Binning"))) except ValueError: binning = model.Binning.OTHER - filterset = self.text(self.channels_ts[key].find('BeamSplitters')[0].find('Filter')) + filterset = self.text(self.channels_ts[key].find("BeamSplitters")[0].find("Filter")) filterset_idx = [filterset.model for filterset in self.ome.instruments[0].filter_sets].index(filterset) - light_sources_settings = channel.find('LightSourcesSettings') + light_sources_settings = channel.find("LightSourcesSettings") # no space in ome for multiple lightsources simultaneously if len(light_sources_settings) > idx: light_source_settings = light_sources_settings[idx] else: light_source_settings = light_sources_settings[0] light_source_settings = model.LightSourceSettings( - id=light_source_settings.find('LightSource').attrib['Id'], - attenuation=float(self.text(light_source_settings.find('Attenuation'))), - wavelength=float(self.text(light_source_settings.find('Wavelength'))), - wavelength_unit=self.nm) + id=light_source_settings.find("LightSource").attrib["Id"], + attenuation=float(self.text(light_source_settings.find("Attenuation"))), + wavelength=float(self.text(light_source_settings.find("Wavelength"))), + wavelength_unit=self.nm, + ) self.ome.images[0].pixels.channels.append( model.Channel( - id=f'Channel:{idx}', - name=channel.attrib['Name'], - acquisition_mode=self.text(channel.find('AcquisitionMode')), # type: ignore - color=model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')), - detector_settings=model.DetectorSettings(id=detector.attrib['Id'], binning=binning), + id=f"Channel:{idx}", + name=channel.attrib["Name"], + acquisition_mode=self.text(channel.find("AcquisitionMode")), # type: ignore + color=model.Color(self.text(self.channels_ds[channel.attrib["Id"]].find("Color"), "white")), + detector_settings=model.DetectorSettings(id=detector.attrib["Id"], binning=binning), # emission_wavelength=text(channel.find('EmissionWavelength')), # TODO: fix excitation_wavelength=light_source_settings.wavelength, filter_set_ref=model.FilterSetRef(id=self.ome.instruments[0].filter_sets[filterset_idx].id), - illumination_type=self.text(channel.find('IlluminationType')), # type: ignore + illumination_type=self.text(channel.find("IlluminationType")), # type: ignore light_source_settings=light_source_settings, - samples_per_pixel=int(self.text(laser_scan_info.find('Averaging'))))) - elif self.version in ('1.1', '1.2'): + samples_per_pixel=int(self.text(laser_scan_info.find("Averaging"))), + ) + ) + elif self.version in ("1.1", "1.2"): for idx, (key, channel) in enumerate(self.channels_im.items()): - detector_settings = channel.find('DetectorSettings') - laser_scan_info = channel.find('LaserScanInfo') - detector = detector_settings.find('Detector') + detector_settings = channel.find("DetectorSettings") + laser_scan_info = channel.find("LaserScanInfo") + detector = detector_settings.find("Detector") try: - color = model.Color(self.text(self.channels_ds[channel.attrib['Id']].find('Color'), 'white')) + color = 
model.Color(self.text(self.channels_ds[channel.attrib["Id"]].find("Color"), "white")) except Exception: # noqa color = None try: - if (i := self.text(channel.find('EmissionWavelength'))) != '0': + if (i := self.text(channel.find("EmissionWavelength"))) != "0": emission_wavelength = float(i) else: emission_wavelength = None except Exception: # noqa emission_wavelength = None if laser_scan_info is not None: - samples_per_pixel = int(self.text(laser_scan_info.find('Averaging'), '1')) + samples_per_pixel = int(self.text(laser_scan_info.find("Averaging"), "1")) else: samples_per_pixel = 1 try: - binning = model.Binning(self.text(detector_settings.find('Binning'))) + binning = model.Binning(self.text(detector_settings.find("Binning"))) except ValueError: binning = model.Binning.OTHER - light_sources_settings = channel.find('LightSourcesSettings') + light_sources_settings = channel.find("LightSourcesSettings") # no space in ome for multiple lightsources simultaneously if light_sources_settings is not None: light_source_settings = light_sources_settings[0] light_source_settings = model.LightSourceSettings( - id='LightSource:' + '_'.join([light_source_settings.find('LightSource').attrib['Id'] - for light_source_settings in light_sources_settings]), - attenuation=self.try_default(float, None, self.text(light_source_settings.find('Attenuation'))), - wavelength=self.try_default(float, None, self.text(light_source_settings.find('Wavelength'))), - wavelength_unit=self.nm) + id="LightSource:" + + "_".join( + [ + light_source_settings.find("LightSource").attrib["Id"] + for light_source_settings in light_sources_settings + ] + ), + attenuation=self.try_default( + float, None, self.text(light_source_settings.find("Attenuation")) + ), + wavelength=self.try_default(float, None, self.text(light_source_settings.find("Wavelength"))), + wavelength_unit=self.nm, + ) else: light_source_settings = None self.ome.images[0].pixels.channels.append( model.Channel( - id=f'Channel:{idx}', - name=channel.attrib['Name'], - acquisition_mode=self.text(channel.find('AcquisitionMode')).replace( # type: ignore - 'SingleMoleculeLocalisation', 'SingleMoleculeImaging'), + id=f"Channel:{idx}", + name=channel.attrib["Name"], + acquisition_mode=self.text(channel.find("AcquisitionMode")).replace( # type: ignore + "SingleMoleculeLocalisation", "SingleMoleculeImaging" + ), color=color, detector_settings=model.DetectorSettings( - id=detector.attrib['Id'].replace(' ', ""), - binning=binning), + id=detector.attrib["Id"].replace(" ", ""), binning=binning + ), emission_wavelength=emission_wavelength, - excitation_wavelength=self.try_default(float, None, - self.text(channel.find('ExcitationWavelength'))), + excitation_wavelength=self.try_default( + float, None, self.text(channel.find("ExcitationWavelength")) + ), # filter_set_ref=model.FilterSetRef(id=ome.instruments[0].filter_sets[filterset_idx].id), - illumination_type=self.text(channel.find('IlluminationType')), # type: ignore + illumination_type=self.text(channel.find("IlluminationType")), # type: ignore light_source_settings=light_source_settings, - samples_per_pixel=samples_per_pixel)) + samples_per_pixel=samples_per_pixel, + ) + ) def get_planes(self) -> None: try: - exposure_times = [float(self.text(channel.find('LaserScanInfo').find('FrameTime'))) - for channel in self.channels_im.values()] + exposure_times = [ + float(self.text(channel.find("LaserScanInfo").find("FrameTime"))) + for channel in self.channels_im.values() + ] except Exception: # noqa exposure_times = [None] * 
len(self.channels_im) - delta_ts = self.attachments['TimeStamps'].data() + delta_ts = self.attachments["TimeStamps"].data() dt = np.diff(delta_ts) if len(dt) and np.std(dt) / np.mean(dt) > 0.02: dt = np.median(dt[dt > 0]) delta_ts = dt * np.arange(len(delta_ts)) - warnings.warn(f'delta_t is inconsistent, using median value: {dt}') + warnings.warn(f"delta_t is inconsistent, using median value: {dt}") for t, z, c in product(range(self.size_t), range(self.size_z), range(self.size_c)): self.ome.images[0].pixels.planes.append( - model.Plane(the_c=c, the_z=z, the_t=t, delta_t=delta_ts[t], - exposure_time=exposure_times[c], - position_x=self.positions[0], position_x_unit=self.um, - position_y=self.positions[1], position_y_unit=self.um, - position_z=self.positions[2], position_z_unit=self.um)) + model.Plane( + the_c=c, + the_z=z, + the_t=t, + delta_t=delta_ts[t], + exposure_time=exposure_times[c], + position_x=self.positions[0], + position_x_unit=self.um, + position_y=self.positions[1], + position_y_unit=self.um, + position_z=self.positions[2], + position_z_unit=self.um, + ) + ) def get_annotations(self) -> None: idx = 0 - for layer in [] if (ml := self.metadata.find('Layers')) is None else ml: - rectangle = layer.find('Elements').find('Rectangle') + for layer in [] if (ml := self.metadata.find("Layers")) is None else ml: + rectangle = layer.find("Elements").find("Rectangle") if rectangle is not None: - geometry = rectangle.find('Geometry') - roi = model.ROI(id=f'ROI:{idx}', description=self.text(layer.find('Usage'))) + geometry = rectangle.find("Geometry") + roi = model.ROI(id=f"ROI:{idx}", description=self.text(layer.find("Usage"))) roi.union.append( model.Rectangle( - id='Shape:0:0', - height=float(self.text(geometry.find('Height'))), - width=float(self.text(geometry.find('Width'))), - x=float(self.text(geometry.find('Left'))), - y=float(self.text(geometry.find('Top'))))) + id="Shape:0:0", + height=float(self.text(geometry.find("Height"))), + width=float(self.text(geometry.find("Width"))), + x=float(self.text(geometry.find("Left"))), + y=float(self.text(geometry.find("Top"))), + ) + ) self.ome.rois.append(roi) - self.ome.images[0].roi_refs.append(model.ROIRef(id=f'ROI:{idx}')) + self.ome.images[0].roi_refs.append(model.ROIRef(id=f"ROI:{idx}")) idx += 1 diff --git a/ndbioimage/readers/fijiread.py b/ndbioimage/readers/fijiread.py index f11c0e7..c939b7b 100644 --- a/ndbioimage/readers/fijiread.py +++ b/ndbioimage/readers/fijiread.py @@ -12,13 +12,14 @@ from .. import AbstractReader class Reader(AbstractReader, ABC): - """ Can read some tif files written with Fiji which are broken because Fiji didn't finish writing. 
""" + """Can read some tif files written with Fiji which are broken because Fiji didn't finish writing.""" + priority = 90 - do_not_pickle = 'reader' + do_not_pickle = "reader" @staticmethod def _can_open(path): - if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'): + if isinstance(path, Path) and path.suffix in (".tif", ".tiff"): with TiffFile(path) as tif: return tif.is_imagej and not tif.is_bigtiff else: @@ -26,17 +27,17 @@ class Reader(AbstractReader, ABC): def __frame__(self, c, z, t): # Override this, return the frame at c, z, t self.reader.filehandle.seek(self.offset + t * self.count) - return np.reshape(unpack(self.fmt, self.reader.filehandle.read(self.count)), self.base_shape['yx']) + return np.reshape(unpack(self.fmt, self.reader.filehandle.read(self.count)), self.base_shape["yx"]) def open(self): - warn(f'File {self.path.name} is probably damaged, opening with fijiread.') + warn(f"File {self.path.name} is probably damaged, opening with fijiread.") self.reader = TiffFile(self.path) - assert self.reader.pages[0].compression == 1, 'Can only read uncompressed tiff files.' - assert self.reader.pages[0].samplesperpixel == 1, 'Can only read 1 sample per pixel.' + assert self.reader.pages[0].compression == 1, "Can only read uncompressed tiff files." + assert self.reader.pages[0].samplesperpixel == 1, "Can only read 1 sample per pixel." self.offset = self.reader.pages[0].dataoffsets[0] # noqa self.count = self.reader.pages[0].databytecounts[0] # noqa self.bytes_per_sample = self.reader.pages[0].bitspersample // 8 # noqa - self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * 'BHILQ'[self.bytes_per_sample - 1] # noqa + self.fmt = self.reader.byteorder + self.count // self.bytes_per_sample * "BHILQ"[self.bytes_per_sample - 1] # noqa def close(self): self.reader.close() @@ -51,9 +52,17 @@ class Reader(AbstractReader, ABC): ome.images.append( model.Image( pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) + size_c=size_c, + size_z=size_z, + size_t=size_t, + size_x=size_x, + size_y=size_y, + dimension_order="XYCZT", + type=pixel_type, + ), + objective_settings=model.ObjectiveSettings(id="Objective:0"), + ) + ) for c, z, t in product(range(size_c), range(size_z), range(size_t)): ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0)) return ome diff --git a/ndbioimage/readers/metaseriesread.py b/ndbioimage/readers/metaseriesread.py index f8087e4..c15c107 100644 --- a/ndbioimage/readers/metaseriesread.py +++ b/ndbioimage/readers/metaseriesread.py @@ -12,16 +12,17 @@ from .. 
import AbstractReader class Reader(AbstractReader, ABC): priority = 20 - do_not_pickle = 'last_tif' + do_not_pickle = "last_tif" @staticmethod def _can_open(path): - return isinstance(path, Path) and (path.is_dir() or - (path.parent.is_dir() and path.name.lower().startswith('pos'))) + return isinstance(path, Path) and ( + path.is_dir() or (path.parent.is_dir() and path.name.lower().startswith("pos")) + ) @staticmethod def get_positions(path: str | Path) -> Optional[list[int]]: - pat = re.compile(rf's(\d)_t\d+\.(tif|TIF)$') + pat = re.compile(rf"s(\d)_t\d+\.(tif|TIF)$") return sorted({int(m.group(1)) for file in Path(path).iterdir() if (m := pat.search(file.name))}) def get_ome(self): @@ -31,7 +32,7 @@ class Reader(AbstractReader, ABC): size_z = len(tif.pages) page = tif.pages[0] shape = {axis.lower(): size for axis, size in zip(page.axes, page.shape)} - size_x, size_y = shape['x'], shape['y'] + size_x, size_y = shape["x"], shape["y"] ome.instruments.append(model.Instrument()) @@ -41,16 +42,23 @@ class Reader(AbstractReader, ABC): ome.images.append( model.Image( pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, - size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) + size_c=size_c, + size_z=size_z, + size_t=size_t, + size_x=size_x, + size_y=size_y, + dimension_order="XYCZT", + type=pixel_type, + ), + objective_settings=model.ObjectiveSettings(id="Objective:0"), + ) + ) return ome def open(self): - pat = re.compile(rf's{self.series}_t\d+\.(tif|TIF)$') + pat = re.compile(rf"s{self.series}_t\d+\.(tif|TIF)$") filelist = sorted([file for file in self.path.iterdir() if pat.search(file.name)]) - pattern = re.compile(r't(\d+)$') + pattern = re.compile(r"t(\d+)$") self.filedict = {int(pattern.search(file.stem).group(1)) - 1: file for file in filelist} if len(self.filedict) == 0: raise FileNotFoundError @@ -72,9 +80,9 @@ class Reader(AbstractReader, ABC): def __frame__(self, c=0, z=0, t=0): tif = self.get_tif(t) page = tif.pages[z] - if page.axes.upper() == 'YX': + if page.axes.upper() == "YX": return page.asarray() - elif page.axes.upper() == 'XY': + elif page.axes.upper() == "XY": return page.asarray().T else: - raise NotImplementedError(f'reading axes {page.axes} is not implemented') + raise NotImplementedError(f"reading axes {page.axes} is not implemented") diff --git a/ndbioimage/readers/ndread.py b/ndbioimage/readers/ndread.py index 5a2b216..3b51b86 100644 --- a/ndbioimage/readers/ndread.py +++ b/ndbioimage/readers/ndread.py @@ -17,23 +17,32 @@ class Reader(AbstractReader, ABC): def get_ome(self): def shape(size_x=1, size_y=1, size_c=1, size_z=1, size_t=1): # noqa return size_x, size_y, size_c, size_z, size_t + size_x, size_y, size_c, size_z, size_t = shape(*self.array.shape) try: pixel_type = model.PixelType(self.array.dtype.name) except ValueError: - if self.array.dtype.name.startswith('int'): - pixel_type = model.PixelType('int32') + if self.array.dtype.name.startswith("int"): + pixel_type = model.PixelType("int32") else: - pixel_type = model.PixelType('float') + pixel_type = model.PixelType("float") ome = model.OME() ome.instruments.append(model.Instrument()) ome.images.append( model.Image( pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=pixel_type), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) + size_c=size_c, + size_z=size_z, + size_t=size_t, + size_x=size_x, + size_y=size_y, + 
dimension_order="XYCZT", + type=pixel_type, + ), + objective_settings=model.ObjectiveSettings(id="Objective:0"), + ) + ) for c, z, t in product(range(size_c), range(size_z), range(size_t)): ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=0)) return ome @@ -43,11 +52,11 @@ class Reader(AbstractReader, ABC): self.array = np.array(self.path) while self.array.ndim < 5: self.array = np.expand_dims(self.array, -1) # noqa - self.path = 'numpy array' + self.path = "numpy array" def __frame__(self, c, z, t): frame = self.array[:, :, c, z, t] - if self.axes.find('y') > self.axes.find('x'): + if self.axes.find("y") > self.axes.find("x"): return frame.T else: return frame diff --git a/ndbioimage/readers/seqread.py b/ndbioimage/readers/seqread.py index 4d74683..ee72d8f 100644 --- a/ndbioimage/readers/seqread.py +++ b/ndbioimage/readers/seqread.py @@ -21,24 +21,26 @@ def lazy_property(function, field, *arg_fields): except Exception: # noqa pass return self.__dict__[field] + return property(lazy) class Plane(model.Plane): - """ Lazily retrieve delta_t from metadata """ + """Lazily retrieve delta_t from metadata""" + def __init__(self, t0, file, **kwargs): # noqa super().__init__(**kwargs) # setting fields here because they would be removed by ome_types/pydantic after class definition - setattr(self.__class__, 'delta_t', lazy_property(self.get_delta_t, 'delta_t', 't0', 'file')) - setattr(self.__class__, 'delta_t_quantity', _quantity_property('delta_t')) - self.__dict__['t0'] = t0 # noqa - self.__dict__['file'] = file # noqa + setattr(self.__class__, "delta_t", lazy_property(self.get_delta_t, "delta_t", "t0", "file")) + setattr(self.__class__, "delta_t_quantity", _quantity_property("delta_t")) + self.__dict__["t0"] = t0 # noqa + self.__dict__["file"] = file # noqa @staticmethod def get_delta_t(t0, file): with tifffile.TiffFile(file) as tif: - info = yaml.safe_load(tif.pages[0].tags[50839].value['Info']) - return float((datetime.strptime(info['Time'], '%Y-%m-%d %H:%M:%S %z') - t0).seconds) + info = yaml.safe_load(tif.pages[0].tags[50839].value["Info"]) + return float((datetime.strptime(info["Time"], "%Y-%m-%d %H:%M:%S %z") - t0).seconds) class Reader(AbstractReader, ABC): @@ -46,78 +48,108 @@ class Reader(AbstractReader, ABC): @staticmethod def _can_open(path): - pat = re.compile(r'(?:\d+-)?Pos.*', re.IGNORECASE) - return (isinstance(path, Path) and path.is_dir() and - (pat.match(path.name) or any(file.is_dir() and pat.match(file.stem) for file in path.iterdir()))) + pat = re.compile(r"(?:\d+-)?Pos.*", re.IGNORECASE) + return ( + isinstance(path, Path) + and path.is_dir() + and (pat.match(path.name) or any(file.is_dir() and pat.match(file.stem) for file in path.iterdir())) + ) def get_ome(self): ome = model.OME() with tifffile.TiffFile(self.filedict[0, 0, 0]) as tif: metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()} ome.experimenters.append( - model.Experimenter(id='Experimenter:0', user_name=metadata['Info']['Summary']['UserName'])) - objective_str = metadata['Info']['ZeissObjectiveTurret-Label'] + model.Experimenter(id="Experimenter:0", user_name=metadata["Info"]["Summary"]["UserName"]) + ) + objective_str = metadata["Info"]["ZeissObjectiveTurret-Label"] ome.instruments.append(model.Instrument()) ome.instruments[0].objectives.append( model.Objective( - id='Objective:0', manufacturer='Zeiss', model=objective_str, - nominal_magnification=float(re.findall(r'(\d+)x', objective_str)[0]), - lens_na=float(re.findall(r'/(\d\.\d+)', 
objective_str)[0]), - immersion=model.Objective_Immersion.OIL if 'oil' in objective_str.lower() else None)) - tubelens_str = metadata['Info']['ZeissOptovar-Label'] + id="Objective:0", + manufacturer="Zeiss", + model=objective_str, + nominal_magnification=float(re.findall(r"(\d+)x", objective_str)[0]), + lens_na=float(re.findall(r"/(\d\.\d+)", objective_str)[0]), + immersion=model.Objective_Immersion.OIL if "oil" in objective_str.lower() else None, + ) + ) + tubelens_str = metadata["Info"]["ZeissOptovar-Label"] ome.instruments[0].objectives.append( model.Objective( - id='Objective:Tubelens:0', manufacturer='Zeiss', model=tubelens_str, - nominal_magnification=float(re.findall(r'\d?\d*[,.]?\d+(?=x$)', tubelens_str)[0].replace(',', '.')))) - ome.instruments[0].detectors.append( - model.Detector( - id='Detector:0', amplification_gain=100)) + id="Objective:Tubelens:0", + manufacturer="Zeiss", + model=tubelens_str, + nominal_magnification=float(re.findall(r"\d?\d*[,.]?\d+(?=x$)", tubelens_str)[0].replace(",", ".")), + ) + ) + ome.instruments[0].detectors.append(model.Detector(id="Detector:0", amplification_gain=100)) ome.instruments[0].filter_sets.append( - model.FilterSet(id='FilterSet:0', model=metadata['Info']['ZeissReflectorTurret-Label'])) + model.FilterSet(id="FilterSet:0", model=metadata["Info"]["ZeissReflectorTurret-Label"]) + ) - pxsize = metadata['Info']['PixelSizeUm'] - pxsize_cam = 6.5 if 'Hamamatsu' in metadata['Info']['Core-Camera'] else None + pxsize = metadata["Info"]["PixelSizeUm"] + pxsize_cam = 6.5 if "Hamamatsu" in metadata["Info"]["Core-Camera"] else None if pxsize == 0: pxsize = pxsize_cam / ome.instruments[0].objectives[0].nominal_magnification - pixel_type = metadata['Info']['PixelType'].lower() - if pixel_type.startswith('gray'): - pixel_type = 'uint' + pixel_type[4:] + pixel_type = metadata["Info"]["PixelType"].lower() + if pixel_type.startswith("gray"): + pixel_type = "uint" + pixel_type[4:] else: - pixel_type = 'uint16' # assume + pixel_type = "uint16" # assume size_c, size_z, size_t = (max(i) + 1 for i in zip(*self.filedict.keys())) - t0 = datetime.strptime(metadata['Info']['Time'], '%Y-%m-%d %H:%M:%S %z') + t0 = datetime.strptime(metadata["Info"]["Time"], "%Y-%m-%d %H:%M:%S %z") ome.images.append( model.Image( pixels=model.Pixels( - size_c=size_c, size_z=size_z, size_t=size_t, - size_x=metadata['Info']['Width'], size_y=metadata['Info']['Height'], - dimension_order='XYCZT', # type: ignore - type=pixel_type, physical_size_x=pxsize, physical_size_y=pxsize, - physical_size_z=metadata['Info']['Summary']['z-step_um']), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) + size_c=size_c, + size_z=size_z, + size_t=size_t, + size_x=metadata["Info"]["Width"], + size_y=metadata["Info"]["Height"], + dimension_order="XYCZT", # type: ignore + type=pixel_type, + physical_size_x=pxsize, + physical_size_y=pxsize, + physical_size_z=metadata["Info"]["Summary"]["z-step_um"], + ), + objective_settings=model.ObjectiveSettings(id="Objective:0"), + ) + ) for c, z, t in product(range(size_c), range(size_z), range(size_t)): ome.images[0].pixels.planes.append( - Plane(t0, self.filedict[c, z, t], - the_c=c, the_z=z, the_t=t, exposure_time=metadata['Info']['Exposure-ms'] / 1000)) + Plane( + t0, + self.filedict[c, z, t], + the_c=c, + the_z=z, + the_t=t, + exposure_time=metadata["Info"]["Exposure-ms"] / 1000, + ) + ) # compare channel names from metadata with filenames - pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE) + pattern_c = 
re.compile(r"img_\d{3,}_(.*)_\d{3,}$", re.IGNORECASE) for c in range(size_c): ome.images[0].pixels.channels.append( model.Channel( - id=f'Channel:{c}', name=pattern_c.findall(self.filedict[c, 0, 0].stem)[0], + id=f"Channel:{c}", + name=pattern_c.findall(self.filedict[c, 0, 0].stem)[0], detector_settings=model.DetectorSettings( - id='Detector:0', binning=metadata['Info']['Hamamatsu_sCMOS-Binning']), - filter_set_ref=model.FilterSetRef(id='FilterSet:0'))) + id="Detector:0", binning=metadata["Info"]["Hamamatsu_sCMOS-Binning"] + ), + filter_set_ref=model.FilterSetRef(id="FilterSet:0"), + ) + ) return ome def open(self): # /some_path/Pos4: path = /some_path, series = 4 # /some_path/5-Pos_001_005: path = /some_path/5-Pos_001_005, series = 0 - if re.match(r'(?:\d+-)?Pos.*', self.path.name, re.IGNORECASE) is None: - pat = re.compile(rf'^(?:\d+-)?Pos{self.series}$', re.IGNORECASE) + if re.match(r"(?:\d+-)?Pos.*", self.path.name, re.IGNORECASE) is None: + pat = re.compile(rf"^(?:\d+-)?Pos{self.series}$", re.IGNORECASE) files = sorted(file for file in self.path.iterdir() if pat.match(file.name)) if len(files): path = files[0] @@ -126,21 +158,26 @@ class Reader(AbstractReader, ABC): else: path = self.path - pat = re.compile(r'^img_\d{3,}.*\d{3,}.*\.tif$', re.IGNORECASE) + pat = re.compile(r"^img_\d{3,}.*\d{3,}.*\.tif$", re.IGNORECASE) filelist = sorted([file for file in path.iterdir() if pat.search(file.name)]) with tifffile.TiffFile(self.path / filelist[0]) as tif: metadata = {key: yaml.safe_load(value) for key, value in tif.pages[0].tags[50839].value.items()} # compare channel names from metadata with filenames - cnamelist = metadata['Info']['Summary']['ChNames'] + cnamelist = metadata["Info"]["Summary"]["ChNames"] cnamelist = [c for c in cnamelist if any([c in f.name for f in filelist])] - pattern_c = re.compile(r'img_\d{3,}_(.*)_\d{3,}$', re.IGNORECASE) - pattern_z = re.compile(r'(\d{3,})$') - pattern_t = re.compile(r'img_(\d{3,})', re.IGNORECASE) - self.filedict = {(cnamelist.index(pattern_c.findall(file.stem)[0]), # noqa - int(pattern_z.findall(file.stem)[0]), - int(pattern_t.findall(file.stem)[0])): file for file in filelist} + pattern_c = re.compile(r"img_\d{3,}_(.*)_\d{3,}$", re.IGNORECASE) + pattern_z = re.compile(r"(\d{3,})$") + pattern_t = re.compile(r"img_(\d{3,})", re.IGNORECASE) + self.filedict = { + ( + cnamelist.index(pattern_c.findall(file.stem)[0]), # noqa + int(pattern_z.findall(file.stem)[0]), + int(pattern_t.findall(file.stem)[0]), + ): file + for file in filelist + } def __frame__(self, c=0, z=0, t=0): return tifffile.imread(self.path / self.filedict[(c, z, t)]) diff --git a/ndbioimage/readers/tifread.py b/ndbioimage/readers/tifread.py index 28052a4..2a2de5f 100644 --- a/ndbioimage/readers/tifread.py +++ b/ndbioimage/readers/tifread.py @@ -15,11 +15,11 @@ from .. 
import AbstractReader, try_default class Reader(AbstractReader, ABC): priority = 0 - do_not_pickle = 'reader' + do_not_pickle = "reader" @staticmethod def _can_open(path): - if isinstance(path, Path) and path.suffix in ('.tif', '.tiff'): + if isinstance(path, Path) and path.suffix in (".tif", ".tiff"): with tifffile.TiffFile(path) as tif: return tif.is_imagej and tif.pages[-1]._nextifd() == 0 # noqa else: @@ -27,19 +27,21 @@ class Reader(AbstractReader, ABC): @cached_property def metadata(self): - return {key: try_default(yaml.safe_load, value, value) if isinstance(value, str) else value - for key, value in self.reader.imagej_metadata.items()} + return { + key: try_default(yaml.safe_load, value, value) if isinstance(value, str) else value + for key, value in self.reader.imagej_metadata.items() + } def get_ome(self): if self.reader.is_ome: - match = re.match(r'^(.*)(pos.*)$', self.path.stem, flags=re.IGNORECASE) + match = re.match(r"^(.*)(pos.*)$", self.path.stem, flags=re.IGNORECASE) if match is not None and len(match.groups()) == 2: a, b = match.groups() - with tifffile.TiffFile(self.path.with_stem(a + re.sub(r'\d', '0', b))) as file0: + with tifffile.TiffFile(self.path.with_stem(a + re.sub(r"\d", "0", b))) as file0: with warnings.catch_warnings(): - warnings.simplefilter('ignore', category=UserWarning) + warnings.simplefilter("ignore", category=UserWarning) ome = from_xml(file0.ome_metadata) - ome.images = [image for image in ome.images if self.path.stem[:len(image.name)] == image.name] + ome.images = [image for image in ome.images if self.path.stem[: len(image.name)] == image.name] return ome page = self.reader.pages[0] @@ -47,11 +49,11 @@ class Reader(AbstractReader, ABC): size_x = page.imagewidth if self.p_ndim == 3: size_c = page.samplesperpixel - size_t = self.metadata.get('frames', 1) # // C + size_t = self.metadata.get("frames", 1) # // C else: - size_c = self.metadata.get('channels', 1) - size_t = self.metadata.get('frames', 1) - size_z = self.metadata.get('slices', 1) + size_c = self.metadata.get("channels", 1) + size_t = self.metadata.get("frames", 1) + size_z = self.metadata.get("slices", 1) if 282 in page.tags and 296 in page.tags and page.tags[296].value == 1: f = page.tags[282].value pxsize = f[1] / f[0] @@ -59,24 +61,44 @@ class Reader(AbstractReader, ABC): pxsize = None dtype = page.dtype.name - if dtype not in ('int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32', - 'float', 'double', 'complex', 'double-complex', 'bit'): - dtype = 'float' + if dtype not in ( + "int8", + "int16", + "int32", + "uint8", + "uint16", + "uint32", + "float", + "double", + "complex", + "double-complex", + "bit", + ): + dtype = "float" - interval_t = self.metadata.get('interval', 0) + interval_t = self.metadata.get("interval", 0) ome = model.OME() - ome.instruments.append(model.Instrument(id='Instrument:0')) - ome.instruments[0].objectives.append(model.Objective(id='Objective:0')) + ome.instruments.append(model.Instrument(id="Instrument:0")) + ome.instruments[0].objectives.append(model.Objective(id="Objective:0")) ome.images.append( model.Image( - id='Image:0', + id="Image:0", pixels=model.Pixels( - id='Pixels:0', - size_c=size_c, size_z=size_z, size_t=size_t, size_x=size_x, size_y=size_y, - dimension_order='XYCZT', type=dtype, # type: ignore - physical_size_x=pxsize, physical_size_y=pxsize), - objective_settings=model.ObjectiveSettings(id='Objective:0'))) + id="Pixels:0", + size_c=size_c, + size_z=size_z, + size_t=size_t, + size_x=size_x, + size_y=size_y, + dimension_order="XYCZT", + 
+                    type=dtype,  # type: ignore
+                    physical_size_x=pxsize,
+                    physical_size_y=pxsize,
+                ),
+                objective_settings=model.ObjectiveSettings(id="Objective:0"),
+            )
+        )

         for c, z, t in product(range(size_c), range(size_z), range(size_t)):
             ome.images[0].pixels.planes.append(model.Plane(the_c=c, the_z=z, the_t=t, delta_t=interval_t * t))
         return ome
@@ -86,9 +108,9 @@
         page = self.reader.pages.first
         self.p_ndim = page.ndim  # noqa
         if self.p_ndim == 3:
-            self.p_transpose = [i for i in [page.axes.find(j) for j in 'SYX'] if i >= 0]  # noqa
+            self.p_transpose = [i for i in [page.axes.find(j) for j in "SYX"] if i >= 0]  # noqa
         else:
-            self.p_transpose = [i for i in [page.axes.find(j) for j in 'YX'] if i >= 0]  # noqa
+            self.p_transpose = [i for i in [page.axes.find(j) for j in "YX"] if i >= 0]  # noqa

     def close(self):
         self.reader.close()
@@ -96,12 +118,12 @@
     def __frame__(self, c: int, z: int, t: int):
         dimension_order = self.ome.images[0].pixels.dimension_order.value
         if self.p_ndim == 3:
-            axes = ''.join([ax.lower() for ax in dimension_order if ax.lower() in 'zt'])
-            ct = {'z': z, 't': t}
+            axes = "".join([ax.lower() for ax in dimension_order if ax.lower() in "zt"])
+            ct = {"z": z, "t": t}
             n = sum([ct[ax] * np.prod(self.base_shape[axes[:i]]) for i, ax in enumerate(axes)])
             return np.transpose(self.reader.asarray(int(n)), self.p_transpose)[int(c)]
         else:
-            axes = ''.join([ax.lower() for ax in dimension_order if ax.lower() in 'czt'])
-            czt = {'c': c, 'z': z, 't': t}
+            axes = "".join([ax.lower() for ax in dimension_order if ax.lower() in "czt"])
+            czt = {"c": c, "z": z, "t": t}
             n = sum([czt[ax] * np.prod(self.base_shape[axes[:i]]) for i, ax in enumerate(axes)])
             return np.transpose(self.reader.asarray(int(n)), self.p_transpose)
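The `__frame__` formula above maps (c, z, t) to tifffile's flat page index: each coordinate is weighted by the product of the sizes of the faster-running axes. A small stand-alone sketch of the same arithmetic, with hypothetical sizes standing in for `self.base_shape`:

```
import numpy as np

sizes = {"c": 2, "z": 5, "t": 10}  # hypothetical sizes standing in for self.base_shape
axes = "czt"                       # the czt part of dimension_order "XYCZT"

def plane_index(c: int, z: int, t: int) -> int:
    czt = {"c": c, "z": z, "t": t}
    # same sum as in __frame__ above
    return sum(czt[ax] * int(np.prod([sizes[a] for a in axes[:i]])) for i, ax in enumerate(axes))

assert plane_index(1, 3, 4) == 1 + 3 * 2 + 4 * 2 * 5  # == 47
```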
diff --git a/ndbioimage/transforms.py b/ndbioimage/transforms.py
index 3a54ec3..3f53ecb 100644
--- a/ndbioimage/transforms.py
+++ b/ndbioimage/transforms.py
@@ -21,7 +21,7 @@ except ImportError:
     DataFrame, Series, concat = None, None, None

-if hasattr(yaml, 'full_load'):
+if hasattr(yaml, "full_load"):
     yamlload = yaml.full_load
 else:
     yamlload = yaml.load
@@ -34,7 +34,7 @@ class Transforms(dict):

     @classmethod
     def from_file(cls, file, C=True, T=True):
-        with open(Path(file).with_suffix('.yml')) as f:
+        with open(Path(file).with_suffix(".yml")) as f:
             return cls.from_dict(yamlload(f), C, T)

     @classmethod
@@ -42,7 +42,7 @@
         new = cls()
         for key, value in d.items():
             if isinstance(key, str) and C:
-                new[key.replace(r'\:', ':').replace('\\\\', '\\')] = Transform.from_dict(value)
+                new[key.replace(r"\:", ":").replace("\\\\", "\\")] = Transform.from_dict(value)
             elif T:
                 new[key] = Transform.from_dict(value)
         return new
@@ -69,8 +69,10 @@
         return new

     def asdict(self):
-        return {key.replace('\\', '\\\\').replace(':', r'\:') if isinstance(key, str) else key: value.asdict()
-                for key, value in self.items()}
+        return {
+            key.replace("\\", "\\\\").replace(":", r"\:") if isinstance(key, str) else key: value.asdict()
+            for key, value in self.items()
+        }

     def __getitem__(self, item):
         return np.prod([self[i] for i in item[::-1]]) if isinstance(item, tuple) else super().__getitem__(item)
@@ -88,7 +90,7 @@
         return hash(frozenset((*self.__dict__.items(), *self.items())))

     def save(self, file):
-        with open(Path(file).with_suffix('.yml'), 'w') as f:
+        with open(Path(file).with_suffix(".yml"), "w") as f:
             yaml.safe_dump(self.asdict(), f, default_flow_style=None)

     def copy(self):
@@ -109,8 +111,9 @@ class Transforms(dict):
         transform_channels = {key for key in self.keys() if isinstance(key, str)}
         if set(channel_names) - transform_channels:
             mapping = key_map(channel_names, transform_channels)
-            warnings.warn(f'The image file and the transform do not have the same channels,'
-                          f' creating a mapping: {mapping}')
+            warnings.warn(
+                f"The image file and the transform do not have the same channels, creating a mapping: {mapping}"
+            )
             for key_im, key_t in mapping.items():
                 self[key_im] = self[key_t]

@@ -127,16 +130,16 @@ class Transforms(dict):
             return concat([self.coords_pandas(row, channel_names, columns) for _, row in array.iterrows()], axis=1).T
         elif isinstance(array, Series):
             key = []
-            if 'C' in array:
-                key.append(channel_names[int(array['C'])])
-            if 'T' in array:
-                key.append(int(array['T']))
+            if "C" in array:
+                key.append(channel_names[int(array["C"])])
+            if "T" in array:
+                key.append(int(array["T"]))
             return self[tuple(key)].coords(array, columns)
         else:
-            raise TypeError('Not a pandas DataFrame or Series.')
+            raise TypeError("Not a pandas DataFrame or Series.")

     def with_beads(self, cyllens, bead_files):
-        assert len(bead_files) > 0, 'At least one file is needed to calculate the registration.'
+        assert len(bead_files) > 0, "At least one file is needed to calculate the registration."
         transforms = [self.calculate_channel_transforms(file, cyllens) for file in bead_files]
         for key in {key for transform in transforms for key in transform.keys()}:
             new_transforms = [transform[key] for transform in transforms if key in transform]
@@ -145,16 +148,18 @@ class Transforms(dict):
             else:
                 self[key] = Transform()
                 self[key].parameters = np.mean([t.parameters for t in new_transforms], 0)
-                self[key].dparameters = (np.std([t.parameters for t in new_transforms], 0) /
-                                         np.sqrt(len(new_transforms))).tolist()
+                self[key].dparameters = (
+                    np.std([t.parameters for t in new_transforms], 0) / np.sqrt(len(new_transforms))
+                ).tolist()
         return self

     @staticmethod
     def get_bead_files(path):
         from . import Imread
+
         files = []
         for file in path.iterdir():
-            if file.name.lower().startswith('beads'):
+            if file.name.lower().startswith("beads"):
                 try:
                     with Imread(file):
                         files.append(file)
@@ -162,32 +167,32 @@ class Transforms(dict):
                     pass
         files = sorted(files)
         if not files:
-            raise Exception('No bead file found!')
+            raise Exception("No bead file found!")

         checked_files = []
         for file in files:
             try:
                 if file.is_dir():
-                    file /= 'Pos0'
+                    file /= "Pos0"
                 with Imread(file):  # check for errors opening the file
                     checked_files.append(file)
             except (Exception,):
                 continue
         if not checked_files:
-            raise Exception('No bead file found!')
+            raise Exception("No bead file found!")
         return checked_files

     @staticmethod
     def calculate_channel_transforms(bead_file, cyllens):
-        """ When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162
-            in the horizontal direction """
+        """When no channel is left untransformed by a cylindrical lens, assume that the image is scaled by a
+        factor 1.162 in the horizontal direction"""
         from . import Imread
-        with Imread(bead_file, axes='zcyx') as im:  # noqa
-            max_ims = im.max('z')
+        with Imread(bead_file, axes="zcyx") as im:  # noqa
+            max_ims = im.max("z")
             goodch = [c for c, max_im in enumerate(max_ims) if not im.is_noise(max_im)]
             if not goodch:
                 goodch = list(range(len(max_ims)))
-            untransformed = [c for c in range(im.shape['c']) if cyllens[im.detector[c]].lower() == 'none']
+            untransformed = [c for c in range(im.shape["c"]) if cyllens[im.detector[c]].lower() == "none"]
             good_and_untrans = sorted(set(goodch) & set(untransformed))

             if good_and_untrans:
@@ -200,7 +205,7 @@ class Transforms(dict):
                 matrix[0, 0] = 0.86
                 transform.matrix = matrix
             transforms = Transforms()
-            for c in tqdm(goodch, desc='Calculating channel transforms'):  # noqa
+            for c in tqdm(goodch, desc="Calculating channel transforms"):  # noqa
                 if c == masterch:
                     transforms[im.channel_names[c]] = transform
                 else:
@@ -210,44 +215,45 @@ class Transforms(dict):

     @staticmethod
     def save_channel_transform_tiff(bead_files, tiffile):
         from . import Imread
+
         n_channels = 0
         for file in bead_files:
             with Imread(file) as im:
-                n_channels = max(n_channels, im.shape['c'])
+                n_channels = max(n_channels, im.shape["c"])
         with IJTiffFile(tiffile) as tif:
             for t, file in enumerate(bead_files):
                 with Imread(file) as im:
                     with Imread(file).with_transform() as jm:
-                        for c in range(im.shape['c']):
-                            tif.save(np.hstack((im(c=c, t=0).max('z'), jm(c=c, t=0).max('z'))), c, 0, t)
+                        for c in range(im.shape["c"]):
+                            tif.save(np.hstack((im(c=c, t=0).max("z"), jm(c=c, t=0).max("z"))), c, 0, t)

     def with_drift(self, im):
-        """ Calculate shifts relative to the first frame
-            divide the sequence into groups,
-            compare each frame to the frame in the middle of the group and compare these middle frames to each other
+        """Calculate shifts relative to the first frame
+        divide the sequence into groups,
+        compare each frame to the frame in the middle of the group and compare these middle frames to each other
         """
-        im = im.transpose('tzycx')
-        t_groups = [list(chunk) for chunk in Chunks(range(im.shape['t']), size=round(np.sqrt(im.shape['t'])))]
+        im = im.transpose("tzycx")
+        t_groups = [list(chunk) for chunk in Chunks(range(im.shape["t"]), size=round(np.sqrt(im.shape["t"])))]
         t_keys = [int(np.round(np.mean(t_group))) for t_group in t_groups]
         t_pairs = [(int(np.round(np.mean(t_group))), frame) for t_group in t_groups for frame in t_group]
         t_pairs.extend(zip(t_keys, t_keys[1:]))

-        fmaxz_keys = {t_key: filters.gaussian(im[t_key].max('z'), 5) for t_key in t_keys}
+        fmaxz_keys = {t_key: filters.gaussian(im[t_key].max("z"), 5) for t_key in t_keys}

         def fun(t_key_t, im, fmaxz_keys):
             t_key, t = t_key_t
             if t_key == t:
                 return 0, 0
             else:
-                fmaxz = filters.gaussian(im[t].max('z'), 5)
-                return Transform.register(fmaxz_keys[t_key], fmaxz, 'translation').parameters[4:]
+                fmaxz = filters.gaussian(im[t].max("z"), 5)
+                return Transform.register(fmaxz_keys[t_key], fmaxz, "translation").parameters[4:]

-        shifts = np.array(pmap(fun, t_pairs, (im, fmaxz_keys), desc='Calculating image shifts.'))
+        shifts = np.array(pmap(fun, t_pairs, (im, fmaxz_keys), desc="Calculating image shifts."))
         shift_keys_cum = np.zeros(2)
-        for shift_keys, t_group in zip(np.vstack((-shifts[0], shifts[im.shape['t']:])), t_groups):
+        for shift_keys, t_group in zip(np.vstack((-shifts[0], shifts[im.shape["t"] :])), t_groups):
             shift_keys_cum += shift_keys
             shifts[t_group] += shift_keys_cum
-        for i, shift in enumerate(shifts[:im.shape['t']]):
+        for i, shift in enumerate(shifts[: im.shape["t"]]):
             self[i] = Transform.from_shift(shift)
         return self

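The grouping in `with_drift` keeps the number of registrations manageable: every frame is registered against the middle frame of its group, and only those middle frames are registered against each other. A rough illustration of the bookkeeping for a hypothetical sequence of 9 frames, with `Chunks` replaced by plain range splitting:

```
import numpy as np

T = 9                                                    # hypothetical number of frames
size = round(np.sqrt(T))                                 # 3 frames per group
t_groups = [list(range(i, min(i + size, T))) for i in range(0, T, size)]  # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
t_keys = [int(np.round(np.mean(g))) for g in t_groups]   # [1, 4, 7]: the reference frame of each group
# every frame is paired with its group's key, plus each key with the next key
t_pairs = [(k, t) for k, g in zip(t_keys, t_groups) for t in g] + list(zip(t_keys, t_keys[1:]))
```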
@@ -257,9 +263,9 @@ class Transform:
         if sitk is None:
             self.transform = None
         else:
-            self.transform = sitk.ReadTransform(str(Path(__file__).parent / 'transform.txt'))
-        self.dparameters = [0., 0., 0., 0., 0., 0.]
-        self.shape = [512., 512.]
+            self.transform = sitk.ReadTransform(str(Path(__file__).parent / "transform.txt"))
+        self.dparameters = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+        self.shape = [512.0, 512.0]
         self.origin = [255.5, 255.5]
         self._last, self._inverse = None, None

@@ -274,12 +280,13 @@ class Transform:

     @classmethod
     def register(cls, fix, mov, kind=None):
-        """ kind: 'affine', 'translation', 'rigid' """
+        """kind: 'affine', 'translation', 'rigid'"""
         if sitk is None:
-            raise ImportError('SimpleElastix is not installed: '
-                              'https://simpleelastix.readthedocs.io/GettingStarted.html')
+            raise ImportError(
+                "SimpleElastix is not installed: https://simpleelastix.readthedocs.io/GettingStarted.html"
+            )
         new = cls()
-        kind = kind or 'affine'
+        kind = kind or "affine"
         new.shape = fix.shape
         fix, mov = new.cast_image(fix), new.cast_image(mov)
         # TODO: implement RigidTransform
@@ -290,16 +297,16 @@ class Transform:
         tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind))
         tfilter.Execute()
         transform = tfilter.GetTransformParameterMap()[0]
-        if kind == 'affine':
-            new.parameters = [float(t) for t in transform['TransformParameters']]
-            new.shape = [float(t) for t in transform['Size']]
-            new.origin = [float(t) for t in transform['CenterOfRotationPoint']]
-        elif kind == 'translation':
-            new.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']]
-            new.shape = [float(t) for t in transform['Size']]
+        if kind == "affine":
+            new.parameters = [float(t) for t in transform["TransformParameters"]]
+            new.shape = [float(t) for t in transform["Size"]]
+            new.origin = [float(t) for t in transform["CenterOfRotationPoint"]]
+        elif kind == "translation":
+            new.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform["TransformParameters"]]
+            new.shape = [float(t) for t in transform["Size"]]
             new.origin = [(t - 1) / 2 for t in new.shape]
         else:
-            raise NotImplementedError(f'{kind} transforms not implemented (yet)')
+            raise NotImplementedError(f"{kind} transforms not implemented (yet)")
         new.dparameters = 6 * [np.nan]
         return new

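As a usage sketch for `Transform.register` and `Transform.frame` (requires SimpleElastix; the shifted random image merely stands in for a pair of bead frames, and the output path is hypothetical):

```
import numpy as np
from ndbioimage.transforms import Transform

fix = np.random.rand(512, 512)
mov = np.roll(fix, (3, -2), axis=(0, 1))             # stand-in for a second, shifted channel
shift = Transform.register(fix, mov, "translation")  # parameters ~ [1, 0, 0, 1, dx, dy]
aligned = shift.frame(mov)                           # resample mov onto fix's grid
shift.save("shift.yml")                              # hypothetical output file
```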
@@ -315,18 +322,24 @@ class Transform:

     @classmethod
     def from_file(cls, file):
-        with open(Path(file).with_suffix('.yml')) as f:
+        with open(Path(file).with_suffix(".yml")) as f:
             return cls.from_dict(yamlload(f))

     @classmethod
     def from_dict(cls, d):
         new = cls()
-        new.origin = None if d['CenterOfRotationPoint'] is None else [float(i) for i in d['CenterOfRotationPoint']]
-        new.parameters = ((1., 0., 0., 1., 0., 0.) if d['TransformParameters'] is None else
-                          [float(i) for i in d['TransformParameters']])
-        new.dparameters = ([(0., 0., 0., 0., 0., 0.) if i is None else float(i) for i in d['dTransformParameters']]
-                           if 'dTransformParameters' in d else 6 * [np.nan] and d['dTransformParameters'] is not None)
-        new.shape = None if d['Size'] is None else [None if i is None else float(i) for i in d['Size']]
+        new.origin = None if d["CenterOfRotationPoint"] is None else [float(i) for i in d["CenterOfRotationPoint"]]
+        new.parameters = (
+            (1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
+            if d["TransformParameters"] is None
+            else [float(i) for i in d["TransformParameters"]]
+        )
+        new.dparameters = (
+            [(0.0, 0.0, 0.0, 0.0, 0.0, 0.0) if i is None else float(i) for i in d["dTransformParameters"]]
+            if "dTransformParameters" in d
+            else 6 * [np.nan] and d["dTransformParameters"] is not None
+        )
+        new.shape = None if d["Size"] is None else [None if i is None else float(i) for i in d["Size"]]
         return new

     def __mul__(self, other):  # TODO: take care of dmatrix
@@ -359,9 +372,9 @@ class Transform:

     @property
     def matrix(self):
-        return np.array(((*self.parameters[:2], self.parameters[4]),
-                         (*self.parameters[2:4], self.parameters[5]),
-                         (0, 0, 1)))
+        return np.array(
+            ((*self.parameters[:2], self.parameters[4]), (*self.parameters[2:4], self.parameters[5]), (0, 0, 1))
+        )

     @matrix.setter
     def matrix(self, value):
@@ -370,9 +383,9 @@ class Transform:

     @property
     def dmatrix(self):
-        return np.array(((*self.dparameters[:2], self.dparameters[4]),
-                         (*self.dparameters[2:4], self.dparameters[5]),
-                         (0, 0, 0)))
+        return np.array(
+            ((*self.dparameters[:2], self.dparameters[4]), (*self.dparameters[2:4], self.dparameters[5]), (0, 0, 0))
+        )

     @dmatrix.setter
     def dmatrix(self, value):
@@ -384,7 +397,7 @@ class Transform:
         if self.transform is not None:
             return list(self.transform.GetParameters())
         else:
-            return [1., 0., 0., 1., 0., 0.]
+            return [1.0, 0.0, 0.0, 1.0, 0.0, 0.0]

     @parameters.setter
     def parameters(self, value):
@@ -420,29 +433,34 @@ class Transform:
         self.shape = shape[:2]

     def asdict(self):
-        return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters,
-                'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()}
+        return {
+            "CenterOfRotationPoint": self.origin,
+            "Size": self.shape,
+            "TransformParameters": self.parameters,
+            "dTransformParameters": np.nan_to_num(self.dparameters, nan=1e99).tolist(),
+        }

     def frame(self, im, default=0):
         if self.is_unity():
             return im
         else:
             if sitk is None:
-                raise ImportError('SimpleElastix is not installed: '
-                                  'https://simpleelastix.readthedocs.io/GettingStarted.html')
+                raise ImportError(
+                    "SimpleElastix is not installed: https://simpleelastix.readthedocs.io/GettingStarted.html"
+                )
             dtype = im.dtype
-            im = im.astype('float')
+            im = im.astype("float")
             intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor
             return self.cast_array(sitk.Resample(self.cast_image(im), self.transform, intp, default)).astype(dtype)

     def coords(self, array, columns=None):
-        """ Transform coordinates in 2 column numpy array,
-            or in pandas DataFrame or Series objects in columns ['x', 'y']
+        """Transform coordinates in 2 column numpy array,
+        or in pandas DataFrame or Series objects in columns ['x', 'y']
         """
         if self.is_unity():
             return array.copy()
         elif DataFrame is not None and isinstance(array, (DataFrame, Series)):
-            columns = columns or ['x', 'y']
+            columns = columns or ["x", "y"]
             array = array.copy()
             if isinstance(array, DataFrame):
                 array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))
@@ -453,10 +471,10 @@ class Transform:
             return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)])

     def save(self, file):
-        """ save the parameters of the transform calculated
-            with affine_registration to a yaml file
+        """save the parameters of the transform calculated
+        with affine_registration to a yaml file
         """
-        if not file[-3:] == 'yml':
-            file += '.yml'
-        with open(file, 'w') as f:
+        if not file[-3:] == "yml":
+            file += ".yml"
+        with open(file, "w") as f:
             yaml.safe_dump(self.asdict(), f, default_flow_style=None)
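The serialization pair above (`asdict`/`from_dict` and `save`/`from_file`) round-trips transforms through YAML, escaping ':' in channel-name keys. A minimal sketch with a hypothetical channel name and file path:

```
from ndbioimage.transforms import Transform, Transforms

transforms = Transforms()
transforms["Cy5"] = Transform()            # identity transform for a hypothetical channel
transforms.save("transforms_example")      # written as transforms_example.yml
restored = Transforms.from_file("transforms_example")
print(restored["Cy5"].parameters)          # [1.0, 0.0, 0.0, 1.0, 0.0, 0.0] for the identity
```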
diff --git a/pyproject.toml b/pyproject.toml
index 972734d..c3b70e3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ndbioimage"
-version = "2025.3.2"
+version = "2025.8.0"
 description = "Bio image reading, metadata and some affine registration."
 authors = [ { name = "W. Pomp", email = "w.pomp@nki.nl" } ]

@@ -15,7 +15,6 @@ exclude = ["ndbioimage/jars"]
 dependencies = [
     "czifile == 2019.7.2",
     "imagecodecs",
-    "JPype1",
     "lxml",
     "numpy >= 1.20",
     "ome-types",
@@ -33,6 +32,7 @@ dependencies = [
 [project.optional-dependencies]
 test = ["pytest"]
 write = ["matplotlib", "scikit-video"]
+bioformats = ["JPype1"]

 [project.urls]
 repository = "https://github.com/wimpomp/ndbioimage"
@@ -46,6 +46,10 @@ filterwarnings = ["ignore:::(colorcet)"]
 [tool.isort]
 line_length = 119

+[tool.ruff]
+line-length = 119
+indent-width = 4
+
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
diff --git a/tests/test_open.py b/tests/test_open.py
index 133fbaa..4d0048b 100644
--- a/tests/test_open.py
+++ b/tests/test_open.py
@@ -7,7 +7,7 @@ import pytest

 from ndbioimage import Imread, ReaderNotFoundError

-@pytest.mark.parametrize('file', (Path(__file__).parent / 'files').iterdir())
+@pytest.mark.parametrize("file", (Path(__file__).parent / "files").iterdir())
 def test_open(file):
     try:
         with Imread(file) as im:
@@ -21,7 +21,7 @@ def test_open(file):
             w = pickle.loads(b)
             assert w[dict(c=0, z=0, t=0)].mean() == mean
     except ReaderNotFoundError:
-        assert len(Imread.__subclasses__()), 'No subclasses for Imread found.'
+        assert len(Imread.__subclasses__()), "No subclasses for Imread found."
     for child in active_children():
         child.kill()

diff --git a/tests/test_slicing.py b/tests/test_slicing.py
index 0cfdfca..d24c367 100644
--- a/tests/test_slicing.py
+++ b/tests/test_slicing.py
@@ -11,8 +11,9 @@ im = Imread(r)
 a = np.array(im)


-@pytest.mark.parametrize('s', combinations_with_replacement(
-    (0, -1, 1, slice(None), slice(0, 1), slice(-1, 0), slice(1, 1)), 5))
+@pytest.mark.parametrize(
+    "s", combinations_with_replacement((0, -1, 1, slice(None), slice(0, 1), slice(-1, 0), slice(1, 1)), 5)
+)
 def test_slicing(s):
     s_im, s_a = im[s], a[s]
     if isinstance(s_a, Number):
diff --git a/tests/test_ufuncs.py b/tests/test_ufuncs.py
index 7b00e09..092452c 100644
--- a/tests/test_ufuncs.py
+++ b/tests/test_ufuncs.py
@@ -10,10 +10,30 @@ im = Imread(r)
 a = np.array(im)


-@pytest.mark.parametrize('fun_and_axis', product(
-    (np.sum, np.nansum, np.min, np.nanmin, np.max, np.nanmax, np.argmin, np.argmax,
-     np.mean, np.nanmean, np.var, np.nanvar, np.std, np.nanstd), (None, 0, 1, 2, 3, 4)))
+@pytest.mark.parametrize(
+    "fun_and_axis",
+    product(
+        (
+            np.sum,
+            np.nansum,
+            np.min,
+            np.nanmin,
+            np.max,
+            np.nanmax,
+            np.argmin,
+            np.argmax,
+            np.mean,
+            np.nanmean,
+            np.var,
+            np.nanvar,
+            np.std,
+            np.nanstd,
+        ),
+        (None, 0, 1, 2, 3, 4),
+    ),
+)
 def test_ufuncs(fun_and_axis):
     fun, axis = fun_and_axis
-    assert np.all(np.isclose(fun(im, axis), fun(a, axis))), \
-        f'function {fun.__name__} over axis {axis} does not give the correct result'
+    assert np.all(np.isclose(fun(im, axis), fun(a, axis))), (
+        f"function {fun.__name__} over axis {axis} does not give the correct result"
+    )
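The tests above pin down the numpy interoperability of `Imread`: converting to an array, slicing, and reductions must agree with the equivalent operations on a plain ndarray. A condensed sketch of the same checks, assuming a hypothetical test file with five axes:

```
import numpy as np
from ndbioimage import Imread

with Imread("tests/files/example.tif") as im:  # hypothetical test file
    a = np.array(im)                           # materialize as a plain ndarray
    assert np.all(np.isclose(np.nanmean(im, 0), np.nanmean(a, 0)))
    assert np.all(np.isclose(np.max(im, None), np.max(a, None)))
```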