diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d306ea4
--- /dev/null
+++ b/README.md
@@ -0,0 +1,82 @@
+# ndbioimage
+
+Exposes (bio) images as a numpy ndarray-like object, but without loading the whole
+image into memory, reading from the file only when needed. Some metadata is read
+and exposed as attributes on the Imread object (TODO: structure data in OME format).
+Additionally, it can automatically calculate an affine transform that corrects for
+chromatic aberrations etc. and apply it to the image on the fly.
+
+Currently supports ImageJ tif files, czi files, Micro-Manager tif sequences and anything
+Bio-Formats can handle.
+
+## Installation
+
+    pip install ndbioimage@git+https://github.com/wimpomp/ndbioimage.git
+
+### With bioformats (if Java is properly installed)
+
+    pip install ndbioimage[bioformats]@git+https://github.com/wimpomp/ndbioimage.git
+
+### With affine transforms (only for Python 3.8, 3.9 and 3.10)
+
+    pip install ndbioimage[transforms]@git+https://github.com/wimpomp/ndbioimage.git
+
+## Usage
+
+- Reading an image file and plotting the frame at channel=2, time=1
+
+
+    import matplotlib.pyplot as plt
+    from ndbioimage import imread
+    with imread('image_file.tif', axes='ctxy', dtype=int) as im:
+        plt.imshow(im[2, 1])
+
+- Showing some image metadata
+
+
+    from ndbioimage import imread
+    from pprint import pprint
+    with imread('image_file.tif') as im:
+        pprint(im)
+
+- Slicing the image without loading it into memory
+
+
+    from ndbioimage import imread
+    with imread('image_file.tif', axes='cztxy') as im:
+        sliced_im = im[1, :, :, 100:200, 100:200]
+
+sliced_im is an instance of Imread which will only load image data from the file when needed
+
+
+- Converting (part of) the image to a numpy ndarray
+
+
+    from ndbioimage import imread
+    import numpy as np
+    with imread('image_file.tif', axes='cztxy') as im:
+        array = np.asarray(im[0, 0])
+
+## Adding more formats
+Readers for image formats subclass Imread. When an image reader is imported, Imread will
+automatically recognize it and use it to open the appropriate file format.
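+A minimal reader might look something like the sketch below (the `.myext` extension and the
+hard-coded shape are placeholders for illustration only); the exact requirements are listed after it.
+
+    from ndbioimage import Imread
+    import numpy as np
+
+    class Reader(Imread):
+        priority = 50  # tried before the bioformats fallback reader (priority 99)
+
+        @staticmethod
+        def _can_open(path):
+            return isinstance(path, str) and path.endswith('.myext')
+
+        def __metadata__(self):
+            self.shape = (512, 512, 1, 1, 1)  # (X, Y, C, Z, T)
+
+        def __frame__(self, c, z, t):
+            # a real reader would read the frame at channel c, slice z, time t from the file
+            return np.zeros(self.shape['xy'], int)
+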
Image readers +subclass Imread and are required to implement the following methods: + +- staticmethod _can_open(path): return True if path can be opened by this reader +- \_\_metadata__(self): reads metadata from file and adds them to self as attributes, + - the shape of the data in the file needs to be set as self.shape = (X, Y, C, Z, T) + - other attributes like pxsize, acquisitiontime and title can be set here as well +- \_\_frame__(self, c, z, t): return the frame at channel=c, z-slice=z, time=t from the file + +Optional methods: +- open(self): maybe open some file +- close(self): close any file handles + +Optional fields: +- priority (int): Imread will try readers with a lower number first, default: 99 +- do_not_pickle (strings): any attributes that should not be included when the object is pickled, +for example: any file handles + +# TODO +- structure the metadata in OME format tree +- re-implement transforms diff --git a/ndbioimage/.gitignore b/ndbioimage/.gitignore new file mode 100644 index 0000000..8a65aa4 --- /dev/null +++ b/ndbioimage/.gitignore @@ -0,0 +1,8 @@ +._* +*.pyc +/build/ +*.egg-info +/venv/ +.idea +/.pytest_cache/ +/ndbioimage/_version.py diff --git a/ndbioimage/__init__.py b/ndbioimage/__init__.py new file mode 100755 index 0000000..e63c1cd --- /dev/null +++ b/ndbioimage/__init__.py @@ -0,0 +1,1483 @@ +import os +import re +import inspect +import pandas +import yaml +import numpy as np +from datetime import datetime +from tqdm.auto import tqdm +from itertools import product +from collections import OrderedDict +from abc import ABCMeta, abstractmethod +from functools import cached_property +from parfor import parfor +from tiffwrite import IJTiffFile +from numbers import Number +from argparse import ArgumentParser +from warnings import warn +from ndbioimage.transforms import Transform, Transforms +from ndbioimage.jvm import JVM +from ndbioimage._version import __version__, __git_commit_hash__ + + +class ImTransformsBase(Transforms): + def coords(self, array, colums=None): + if isinstance(array, pandas.DataFrame): + return pandas.concat([self(int(row['C']), int(row['T'])).coords(row, colums) + for _, row in array.iterrows()], axis=1).T + elif isinstance(array, pandas.Series): + return self(int(array['C']), int(array['T'])).coords(array, colums) + else: + raise TypeError('Not a pandas DataFrame or Series.') + + +class ImTransforms(ImTransformsBase): + """ Transforms class with methods to calculate channel transforms from bead files etc. 
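+    Transforms are loaded from a transform.yml file in the image's directory; if that file is missing,
+    they are calculated from bead files, saved, and a diagnostic tif is written so the result can be checked.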
+ """ + def __init__(self, path, cyllens, tracks=None, detectors=None, file=None, transforms=None): + super().__init__() + self.cyllens = cyllens + self.tracks = tracks + self.detectors = detectors + if transforms is None: + # TODO: check this + if re.search(r'^Pos\d+', os.path.basename(path.rstrip(os.path.sep))): + self.path = os.path.dirname(os.path.dirname(path)) + else: + self.path = os.path.dirname(path) + if file is not None: + if isinstance(file, str) and file.lower().endswith('.yml'): + self.ymlpath = file + self.beadfile = None + else: + self.ymlpath = os.path.join(self.path, 'transform.yml') + self.beadfile = file + else: + self.ymlpath = os.path.join(self.path, 'transform.yml') + self.beadfile = None + self.tifpath = self.ymlpath[:-3] + 'tif' + try: + self.load(self.ymlpath) + except Exception: + print('No transform file found, trying to generate one.') + if not self.files: + raise FileNotFoundError('No bead files found to calculate the transform from.') + self.calculate_transforms() + self.save(self.ymlpath) + self.save_transform_tiff() + print(f'Saving transform in {self.ymlpath}.') + print(f'Please check the transform in {self.tifpath}.') + else: # load from dict transforms + self.path = path + self.beadfile = file + for key, value in transforms.items(): + self[tuple([int(i) for i in key.split(':')])] = Transform(value) + + @cached_property + def files(self): + try: + if self.beadfile is None: + files = self.get_bead_files() + else: + files = self.beadfile + if isinstance(files, str): + files = (files,) + return files + except Exception: + return () + + def __reduce__(self): + return self.__class__, (self.path, self.cyllens, self.tracks, self.detectors, self.files, self.asdict()) + + def __call__(self, channel, time=None, tracks=None, detectors=None): + tracks = tracks or self.tracks + detectors = detectors or self.detectors + return super().__call__(channel, time, tracks, detectors) + + def get_bead_files(self): + files = sorted([os.path.join(self.path, f) for f in os.listdir(self.path) if f.lower().startswith('beads') + and not f.lower().endswith('.pdf')]) + if not files: + raise Exception('No bead file found!') + Files = [] + for file in files: + try: + if os.path.isdir(file): + file = os.path.join(file, 'Pos0') + with Imread(file) as im: # check for errors opening the file + pass + Files.append(file) + except Exception: + continue + if not Files: + raise Exception('No bead file found!') + return Files + + def calculate_transform(self, file): + """ When no channel is not transformed by a cylindrical lens, assume that the image is scaled by a factor 1.162 + in the horizontal direction + """ + with Imread(file) as im: + ims = [im.max(c) for c in range(im.shape[2])] + goodch = [c for c in range(im.shape[2]) if not im.isnoise(im.max(c))] + untransformed = [c for c in range(im.shape[2]) if self.cyllens[im.detector[c]].lower() == 'none'] + + good_and_untrans = sorted(set(goodch) & set(untransformed)) + if good_and_untrans: + masterch = good_and_untrans[0] + else: + masterch = goodch[0] + print(f'{untransformed = }, {masterch = }, {goodch = }') + C = Transform() + if not np.any(good_and_untrans): + M = C.matrix + M[0, 0] = 0.86 + C.matrix = M + Tr = Transforms() + for c in tqdm(goodch): + if c == masterch: + Tr[im.track[c], im.detector[c]] = C + else: + Tr[im.track[c], im.detector[c]] = Transform(ims[masterch], ims[c]) * C + return Tr + + def calculate_transforms(self): + Tq = [self.calculate_transform(file) for file in self.files] + for key in set([key for t in Tq for key in 
t.keys()]): + T = [t[key] for t in Tq if key in t] + if len(T) == 1: + self[key] = T[0] + else: + self[key] = Transform() + self[key].parameters = np.mean([t.parameters for t in T], 0) + self[key].dparameters = (np.std([t.parameters for t in T], 0) / np.sqrt(len(T))).tolist() + + def save_transform_tiff(self): + C = 0 + for file in self.files: + with Imread(file) as im: + C = max(C, im.shape[2]) + with IJTiffFile(self.tifpath, (C, 1, len(self.files))) as tif: + for t, file in enumerate(self.files): + with Imread(file) as im: + with Imread(file, transform=True) as jm: + for c in range(im.shape[2]): + tif.save(np.hstack((im.max(c), jm.max(c))), c, 0, t) + + +class ImShiftTransforms(ImTransformsBase): + """ Class to handle drift in xy. The image this is applied to must have a channeltransform already, which is then + replaced by this class. + """ + def __init__(self, im, shifts=None): + """ im: Calculate shifts from channel-transformed images + im, t x 2 array Sets shifts from array, one row per frame + im, dict {frame: shift} Sets shifts from dict, each key is a frame number from where a new shift is applied + im, file Loads shifts from a saved file + """ + super().__init__() + with (Imread(im, transform=True, drift=False) if isinstance(im, str) + else im.new(transform=True, drift=False)) as im: + self.impath = im.path + self.path = os.path.splitext(self.impath)[0] + '_shifts.txt' + self.tracks, self.detectors, self.files = im.track, im.detector, im.beadfile + if shifts is not None: + if isinstance(shifts, np.ndarray): + self.shifts = shifts + self.shifts2transforms(im) + elif isinstance(shifts, dict): + self.shifts = np.zeros((im.shape[4], 2)) + for k in sorted(shifts.keys()): + self.shifts[k:] = shifts[k] + self.shifts2transforms(im) + elif isinstance(shifts, str): + self.load(im, shifts) + elif os.path.exists(self.path): + self.load(im, self.path) + else: + self.calulate_shifts(im) + self.save() + + def __call__(self, channel, time, tracks=None, detectors=None): + tracks = tracks or self.tracks + detectors = detectors or self.detectors + track, detector = tracks[channel], detectors[channel] + if (track, detector, time) in self: + return self[track, detector, time] + elif (0, detector, time) in self: + return self[0, detector, time] + else: + return Transform() + + def __reduce__(self): + return self.__class__, (self.impath, self.shifts) + + def load(self, im, file): + self.shifts = np.loadtxt(file) + self.shifts2transforms(im) + + def save(self, file=None): + self.path = file or self.path + np.savetxt(self.path, self.shifts) + + def calulate_shifts0(self, im): + """ Calculate shifts relative to the first frame """ + im0 = im[:, 0, 0].squeeze().transpose(2, 0, 1) + + @parfor(range(1, im.shape[4]), (im, im0), desc='Calculating image shifts.') + def fun(t, im, im0): + return Transform(im0, im[:, 0, t].squeeze().transpose(2, 0, 1), 'translation') + transforms = [Transform()] + fun + self.shifts = np.array([t.parameters[4:] for t in transforms]) + self.setTransforms(transforms, im.transform) + + def calulate_shifts(self, im): + """ Calculate shifts relative to the previous frame """ + @parfor(range(1, im.shape[4]), (im,), desc='Calculating image shifts.') + def fun(t, im): + return Transform(im[:, 0, t-1].squeeze().transpose(2, 0, 1), im[:, 0, t].squeeze().transpose(2, 0, 1), + 'translation') + transforms = [Transform()] + fun + self.shifts = np.cumsum([t.parameters[4:] for t in transforms]) + self.setTransforms(transforms, im.transform) + + def shifts2transforms(self, im): + 
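+        # build a 3x3 affine translation matrix from each per-frame (x, y) shift
+        # and combine it with the existing channel transforms via setTransforms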
self.setTransforms([Transform(np.array(((1, 0, s[0]), (0, 1, s[1]), (0, 0, 1)))) + for s in self.shifts], im.transform) + + def setTransforms(self, shift_transforms, channel_transforms): + for key, value in channel_transforms.items(): + for t, T in enumerate(shift_transforms): + self[key[0], key[1], t] = T * channel_transforms[key] + + +class DequeDict(OrderedDict): + def __init__(self, maxlen=None, *args, **kwargs): + self.maxlen = maxlen + super().__init__(*args, **kwargs) + + def __truncate__(self): + if self.maxlen is not None: + while len(self) > self.maxlen: + self.popitem(False) + + def __setitem__(self, *args, **kwargs): + super().__setitem__(*args, **kwargs) + self.__truncate__() + + def update(self, *args, **kwargs): + super().update(*args, **kwargs) + self.__truncate__() + + +def tolist(item): + if isinstance(item, XmlData): + return [item] + elif hasattr(item, 'items'): + return item + elif isinstance(item, str): + return [item] + try: + iter(item) + return list(item) + except TypeError: + return list((item,)) + + +class Shape(tuple): + def __new__(cls, shape, axes='xyczt'): + instance = super().__new__(cls, shape) + instance.axes = axes.lower() + return instance + + def __getitem__(self, n): + if isinstance(n, str): + if len(n) == 1: + return self[self.axes.find(n.lower())] if n.lower() in self.axes else 1 + else: + return tuple(self[i] for i in n) + return super().__getitem__(n) + + @cached_property + def xyczt(self): + return tuple(self[i] for i in 'xyczt') + + +class XmlData(OrderedDict): + def __init__(self, elem=None): + super(XmlData, self).__init__() + if elem: + if isinstance(elem, dict): + self.update(elem) + else: + self.update(XmlData._todict(elem)[1]) + + def re_search(self, reg, default=None, *args, **kwargs): + return tolist(XmlData._output(XmlData._search(self, reg, True, default, *args, **kwargs)[1])) + + def search(self, key, default=None): + return tolist(XmlData._output(XmlData._search(self, key, False, default)[1])) + + def re_search_all(self, reg, *args, **kwargs): + K, V = XmlData._search_all(self, reg, True, *args, **kwargs) + return {k: XmlData._output(v) for k, v in zip(K, V)} + + def search_all(self, key): + K, V = XmlData._search_all(self, key, False) + return {k: XmlData._output(v) for k, v in zip(K, V)} + + @staticmethod + def _search(d, key, regex=False, default=None, *args, **kwargs): + if isinstance(key, (list, tuple)): + if len(key) == 1: + key = key[0] + else: + for v in XmlData._search_all(d, key[0], regex, *args, **kwargs)[1]: + found, value = XmlData._search(v, key[1:], regex, default, *args, **kwargs) + if found: + return True, value + return False, default + + if hasattr(d, 'items'): + for k, v in d.items(): + if isinstance(k, str): + if (not regex and k == key) or (regex and re.findall(key, k, *args, **kwargs)): + return True, v + elif isinstance(v, dict): + found, value = XmlData._search(v, key, regex, default, *args, **kwargs) + if found: + return True, value + elif isinstance(v, (list, tuple)): + for w in v: + found, value = XmlData._search(w, key, regex, default, *args, **kwargs) + if found: + return True, value + else: + found, value = XmlData._search(v, key, regex, default, *args, **kwargs) + if found: + return True, value + return False, default + + @staticmethod + def _search_all(d, key, regex=False, *args, **kwargs): + K = [] + V = [] + if hasattr(d, 'items'): + for k, v in d.items(): + if isinstance(k, str): + if (not regex and k == key) or (regex and re.findall(key, k, *args, **kwargs)): + K.append(k) + V.append(v) + elif 
isinstance(v, dict): + q, w = XmlData._search_all(v, key, regex, *args, **kwargs) + K.extend([str(k) + '|' + i for i in q]) + V.extend(w) + elif isinstance(v, (list, tuple)): + for j, val in enumerate(v): + q, w = XmlData._search_all(val, key, regex, *args, **kwargs) + K.extend([str(k) + '|' + str(j) + '|' + i for i in q]) + V.extend(w) + else: + q, w = XmlData._search_all(v, key, regex, *args, **kwargs) + K.extend([str(k) + '|' + i for i in q]) + V.extend(w) + return K, V + + @staticmethod + def _enumdict(d): + d2 = {} + for k, v in d.items(): + idx = [int(i) for i in re.findall(r'(?<=:)\d+$', k)] + if idx: + key = re.findall(r'^.*(?=:\d+$)', k)[0] + if key not in d2: + d2[key] = {} + d2[key][idx[0]] = d['{}:{}'.format(key, idx[0])] + else: + d2[k] = v + rec = False + for k, v in d2.items(): + if [int(i) for i in re.findall(r'(?<=:)\d+$', k)]: + rec = True + break + if rec: + return XmlData._enumdict(d2) + else: + return d2 + + @staticmethod + def _unique_children(l): + if l: + keys, values = zip(*l) + d = {} + for k in set(keys): + value = [v for m, v in zip(keys, values) if k == m] + if len(value) == 1: + d[k] = value[0] + else: + d[k] = value + return d + else: + return {} + + @staticmethod + def _todict(elem): + d = {} + if hasattr(elem, 'Key') and hasattr(elem, 'Value'): + name = elem.Key.cdata + d = elem.Value.cdata + return name, d + + if hasattr(elem, '_attributes') and elem._attributes is not None and 'ID' in elem._attributes: + name = elem._attributes['ID'] + elem._attributes.pop('ID') + elif hasattr(elem, '_name'): + name = elem._name + else: + name = 'none' + + if name == 'Value': + if hasattr(elem, 'children') and len(elem.children): + return XmlData._todict(elem.children[0]) + + if hasattr(elem, 'children'): + children = [XmlData._todict(child) for child in elem.children] + children = XmlData._unique_children(children) + if children: + d = OrderedDict(d, **children) + if hasattr(elem, '_attributes'): + children = elem._attributes + if children: + d = OrderedDict(d, **children) + if not len(d.keys()) and hasattr(elem, 'cdata'): + return name, elem.cdata + + return name, XmlData._enumdict(d) + + @staticmethod + def _output(s): + if isinstance(s, dict): + return XmlData(s) + elif isinstance(s, (tuple, list)): + return [XmlData._output(i) for i in s] + elif not isinstance(s, str): + return s + elif len(s) > 1 and s[0] == '[' and s[-1] == ']': + return [XmlData._output(i) for i in s[1:-1].split(', ')] + elif re.search(r'^[-+]?\d+$', s): + return int(s) + elif re.search(r'^[-+]?\d?\d*\.?\d+([eE][-+]?\d+)?$', s): + return float(s) + elif s.lower() == 'true': + return True + elif s.lower() == 'false': + return False + elif s.lower() == 'none': + return None + else: + return s + + def __getitem__(self, item): + value = super().__getitem__(item) + return XmlData(value) if isinstance(value, dict) else value + + +class Imread(np.lib.mixins.NDArrayOperatorsMixin, metaclass=ABCMeta): + """ class to read image files, while taking good care of important metadata, + currently optimized for .czi files, but can open anything that bioformats can handle + path: path to the image file + optional: + series: in case multiple experiments are saved in one file, like in .lif files + transform: automatically correct warping between channels, need transforms.py among others + drift: automatically correct for drift, only works if transform is not None or False + beadfile: image file(s) with beads which can be used for correcting warp + dtype: datatype to be used when returning frames + meta: define 
metadata, used for pickle-ing + + NOTE: run imread.kill_vm() at the end of your script/program, otherwise python might not terminate + + modify images on the fly with a decorator function: + define a function which takes an instance of this object, one image frame, + and the coordinates c, z, t as arguments, and one image frame as return + >> imread.frame_decorator = fun + then use imread as usually + + Examples: + >> im = imread('/DATA/lenstra_lab/w.pomp/data/20190913/01_YTL639_JF646_DefiniteFocus.czi') + >> im + << shows summary + >> im.shape + << (256, 256, 2, 1, 600) + >> plt.imshow(im(1, 0, 100)) + << plots frame at position c=1, z=0, t=100 (python type indexing), note: round brackets; always 2d array + with 1 frame + >> data = im[:,:,0,0,:25] + << retrieves 5d numpy array containing first 25 frames at c=0, z=0, note: square brackets; always 5d array + >> plt.imshow(im.max(0, None, 0)) + << plots max-z projection at c=0, t=0 + >> len(im) + << total number of frames + >> im.pxsize + << 0.09708737864077668 image-plane pixel size in um + >> im.laserwavelengths + << [642, 488] + >> im.laserpowers + << [0.02, 0.0005] in % + + See __init__ and other functions for more ideas. + + Subclassing: + Subclass this class to add more file types. A subclass should always have at least the following methods: + staticmethod _can_open(path): returns True when the subclass can open the image in path + __metadata__(self): pulls some metadata from the file and do other format specific things, it needs to + define a few properties, like shape, etc. + __frame__(self, c, z, t): this should return a single frame at channel c, slice z and time t + optional close(self): close the file in a proper way + optional field priority: subclasses with lower priority will be tried first, default = 99 + Any other method can be overridden as needed + wp@tl2019-2021 + """ + + # TODO: more numpy.ndarray methods as needed + # TODO: imread.std + # TODO: metadata in OME format tree + + priority = 99 + do_not_pickle = 'base', 'copies', 'cache' + + @staticmethod + @abstractmethod + def _can_open(path): # Override this method, and return true when the subclass can open the file + return False + + @abstractmethod + def __metadata__(self): + return + + @abstractmethod + def __frame__(self, c, z, t): # Override this, return the frame at c, z, t + return np.random.randint(0, 255, self.shape['xy']) + + def open(self): # Optionally override this, open file handles etc. + """ filehandles cannot be pickled and should be marked such by setting do_not_pickle = 'file_handle_name' """ + return + + def close(self): # Optionally override this, close file handles etc. 
+ return + + def __new__(cls, path=None, *args, **kwargs): + if cls is not Imread: + return super().__new__(cls) + if len(cls.__subclasses__()) == 0: + raise Exception('Restart python kernel please!') + if isinstance(path, Imread): + return path + for subclass in sorted(cls.__subclasses__(), key=lambda subclass: subclass.priority): + if subclass._can_open(path): + do_not_pickle = (cls.do_not_pickle,) if isinstance(cls.do_not_pickle, str) else cls.do_not_pickle + subclass_do_not_pickle = (subclass.do_not_pickle,) if isinstance(subclass.do_not_pickle, str) \ + else subclass.do_not_pickle if hasattr(subclass, 'do_not_pickle') else () + subclass.do_not_pickle = set(do_not_pickle).union(set(subclass_do_not_pickle)) + return super().__new__(subclass) + + def __init__(self, path, series=0, transform=False, drift=False, beadfile=None, sigma=None, dtype=None, + axes='cztxy'): + if isinstance(path, Imread): + return + self._shape = Shape((0, 0, 0, 0, 0)) + self.base = None + self.copies = [] + if isinstance(path, str): + self.path = os.path.abspath(path) + self.title = os.path.splitext(os.path.basename(self.path))[0] + self.acquisitiondate = datetime.fromtimestamp(os.path.getmtime(self.path)).strftime('%y-%m-%dT%H:%M:%S') + else: + self.path = path # ndarray + self.transform = transform + self.drift = drift + self.beadfile = beadfile + self.dtype = dtype + self.series = series + self.pxsize = 1e-1 + self.settimeinterval = 0 + self.pxsizecam = 0 + self.magnification = 0 + self.exposuretime = (0,) + self.deltaz = 1 + self.pcf = (1, 1) + self.laserwavelengths = [[]] + self.laserpowers = [[]] + self.powermode = 'normal' + self.optovar = (1,) + self.binning = 1 + self.collimator = (1,) + self.tirfangle = (0,) + self.gain = (100, 100) + self.objective = 'unknown' + self.filter = 'unknown' + self.NA = 1 + self.cyllens = ['None', 'None'] + self.duolink = 'None' + self.detector = [0, 1] + self.track = [0] + self.metadata = {} + self.cache = DequeDict(16) + self._frame_decorator = None + + self.open() + self.__metadata__() + self.file_shape = self.shape.xyczt + + if axes.lower() == 'squeeze': + self.axes = ''.join(i for i in 'cztxy' if self.shape[i] > 1) + elif axes.lower() == 'full': + self.axes = 'cztxy' + else: + self.axes = axes + self.slice = [np.arange(s, dtype=int) for s in self.shape.xyczt] + + # how far apart the centers of frame and sensor are + self.frameoffset = self.shape['x'] / 2, self.shape['y'] / 2 + if not hasattr(self, 'cnamelist'): + self.cnamelist = 'abcdefghijklmnopqrstuvwxyz'[:self.shape['c']] + + m = self.extrametadata + if m is not None: + try: + self.cyllens = m['CylLens'] + self.duolink = m['DLFilterSet'].split(' & ')[m['DLFilterChannel']] + if 'FeedbackChannels' in m: + self.feedback = m['FeedbackChannels'] + else: + self.feedback = m['FeedbackChannel'] + except Exception: + self.cyllens = ['None', 'None'] + self.duolink = 'None' + self.feedback = [] + try: + self.cyllenschannels = np.where([self.cyllens[self.detector[c]].lower() != 'none' + for c in range(self.shape['c'])])[0].tolist() + except Exception: + pass + self.set_transform() + try: + s = int(re.findall(r'_(\d{3})_', self.duolink)[0]) + except Exception: + s = 561 + if sigma is None: + try: + sigma = [] + for t, d in zip(self.track, self.detector): + l = np.array(self.laserwavelengths[t]) + 22 + sigma.append([l[l > s].max(initial=0), l[l < s].max(initial=0)][d]) + sigma = np.array(sigma) + sigma[sigma == 0] = 600 + sigma /= 2 * self.NA * self.pxsize * 1000 + self.sigma = sigma.tolist() + except Exception: + self.sigma = 
[2] * self.shape['c'] + else: + self.sigma = sigma + if 1.5 < self.NA: + self.immersionN = 1.661 + elif 1.3 < self.NA < 1.5: + self.immersionN = 1.518 + elif 1 < self.NA < 1.3: + self.immersionN = 1.33 + else: + self.immersionN = 1 + + @cached_property + def timeseries(self): + return self.shape['t'] > 1 + + @cached_property + def zstack(self): + return self.shape['z'] > 1 + + def set_transform(self): + # handle transforms + if self.transform is False or self.transform is None: + self.transform = None + else: + if isinstance(self.transform, Transforms): + self.transform = self.transform + else: + if isinstance(self.transform, str): + self.transform = ImTransforms(self.path, self.cyllens, self.track, self.detector, self.transform) + else: + self.transform = ImTransforms(self.path, self.cyllens, self.track, self.detector, self.beadfile) + if self.drift is True: + self.transform = ImShiftTransforms(self) + elif not (self.drift is False or self.drift is None): + self.transform = ImShiftTransforms(self, self.drift) + self.transform.adapt(self.frameoffset, self.shape.xyczt) + self.beadfile = self.transform.files + + def __framet__(self, c, z, t): + return self.transform_frame(self.__frame__(c, z, t), c, t) + + def new(self, **kwargs): + # TODO: fix this function + c, a = self.__reduce__() + new_kwargs = {key: value for key, value in zip(inspect.getfullargspec(c).args[1:], a)} + for key, value in kwargs.items(): + new_kwargs[key] = value + return c(**new_kwargs) + + @staticmethod + def get_config(file): + """ Open a yml parameter file + """ + loader = yaml.SafeLoader + loader.add_implicit_resolver( + r'tag:yaml.org,2002:float', + re.compile(r'''^(?: + [-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\.[0-9_]+(?:[eE][-+][0-9]+)? 
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(r'-+0123456789.')) + with open(file, 'r') as f: + return yaml.load(f, loader) + + @staticmethod + def kill_vm(): + JVM().kill_vm() + + @property + def frame_decorator(self): + return self._frame_decorator + + @frame_decorator.setter + def frame_decorator(self, decorator): + self._frame_decorator = decorator + self.cache = DequeDict(self.cache.maxlen) + + def __repr__(self): + """ gives a helpful summary of the recorded experiment + """ + s = [100 * '#'] + s.append('path/filename: {}'.format(self.path)) + s.append('shape ({}): '.format(self.axes) + ' ' * (5 - self.ndim) + + ' x '.join(('{}',) * self.ndim).format(*self.shape)) + s.append('pixelsize: {:.2f} nm'.format(self.pxsize * 1000)) + if self.zstack: + s.append('z-interval: {:.2f} nm'.format(self.deltaz * 1000)) + s.append('Exposuretime: ' + ('{:.2f} ' * len(self.exposuretime)).format( + *(np.array(self.exposuretime) * 1000)) + 'ms') + if self.timeseries: + if self.timeval and np.diff(self.timeval).shape[0]: + s.append('t-interval: {:.3f} ± {:.3f} s'.format( + np.diff(self.timeval).mean(), np.diff(self.timeval).std())) + else: + s.append('t-interval: {:.2f} s'.format(self.settimeinterval)) + s.append('binning: {}x{}'.format(self.binning, self.binning)) + s.append('laser colors: ' + ' | '.join([' & '.join(len(l)*('{:.0f}',)).format(*l) + for l in self.laserwavelengths]) + ' nm') + s.append('laser powers: ' + ' | '.join([' & '.join(len(l)*('{}',)).format(*[100 * i for i in l]) + for l in self.laserpowers]) + ' %') + s.append('objective: {}'.format(self.objective)) + s.append('magnification: {}x'.format(self.magnification)) + s.append('optovar: ' + (' {}' * len(self.optovar)).format(*self.optovar) + 'x') + s.append('filterset: {}'.format(self.filter)) + s.append('powermode: {}'.format(self.powermode)) + s.append('collimator: ' + (' {}' * len(self.collimator)).format(*self.collimator)) + s.append('TIRF angle: ' + (' {:.2f}°' * len(self.tirfangle)).format(*self.tirfangle)) + s.append('gain: ' + (' {:.0f}' * len(self.gain)).format(*self.gain)) + s.append('pcf: ' + (' {:.2f}' * len(self.pcf)).format(*self.pcf)) + return '\n'.join(s) + + def __str__(self): + return self.path + + def __len__(self): + return self.shape[0] + + def __call__(self, *n): + """ returns single 2D frame + im(n): index linearly in czt order + im(c,z): return im(c,z,t=0) + im(c,z,t): return im(c,z,t) + """ + if len(n) == 1: + n = self.get_channel(n[0]) + c = int(n % self.shape['c']) + z = int((n // self.shape['c']) % self.shape['z']) + t = int((n // (self.shape['c'] * self.shape['z'])) % self.shape['t']) + return self.frame(c, z, t) + else: + return self.frame(*[int(i) for i in n]) + + def __getitem__(self, n): + # None = : + if isinstance(n, (slice, Number)): + n = (n,) + elif isinstance(n, type(Ellipsis)): + n = (None,) * len(self.axes) + n = list(n) + + # deal with ... 
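+        # a single Ellipsis is dropped or expanded into None entries (None meaning
+        # 'take everything along that axis') so that the index ends up with ndim elements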
+ ell = [i for i, e in enumerate(n) if isinstance(e, type(Ellipsis))] + if len(ell) > 1: + raise IndexError("an index can only have a single ellipsis (...)") + if len(ell): + if len(n) > self.ndim: + n.remove(Ellipsis) + else: + n[ell[0]] = None + while len(n) < self.ndim: + n.insert(ell[0], None) + while len(n) < self.ndim: + n.append(None) + + T = [self.shape.axes.find(i) for i in 'xyczt'] + n = [n[j] if 0 <= j < len(n) else None for j in T] # reorder n + + new_slice = [] + for s, e in zip(self.slice, n): + if e is None: + new_slice.append(s) + else: + new_slice.append(s[e]) + + # TODO: check output dimensionality when requested shape in some dimension is 1 + if all([isinstance(s, Number) or s.size == 1 for s in new_slice]): + return self.block(*new_slice).item() + else: + new = self.copy() + new.slice = new_slice + new._shape = Shape([1 if isinstance(s, Number) else len(s) for s in new_slice]) + new.axes = ''.join(j for j in self.axes if j in [i for i, s in zip('xyczt', new_slice) + if not isinstance(s, Number)]) + return new + + def __contains__(self, item): + def unique_yield(l, i): + for k in l: + print(f'{k} from cache') + yield k + for k in i: + if k not in l: + print(k) + yield k + for idx in unique_yield(list(self.cache.keys()), + product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t']))): + xyczt = (slice(None), slice(None)) + idx + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + if item in np.asarray(self[in_idx]): + return True + return False + + def __array__(self, dtype=None): + block = self.block(*self.slice) + T = [self.shape.axes.find(i) for i in 'xyczt'] + S = tuple({i for i, j in enumerate(T) if j == -1}.union( + {i for i, j in enumerate(self.slice) if isinstance(j, Number)})) + block = block.squeeze(S) + if dtype is not None: + block = block.astype(dtype) + if block.ndim == 0: + return block.item() + axes = ''.join(j for i, j in enumerate('xyczt') if i not in S) + return block.transpose([axes.find(i) for i in self.shape.axes if i in axes]) + + def asarray(self): + return self.__array__() + + def astype(self, dtype): + new = self.copy() + new.dtype = dtype + return new + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + exception = None + self.base = None + for copy in self.copies: + try: + copy.__exit__() + except Exception as e: + exception = e + self.copies = [] + if hasattr(self, 'close'): + self.close() + if exception: + raise exception + + def __getstate__(self): + return {key: value for key, value in self.__dict__.items() if key not in self.do_not_pickle} + + def __setstate__(self, state): + """ What happens during unpickling """ + self.__dict__.update(state) + self.open() + self.copies = [] + self.cache = DequeDict(16) + + def __del__(self): + print('delete') + if not self.copies: + if self.base is None: + self.close() + else: + self.base.copies.remove(self) + + def __copy__(self): + return self.copy() + + def copy(self): + new = Imread.__new__(self.__class__) + new.copies = [] + new.base = self + for key, value in self.__dict__.items(): + if not hasattr(new, key): + new.__dict__[key] = value + self.copies.append(new) + return new + + @property + def shape(self): + return self._shape + + @shape.setter + def shape(self, value): + self._shape = Shape((value['xyczt'.find(i.lower())] for i in self.axes), self.axes) + + @property + def axes(self): + return self.shape.axes + + @axes.setter + def axes(self, value): + shape = self.shape[value] + if isinstance(shape, Number): + shape = (shape,) + self._shape 
= Shape(shape, value) + + @property + def ndim(self): + return len(self.shape) + + def squeeze(self, axes=None): + new = self.copy() + if axes is None: + axes = tuple(i for i, j in enumerate(new.shape) if j == 1) + elif isinstance(axes, Number): + axes = (axes,) + else: + axes = tuple(new.axes.find(ax) if isinstance(ax, str) else ax for ax in axes) + if any([new.shape[ax] != 1 for ax in axes]): + raise ValueError('cannot select an axis to squeeze out which has size not equal to one') + new.axes = ''.join(j for i, j in enumerate(new.axes) if i not in axes) + return new + + def transpose(self, axes=None): + new = self.copy() + if axes is None: + new.axes = new.axes[::-1] + else: + new.axes = ''.join(ax if isinstance(ax, str) else new.axes[ax] for ax in axes) + return new + + @property + def T(self): + return self.transpose() + + def swapaxes(self, axis1, axis2): + new = self.copy() + axes = new.axes + if isinstance(axis1, str): + axis1 = axes.find(axis1) + if isinstance(axis2, str): + axis2 = axes.find(axis2) + axes[axis1], axes[axis2] = axes[axis2], axes[axis1] + new.axes = axes + return new + + def moveaxis(self, source, destination): + raise NotImplementedError('moveaxis it not implemented') + + def czt(self, n): + """ returns indices c, z, t used when calling im(n) + """ + if not isinstance(n, tuple): + c = n % self.shape['c'] + z = (n // self.shape['c']) % self.shape['z'] + t = (n // (self.shape['c'] * self.shape['z'])) % self.shape['t'] + return c, z, t + n = list(n) + if len(n) == 2 or len(n) == 4: + n.append(slice(0, -1, 1)) + if len(n) == 3: + n = list(n) + for i, (ax, e) in enumerate(zip('czt', n)): + if isinstance(e, slice): + a = [e.start, e.stop, e.step] + if a[0] is None: + a[0] = 0 + if a[1] is None: + a[1] = -1 + if a[2] is None: + a[2] = 1 + for j in range(2): + if a[j] < 0: + a[j] %= self.shape[ax] + a[j] += 1 + n[i] = np.arange(*a) + n = [np.array(i) for i in n] + return tuple(n) + if len(n) == 5: + return tuple(n[2:5]) + + def czt2n(self, c, z, t): + return c + z * self.shape['c'] + t * self.shape['c'] * self.shape['z'] + + def transform_frame(self, frame, c, t=0): + if self.transform is None: + return frame + else: + return self.transform(c, t, self.track, self.detector).frame(frame) + + def get_czt(self, c, z, t): + czt = [] + for i, n in zip('czt', (c, z, t)): + if n is None: + czt.append(list(range(self.shape[i]))) + elif isinstance(n, range): + if n.stop < 0: + stop = n.stop % self.shape[i] + elif n.stop > self.shape[i]: + stop = self.shape[i] + else: + stop = n.stop + czt.append(list(range(n.start % self.shape[i], stop, n.step))) + elif isinstance(n, Number): + czt.append([n % self.shape[i]]) + else: + czt.append([k % self.shape[i] for k in n]) + return [self.get_channel(c) for c in czt[0]], *czt[1:] + + def _stats(self, fun, c=None, z=None, t=None, ffun=None): + """ fun = np.min, np.max, np.sum or their nan varieties """ + warn('Warning: _stats is deprecated.') + c, z, t = self.get_czt(c, z, t) + if fun in (np.min, np.nanmin): + val = np.inf + elif fun in (np.max, np.nanmax): + val = -np.inf + else: + val = 0 + if ffun is None: + ffun = lambda im: im + T = np.full(self.shape['xy'], val, self.dtype) + for ic in c: + m = np.full(self.shape['xy'], val, self.dtype) + if isinstance(self.transform, ImShiftTransforms): + for it in t: + n = np.full(self.shape['xy'], val, self.dtype) + for iz in z: + n = fun((n, ffun(self.__frame__(ic, iz, it))), 0) + m = self.transform_frame(n, ic, it) + else: + for it, iz in product(t, z): + m = fun((m, ffun(self.__frame__(ic, iz, it))), 
0) + if isinstance(self.transform, ImTransforms): + m = self.transform_frame(m, ic, 0) + T = fun((T, m), 0) + return T + + @staticmethod + def __array_arg_fun__(self, fun, axis=None, out=None): + """ frame-wise application of np.argmin and np.argmax """ + if axis is None: + value = arg = None + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + new = np.asarray(self[in_idx]) + new_arg = np.unravel_index(fun(new), new.shape) + new_value = new[new_arg] + if value is None: + arg = new_arg + idx + value = new_value + else: + i = fun((value, new_value)) + arg = (arg, new_arg + idx)[i] + value = (value, new_value)[i] + axes = ''.join(i for i in self.axes if i in 'xy') + 'czt' + arg = np.ravel_multi_index([arg[axes.find(i)] for i in self.axes], self.shape) + if out is None: + return arg + else: + out.itemset(arg) + return out + else: + if isinstance(axis, str): + axis_str, axis_idx = axis, self.axes.index(axis) + else: + axis_str, axis_idx = self.axes[axis], axis + if axis_str not in self.axes: + raise IndexError(f'Axis {axis_str} not in {self.axes}.') + out_shape = list(self.shape) + out_axes = list(self.axes) + out_shape.pop(axis_idx) + out_axes.pop(axis_idx) + if out is None: + out = np.zeros(out_shape, int) + if axis_str in 'xy': + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + out_idx = tuple(xyczt['xyczt'.find(i)] for i in out_axes) + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + new = self[in_idx] + out[out_idx] = fun(np.asarray(new), new.axes.find(axis_str)) + else: + value = np.zeros(out.shape, self.dtype) + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + out_idx = tuple(xyczt['xyczt'.find(i)] for i in out_axes) + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + new_value = self[in_idx] + new_arg = np.full_like(new_value, idx['czt'.find(axis_str)]) + if idx['czt'.find(axis_str)] == 0: + value[out_idx] = new_value + out[out_idx] = new_arg + else: + old_value = value[out_idx] + i = fun((old_value, new_value), 0) + value[out_idx] = np.where(i, new_value, old_value) + out[out_idx] = np.where(i, new_arg, out[out_idx]) + return out + + def argmin(self, *args, **kwargs): + return Imread.__array_arg_fun__(self, np.argmin, *args, **kwargs) + + def argmax(self, *args, **kwargs): + return Imread.__array_arg_fun__(self, np.argmax, *args, **kwargs) + + def __array_fun__(self, fun, axis=None, dtype=None, out=None, keepdims=False, initial=None, where=True, ffun=None): + """ frame-wise application of np.min, np.max, np.sum, np.mean and theis nan equivalents """ + if ffun is None: + ffun = lambda im: im + if dtype is None: + dtype = self.dtype if out is None else out.dtype + + # TODO: smarter transforms + if where is not True: + raise NotImplementedError('Argument where != True is not implemented.') + if axis is None: + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + initial = fun(np.asarray(ffun(self[in_idx])), initial=initial) + if out is None: + return np.array(initial, dtype, ndmin=self.ndim) if keepdims else initial + else: + out.itemset(initial) + return out + else: + if isinstance(axis, str): + axis_str, 
axis_idx = axis, self.axes.index(axis) + else: + axis_idx = axis % self.ndim + axis_str = self.axes[axis_idx] + if axis_str not in self.axes: + raise IndexError(f'Axis {axis_str} not in {self.axes}.') + out_shape = list(self.shape) + out_axes = list(self.axes) + if not keepdims: + out_shape.pop(axis_idx) + out_axes.pop(axis_idx) + if out is None: + out = np.zeros(out_shape, dtype) + if axis_str in 'xy': + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + out_idx = tuple(xyczt['xyczt'.find(i)] for i in out_axes) + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + new = ffun(self[in_idx]) + out[out_idx] = fun(np.asarray(new), new.axes.find(axis_str), initial=initial) + else: + for idx in product(range(self.shape['c']), range(self.shape['z']), range(self.shape['t'])): + xyczt = (slice(None), slice(None)) + idx + out_idx = tuple(xyczt['xyczt'.find(i)] for i in out_axes) + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + if idx['czt'.find(axis_str)] == 0: + out[out_idx] = fun((ffun(self[in_idx]),), 0, initial=initial) + else: + out[out_idx] = fun((out[out_idx], self[in_idx]), 0) + return out + + def sum(self, axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.sum, axis, out=out, dtype=dtype, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def nansum(self, axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.nansum, axis, out=out, dtype=dtype, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def min(self, axis=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.min, axis, out=out, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def nanmin(self, axis=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.nanmin, axis, out=out, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def max(self, axis=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.max, axis, out=out, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def nanmax(self, axis=None, out=None, keepdims=False, initial=0, where=True, ffun=None, **kwargs): + return Imread.__array_fun__(self, np.nanmax, axis, out=out, keepdims=keepdims, initial=initial, + where=where, ffun=ffun) + + def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True, **kwargs): + res = self.sum(axis, out=out, keepdims=keepdims, where=where) + shape = np.prod(self.shape) if axis is None else self.shape[axis] + if out is None: + res = res / shape + if dtype is not None: + res = res.astype(dtype) + return res + else: + if out.dtype.kind in 'ui': + res //= shape + else: + res /= shape + return res.astype(out.dtype, copy=False) + + def nanmean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True, **kwargs): + res = self.nansum(axis, out=out, keepdims=keepdims, where=where) + if out is None: + res = res / self.sum(axis, None, keepdims=keepdims, where=where, ffun=lambda x: np.invert(np.isnan(x))) + if dtype is None: + res = res.astype(dtype) + return res + else: + if out.dtype.kind in 'ui': + res //= self.sum(axis, None, keepdims=keepdims, where=where, ffun=lambda x: np.invert(np.isnan(x))) + else: + res /= 
self.sum(axis, None, keepdims=keepdims, where=where, ffun=lambda x: np.invert(np.isnan(x))) + return res.astype(out.dtype, copy=False) + + def reshape(self, *args, **kwargs): + return np.asarray(self).reshape(*args, **kwargs) + + @property + def is_noise(self): + """ True if volume only has noise """ + F = np.fft.fftn(self) + S = np.fft.fftshift(np.fft.ifftn(F * F.conj()).real / np.sum(self ** 2)) + return -np.log(1 - S[tuple([0] * S.ndim)]) > 5 + + @property + def dtype(self): + return self._dtype + + @dtype.setter + def dtype(self, value): + self._dtype = np.dtype(value) + + def get_channel(self, channel_name): + if not isinstance(channel_name, str): + return channel_name + else: + c = [i for i, c in enumerate(self.cnamelist) if c.lower().startswith(channel_name.lower())] + assert len(c) > 0, 'Channel {} not found in {}'.format(c, self.cnamelist) + assert len(c) < 2, 'Channel {} not unique in {}'.format(c, self.cnamelist) + return c[0] + + def frame(self, c=0, z=0, t=0): + """ returns single 2D frame + """ + c = self.get_channel(c) + c %= self.file_shape[2] + z %= self.file_shape[3] + t %= self.file_shape[4] + + # cache last n (default 16) frames in memory for speed (~250x faster) + if (c, z, t) in self.cache: + self.cache.move_to_end((c, z, t)) + f = self.cache[(c, z, t)] + else: + f = self.__framet__(c, z, t) + if self.frame_decorator is not None: + f = self.frame_decorator(self, f, c, z, t) + self.cache[(c, z, t)] = f + if self.dtype is not None: + return f.copy().astype(self.dtype) + else: + return f.copy() + + def data(self, c=0, z=0, t=0): + """ returns 3D stack of frames + """ + c, z, t = [np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) for i, e in zip('czt', (c, z, t))] + return np.dstack([self.frame(ci, zi, ti) for ci, zi, ti in product(c, z, t)]) + + def block(self, x=None, y=None, c=None, z=None, t=None): + """ returns 5D block of frames + """ + x, y, c, z, t = [np.arange(self.shape[i]) if e is None else np.array(e, ndmin=1) + for i, e in zip('xyczt', (x, y, c, z, t))] + d = np.full((len(x), len(y), len(c), len(z), len(t)), np.nan, self.dtype) + for (ci, cj), (zi, zj), (ti, tj) in product(enumerate(c), enumerate(z), enumerate(t)): + d[:, :, ci, zi, ti] = self.frame(cj, zj, tj)[x][:, y] + return d + + @cached_property + def timeval(self): + if hasattr(self, 'metadata') and isinstance(self.metadata, XmlData): + image = self.metadata.search('Image') + if (isinstance(image, dict) and self.series in image) or (isinstance(image, list) and len(image)): + image = XmlData(image[0]) + return sorted(np.unique(image.search_all('DeltaT').values()))[:self.shape['t']] + else: + return (np.arange(self.shape['t']) * self.settimeinterval).tolist() + + @cached_property + def timeinterval(self): + return float(np.diff(self.timeval).mean()) if len(self.timeval) > 1 else 1 + + @cached_property + def piezoval(self): + """ gives the height of the piezo and focus motor, only available when CylLensGUI was used + """ + def upack(idx): + time = list() + val = list() + if len(idx) == 0: + return time, val + for i in idx: + time.append(int(re.search(r'\d+', n[i]).group(0))) + val.append(w[i]) + return zip(*sorted(zip(time, val))) + + # Maybe the values are stored in the metadata + n = self.metadata.search('LsmTag|Name')[0] + w = self.metadata.search('LsmTag')[0] + if n is not None: + # n = self.metadata['LsmTag|Name'][1:-1].split(', ') + # w = str2float(self.metadata['LsmTag'][1:-1].split(', ')) + + pidx = np.where([re.search(r'^Piezo\s\d+$', x) is not None for x in n])[0] + sidx = 
np.where([re.search(r'^Zstage\s\d+$', x) is not None for x in n])[0] + + ptime, pval = upack(pidx) + stime, sval = upack(sidx) + + # Or maybe in an extra '.pzl' file + else: + m = self.extrametadata + if m is not None and 'p' in m: + q = np.array(m['p']) + if not len(q.shape): + q = np.zeros((1, 3)) + + ptime = [int(i) for i in q[:, 0]] + pval = [float(i) for i in q[:, 1]] + sval = [float(i) for i in q[:, 2]] + + else: + ptime = [] + pval = [] + sval = [] + + df = pandas.DataFrame(columns=['frame', 'piezoZ', 'stageZ']) + df['frame'] = ptime + df['piezoZ'] = pval + df['stageZ'] = np.array(sval) - np.array(pval) - \ + self.metadata.re_search(r'AcquisitionModeSetup\|ReferenceZ', 0)[0] * 1e6 + + # remove duplicates + df = df[~df.duplicated('frame', 'last')] + return df + + @cached_property + def extrametadata(self): + if isinstance(self.path, str) and len(self.path) > 3: + if os.path.isfile(self.path[:-3] + 'pzl2'): + pname = self.path[:-3] + 'pzl2' + elif os.path.isfile(self.path[:-3] + 'pzl'): + pname = self.path[:-3] + 'pzl' + else: + return + try: + return self.get_config(pname) + except Exception: + return + return + + def save_as_tiff(self, fname=None, c=None, z=None, t=None, split=False, bar=True, pixel_type='uint16'): + """ saves the image as a tiff-file + split: split channels into different files + """ + if fname is None: + if isinstance(self.path, str): + fname = self.path[:-3] + 'tif' + else: + raise Exception('No filename given.') + elif not fname[-3:] == 'tif': + fname += '.tif' + if split: + for i in range(self.shape['c']): + if self.timeseries: + self.save_as_tiff(fname[:-3] + '_C{:01d}.tif'.format(i), i, 0, None, False, bar, pixel_type) + else: + self.save_as_tiff(fname[:-3] + '_C{:01d}.tif'.format(i), i, None, 0, False, bar, pixel_type) + else: + n = [c, z, t] + for i, ax in enumerate('czt'): + if n[i] is None: + n[i] = range(self.shape[ax]) + elif not isinstance(n[i], (tuple, list)): + n[i] = (n[i],) + + shape = [len(i) for i in n] + at_least_one = False + with IJTiffFile(fname, shape, pixel_type, pxsize=self.pxsize, deltaz=self.deltaz) as tif: + for i, m in tqdm(zip(product(*[range(s) for s in shape]), product(*n)), + total=np.prod(shape), desc='Saving tiff', disable=not bar): + if np.any(self(*m)) or not at_least_one: + tif.save(self(*m), *i) + at_least_one = True + + @cached_property + def summary(self): + return self.__repr__() + + +def main(): + parser = ArgumentParser(description='Display info and save as tif') + parser.add_argument('file', help='image_file') + parser.add_argument('out', help='path to tif out', type=str, default=None, nargs='?') + parser.add_argument('-r', '--register', help='register channels', action='store_true') + parser.add_argument('-c', '--channel', help='channel', type=int, default=None) + parser.add_argument('-z', '--zslice', help='z-slice', type=int, default=None) + parser.add_argument('-t', '--time', help='time', type=int, default=None) + parser.add_argument('-s', '--split', help='split channels', action='store_true') + parser.add_argument('-f', '--force', help='force overwrite', action='store_true') + args = parser.parse_args() + + if os.path.exists(args.file): + with Imread(args.file, transform=args.register) as im: + print(im.summary) + if args.out: + out = os.path.abspath(args.out) + if not os.path.exists(os.path.dirname(out)): + os.makedirs(os.path.dirname(out)) + if os.path.exists(out) and not args.force: + print('File {} exists already, add the -f flag if you want to overwrite it.'.format(args.out)) + else: + im.save_as_tiff(out, 
args.channel, args.zslice, args.time, args.split) + else: + print('File does not exist.') + + +from ndbioimage.readers import * diff --git a/ndbioimage/_version.py b/ndbioimage/_version.py new file mode 100644 index 0000000..e1d65be --- /dev/null +++ b/ndbioimage/_version.py @@ -0,0 +1,2 @@ +__version__ = '2022.7.0' +__git_commit_hash__ = 'unknown' diff --git a/ndbioimage/jvm.py b/ndbioimage/jvm.py new file mode 100644 index 0000000..349a5cc --- /dev/null +++ b/ndbioimage/jvm.py @@ -0,0 +1,38 @@ +try: + import javabridge + import bioformats + + class JVM: + """ There can be only one java virtual machine per python process, + so this is a singleton class to manage the jvm. + """ + _instance = None + vm_started = False + vm_killed = False + + def __new__(cls, *args): + if cls._instance is None: + cls._instance = object.__new__(cls) + return cls._instance + + def start_vm(self): + if not self.vm_started and not self.vm_killed: + javabridge.start_vm(class_path=bioformats.JARS, run_headless=True) + outputstream = javabridge.make_instance('java/io/ByteArrayOutputStream', "()V") + printstream = javabridge.make_instance('java/io/PrintStream', "(Ljava/io/OutputStream;)V", outputstream) + javabridge.static_call('Ljava/lang/System;', "setOut", "(Ljava/io/PrintStream;)V", printstream) + javabridge.static_call('Ljava/lang/System;', "setErr", "(Ljava/io/PrintStream;)V", printstream) + self.vm_started = True + log4j = javabridge.JClassWrapper("loci.common.Log4jTools") + log4j.enableLogging() + log4j.setRootLevel("ERROR") + + if self.vm_killed: + raise Exception('The JVM was killed before, and cannot be restarted in this Python process.') + + def kill_vm(self): + javabridge.kill_vm() + self.vm_started = False + self.vm_killed = True +except ImportError: + JVM = None diff --git a/ndbioimage/readers/__init__.py b/ndbioimage/readers/__init__.py new file mode 100644 index 0000000..7d6663a --- /dev/null +++ b/ndbioimage/readers/__init__.py @@ -0,0 +1,3 @@ +import os +__all__ = [os.path.splitext(os.path.basename(file))[0] for file in os.listdir(os.path.dirname(__file__)) + if file.endswith('.py') and not file == os.path.basename(__file__)] diff --git a/ndbioimage/readers/bfread.py b/ndbioimage/readers/bfread.py new file mode 100644 index 0000000..1a00a48 --- /dev/null +++ b/ndbioimage/readers/bfread.py @@ -0,0 +1,89 @@ +from ndbioimage import Imread, XmlData, JVM +import os +import numpy as np +import untangle + +if JVM is not None: + import bioformats + + class Reader(Imread): + """ This class is used as a last resort, when we don't have another way to open the file. We don't like it + because it requires the java vm. 
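+        Requires a working Java installation and the javabridge and bioformats Python packages
+        (see the bioformats extra in the README).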
+ """ + priority = 99 # panic and open with BioFormats + do_not_pickle = 'reader', 'key', 'jvm' + + @staticmethod + def _can_open(path): + return True + + def open(self): + self.jvm = JVM() + self.jvm.start_vm() + self.key = np.random.randint(1e9) + self.reader = bioformats.get_image_reader(self.key, self.path) + + def __metadata__(self): + s = self.reader.rdr.getSeriesCount() + if self.series >= s: + print('Series {} does not exist.'.format(self.series)) + self.reader.rdr.setSeries(self.series) + + X = self.reader.rdr.getSizeX() + Y = self.reader.rdr.getSizeY() + C = self.reader.rdr.getSizeC() + Z = self.reader.rdr.getSizeZ() + T = self.reader.rdr.getSizeT() + self.shape = (X, Y, C, Z, T) + + omexml = bioformats.get_omexml_metadata(self.path) + self.metadata = XmlData(untangle.parse(omexml)) + + image = list(self.metadata.search_all('Image').values()) + if len(image) and self.series in image[0]: + image = XmlData(image[0][self.series]) + else: + image = self.metadata + + unit = lambda u: 10 ** {'nm': 9, 'µm': 6, 'um': 6, 'mm': 3, 'm': 0}[u] + + pxsizeunit = image.search('PhysicalSizeXUnit')[0] + pxsize = image.search('PhysicalSizeX')[0] + if pxsize is not None: + self.pxsize = pxsize / unit(pxsizeunit) * 1e6 + + if self.zstack: + deltazunit = image.search('PhysicalSizeZUnit')[0] + deltaz = image.search('PhysicalSizeZ')[0] + if deltaz is not None: + self.deltaz = deltaz / unit(deltazunit) * 1e6 + + if self.path.endswith('.lif'): + self.title = os.path.splitext(os.path.basename(self.path))[0] + self.exposuretime = self.metadata.re_search(r'WideFieldChannelInfo\|ExposureTime', self.exposuretime) + if self.timeseries: + self.settimeinterval = \ + self.metadata.re_search(r'ATLCameraSettingDefinition\|CycleTime', self.settimeinterval * 1e3)[ + 0] / 1000 + if not self.settimeinterval: + self.settimeinterval = self.exposuretime[0] + self.pxsizecam = self.metadata.re_search(r'ATLCameraSettingDefinition\|TheoCamSensorPixelSizeX', + self.pxsizecam) + self.objective = self.metadata.re_search(r'ATLCameraSettingDefinition\|ObjectiveName', 'none')[0] + self.magnification = \ + self.metadata.re_search(r'ATLCameraSettingDefinition\|Magnification', self.magnification)[0] + elif self.path.endswith('.ims'): + self.magnification = self.metadata.search('LensPower', 100)[0] + self.NA = self.metadata.search('NumericalAperture', 1.47)[0] + self.title = self.metadata.search('Name', self.title) + self.binning = self.metadata.search('BinningX', 1)[0] + + def __frame__(self, *args): + frame = self.reader.read(*args, rescale=False).astype('float') + if frame.ndim == 3: + return frame[..., args[0]] + else: + return frame + + def close(self): + bioformats.release_image_reader(self.key) diff --git a/ndbioimage/readers/cziread.py b/ndbioimage/readers/cziread.py new file mode 100644 index 0000000..595e7a9 --- /dev/null +++ b/ndbioimage/readers/cziread.py @@ -0,0 +1,115 @@ +from ndbioimage import Imread, XmlData, tolist +import czifile +import untangle +import numpy as np +import re +from functools import cached_property + + +class Reader(Imread): + priority = 0 + do_not_pickle = 'reader', 'filedict' + + @staticmethod + def _can_open(path): + return isinstance(path, str) and path.endswith('.czi') + + def open(self): + self.reader = czifile.CziFile(self.path) + filedict = {} + for directory_entry in self.reader.filtered_subblock_directory: + idx = self.get_index(directory_entry, self.reader.start) + for c in range(*idx[self.reader.axes.index('C')]): + for z in range(*idx[self.reader.axes.index('Z')]): + for t in 
range(*idx[self.reader.axes.index('T')]): + if (c, z, t) in filedict: + filedict[(c, z, t)].append(directory_entry) + else: + filedict[(c, z, t)] = [directory_entry] + self.filedict = filedict + + def close(self): + self.reader.close() + + def __metadata__(self): + # TODO: make sure frame function still works when a subblock has data from more than one frame + self.shape = tuple([self.reader.shape[self.reader.axes.index(directory_entry)] for directory_entry in 'XYCZT']) + self.metadata = XmlData(untangle.parse(self.reader.metadata())) + + image = [i for i in self.metadata.search_all('Image').values() if i] + if len(image) and self.series in image[0]: + image = XmlData(image[0][self.series]) + else: + image = self.metadata + + pxsize = image.search('ScalingX')[0] + if pxsize is not None: + self.pxsize = pxsize * 1e6 + if self.zstack: + deltaz = image.search('ScalingZ')[0] + if deltaz is not None: + self.deltaz = deltaz * 1e6 + + self.title = self.metadata.re_search(('Information', 'Document', 'Name'), self.title)[0] + self.acquisitiondate = self.metadata.re_search(('Information', 'Document', 'CreationDate'), + self.acquisitiondate)[0] + self.exposuretime = self.metadata.re_search(('TrackSetup', 'CameraIntegrationTime'), self.exposuretime) + if self.timeseries: + self.settimeinterval = self.metadata.re_search(('Interval', 'TimeSpan', 'Value'), + self.settimeinterval * 1e3)[0] / 1000 + if not self.settimeinterval: + self.settimeinterval = self.exposuretime[0] + self.pxsizecam = self.metadata.re_search(('AcquisitionModeSetup', 'PixelPeriod'), self.pxsizecam) + self.magnification = self.metadata.re_search('NominalMagnification', self.magnification)[0] + attenuators = self.metadata.search_all('Attenuator') + self.laserwavelengths = [[1e9 * float(i['Wavelength']) for i in tolist(attenuator)] + for attenuator in attenuators.values()] + self.laserpowers = [[float(i['Transmission']) for i in tolist(attenuator)] + for attenuator in attenuators.values()] + self.collimator = self.metadata.re_search(('Collimator', 'Position')) + detector = self.metadata.search(('Instrument', 'Detector')) + self.gain = [int(i.get('AmplificationGain', 1)) for i in detector] + self.powermode = self.metadata.re_search(('TrackSetup', 'FWFOVPosition'))[0] + optovar = self.metadata.re_search(('TrackSetup', 'TubeLensPosition'), '1x') + self.optovar = [] + for o in optovar: + a = re.search(r'\d?\d*[,.]?\d+(?=x$)', o) + if hasattr(a, 'group'): + self.optovar.append(float(a.group(0).replace(',', '.'))) + self.pcf = [2 ** self.metadata.re_search(('Image', 'ComponentBitCount'), 14)[0] / float(i) + for i in self.metadata.re_search(('Channel', 'PhotonConversionFactor'), 1)] + self.binning = self.metadata.re_search(('AcquisitionModeSetup', 'CameraBinning'), 1)[0] + self.objective = self.metadata.re_search(('AcquisitionModeSetup', 'Objective'))[0] + self.NA = self.metadata.re_search(('Instrument', 'Objective', 'LensNA'))[0] + self.filter = self.metadata.re_search(('TrackSetup', 'BeamSplitter', 'Filter'))[0] + self.tirfangle = [50 * i for i in self.metadata.re_search(('TrackSetup', 'TirfAngle'), 0)] + self.frameoffset = [self.metadata.re_search(('AcquisitionModeSetup', 'CameraFrameOffsetX'))[0], + self.metadata.re_search(('AcquisitionModeSetup', 'CameraFrameOffsetY'))[0]] + self.cnamelist = [c['DetectorSettings']['Detector']['Id'] for c in + self.metadata['ImageDocument']['Metadata']['Information']['Image'].search('Channel')] + try: + self.track, self.detector = zip(*[[int(i) for i in re.findall(r'\d', c)] for c in self.cnamelist]) + 
except ValueError: + self.track = tuple(range(len(self.cnamelist))) + self.detector = (0,) * len(self.cnamelist) + + def __frame__(self, c=0, z=0, t=0): + f = np.zeros(self.file_shape[:2], self.dtype) + for directory_entry in self.filedict[(c, z, t)]: + subblock = directory_entry.data_segment() + tile = subblock.data(resize=True, order=0) + index = [slice(i - j, i - j + k) for i, j, k in + zip(directory_entry.start, self.reader.start, tile.shape)] + index = tuple([index[self.reader.axes.index(i)] for i in 'XY']) + f[index] = tile.squeeze() + return f + + @staticmethod + def get_index(directory_entry, start): + return [(i - j, i - j + k) for i, j, k in zip(directory_entry.start, start, directory_entry.shape)] + + @cached_property + def timeval(self): + tval = np.unique(list(filter(lambda x: x.attachment_entry.filename.startswith('TimeStamp'), + self.reader.attachments()))[0].data()) + return sorted(tval[tval > 0])[:self.shape['t']] \ No newline at end of file diff --git a/ndbioimage/readers/ndread.py b/ndbioimage/readers/ndread.py new file mode 100644 index 0000000..ab4d46b --- /dev/null +++ b/ndbioimage/readers/ndread.py @@ -0,0 +1,29 @@ +from ndbioimage import Imread +import numpy as np + + +class Reader(Imread): + priority = 20 + + @staticmethod + def _can_open(path): + return isinstance(path, np.ndarray) and 1 <= path.ndim <= 5 + + def __metadata__(self): + self.base = np.array(self.path, ndmin=5) + self.title = self.path = 'numpy array' + self.axes = self.axes[:self.base.ndim] + self.shape = self.base.shape + self.acquisitiondate = 'now' + + def __frame__(self, c, z, t): + xyczt = (slice(None), slice(None), c, z, t) + in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes) + frame = self.base[in_idx] + if self.axes.find('y') < self.axes.find('x'): + return frame.T + else: + return frame + + def __str__(self): + return self.path diff --git a/ndbioimage/readers/seqread.py b/ndbioimage/readers/seqread.py new file mode 100644 index 0000000..63296fc --- /dev/null +++ b/ndbioimage/readers/seqread.py @@ -0,0 +1,85 @@ +from ndbioimage import Imread, XmlData +import os +import tifffile +import yaml +import json +import re + + +class Reader(Imread): + priority = 10 + + @staticmethod + def _can_open(path): + return isinstance(path, str) and os.path.splitext(path)[1] == '' + + def __metadata__(self): + filelist = sorted([file for file in os.listdir(self.path) if re.search(r'^img_\d{3,}.*\d{3,}.*\.tif$', file)]) + + try: + with tifffile.TiffFile(os.path.join(self.path, filelist[0])) as tif: + self.metadata = XmlData({key: yaml.safe_load(value) + for key, value in tif.pages[0].tags[50839].value.items()}) + except Exception: # fallback + with open(os.path.join(self.path, 'metadata.txt'), 'r') as metadatafile: + self.metadata = XmlData(json.loads(metadatafile.read())) + + # compare channel names from metadata with filenames + cnamelist = self.metadata.search('ChNames') + cnamelist = [c for c in cnamelist if any([c in f for f in filelist])] + + self.filedict = {} + maxc = 0 + maxz = 0 + maxt = 0 + for file in filelist: + T = re.search(r'(?<=img_)\d{3,}', file) + Z = re.search(r'\d{3,}(?=\.tif$)', file) + C = file[T.end() + 1:Z.start() - 1] + t = int(T.group(0)) + z = int(Z.group(0)) + if C in cnamelist: + c = cnamelist.index(C) + else: + c = len(cnamelist) + cnamelist.append(C) + + self.filedict[(c, z, t)] = file + if c > maxc: + maxc = c + if z > maxz: + maxz = z + if t > maxt: + maxt = t + self.cnamelist = [str(cname) for cname in cnamelist] + + X = self.metadata.search('Width')[0] + Y = 
self.metadata.search('Height')[0] + self.shape = (int(X), int(Y), maxc + 1, maxz + 1, maxt + 1) + + self.pxsize = self.metadata.re_search(r'(?i)pixelsize_?um', 0)[0] + if self.zstack: + self.deltaz = self.metadata.re_search(r'(?i)z-step_?um', 0)[0] + if self.timeseries: + self.settimeinterval = self.metadata.re_search(r'(?i)interval_?ms', 0)[0] / 1000 + if 'Hamamatsu' in self.metadata.search('Core-Camera', '')[0]: + self.pxsizecam = 6.5 + self.title = self.metadata.search('Prefix')[0] + self.acquisitiondate = self.metadata.search('Time')[0] + self.exposuretime = [i / 1000 for i in self.metadata.search('Exposure-ms')] + self.objective = self.metadata.search('ZeissObjectiveTurret-Label')[0] + self.optovar = [] + for o in self.metadata.search('ZeissOptovar-Label'): + a = re.search(r'\d?\d*[,.]?\d+(?=x$)', o) + if hasattr(a, 'group'): + self.optovar.append(float(a.group(0).replace(',', '.'))) + if self.pxsize == 0: + self.magnification = int(re.findall(r'(\d+)x', self.objective)[0]) * self.optovar[0] + self.pxsize = self.pxsizecam / self.magnification + else: + self.magnification = self.pxsizecam / self.pxsize + self.pcf = self.shape[2] * self.metadata.re_search(r'(?i)conversion\sfactor\scoeff', 1) + self.filter = self.metadata.search('ZeissReflectorTurret-Label', self.filter)[0] + + def __frame__(self, c=0, z=0, t=0): + return tifffile.imread(os.path.join(self.path, self.filedict[(c, z, t)])) diff --git a/ndbioimage/readers/tifread.py b/ndbioimage/readers/tifread.py new file mode 100644 index 0000000..cfdff88 --- /dev/null +++ b/ndbioimage/readers/tifread.py @@ -0,0 +1,50 @@ +from ndbioimage import Imread, XmlData +import numpy as np +import tifffile +import yaml + + +class Reader(Imread): + priority = 0 + do_not_pickle = 'reader' + + @staticmethod + def _can_open(path): + if isinstance(path, str) and (path.endswith('.tif') or path.endswith('.tiff')): + with tifffile.TiffFile(path) as tif: + return tif.is_imagej + else: + return False + + def open(self): + self.reader = tifffile.TiffFile(self.path) + + def close(self): + self.reader.close() + + def __metadata__(self): + self.metadata = XmlData({key: yaml.safe_load(value) if isinstance(value, str) else value + for key, value in self.reader.imagej_metadata.items()}) + P = self.reader.pages[0] + self.pndim = P.ndim + X = P.imagelength + Y = P.imagewidth + if self.pndim == 3: + C = P.samplesperpixel + self.transpose = [i for i in [P.axes.find(j) for j in 'SYX'] if i >= 0] + T = self.metadata.get('frames', 1) # // C + else: + C = self.metadata.get('channels', 1) + T = self.metadata.get('frames', 1) + Z = self.metadata.get('slices', 1) + self.shape = (X, Y, C, Z, T) + if 282 in P.tags and 296 in P.tags and P.tags[296].value == 1: + f = P.tags[282].value + self.pxsize = f[1] / f[0] + # TODO: more metadata + + def __frame__(self, c, z, t): + if self.pndim == 3: + return np.transpose(self.reader.asarray(z + t * self.shape[3]), self.transpose)[c] + else: + return self.reader.asarray(c + z * self.shape[2] + t * self.shape[2] * self.shape[3]) diff --git a/ndbioimage/transform.txt b/ndbioimage/transform.txt new file mode 100644 index 0000000..a177cc0 --- /dev/null +++ b/ndbioimage/transform.txt @@ -0,0 +1,7 @@ +#Insight Transform File V1.0 +#Transform 0 +Transform: CompositeTransform_double_2_2 +#Transform 1 +Transform: AffineTransform_double_2_2 +Parameters: 1 0 0 1 0 0 +FixedParameters: 255.5 255.5 diff --git a/ndbioimage/transforms.py b/ndbioimage/transforms.py new file mode 100644 index 0000000..2b10c1a --- /dev/null +++ b/ndbioimage/transforms.py @@ 
-0,0 +1,261 @@ +import yaml +import os +import numpy as np +from copy import deepcopy +from collections import OrderedDict + +try: + # best if SimpleElastix is installed: https://simpleelastix.readthedocs.io/GettingStarted.html + import SimpleITK as sitk + installed = True +except ImportError: + installed = False + +try: + pp = True + from pandas import DataFrame, Series +except ImportError: + pp = False + + +if hasattr(yaml, 'full_load'): + yamlload = yaml.full_load +else: + yamlload = yaml.load + + +class Transforms(OrderedDict): + def __init__(self, *args, **kwargs): + super().__init__(*args[1:], **kwargs) + if len(args): + self.load(args[0]) + + def asdict(self): + return {f'{key[0]:.0f}:{key[1]:.0f}': value.asdict() for key, value in self.items()} + + def load(self, file): + if isinstance(file, dict): + d = file + else: + if not file[-3:] == 'yml': + file += '.yml' + with open(file, 'r') as f: + d = yamlload(f) + for key, value in d.items(): + self[tuple([int(k) for k in key.split(':')])] = Transform(value) + + def __call__(self, channel, time, tracks, detectors): + track, detector = tracks[channel], detectors[channel] + if (track, detector) in self: + return self[track, detector] + elif (0, detector) in self: + return self[0, detector] + else: + return Transform() + + def __reduce__(self): + return self.__class__, (self.asdict(),) + + def save(self, file): + if not file[-3:] == 'yml': + file += '.yml' + with open(file, 'w') as f: + yaml.safe_dump(self.asdict(), f, default_flow_style=None) + + def copy(self): + return deepcopy(self) + + def adapt(self, origin, shape): + for value in self.values(): + value.adapt(origin, shape) + + @property + def inverse(self): + inverse = self.copy() + for key, value in self.items(): + inverse[key] = value.inverse + return inverse + + +class Transform: + def __init__(self, *args): + if not installed: + raise ImportError('SimpleElastix is not installed: https://simpleelastix.readthedocs.io/GettingStarted.html') + self.transform = sitk.ReadTransform(os.path.join(os.path.dirname(__file__), 'transform.txt')) + self.dparameters = (0, 0, 0, 0, 0, 0) + self.shape = (512, 512) + self.origin = (255.5, 255.5) + if len(args) == 1: # load from file or dict + if isinstance(args[0], np.ndarray): + self.matrix = args[0] + else: + self.load(*args) + elif len(args) > 1: # make new transform using fixed and moving image + self.register(*args) + self._last = None + + def __mul__(self, other): # TODO: take care of dmatrix + result = self.copy() + if isinstance(other, Transform): + result.matrix = self.matrix @ other.matrix + result.dmatrix = self.dmatrix @ other.matrix + self.matrix @ other.dmatrix + else: + result.matrix = self.matrix @ other + result.dmatrix = self.dmatrix @ other + return result + + def __reduce__(self): + return self.__class__, (self.asdict(),) + + def is_unity(self): + return self.parameters == [1, 0, 0, 1, 0, 0] + + def copy(self): + return deepcopy(self) + + @staticmethod + def castImage(im): + if not isinstance(im, sitk.Image): + im = sitk.GetImageFromArray(im) + return im + + @staticmethod + def castArray(im): + if isinstance(im, sitk.Image): + im = sitk.GetArrayFromImage(im) + return im + + @property + def matrix(self): + return np.array(((*self.parameters[:2], self.parameters[4]), + (*self.parameters[2:4], self.parameters[5]), + (0, 0, 1))) + + @matrix.setter + def matrix(self, value): + value = np.asarray(value) + self.parameters = [*value[0, :2], *value[1, :2], *value[:2, 2]] + + @property + def dmatrix(self): + return 
np.array(((*self.dparameters[:2], self.dparameters[4]), + (*self.dparameters[2:4], self.dparameters[5]), + (0, 0, 0))) + + @dmatrix.setter + def dmatrix(self, value): + value = np.asarray(value) + self.dparameters = [*value[0, :2], *value[1, :2], *value[:2, 2]] + + @property + def parameters(self): + return self.transform.GetParameters() + + @parameters.setter + def parameters(self, value): + value = np.asarray(value) + self.transform.SetParameters(value.tolist()) + + @property + def origin(self): + return self.transform.GetFixedParameters() + + @origin.setter + def origin(self, value): + value = np.asarray(value) + self.transform.SetFixedParameters(value.tolist()) + + @property + def inverse(self): + if self._last is None or self._last != self.asdict(): + self._last = self.asdict() + self._inverse = Transform(self.asdict()) + self._inverse.transform = self._inverse.transform.GetInverse() + self._inverse._last = self._inverse.asdict() + self._inverse._inverse = self + return self._inverse + + def adapt(self, origin, shape): + self.origin -= np.array(origin) + (self.shape - np.array(shape)[:2]) / 2 + self.shape = shape[:2] + + def asdict(self): + return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters, + 'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()} + + def frame(self, im, default=0): + if self.is_unity(): + return im + else: + dtype = im.dtype + im = im.astype('float') + intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor + return self.castArray(sitk.Resample(self.castImage(im), self.transform, intp, default)).astype(dtype) + + def coords(self, array, columns=None): + """ Transform coordinates in 2 column numpy array, + or in pandas DataFrame or Series objects in columns ['x', 'y'] + """ + if self.is_unity(): + return array.copy() + elif pp and isinstance(array, (DataFrame, Series)): + columns = columns or ['x', 'y'] + array = array.copy() + if isinstance(array, DataFrame): + array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy())) + elif isinstance(array, Series): + array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[0] + return array + else: # somehow we need to use the inverse here to get the same effect as when using self.frame + return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)]) + + def save(self, file): + """ save the parameters of the transform calculated + with affine_registration to a yaml file + """ + if not file[-3:] == 'yml': + file += '.yml' + with open(file, 'w') as f: + yaml.safe_dump(self.asdict(), f, default_flow_style=None) + + def load(self, file): + """ load the parameters of a transform from a yaml file or a dict + """ + if isinstance(file, dict): + d = file + else: + if not file[-3:] == 'yml': + file += '.yml' + with open(file, 'r') as f: + d = yamlload(f) + self.origin = [float(i) for i in d['CenterOfRotationPoint']] + self.parameters = [float(i) for i in d['TransformParameters']] + self.dparameters = [float(i) for i in d['dTransformParameters']] \ + if 'dTransformParameters' in d else 6 * [np.nan] + self.shape = [float(i) for i in d['Size']] + + def register(self, fix, mov, kind=None): + """ kind: 'affine', 'translation', 'rigid' + """ + kind = kind or 'affine' + self.shape = fix.shape + fix, mov = self.castImage(fix), self.castImage(mov) + # TODO: implement RigidTransform + tfilter = sitk.ElastixImageFilter() + tfilter.LogToConsoleOff() + tfilter.SetFixedImage(fix) + 
tfilter.SetMovingImage(mov) + tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind)) + tfilter.Execute() + transform = tfilter.GetTransformParameterMap()[0] + if kind == 'affine': + self.parameters = [float(t) for t in transform['TransformParameters']] + self.shape = [float(t) for t in transform['Size']] + self.origin = [float(t) for t in transform['CenterOfRotationPoint']] + elif kind == 'translation': + self.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']] + self.shape = [float(t) for t in transform['Size']] + self.origin = [(t - 1) / 2 for t in self.shape] + else: + raise NotImplementedError(f'{kind} transforms not implemented (yet)') + self.dparameters = 6 * [np.nan] diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..0a92eb7 --- /dev/null +++ b/setup.py @@ -0,0 +1,48 @@ +import setuptools +import platform +import os + +version = '2022.7.0' + +if platform.system().lower() == 'linux': + import pkg_resources + pkg_resources.require(['pip >= 20.3']) + +with open('README.md', 'r') as fh: + long_description = fh.read() + + +with open(os.path.join(os.path.dirname(__file__), 'ndbioimage', '_version.py'), 'w') as f: + f.write(f"__version__ = '{version}'\n") + try: + with open(os.path.join(os.path.dirname(__file__), '.git', 'refs', 'heads', 'master')) as h: + f.write("__git_commit_hash__ = '{}'\n".format(h.read().rstrip('\n'))) + except Exception: + f.write("__git_commit_hash__ = 'unknown'\n") + + +setuptools.setup( + name='ndbioimage', + version=version, + author='Wim Pomp', + author_email='w.pomp@nki.nl', + description='Bio image reading, metadata and some affine registration.', + long_description=long_description, + long_description_content_type='text/markdown', + url='https://github.com/wimpomp/ndbioimage', + packages=['ndbioimage', 'ndbioimage.readers'], + classifiers=[ + 'Programming Language :: Python :: 3', + 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', + 'Operating System :: OS Independent', + ], + python_requires='>=3.7', + install_requires=['untangle', 'pandas', 'psutil', 'numpy', 'tqdm', 'tifffile', 'czifile', 'pyyaml', 'dill', + 'tiffwrite'], + extras_require={'transforms': 'SimpleITK-SimpleElastix', + 'bioformats': ['python-javabridge', 'python-bioformats']}, + tests_require=['pytest-xdist'], + entry_points={'console_scripts': ['ndbioimage=ndbioimage.ndbioimage:main']}, + package_data={'': ['transform.txt']}, + include_package_data=True, +)
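
A minimal usage sketch for the Transform class added in ndbioimage/transforms.py above (not part of the diff). It assumes a SimpleITK build with SimpleElastix is installed; the bead images here are synthetic placeholders and the file name is made up for illustration.

    import numpy as np
    from ndbioimage.transforms import Transform

    # synthetic placeholder beads: a fixed channel and a shifted copy as the moving channel
    fixed = np.zeros((256, 256))
    fixed[60:65, 70:75] = 1.0
    fixed[180:185, 200:205] = 1.0
    moving = np.roll(fixed, (3, -2), axis=(0, 1))

    # Transform(fix, mov, kind) calls register(), which wraps sitk.ElastixImageFilter
    transform = Transform(fixed, moving, 'affine')

    # resample the moving frame onto the fixed frame's grid
    registered = transform.frame(moving)

    # parameters are written as yaml; Transform('bead_transform.yml') loads them back
    transform.save('bead_transform.yml')

The Transforms container stores such objects keyed by (track, detector) and falls back to a default (identity) Transform when no key matches, so channels without a registration are left untouched.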