First commit

README.md (new file, 82 lines)
@@ -0,0 +1,82 @@
# ndbioimage

Exposes (bio) images as a numpy ndarray-like object, but without loading the whole
image into memory, reading from the file only when needed. Some metadata is read
and exposed as attributes on the Imread object (TODO: structure data in OME format).
Additionally, it can automatically calculate an affine transform that corrects for
chromatic aberrations etc. and apply it on the fly to the image.

Currently supports ImageJ tif files, czi files, Micro-Manager tif sequences and anything
bioformats can handle.

## Installation

    pip install ndbioimage@git+https://github.com/wimpomp/ndbioimage.git

### With bioformats (if java is properly installed)

    pip install ndbioimage[bioformats]@git+https://github.com/wimpomp/ndbioimage.git

### With affine transforms (only for python 3.8, 3.9 and 3.10)

    pip install ndbioimage[transforms]@git+https://github.com/wimpomp/ndbioimage.git

## Usage

- Reading an image file and plotting the frame at channel=2, time=1

        import matplotlib.pyplot as plt
        from ndbioimage import imread
        with imread('image_file.tif', axes='ctxy', dtype=int) as im:
            plt.imshow(im[2, 1])

- Showing some image metadata

        from ndbioimage import imread
        from pprint import pprint
        with imread('image_file.tif') as im:
            pprint(im)

- Slicing the image without loading the image into memory

        from ndbioimage import imread
        with imread('image_file.tif', axes='cztxy') as im:
            sliced_im = im[1, :, :, 100:200, 100:200]

    `sliced_im` is an instance of Imread which will load image data from the file only when needed.

- Converting (part of) the image to a numpy ndarray

        from ndbioimage import imread
        import numpy as np
        with imread('image_file.tif', axes='cztxy') as im:
            array = np.asarray(im[0, 0])

## Adding more formats

Readers for image formats subclass Imread. When an image reader is imported, Imread
automatically recognizes it and uses it to open the appropriate file format. Readers
are required to implement the following methods (a minimal sketch follows the lists below):

- staticmethod _can_open(path): return True if path can be opened by this reader
- \_\_metadata__(self): reads metadata from the file and adds them to self as attributes,
    - the shape of the data in the file needs to be set as self.shape = (X, Y, C, Z, T)
    - other attributes like pxsize, acquisitiontime and title can be set here as well
- \_\_frame__(self, c, z, t): return the frame at channel=c, z-slice=z, time=t from the file

Optional methods:

- open(self): maybe open some file
- close(self): close any file handles

Optional fields:

- priority (int): Imread will try readers with a lower number first, default: 99
- do_not_pickle (strings): any attributes that should not be included when the object is pickled,
  for example: any file handles
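
For illustration, a minimal reader might look like the sketch below. This is a hypothetical
example (it lazily reads a 3D (t, y, x) stack from a `.npy` file through a numpy memory map);
the real readers in `ndbioimage/readers/` implement the same interface but fill in much more
metadata:

    from ndbioimage import Imread
    import numpy as np


    class Reader(Imread):
        priority = 50            # hypothetical: tried after the built-in readers
        do_not_pickle = 'array'  # the memory map cannot be pickled

        @staticmethod
        def _can_open(path):
            return isinstance(path, str) and path.endswith('.npy')

        def open(self):
            self.array = np.load(self.path, mmap_mode='r')  # no frame data is read yet

        def close(self):
            del self.array

        def __metadata__(self):
            t, y, x = self.array.shape
            self.shape = (x, y, 1, 1, t)  # (X, Y, C, Z, T)
            self.title = self.path

        def __frame__(self, c, z, t):
            return np.asarray(self.array[t])  # only this frame is read from disk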

# TODO

- structure the metadata in OME format tree
- re-implement transforms

ndbioimage/.gitignore (new file, vendored, 8 lines)
@@ -0,0 +1,8 @@
._*
*.pyc
/build/
*.egg-info
/venv/
.idea
/.pytest_cache/
/ndbioimage/_version.py

ndbioimage/__init__.py (new executable file, 1483 lines)
File diff suppressed because it is too large.

ndbioimage/_version.py (new file, 2 lines)
@@ -0,0 +1,2 @@
__version__ = '2022.7.0'
__git_commit_hash__ = 'unknown'

ndbioimage/jvm.py (new file, 38 lines)
@@ -0,0 +1,38 @@
try:
    import javabridge
    import bioformats

    class JVM:
        """ There can be only one java virtual machine per python process,
            so this is a singleton class to manage the jvm.
        """
        _instance = None
        vm_started = False
        vm_killed = False

        def __new__(cls, *args):
            if cls._instance is None:
                cls._instance = object.__new__(cls)
            return cls._instance

        def start_vm(self):
            if not self.vm_started and not self.vm_killed:
                javabridge.start_vm(class_path=bioformats.JARS, run_headless=True)
                outputstream = javabridge.make_instance('java/io/ByteArrayOutputStream', "()V")
                printstream = javabridge.make_instance('java/io/PrintStream', "(Ljava/io/OutputStream;)V", outputstream)
                javabridge.static_call('Ljava/lang/System;', "setOut", "(Ljava/io/PrintStream;)V", printstream)
                javabridge.static_call('Ljava/lang/System;', "setErr", "(Ljava/io/PrintStream;)V", printstream)
                self.vm_started = True
                log4j = javabridge.JClassWrapper("loci.common.Log4jTools")
                log4j.enableLogging()
                log4j.setRootLevel("ERROR")

            if self.vm_killed:
                raise Exception('The JVM was killed before, and cannot be restarted in this Python process.')

        def kill_vm(self):
            javabridge.kill_vm()
            self.vm_started = False
            self.vm_killed = True

except ImportError:
    JVM = None
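
A short, hedged usage sketch of the singleton above, mirroring how `readers/bfread.py` uses it:
every `JVM()` call returns the same object, `start_vm` only starts the JVM once, and after
`kill_vm` the JVM cannot be restarted in the same Python process.

    from ndbioimage import JVM  # re-exported by the package; None when javabridge/bioformats are missing

    jvm = JVM()
    jvm.start_vm()        # starts the JVM only if it is not already running
    assert JVM() is jvm   # singleton: a second call returns the identical object
    # ... read images through bioformats/javabridge ...
    jvm.kill_vm()         # afterwards the JVM cannot be restarted in this process
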

ndbioimage/readers/__init__.py (new file, 3 lines)
@@ -0,0 +1,3 @@
import os

# expose every reader module in this package, so importing ndbioimage.readers registers them all
__all__ = [os.path.splitext(os.path.basename(file))[0] for file in os.listdir(os.path.dirname(__file__))
           if file.endswith('.py') and not file == os.path.basename(__file__)]

ndbioimage/readers/bfread.py (new file, 89 lines)
@@ -0,0 +1,89 @@
from ndbioimage import Imread, XmlData, JVM
import os
import numpy as np
import untangle

if JVM is not None:
    import bioformats


class Reader(Imread):
    """ This class is used as a last resort, when we don't have another way to open the file. We don't like it
        because it requires the java vm.
    """
    priority = 99  # panic and open with BioFormats
    do_not_pickle = 'reader', 'key', 'jvm'

    @staticmethod
    def _can_open(path):
        return True

    def open(self):
        self.jvm = JVM()
        self.jvm.start_vm()
        self.key = np.random.randint(1e9)
        self.reader = bioformats.get_image_reader(self.key, self.path)

    def __metadata__(self):
        s = self.reader.rdr.getSeriesCount()
        if self.series >= s:
            print('Series {} does not exist.'.format(self.series))
        self.reader.rdr.setSeries(self.series)

        X = self.reader.rdr.getSizeX()
        Y = self.reader.rdr.getSizeY()
        C = self.reader.rdr.getSizeC()
        Z = self.reader.rdr.getSizeZ()
        T = self.reader.rdr.getSizeT()
        self.shape = (X, Y, C, Z, T)

        omexml = bioformats.get_omexml_metadata(self.path)
        self.metadata = XmlData(untangle.parse(omexml))

        image = list(self.metadata.search_all('Image').values())
        if len(image) and self.series in image[0]:
            image = XmlData(image[0][self.series])
        else:
            image = self.metadata

        unit = lambda u: 10 ** {'nm': 9, 'µm': 6, 'um': 6, 'mm': 3, 'm': 0}[u]

        pxsizeunit = image.search('PhysicalSizeXUnit')[0]
        pxsize = image.search('PhysicalSizeX')[0]
        if pxsize is not None:
            self.pxsize = pxsize / unit(pxsizeunit) * 1e6

        if self.zstack:
            deltazunit = image.search('PhysicalSizeZUnit')[0]
            deltaz = image.search('PhysicalSizeZ')[0]
            if deltaz is not None:
                self.deltaz = deltaz / unit(deltazunit) * 1e6

        if self.path.endswith('.lif'):
            self.title = os.path.splitext(os.path.basename(self.path))[0]
            self.exposuretime = self.metadata.re_search(r'WideFieldChannelInfo\|ExposureTime', self.exposuretime)
            if self.timeseries:
                self.settimeinterval = \
                    self.metadata.re_search(r'ATLCameraSettingDefinition\|CycleTime', self.settimeinterval * 1e3)[
                        0] / 1000
                if not self.settimeinterval:
                    self.settimeinterval = self.exposuretime[0]
            self.pxsizecam = self.metadata.re_search(r'ATLCameraSettingDefinition\|TheoCamSensorPixelSizeX',
                                                     self.pxsizecam)
            self.objective = self.metadata.re_search(r'ATLCameraSettingDefinition\|ObjectiveName', 'none')[0]
            self.magnification = \
                self.metadata.re_search(r'ATLCameraSettingDefinition\|Magnification', self.magnification)[0]
        elif self.path.endswith('.ims'):
            self.magnification = self.metadata.search('LensPower', 100)[0]
            self.NA = self.metadata.search('NumericalAperture', 1.47)[0]
            self.title = self.metadata.search('Name', self.title)
            self.binning = self.metadata.search('BinningX', 1)[0]

    def __frame__(self, *args):
        frame = self.reader.read(*args, rescale=False).astype('float')
        if frame.ndim == 3:
            return frame[..., args[0]]
        else:
            return frame

    def close(self):
        bioformats.release_image_reader(self.key)

ndbioimage/readers/cziread.py (new file, 115 lines)
@@ -0,0 +1,115 @@
from ndbioimage import Imread, XmlData, tolist
import czifile
import untangle
import numpy as np
import re
from functools import cached_property


class Reader(Imread):
    priority = 0
    do_not_pickle = 'reader', 'filedict'

    @staticmethod
    def _can_open(path):
        return isinstance(path, str) and path.endswith('.czi')

    def open(self):
        self.reader = czifile.CziFile(self.path)
        filedict = {}
        for directory_entry in self.reader.filtered_subblock_directory:
            idx = self.get_index(directory_entry, self.reader.start)
            for c in range(*idx[self.reader.axes.index('C')]):
                for z in range(*idx[self.reader.axes.index('Z')]):
                    for t in range(*idx[self.reader.axes.index('T')]):
                        if (c, z, t) in filedict:
                            filedict[(c, z, t)].append(directory_entry)
                        else:
                            filedict[(c, z, t)] = [directory_entry]
        self.filedict = filedict

    def close(self):
        self.reader.close()

    def __metadata__(self):
        # TODO: make sure frame function still works when a subblock has data from more than one frame
        self.shape = tuple([self.reader.shape[self.reader.axes.index(directory_entry)] for directory_entry in 'XYCZT'])
        self.metadata = XmlData(untangle.parse(self.reader.metadata()))

        image = [i for i in self.metadata.search_all('Image').values() if i]
        if len(image) and self.series in image[0]:
            image = XmlData(image[0][self.series])
        else:
            image = self.metadata

        pxsize = image.search('ScalingX')[0]
        if pxsize is not None:
            self.pxsize = pxsize * 1e6
        if self.zstack:
            deltaz = image.search('ScalingZ')[0]
            if deltaz is not None:
                self.deltaz = deltaz * 1e6

        self.title = self.metadata.re_search(('Information', 'Document', 'Name'), self.title)[0]
        self.acquisitiondate = self.metadata.re_search(('Information', 'Document', 'CreationDate'),
                                                       self.acquisitiondate)[0]
        self.exposuretime = self.metadata.re_search(('TrackSetup', 'CameraIntegrationTime'), self.exposuretime)
        if self.timeseries:
            self.settimeinterval = self.metadata.re_search(('Interval', 'TimeSpan', 'Value'),
                                                           self.settimeinterval * 1e3)[0] / 1000
            if not self.settimeinterval:
                self.settimeinterval = self.exposuretime[0]
        self.pxsizecam = self.metadata.re_search(('AcquisitionModeSetup', 'PixelPeriod'), self.pxsizecam)
        self.magnification = self.metadata.re_search('NominalMagnification', self.magnification)[0]
        attenuators = self.metadata.search_all('Attenuator')
        self.laserwavelengths = [[1e9 * float(i['Wavelength']) for i in tolist(attenuator)]
                                 for attenuator in attenuators.values()]
        self.laserpowers = [[float(i['Transmission']) for i in tolist(attenuator)]
                            for attenuator in attenuators.values()]
        self.collimator = self.metadata.re_search(('Collimator', 'Position'))
        detector = self.metadata.search(('Instrument', 'Detector'))
        self.gain = [int(i.get('AmplificationGain', 1)) for i in detector]
        self.powermode = self.metadata.re_search(('TrackSetup', 'FWFOVPosition'))[0]
        optovar = self.metadata.re_search(('TrackSetup', 'TubeLensPosition'), '1x')
        self.optovar = []
        for o in optovar:
            a = re.search(r'\d?\d*[,.]?\d+(?=x$)', o)
            if hasattr(a, 'group'):
                self.optovar.append(float(a.group(0).replace(',', '.')))
        self.pcf = [2 ** self.metadata.re_search(('Image', 'ComponentBitCount'), 14)[0] / float(i)
                    for i in self.metadata.re_search(('Channel', 'PhotonConversionFactor'), 1)]
        self.binning = self.metadata.re_search(('AcquisitionModeSetup', 'CameraBinning'), 1)[0]
        self.objective = self.metadata.re_search(('AcquisitionModeSetup', 'Objective'))[0]
        self.NA = self.metadata.re_search(('Instrument', 'Objective', 'LensNA'))[0]
        self.filter = self.metadata.re_search(('TrackSetup', 'BeamSplitter', 'Filter'))[0]
        self.tirfangle = [50 * i for i in self.metadata.re_search(('TrackSetup', 'TirfAngle'), 0)]
        self.frameoffset = [self.metadata.re_search(('AcquisitionModeSetup', 'CameraFrameOffsetX'))[0],
                            self.metadata.re_search(('AcquisitionModeSetup', 'CameraFrameOffsetY'))[0]]
        self.cnamelist = [c['DetectorSettings']['Detector']['Id'] for c in
                          self.metadata['ImageDocument']['Metadata']['Information']['Image'].search('Channel')]
        try:
            self.track, self.detector = zip(*[[int(i) for i in re.findall(r'\d', c)] for c in self.cnamelist])
        except ValueError:
            self.track = tuple(range(len(self.cnamelist)))
            self.detector = (0,) * len(self.cnamelist)

    def __frame__(self, c=0, z=0, t=0):
        f = np.zeros(self.file_shape[:2], self.dtype)
        for directory_entry in self.filedict[(c, z, t)]:
            subblock = directory_entry.data_segment()
            tile = subblock.data(resize=True, order=0)
            index = [slice(i - j, i - j + k) for i, j, k in
                     zip(directory_entry.start, self.reader.start, tile.shape)]
            index = tuple([index[self.reader.axes.index(i)] for i in 'XY'])
            f[index] = tile.squeeze()
        return f

    @staticmethod
    def get_index(directory_entry, start):
        return [(i - j, i - j + k) for i, j, k in zip(directory_entry.start, start, directory_entry.shape)]

    @cached_property
    def timeval(self):
        tval = np.unique(list(filter(lambda x: x.attachment_entry.filename.startswith('TimeStamp'),
                                     self.reader.attachments()))[0].data())
        return sorted(tval[tval > 0])[:self.shape['t']]

ndbioimage/readers/ndread.py (new file, 29 lines)
@@ -0,0 +1,29 @@
from ndbioimage import Imread
import numpy as np


class Reader(Imread):
    priority = 20

    @staticmethod
    def _can_open(path):
        return isinstance(path, np.ndarray) and 1 <= path.ndim <= 5

    def __metadata__(self):
        self.base = np.array(self.path, ndmin=5)
        self.title = self.path = 'numpy array'
        self.axes = self.axes[:self.base.ndim]
        self.shape = self.base.shape
        self.acquisitiondate = 'now'

    def __frame__(self, c, z, t):
        xyczt = (slice(None), slice(None), c, z, t)
        in_idx = tuple(xyczt['xyczt'.find(i)] for i in self.axes)
        frame = self.base[in_idx]
        if self.axes.find('y') < self.axes.find('x'):
            return frame.T
        else:
            return frame

    def __str__(self):
        return self.path

ndbioimage/readers/seqread.py (new file, 85 lines)
@@ -0,0 +1,85 @@
from ndbioimage import Imread, XmlData
import os
import tifffile
import yaml
import json
import re


class Reader(Imread):
    priority = 10

    @staticmethod
    def _can_open(path):
        return isinstance(path, str) and os.path.splitext(path)[1] == ''

    def __metadata__(self):
        filelist = sorted([file for file in os.listdir(self.path) if re.search(r'^img_\d{3,}.*\d{3,}.*\.tif$', file)])

        try:
            with tifffile.TiffFile(os.path.join(self.path, filelist[0])) as tif:
                self.metadata = XmlData({key: yaml.safe_load(value)
                                         for key, value in tif.pages[0].tags[50839].value.items()})
        except Exception:  # fallback
            with open(os.path.join(self.path, 'metadata.txt'), 'r') as metadatafile:
                self.metadata = XmlData(json.loads(metadatafile.read()))

        # compare channel names from metadata with filenames
        cnamelist = self.metadata.search('ChNames')
        cnamelist = [c for c in cnamelist if any([c in f for f in filelist])]

        self.filedict = {}
        maxc = 0
        maxz = 0
        maxt = 0
        for file in filelist:
            T = re.search(r'(?<=img_)\d{3,}', file)
            Z = re.search(r'\d{3,}(?=\.tif$)', file)
            C = file[T.end() + 1:Z.start() - 1]
            t = int(T.group(0))
            z = int(Z.group(0))
            if C in cnamelist:
                c = cnamelist.index(C)
            else:
                c = len(cnamelist)
                cnamelist.append(C)

            self.filedict[(c, z, t)] = file
            if c > maxc:
                maxc = c
            if z > maxz:
                maxz = z
            if t > maxt:
                maxt = t
        self.cnamelist = [str(cname) for cname in cnamelist]

        X = self.metadata.search('Width')[0]
        Y = self.metadata.search('Height')[0]
        self.shape = (int(X), int(Y), maxc + 1, maxz + 1, maxt + 1)

        self.pxsize = self.metadata.re_search(r'(?i)pixelsize_?um', 0)[0]
        if self.zstack:
            self.deltaz = self.metadata.re_search(r'(?i)z-step_?um', 0)[0]
        if self.timeseries:
            self.settimeinterval = self.metadata.re_search(r'(?i)interval_?ms', 0)[0] / 1000
        if 'Hamamatsu' in self.metadata.search('Core-Camera', '')[0]:
            self.pxsizecam = 6.5
        self.title = self.metadata.search('Prefix')[0]
        self.acquisitiondate = self.metadata.search('Time')[0]
        self.exposuretime = [i / 1000 for i in self.metadata.search('Exposure-ms')]
        self.objective = self.metadata.search('ZeissObjectiveTurret-Label')[0]
        self.optovar = []
        for o in self.metadata.search('ZeissOptovar-Label'):
            a = re.search(r'\d?\d*[,.]?\d+(?=x$)', o)
            if hasattr(a, 'group'):
                self.optovar.append(float(a.group(0).replace(',', '.')))
        if self.pxsize == 0:
            self.magnification = int(re.findall(r'(\d+)x', self.objective)[0]) * self.optovar[0]
            self.pxsize = self.pxsizecam / self.magnification
        else:
            self.magnification = self.pxsizecam / self.pxsize
        self.pcf = self.shape[2] * self.metadata.re_search(r'(?i)conversion\sfactor\scoeff', 1)
        self.filter = self.metadata.search('ZeissReflectorTurret-Label', self.filter)[0]

    def __frame__(self, c=0, z=0, t=0):
        return tifffile.imread(os.path.join(self.path, self.filedict[(c, z, t)]))

ndbioimage/readers/tifread.py (new file, 50 lines)
@@ -0,0 +1,50 @@
from ndbioimage import Imread, XmlData
import numpy as np
import tifffile
import yaml


class Reader(Imread):
    priority = 0
    do_not_pickle = 'reader'

    @staticmethod
    def _can_open(path):
        if isinstance(path, str) and (path.endswith('.tif') or path.endswith('.tiff')):
            with tifffile.TiffFile(path) as tif:
                return tif.is_imagej
        else:
            return False

    def open(self):
        self.reader = tifffile.TiffFile(self.path)

    def close(self):
        self.reader.close()

    def __metadata__(self):
        self.metadata = XmlData({key: yaml.safe_load(value) if isinstance(value, str) else value
                                 for key, value in self.reader.imagej_metadata.items()})
        P = self.reader.pages[0]
        self.pndim = P.ndim
        X = P.imagelength
        Y = P.imagewidth
        if self.pndim == 3:
            C = P.samplesperpixel
            self.transpose = [i for i in [P.axes.find(j) for j in 'SYX'] if i >= 0]
            T = self.metadata.get('frames', 1)  # // C
        else:
            C = self.metadata.get('channels', 1)
            T = self.metadata.get('frames', 1)
        Z = self.metadata.get('slices', 1)
        self.shape = (X, Y, C, Z, T)
        if 282 in P.tags and 296 in P.tags and P.tags[296].value == 1:
            f = P.tags[282].value
            self.pxsize = f[1] / f[0]
        # TODO: more metadata

    def __frame__(self, c, z, t):
        if self.pndim == 3:
            return np.transpose(self.reader.asarray(z + t * self.shape[3]), self.transpose)[c]
        else:
            return self.reader.asarray(c + z * self.shape[2] + t * self.shape[2] * self.shape[3])

ndbioimage/transform.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
#Insight Transform File V1.0
#Transform 0
Transform: CompositeTransform_double_2_2
#Transform 1
Transform: AffineTransform_double_2_2
Parameters: 1 0 0 1 0 0
FixedParameters: 255.5 255.5
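
For reference, transforms.py (below) lays the six TransformParameters out as a 3x3 affine matrix,
with FixedParameters as the center of rotation, so this default file encodes the identity transform
centered at (255.5, 255.5), the center of a 512x512 frame. A minimal sketch of that mapping:

    import numpy as np

    # Parameters: a b c d tx ty  ->  affine matrix as built by Transform.matrix
    a, b, c, d, tx, ty = 1, 0, 0, 1, 0, 0
    matrix = np.array(((a, b, tx),
                       (c, d, ty),
                       (0, 0, 1)))  # identity: pixels are left untouched
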

ndbioimage/transforms.py (new file, 261 lines)
@@ -0,0 +1,261 @@
import yaml
import os
import numpy as np
from copy import deepcopy
from collections import OrderedDict

try:
    # best if SimpleElastix is installed: https://simpleelastix.readthedocs.io/GettingStarted.html
    import SimpleITK as sitk
    installed = True
except ImportError:
    installed = False

try:
    pp = True
    from pandas import DataFrame, Series
except ImportError:
    pp = False


if hasattr(yaml, 'full_load'):
    yamlload = yaml.full_load
else:
    yamlload = yaml.load


class Transforms(OrderedDict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args[1:], **kwargs)
        if len(args):
            self.load(args[0])

    def asdict(self):
        return {f'{key[0]:.0f}:{key[1]:.0f}': value.asdict() for key, value in self.items()}

    def load(self, file):
        if isinstance(file, dict):
            d = file
        else:
            if not file[-3:] == 'yml':
                file += '.yml'
            with open(file, 'r') as f:
                d = yamlload(f)
        for key, value in d.items():
            self[tuple([int(k) for k in key.split(':')])] = Transform(value)

    def __call__(self, channel, time, tracks, detectors):
        track, detector = tracks[channel], detectors[channel]
        if (track, detector) in self:
            return self[track, detector]
        elif (0, detector) in self:
            return self[0, detector]
        else:
            return Transform()

    def __reduce__(self):
        return self.__class__, (self.asdict(),)

    def save(self, file):
        if not file[-3:] == 'yml':
            file += '.yml'
        with open(file, 'w') as f:
            yaml.safe_dump(self.asdict(), f, default_flow_style=None)

    def copy(self):
        return deepcopy(self)

    def adapt(self, origin, shape):
        for value in self.values():
            value.adapt(origin, shape)

    @property
    def inverse(self):
        inverse = self.copy()
        for key, value in self.items():
            inverse[key] = value.inverse
        return inverse


class Transform:
    def __init__(self, *args):
        if not installed:
            raise ImportError('SimpleElastix is not installed: https://simpleelastix.readthedocs.io/GettingStarted.html')
        self.transform = sitk.ReadTransform(os.path.join(os.path.dirname(__file__), 'transform.txt'))
        self.dparameters = (0, 0, 0, 0, 0, 0)
        self.shape = (512, 512)
        self.origin = (255.5, 255.5)
        if len(args) == 1:  # load from file or dict
            if isinstance(args[0], np.ndarray):
                self.matrix = args[0]
            else:
                self.load(*args)
        elif len(args) > 1:  # make new transform using fixed and moving image
            self.register(*args)
        self._last = None

    def __mul__(self, other):  # TODO: take care of dmatrix
        result = self.copy()
        if isinstance(other, Transform):
            result.matrix = self.matrix @ other.matrix
            result.dmatrix = self.dmatrix @ other.matrix + self.matrix @ other.dmatrix
        else:
            result.matrix = self.matrix @ other
            result.dmatrix = self.dmatrix @ other
        return result

    def __reduce__(self):
        return self.__class__, (self.asdict(),)

    def is_unity(self):
        return self.parameters == [1, 0, 0, 1, 0, 0]

    def copy(self):
        return deepcopy(self)

    @staticmethod
    def castImage(im):
        if not isinstance(im, sitk.Image):
            im = sitk.GetImageFromArray(im)
        return im

    @staticmethod
    def castArray(im):
        if isinstance(im, sitk.Image):
            im = sitk.GetArrayFromImage(im)
        return im

    @property
    def matrix(self):
        return np.array(((*self.parameters[:2], self.parameters[4]),
                         (*self.parameters[2:4], self.parameters[5]),
                         (0, 0, 1)))

    @matrix.setter
    def matrix(self, value):
        value = np.asarray(value)
        self.parameters = [*value[0, :2], *value[1, :2], *value[:2, 2]]

    @property
    def dmatrix(self):
        return np.array(((*self.dparameters[:2], self.dparameters[4]),
                         (*self.dparameters[2:4], self.dparameters[5]),
                         (0, 0, 0)))

    @dmatrix.setter
    def dmatrix(self, value):
        value = np.asarray(value)
        self.dparameters = [*value[0, :2], *value[1, :2], *value[:2, 2]]

    @property
    def parameters(self):
        return self.transform.GetParameters()

    @parameters.setter
    def parameters(self, value):
        value = np.asarray(value)
        self.transform.SetParameters(value.tolist())

    @property
    def origin(self):
        return self.transform.GetFixedParameters()

    @origin.setter
    def origin(self, value):
        value = np.asarray(value)
        self.transform.SetFixedParameters(value.tolist())

    @property
    def inverse(self):
        if self._last is None or self._last != self.asdict():
            self._last = self.asdict()
            self._inverse = Transform(self.asdict())
            self._inverse.transform = self._inverse.transform.GetInverse()
            self._inverse._last = self._inverse.asdict()
            self._inverse._inverse = self
        return self._inverse

    def adapt(self, origin, shape):
        self.origin -= np.array(origin) + (self.shape - np.array(shape)[:2]) / 2
        self.shape = shape[:2]

    def asdict(self):
        return {'CenterOfRotationPoint': self.origin, 'Size': self.shape, 'TransformParameters': self.parameters,
                'dTransformParameters': np.nan_to_num(self.dparameters, nan=1e99).tolist()}

    def frame(self, im, default=0):
        if self.is_unity():
            return im
        else:
            dtype = im.dtype
            im = im.astype('float')
            intp = sitk.sitkBSpline if np.issubdtype(dtype, np.floating) else sitk.sitkNearestNeighbor
            return self.castArray(sitk.Resample(self.castImage(im), self.transform, intp, default)).astype(dtype)

    def coords(self, array, columns=None):
        """ Transform coordinates in 2 column numpy array,
            or in pandas DataFrame or Series objects in columns ['x', 'y']
        """
        if self.is_unity():
            return array.copy()
        elif pp and isinstance(array, (DataFrame, Series)):
            columns = columns or ['x', 'y']
            array = array.copy()
            if isinstance(array, DataFrame):
                array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))
            elif isinstance(array, Series):
                array[columns] = self.coords(np.atleast_2d(array[columns].to_numpy()))[0]
            return array
        else:  # somehow we need to use the inverse here to get the same effect as when using self.frame
            return np.array([self.inverse.transform.TransformPoint(i.tolist()) for i in np.asarray(array)])

    def save(self, file):
        """ save the parameters of the transform calculated
            with affine_registration to a yaml file
        """
        if not file[-3:] == 'yml':
            file += '.yml'
        with open(file, 'w') as f:
            yaml.safe_dump(self.asdict(), f, default_flow_style=None)

    def load(self, file):
        """ load the parameters of a transform from a yaml file or a dict
        """
        if isinstance(file, dict):
            d = file
        else:
            if not file[-3:] == 'yml':
                file += '.yml'
            with open(file, 'r') as f:
                d = yamlload(f)
        self.origin = [float(i) for i in d['CenterOfRotationPoint']]
        self.parameters = [float(i) for i in d['TransformParameters']]
        self.dparameters = [float(i) for i in d['dTransformParameters']] \
            if 'dTransformParameters' in d else 6 * [np.nan]
        self.shape = [float(i) for i in d['Size']]

    def register(self, fix, mov, kind=None):
        """ kind: 'affine', 'translation', 'rigid'
        """
        kind = kind or 'affine'
        self.shape = fix.shape
        fix, mov = self.castImage(fix), self.castImage(mov)
        # TODO: implement RigidTransform
        tfilter = sitk.ElastixImageFilter()
        tfilter.LogToConsoleOff()
        tfilter.SetFixedImage(fix)
        tfilter.SetMovingImage(mov)
        tfilter.SetParameterMap(sitk.GetDefaultParameterMap(kind))
        tfilter.Execute()
        transform = tfilter.GetTransformParameterMap()[0]
        if kind == 'affine':
            self.parameters = [float(t) for t in transform['TransformParameters']]
            self.shape = [float(t) for t in transform['Size']]
            self.origin = [float(t) for t in transform['CenterOfRotationPoint']]
        elif kind == 'translation':
            self.parameters = [1.0, 0.0, 0.0, 1.0] + [float(t) for t in transform['TransformParameters']]
            self.shape = [float(t) for t in transform['Size']]
            self.origin = [(t - 1) / 2 for t in self.shape]
        else:
            raise NotImplementedError(f'{kind} transforms not implemented (yet)')
        self.dparameters = 6 * [np.nan]
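
A hedged sketch of how the Transform class above might be used, assuming SimpleElastix
(SimpleITK-SimpleElastix) is installed; the arrays here are made up for illustration:

    import numpy as np
    from ndbioimage.transforms import Transform

    fix = np.random.rand(512, 512).astype('float32')  # frame from the reference channel
    mov = np.roll(fix, (3, -2), axis=(0, 1))          # stand-in for a chromatically shifted channel
    t = Transform(fix, mov, 'translation')            # Transform(fixed, moving, kind) runs the registration
    corrected = t.frame(mov)                          # resample the moving frame onto the fixed frame's grid
    t.save('shift')                                   # written to shift.yml
    t2 = Transform('shift')                           # reload the saved parameters
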

setup.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import setuptools
import platform
import os

version = '2022.7.0'

if platform.system().lower() == 'linux':
    import pkg_resources
    pkg_resources.require(['pip >= 20.3'])

with open('README.md', 'r') as fh:
    long_description = fh.read()


with open(os.path.join(os.path.dirname(__file__), 'ndbioimage', '_version.py'), 'w') as f:
    f.write(f"__version__ = '{version}'\n")
    try:
        with open(os.path.join(os.path.dirname(__file__), '.git', 'refs', 'heads', 'master')) as h:
            f.write("__git_commit_hash__ = '{}'\n".format(h.read().rstrip('\n')))
    except Exception:
        f.write(f"__git_commit_hash__ = 'unknown'\n")


setuptools.setup(
    name='ndbioimage',
    version=version,
    author='Wim Pomp',
    author_email='w.pomp@nki.nl',
    description='Bio image reading, metadata and some affine registration.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/wimpomp/ndbioimage',
    packages=['ndbioimage', 'ndbioimage.readers'],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.7',
    install_requires=['untangle', 'pandas', 'psutil', 'numpy', 'tqdm', 'tifffile', 'czifile', 'pyyaml', 'dill',
                      'tiffwrite'],
    extras_require={'transforms': 'SimpleITK-SimpleElastix',
                    'bioformats': ['python-javabridge', 'python-bioformats']},
    tests_require=['pytest-xdist'],
    entry_points={'console_scripts': ['ndbioimage=ndbioimage.ndbioimage:main']},
    package_data={'': ['transform.txt']},
    include_package_data=True,
)