
Commit

write down processed images by z-plane to avoid memory issue
rx3fang committed Jul 31, 2024
1 parent a45eaa1 commit 157093c
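
The point of the change is memory: the removed code built the full (z, bit, y, x) stack for a field of view before writing it out, while the new loop in _run_analysis only ever holds one z-plane. A rough sketch of the arithmetic, using the shapes from the in-code comment (about 100 z-planes, 22 bits, 2048 x 2048 pixels) as assumed example values:

# Back-of-the-envelope memory estimate; the shapes are assumed example values
# taken from the comment in the diff (100 z-planes, 22 bits, 2048 x 2048 pixels).
import numpy as np

zPlanes, bitCount, height, width = 100, 22, 2048, 2048
bytesPerPixel = np.dtype(np.uint16).itemsize

fullStackGB = zPlanes * bitCount * height * width * bytesPerPixel / 1e9
onePlaneMB = bitCount * height * width * bytesPerPixel / 1e6

print(f"full 4D stack: ~{fullStackGB:.1f} GB")  # ~18.5 GB as uint16
print(f"one z-plane:   ~{onePlaneMB:.0f} MB")   # ~185 MB as uint16

If the processed images are floating point before the uint16 cast, the peak footprint of the old approach is several times larger still.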
Showing 3 changed files with 47 additions and 58 deletions.
Binary file modified .DS_Store
Binary file not shown.
Binary file added merlin/analysis/._preprocess.py
Binary file not shown.
105 changes: 47 additions & 58 deletions merlin/analysis/preprocess.py
@@ -291,19 +291,20 @@ class WriteDownProcessedImages(analysistask.ParallelAnalysisTask):

"""
Write down the preprocessed images prior to barcode calling.
lowpass_sigma - low pass filter to smooth the images.
feature_channels - the channel names for immunostaining
or other cellular features such as DAPI, polyT
lowpass_sigma - low pass filter to smooth the images
after deconvolution.
feature_channels - the channel names for immunostaining
or other cellular features such as DAPI, polyT.
feature_channels can be empty.
warp_task - the warp task that aligns images from
different rounds.
preprocess_task - preprocessing task.
Optional: it is also possible to provide the optimization task
optimize_task. If this parameter is not given,
this task will ignore the optimization task. This
provides an option.
the optimization step is skipped.
Rongxin Fang
7/30/2024
7/31/2024
"""

def __init__(self, dataSet, parameters=None, analysisName=None):
@@ -312,7 +313,7 @@ def __init__(self, dataSet, parameters=None, analysisName=None):
if 'lowpass_sigma' not in self.parameters:
self.parameters['lowpass_sigma'] = 1.0
if 'feature_channels' not in self.parameters:
self.parameters['feature_channels'] = None
self.parameters['feature_channels'] = []
self.warpTask = self.dataSet.load_analysis_task(
self.parameters['warp_task'])
self.preprocessTask = self.dataSet.load_analysis_task(
@@ -353,35 +354,16 @@ def get_feature_image_set(
self, fov, zIndex, featureChannels,
chromaticCorrector: aberration.ChromaticCorrector = None
) -> np.ndarray:
if len(featureChannels) == 0:
return None
return np.array([self.warpTask.get_aligned_image(
fov, self.dataSet.get_data_organization()
.get_data_channel_for_bit(b), zIndex, chromaticCorrector)
for b in featureChannels ])

def _save_processed_images(self, fov: int, zPositionCount: int,
processedImages: np.ndarray) -> None:
imageDescription = self.dataSet.analysis_tiff_description(
zPositionCount, processedImages.shape[1])
with self.dataSet.writer_for_analysis_images(
self, 'processed_', fov) as outputTif:
for i in range(zPositionCount):
outputTif.save(processedImages[i].astype(np.uint16),
photometric='MINISBLACK',
metadata=imageDescription)

def _save_feature_images(self, fov: int, zPositionCount: int,
featureImages: np.ndarray) -> None:
imageDescription = self.dataSet.analysis_tiff_description(
zPositionCount, featureImages.shape[1])
with self.dataSet.writer_for_analysis_images(
self, 'feature_', fov) as outputTif:
for i in range(zPositionCount):
outputTif.save(featureImages[i].astype(np.uint16),
photometric='MINISBLACK',
metadata=imageDescription)

def _run_analysis(self, fragmentIndex):

# if the optimization task is given, load the scale factors and chromatic aberration corrector
if 'optimize_task' in self.parameters:
optimizeTask = self.dataSet.load_analysis_task(
self.parameters['optimize_task'])
@@ -397,35 +379,42 @@ def _run_analysis(self, fragmentIndex):
self._get_reference_color())

zPositionCount = len(self.dataSet.get_z_positions())

# write down feature images if feature_channels is not None
if self.parameters['feature_channels'] is not None:
featureImages = np.array([
self.get_feature_image_set(
fragmentIndex, zIndex,
self.parameters['feature_channels'],
chromaticCorrector) \
for zIndex in range(zPositionCount) ])

self._save_feature_images(
fragmentIndex, zPositionCount, featureImages)

processedImages = np.array([ self.preprocessTask.get_processed_image_set(
fragmentIndex, zIndex, chromaticCorrector) \
for zIndex in range(zPositionCount) ])

# TODO: write this part more elegantly
for i in range(processedImages.shape[0]): # z-positions
for j in range(processedImages.shape[1]): # Channels
processedImages[i,j,:,:] = \
imagefilters.low_pass_filter(
processedImages[i,j,:,:],
self.parameters['lowpass_sigma'])
channelCount = self.dataSet.get_codebook().get_bit_count() + \
len(self.parameters['feature_channels'])

# TODO: enable saving the file as dax file
self._save_processed_images(
fragmentIndex, zPositionCount, processedImages)


# avoid creating a huge numpy array such as 100 x 22 x 2048 x 2048
# by processing every z-plane independently. This is mostly for 3D MERFISH
imageDescription = self.dataSet.analysis_tiff_description(
zPositionCount, channelCount)

with self.dataSet.writer_for_analysis_images(
self, 'image_', fragmentIndex) as outputTif:

for zIndex in range(zPositionCount):
imageSet_preproc = self.preprocessTask.get_processed_image_set(
fragmentIndex, zIndex, chromaticCorrector)

imageSet_preproc = np.array([
imagefilters.low_pass_filter(
imageSet_preproc[i,:,:],
self.parameters['lowpass_sigma']) \
for i in range(imageSet_preproc.shape[0]) ]
).astype(np.uint16)

# get_feature_image_set returns None if feature_channels is []
imageSet_feature = self.get_feature_image_set(
fragmentIndex, zIndex,
self.parameters['feature_channels'],
chromaticCorrector)

if imageSet_feature is not None:
imageSet_feature = imageSet_feature.astype(np.uint16)
imageSet = np.concatenate([
imageSet_feature, imageSet_preproc], axis=0)
else:
imageSet = imageSet_preproc

for i in range(imageSet.shape[0]):
outputTif.save(imageSet[i],
photometric='MINISBLACK',
metadata=imageDescription)
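
The output above goes through MERlin's dataSet.writer_for_analysis_images, but the pattern the new _run_analysis relies on is simply streaming one z-plane at a time into a multi-page TIFF. A minimal standalone sketch of that pattern, assuming the tifffile package; process_one_plane is a hypothetical stand-in for the warp, preprocess and low-pass steps:

# Minimal sketch of per-z-plane streaming writes with tifffile; not MERlin's
# own writer, just the same pattern of never holding more than one plane in memory.
import numpy as np
import tifffile

def process_one_plane(zIndex: int, channelCount: int = 22) -> np.ndarray:
    # Hypothetical stand-in for warping, preprocessing and low-pass filtering;
    # returns a (channels, y, x) uint16 array for one z-plane.
    return np.random.randint(0, 2**16, size=(channelCount, 256, 256),
                             dtype=np.uint16)

zPositionCount = 5
with tifffile.TiffWriter('processed_fov_0.tif') as outputTif:
    for zIndex in range(zPositionCount):
        imageSet = process_one_plane(zIndex)  # only one z-plane in memory
        for frame in imageSet:
            outputTif.write(frame, photometric='minisblack', contiguous=True)

Writing frame by frame keeps peak memory proportional to a single plane rather than to the whole stack.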

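For reference, a parameters block for this task could look roughly like the following; the task names ('FiducialCorrelationWarp', 'DeconvolutionPreprocess', 'OptimizeIteration') are illustrative assumptions and must match whatever tasks are defined in the surrounding analysis parameters file:

# Hypothetical parameters for WriteDownProcessedImages; all referenced task
# names are placeholders that must exist in the surrounding analysis file.
writeDownProcessedImagesParameters = {
    'warp_task': 'FiducialCorrelationWarp',
    'preprocess_task': 'DeconvolutionPreprocess',
    'optimize_task': 'OptimizeIteration',   # optional; omit to skip scale factors
    'lowpass_sigma': 1.0,                   # default set in __init__ above
    'feature_channels': ['DAPI', 'polyT'],  # may be an empty list
}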