-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrunner.py
More file actions
196 lines (165 loc) · 9.8 KB
/
runner.py
File metadata and controls
196 lines (165 loc) · 9.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
################################################################################################################
# How to get an interactive bash on the 93xx cluster and execute the deconvolution:
# 1. ssh hpcl9301
# 2. Move to a directory where you want to execute the code from
# 3. git clone https://github.com/CraignRush/3D-correlation-deconvonvolution
# 4. srun --nodes=1 --partition=p.hpcl93 --ntasks-per-node=1 --gres=gpu:4 --time=01:00:00 --pty bash -i
# 5. module load FLOWDEC
# 6. which python
#    --> should yield /fs/pool/pool-bmapps/hpcl8/app/soft/FLOWDEC/12-04-2024/conda3/envs/flowdec/bin/python
# 7. Plug in the correct paths below
# 8. python runner.py
# Input LIF file containing the image stacks to deconvolve.
file_pattern = '/fs/pool/pool-pub/EMBO/FLM/PreImaged_Yeast/20240416_Yeast_CA_GE_18/20240416_Yeast_CA_GE_18_tiles.lif'
# Example alternative input:
#'/fs/pool/pool-pub/EMBO/FLM/PreImaged_Yeast/20240402_Yeast_CA_GE_05/CA_GE_05_tiles.lif'
# Directory the deconvolved stacks and MIPs are written to (must already exist).
output_folder = '/fs/pool/pool-pub/EMBO/FLM/PreImaged_Yeast/20240416_Yeast_CA_GE_18/20240416_Yeast_CA_GE_18_decon/'
# Example alternative output:
#'/fs/pool/pool-pub/EMBO/FLM/PreImaged_Yeast/20240402_Yeast_CA_GE_05/'#
stack_naming_pattern = ['Pos','FOV'] #add your naming as you like
LIFFILE = True  # True when the input is a LIF file handled by the FOV class
##########################################################
filter_sigma = 1.5 # sigma of the Gaussian blur applied after deconvolution
iterations = 75 # number of Richardson-Lucy deconvolution iterations
### DON'T MODIFY ANYTHING BELOW HERE ###
# %%
import logging, sys
LOG_LEVEL = logging.INFO
# Log to 'current.log' (overwritten each run, filemode='w') ...
logging.basicConfig(filename='current.log',encoding='utf-8',level=LOG_LEVEL, filemode = 'w', format='%(asctime)s-%(levelname)s: Process %(process)d said: %(message)s')
log = logging.getLogger()
# ... and mirror every record to stdout as well.
log.addHandler(logging.StreamHandler(sys.stdout))
# %%
import os
# Abort early if the destination directory is missing — nothing downstream
# could save its results otherwise. Log at ERROR level and exit with a
# non-zero status so callers/schedulers see the failure (the original used
# logging.info and sys.exit(), which exits with status 0).
if not os.path.isdir(output_folder):
    logging.error("Couldn't find the output directory!")
    sys.exit(1)
# These environment variables must be set BEFORE tensorflow is imported:
# restrict TF to the first GPU, enable XLA devices, raise numexpr threads.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['NUMEXPR_MAX_THREADS'] = '256'
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
# %%
#HACK fix import and order
from skimage import exposure, io, img_as_uint
from flowdec import data as tfd_data
from flowdec import psf as tfd_psf
from flowdec import restoration as tfd_restoration
from scipy.ndimage import gaussian_filter
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
import numpy as np
from datetime import datetime
from flm_utility_functions import tdct_reslice
from FOV import FOV
# %%
# Record which input file this run processes.
logging.info("Current File: {}".format(file_pattern))
# %%
# In case your GPU setup allows for a continuous observer or saving of intermediate steps (requires more memory, and is not used here)
imgs = []    # optional per-iteration snapshots (unused by default)
scores = {}  # optional per-iteration quality metrics (unused by default)
def observer(img, i, *args):
    """Progress callback passed to the Richardson-Lucy deconvolver.

    Logs every 5th iteration; on iteration 5 it additionally reports the
    current image dtype and maximum value. Extra positional arguments are
    accepted and ignored. Returns None.
    """
    # Optional bookkeeping, disabled because it needs extra memory:
    # imgs.append(img)
    # scores[i] = {
    #     'mse': mean_squared_error(processing_stack, img),
    #     'ssim': structural_similarity(processing_stack, img, data_range=1),  # @TODO find out why SSIM doesn't work as expected
    #     'psnr': peak_signal_noise_ratio(processing_stack, img),
    # }
    if i % 5 != 0:
        return
    if i == 5:
        logging.info('Observing iteration = {} (dtype = {}, max = {:.3f})'.format(i, img.dtype, img.max()))
    else:
        logging.info('Observing iteration = {}'.format(i))
    # Disabled variants that also report the quality metrics collected above:
    # logging.info('Observing iteration = {} (MSE = {:.2f},SSIM = {:.2f}, PSNR = {:.2f})'.format(i, scores[i]['mse'],scores[i]['ssim'],scores[i]['psnr']))
    # logging.info('Observing iteration = {} (MSE = {:.2f}, PSNR = {:.2f})'.format(i, scores[i]['mse'],scores[i]['psnr']))
algo = tfd_restoration.RichardsonLucyDeconvolver(n_dims=3,observer_fn=observer).initialize()#
# %%
# Import LIF data with a custom written handler class here
# FIXME Comment class properly
#HACK Loop over all FOVs
#HACK handle exceptions in class
#TODO calibrate images
#TODO in the very future optimize import, this takes ages!!!
#TODO map to overview
if LIFFILE:
# Open the file once just to discover how many FOVs it contains (FOV_count).
test_fov = FOV(file_pattern,0)
# %%
#HACK loop over channels
# Process every field of view in the file; the remainder of the script runs
# once per FOV.
for fov_num in range(test_fov.FOV_count):
test_fov = FOV(file_pattern,fov_num)
logging.info('Processing: ' + test_fov.FOV_name)
# Stack of all channels; assumed channel-first, i.e. 4-D (c, z, y, x) when
# multichannel, else 3-D — TODO confirm against FOV.get_channel_stack.
processing_stack = test_fov.get_channel_stack(channel_num=range(test_fov.num_channels))
if len(processing_stack.shape) == 4:
MULTICHANNEL = True
else:
MULTICHANNEL = False
# NOTE(review): MULTICHANNEL is never read anywhere below — dead flag, or the
# single-channel path was never finished; see the decon loop note further down.
logging.info('Input stack shape: {}, dtype: {}'.format(processing_stack.shape,processing_stack.dtype))
# %%
# PSF size caps: x/y are lower bounds (at least 256), z is an upper bound
# (at most 256) — see the max/min calls below.
size_x,size_y,size_z = 256,256,256
if LIFFILE:
# Build a theoretical Gibson-Lanni PSF from the optical metadata stored in
# the LIF file (NA, magnification, voxel sizes, emission wavelength).
psf_var = tfd_psf.GibsonLanni(
na = test_fov.NA, # Numerical aperture
m = test_fov.mag, # Magnification
ni0 = 1.0, # Immersion RI
res_lateral = float(test_fov.resolution[test_fov.resolution['dimension_name'] == 'x']['resolution_nm'].values[0] / 1000) , # X/Y resolution (nm -> microns)
res_axial = float(test_fov.resolution[test_fov.resolution['dimension_name'] == 'z']['resolution_nm'].values[0] / 1000), # Axial resolution (nm -> microns)
# NOTE(review): only channel 0's wavelength is used, so all channels share
# one PSF — matches the "HACK loop over channels" TODO above.
wavelength = float(test_fov.channels[0]['center_wavelength'] / 1000), # Emission wavelength (nm -> microns)
size_x = int(np.max((size_x, int(processing_stack.shape[-1])))),
size_y = int(np.max((size_y, int(processing_stack.shape[-2])))),
size_z = int(np.min((size_z, int(processing_stack.shape[-3])))),
ns = 1.0, # specimen refractive index (RI)
ng0 = 1.0, # Refractive index of coverslip
ti0 = float(test_fov.working_distance_mm * 1000), # microns, working distance (immersion medium thickness) design value
tg0 = 0, # microns, coverslip thickness design value
)
# Persist the PSF parameters next to the run for later inspection.
psf_var.save('./current_psf.json')
# NOTE(review): casting the generated PSF to uint8 truncates all values < 1
# to 0, which would zero out a normalized PSF — confirm this is intended.
psf = psf_var.generate().astype(np.uint8)
logging.info('PSF shape: {}, PSF dtype: {}'.format(psf.shape, psf.dtype))
logging.info('PSF: ' + psf_var.to_json())
else:
# This is meant to be representative of the Leica SP8 50x widefield image capture (all distance units are in microns)
psf = np.zeros_like(processing_stack)
psf = tfd_psf.GibsonLanni(
na=0.9, # Numerical aperture
m=52.5, # Magnification
ni0=1., # Immersion RI
res_lateral=0.085, # X/Y resolution
res_axial=0.3, # Axial resolution
wavelength=0.58, # Emission wavelength
size_x=np.max((size_x, int(processing_stack.shape[2]))),
size_y=np.max((size_y, int(processing_stack.shape[1]))),
size_z=np.min((size_z, int(processing_stack.shape[0]))),
ns = 1, # specimen refractive index (RI)
ng0 = 1, # Refractive index of coverslip
ti0 = 280, # microns, working distance (immersion medium thickness) design value
tg0 = 0, # microns, coverslip thickness design value
).generate()
logging.info((psf.shape, psf.dtype))
# %%
# Run the deconvolution process and note that deconvolution initialization is best kept separate from
# execution since the "initialize" operation corresponds to creating a TensorFlow graph, which is a
# relatively expensive operation and should not be repeated across multiple executions
#
# In case you experience CUDA memory errors, remove the observer and add pad_mode='none'
logging.info("Starting GPU decon for {}".format(test_fov.FOV_name))
# Deconvolve each channel separately against the single shared PSF.
# NOTE(review): this assumes processing_stack is 4-D channel-first; for a 3-D
# single-channel stack shape[0] is z, so each z-slice would be deconvolved
# individually — the MULTICHANNEL flag above is never consulted. Verify.
res = [algo.run(tfd_data.Acquisition(data=processing_stack[ch],kernel=psf), niter=iterations) for ch in range(processing_stack.shape[0])]
logging.info("Finished successfully!")
# Collect the per-channel results back into one array (channel axis first).
decon_list = [res[i].data for i in range(len(res))]
decon_stack = np.array(decon_list)
#logging.info('A new np.array would have the shape: {} with dtype: {}'.format(decon_stack.shape,decon_stack.dtype))
# %%
if LIFFILE:
# Save a maximum-intensity projection over z (axis=1 of the channel-first stack).
output_path_MIP = output_folder + test_fov.FOV_name + '_MIP_decon.tif'
io.imsave(output_path_MIP,np.max(decon_stack,axis=1))
logging.info('Saved MIPs under: {}'.format(output_path_MIP))
# Reslice each channel (z step -> xy step), blur, and save as a 16-bit TIFF.
for i in range(decon_stack.shape[0]):
output_path_stack = output_folder + test_fov.FOV_name + '_ch{:02d}'.format(i) + '_decon.tif'
# Voxel sizes in nm from the LIF metadata drive the reslicing.
step_xy = test_fov.resolution[test_fov.resolution['dimension_name'] == 'x']['resolution_nm'].iloc[0]
step_z = test_fov.resolution[test_fov.resolution['dimension_name'] == 'z']['resolution_nm'].iloc[0]
resliced_stack = tdct_reslice(res[i].data, step_z, step_xy, interpolationmethod='linear', save_img=False)
filtered_stack = gaussian_filter(np.array(resliced_stack),filter_sigma)
# NOTE(review): scaling by 255 then casting to uint16 assumes the data is
# normalized to [0, 1] — confirm; otherwise values clip or alias strangely.
io.imsave(output_path_stack, (filtered_stack * 255).astype(np.uint16), plugin='tifffile', photometric='minisblack')
logging.info('Saved stack under: {}'.format(output_path_stack))
logging.info('Resliced to: {:.2f} nm!'.format(step_xy))
# Extra debug dumps of inputs and results into the working directory.
if LOG_LEVEL == logging.DEBUG:
io.imsave(output_folder + '_' + test_fov.FOV_name +'_input.tif',processing_stack)
# NOTE(review): `res` is a LIST of per-channel results (see the comprehension
# above), so `res.data` on the next lines raises AttributeError — likely
# `decon_stack` (or `res[0].data`) was intended here.
io.imsave('./' + datetime.today().strftime("%Y-%m-%d_%H-%M-%S_") + test_fov.FOV_name + '_decon.tif',res.data.astype(np.float16))
io.imsave('./' + datetime.today().strftime("%Y-%m-%d_%H-%M-%S_") + test_fov.FOV_name + '_input.tif',processing_stack)
io.imsave('./' + datetime.today().strftime("%Y-%m-%d_%H-%M-%S_") + test_fov.FOV_name + '_MIP_decon.tif',np.max(res.data.astype(np.float16),axis=0))
io.imsave('./' + datetime.today().strftime("%Y-%m-%d_%H-%M-%S_") + test_fov.FOV_name +'_MIP_input.tif',np.max(processing_stack,axis=0))