        fft[c] = np.fft.ifft2(self.data[curT, c])
        fftWin = ImgDialog(fft, title=self.title + " FFT", shifted=True)
        fftWin.show()

    @QtCore.pyqtSlot()
    def fftShiftChecked(self, checked):
        self.data.setFFTshifted(checked)


if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    if len(sys.argv) > 1:
        import tifffile as tf
        path = sys.argv[1]
        im = tf.imread(path)
        if "fft" in sys.argv:
            im = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(im)))
        path = os.path.basename(path)
    else:
        path = None
        im = np.random.rand(4, 2, 10, 100, 100) * 32000
    main = ImgDialog(im, title=path or "Figure")
    main.show()
    sys.exit(app.exec_())
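# A minimal, self-contained sketch of the centered-FFT idiom the "__main__"
# block above applies when "fft" is passed on the command line (assumes only
# numpy and matplotlib; the random array stands in for a real image):
import numpy as np
import matplotlib.pyplot as plt

demo = np.random.rand(256, 256)                               # placeholder image
spectrum = np.fft.fftshift(np.fft.fftn(np.fft.fftshift(demo)))
plt.imshow(np.log1p(np.abs(spectrum)), cmap="gray")           # log scale for display
plt.title("centered log power spectrum")
plt.show()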
imgs_blend = []
pattern = re.compile(r'(\d+)_')
tag_output = '{:d}_channel'.format(n_combined)
idx_set_old = None
for i, path in enumerate(paths_sources):
    path_basename = os.path.basename(path)
    match = pattern.search(path_basename)
    if match is None:
        print(path)
        raise NotImplementedError
    idx_set = int(match.groups()[0])
    if i == 0:
        idx_set_old = idx_set
    if any(tag in path_basename for tag in opts.tags):
        print('reading:', path)
        imgs_blend.append(tifffile.imread(path))
    if len(imgs_blend) == n_combined:
        assert idx_set_old == idx_set
        path_output = os.path.join(
            opts.path_output_dir, '{:03d}_{:s}.tif'.format(idx_set, tag_output)
        )
        img_combo = blend_ar(imgs_blend, (1/len(imgs_blend),)*len(imgs_blend))
        tifffile.imsave(path_output, img_combo, photometric='rgb')
        print('saved:', path_output)
        imgs_blend = []
        idx_set_old = idx_set
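# blend_ar() is defined elsewhere in that project; a plausible equal-weight
# blend of same-shaped uint8 images can be sketched as a weighted sum
# (hypothetical helper, for illustration only; the real blend_ar may differ):
import numpy as np

def blend_arrays(arrays, weights):
    """Weighted sum of equally shaped uint8 images, clipped back to uint8."""
    out = np.zeros(arrays[0].shape, dtype=np.float64)
    for ar, w in zip(arrays, weights):
        out += w * np.asarray(ar, dtype=np.float64)
    return np.clip(out, 0, 255).astype(np.uint8)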
# process all files
for file_in in tqdm(file_list, disable=args.quiet or (n_tiles is not None and np.prod(n_tiles) > 1)):
    # construct output file name
    file_out = Path(args.output_dir) / args.output_name.format(
        file_path=str(file_in.relative_to(args.input_dir).parent),
        file_name=file_in.stem, file_ext=file_in.suffix,
        model_name=args.model_name,
        model_weights=Path(args.model_weights).stem if args.model_weights is not None else None,
    )

    # checks
    (file_in.suffix.lower() in ('.tif', '.tiff') and
     file_out.suffix.lower() in ('.tif', '.tiff')) or _raise(ValueError('only tiff files supported.'))

    # load and predict restored image
    img = imread(str(file_in))
    restored = model.predict(img, axes=args.input_axes, normalizer=normalizer, n_tiles=n_tiles)

    # restored image could be multi-channel even if input image is not
    axes_out = axes_check_and_normalize(args.input_axes)
    if restored.ndim > img.ndim:
        assert restored.ndim == img.ndim + 1
        assert 'C' not in axes_out
        axes_out += 'C'

    # convert data type (if necessary)
    restored = restored.astype(np.dtype(args.output_dtype), copy=False)

    # save to disk
    if not args.dry_run:
        file_out.parent.mkdir(parents=True, exist_ok=True)
        if args.imagej_tiff:
            # assumed completion of a truncated line: write an ImageJ-compatible
            # TIFF (csbdeep.io.save_tiff_imagej_compatible) or a plain TIFF
            save_tiff_imagej_compatible(str(file_out), restored, axes_out)
        else:
            imsave(str(file_out), restored)
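# Stand-alone usage sketch of the prediction call above (model name, basedir,
# axes, and tiling are assumptions; requires a trained CSBDeep CARE model on disk):
from tifffile import imread, imwrite
from csbdeep.models import CARE

x = imread('input.tif')                                    # assumed ZYX stack
care_model = CARE(config=None, name='my_model', basedir='models')
restored_img = care_model.predict(x, axes='ZYX', n_tiles=(1, 4, 4))  # tile Y/X to fit GPU memory
imwrite('restored.tif', restored_img.astype('float32'))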
os.makedirs(path_out_dir)
print('making gif from images in "{:s}"'.format(col))
idx_col = df.columns.get_loc(col)

# estimate a display range from a sample of the images
imgs = []
for idx in indices_sample:
    path_img = os.path.join(dirname, df.iloc[idx, idx_col])
    print(path_img)
    imgs.append(tifffile.imread(path_img))
    print(np.percentile(imgs[-1], range_percentile))
range_val = np.percentile(imgs, range_percentile)
print('DEBUG: range_val:', range_val)

# extract the chosen z-slice from every image, convert to uint8, and save it
imgs_out = []
for idx in range(df.shape[0]):
    path_img = os.path.join(dirname, df.iloc[idx, idx_col])
    img = tifffile.imread(path_img)[:, z_slice]
    img_uint8 = to_uint8(img, range_val)
    path_save = os.path.join(
        path_out_dir,
        '{:03d}_{:s}_z{:02d}.tiff'.format(idx, tag, z_slice)
    )
    tifffile.imsave(path_save, img_uint8)
    print('wrote:', path_save)
    imgs_out.append(path_save)

# assemble the GIF with ImageMagick
path_gif = os.path.join(path_out_dir, '{:s}_z{:02d}.gif'.format(tag, z_slice))
cmd = 'convert -delay {:d} {:s} {:s}'.format(delay, ' '.join(imgs_out), path_gif)
subprocess.run(cmd, shell=True, check=True)
print('wrote:', path_gif)
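# The GIF above is assembled by shelling out to ImageMagick's `convert`; a
# pure-Python alternative (sketch, assuming the imageio package is available
# and imgs_out still holds the per-frame file paths):
import imageio

frames = [imageio.imread(p) for p in imgs_out]
imageio.mimsave(path_gif, frames)   # frame timing can be set via the `duration`
                                    # keyword; note `convert -delay` is in 1/100 s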
def _gen():
    for fx, fy in xy_name_pairs:
        x, y = imread(str(fx)), imread(str(fy))
        # x, y = x[:, 256:-256, 256:-256], y[:, 256:-256, 256:-256]  # tmp
        x.shape == y.shape or _raise(ValueError())
        len(axes) >= x.ndim or _raise(ValueError())
        yield x, y, axes[-x.ndim:], None
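# `_raise` above lets a raise statement appear inside an `or` expression; a
# minimal version of such a helper (as found in csbdeep.utils) is simply:
def _raise(e):
    raise e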
    # corrected = camcor(interleaved)
    start = time.time()
    E.correct_flash(target="cuda", median=False)
    end = time.time()
    print("CUDA Time: " + str(end - start))

    start = time.time()
    E.correct_flash(target="cpu", median=False)
    end = time.time()
    print("Parallel Time: " + str(end - start))
else:
    p = os.path.abspath(sys.argv[1]).replace("\\", "")
    if os.path.isfile(p):
        tf.imshow(deskewGPU(tf.imread(p), 0.5))
        plt.show()
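# A small timing helper in the same spirit as the start/end pairs above
# (sketch; time.perf_counter is better suited to benchmarking than time.time):
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    start = time.perf_counter()
    yield
    print("{} Time: {:.3f} s".format(label, time.perf_counter() - start))

# usage (E is the experiment object from the snippet above):
# with timed("CUDA"):
#     E.correct_flash(target="cuda", median=False)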
folder = plib.Path(folder)
if not folder.is_dir():
    raise IOError('MIP folder does not exist: {}'.format(str(folder)))
try:
    filelist = []
    tiffs = []
    channelCounts = []
    c = 0
    while True:
        channelFiles = sorted(folder.glob('*ch{}_stack*MIP_{}.tif'.format(c, axis)))
        if not len(channelFiles):
            break  # no MIPs in this channel
        # this assumes that there are no gaps in the channels (i.e. ch1, ch3 but not 2)
        for file in channelFiles:
            tiffs.append(tf.imread(str(file)))
            filelist.append(file)
        channelCounts.append(len(channelFiles))
        c += 1
    if not len(filelist):
        return None  # there were no MIPs for this axis
    if c > 0:
        nt = np.max(channelCounts)
        if len(set(channelCounts)) > 1:
            raise ValueError('Cannot merge MIPS with different number of '
                             'timepoints per channel')
        if len(tiffs) != c * nt:
            raise ValueError('Number of images does not equal nC * nT')
        stack = np.stack(tiffs)
        stack = stack.reshape((c, 1, nt, stack.shape[-2], stack.shape[-1]))  # assumed completion of a truncated line: (nC, 1, nT, nY, nX)
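# Sketch of where the truncated reshape above is headed: fold nC*nT
# two-dimensional MIPs into a single hyperstack and write it as an
# ImageJ-compatible TIFF (axis ordering and helper name are assumptions):
import numpy as np
import tifffile as tf

def merge_mips_sketch(tiffs, n_c, n_t, path_out):
    """tiffs: 2-D MIPs ordered channel-major (all timepoints of ch0, then ch1, ...)."""
    ny, nx = tiffs[0].shape
    stack = np.stack(tiffs).reshape((n_c, n_t, ny, nx))        # (C, T, Y, X)
    hyperstack = stack.transpose(1, 0, 2, 3)[:, np.newaxis]    # (T, Z=1, C, Y, X)
    tf.imwrite(path_out, hyperstack, imagej=True)              # ImageJ expects TZCYX order
    return hyperstack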
        break
    idx_img = match.groups()[0]
    if any(tag in path_basename for tag in TAGS_SIGNAL):
        idx_col = 0
        val_min, val_max = val_range_signal
        entry_log['path_signal'] = path
    if any(tag in path_basename for tag in TAGS_TARGET):
        idx_col = 1
        val_min, val_max = val_range_target
        entry_log['path_target'] = path
    if any(tag in path_basename for tag in TAGS_PREDICTION):
        idx_col = 2
        val_min, val_max = val_range_prediction
        entry_log['path_prediction'] = path
    if idx_col is not None:
        ar_pre = tifffile.imread(path)
        print('DEBUG: {:30s} {:6.2f} {:6.2f}'.format(path_basename, np.min(ar_pre), np.max(ar_pre)))
        ar = to_uint8(ar_pre, val_min, val_max)
        if idx_img != idx_old:
            n_cols_done = 0
            idx_old = idx_img
            shape = (ar.shape[1]*n_z_per_img + (n_z_per_img - 1)*padding_h,
                     ar.shape[2]*3 + 2*padding_v)
            ar_fig = np.ones(shape, dtype=np.uint8)*255
            inc = int(ar.shape[0]/(n_z_per_img + 1))
            z_indices = list(range(inc, ar.shape[0], inc))[:n_z_per_img]
            z_indices = np.flip(z_indices, axis=0)
            entry_log['z_indices'] = z_indices
        offset_x = idx_col*(ar.shape[2] + padding_h)
        for idx_row, z_index in enumerate(z_indices):
            offset_y = idx_row*(ar.shape[1] + padding_v)
            img = ar[z_index].copy()
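# The loop above is about to place each z-slice into the montage canvas; the
# paste itself reduces to slice assignment (hypothetical helper, for illustration):
def paste_tile(canvas, tile, offset_y, offset_x):
    """Copy a 2-D tile into a larger 2-D canvas at (offset_y, offset_x)."""
    h, w = tile.shape
    canvas[offset_y:offset_y + h, offset_x:offset_x + w] = tile
    return canvas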
def main(infile, nx, nz, sig=1, pad=12):
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            indat = tf.imread(infile)
    except IOError:
        print("File %s does not exist or is not readable.\nQuitting." % infile)
        return

    # locate the brightest bead in a blurred maximum-intensity projection
    mip = indat.max(0)
    mipblur = gaussian_filter(mip, sig)
    maxy, maxx = np.argwhere(mipblur == mipblur.max())[0]
    print("bead detected at ({},{})".format(maxx, maxy))

    # sum a (2*pad)**2 window around the bead in every plane and subtract background
    beadslice = indat[:, maxy-pad:maxy+pad, maxx-pad:maxx+pad].astype(np.float64)  # np.float was removed in NumPy 1.24
    background = indat[:, :, 2].mean(1)
    beadsums = beadslice.sum((1, 2)) - (4 * pad * pad * background)
    xzpsf = np.reshape(beadsums, (int(nz), int(nx))).astype(np.float32)
    tf.imsave(infile.replace('.tif', 'xzPSF_x%d_y%d.tif' % (maxx, maxy)), xzpsf)
    return xzpsf
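# Example invocation of main() above (file name and PSF grid size are
# hypothetical): the bead stack is reduced to per-plane, background-subtracted
# sums and reshaped into an nz-by-nx cross-section.
# xzpsf = main("bead_stack.tif", nx=50, nz=100, sig=1, pad=12)
# print(xzpsf.shape)   # -> (100, 50)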