print("%s: error: the following arguments are required: %s" % (parser.prog,r), file=sys.stderr)
sys.exit(1)
# show effective arguments (including defaults)
if not args.quiet:
print('Arguments')
print('---------')
pprint(args_dict)
print()
sys.stdout.flush()
# logging function
log = (lambda *a,**k: None) if args.quiet else tqdm.write
# get list of input files and exit if there are none
file_list = list(Path(args.input_dir).glob(args.input_pattern))
if len(file_list) == 0:
log("No files to process in '%s' with pattern '%s'." % (args.input_dir,args.input_pattern))
sys.exit(0)
# delay imports until after checking that all required arguments are provided
from tifffile import imread, imsave
from csbdeep.utils.tf import keras_import
K = keras_import('backend')
from csbdeep.models import CARE
from csbdeep.data import PercentileNormalizer
sys.stdout.flush()
sys.stderr.flush()
# limit gpu memory
if args.gpu_memory_limit is not None:
    from csbdeep.utils.tf import limit_gpu_memory
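    # Hedged sketch, not shown in the snippet above: csbdeep's limit_gpu_memory
    # takes the fraction of GPU memory to reserve, so a call here might look like
    # the following (keyword usage assumed from the CLI flag name).
    limit_gpu_memory(fraction=args.gpu_memory_limit)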
    s = slice(i*n_patches_per_image, (i+1)*n_patches_per_image)
    X[s], Y[s] = normalization(_X, _Y, x, y, mask, channel)

if shuffle:
    shuffle_inplace(X, Y)

axes = 'SC' + axes.replace('C','')
if channel is None:
    X = np.expand_dims(X, 1)
    Y = np.expand_dims(Y, 1)
else:
    X = np.moveaxis(X, 1+channel, 1)
    Y = np.moveaxis(Y, 1+channel, 1)

if save_file is not None:
    print('Saving data to %s.' % str(Path(save_file)))
    save_training_data(save_file, X, Y, axes)

return X, Y, axes
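# Hedged usage sketch, not part of the snippet above: patches written via
# save_training_data can later be reloaded with csbdeep.io.load_training_data;
# the file name and validation split below are illustrative.
from csbdeep.io import load_training_data
(X, Y), (X_val, Y_val), axes = load_training_data('my_training_data.npz',
                                                  validation_split=0.1, verbose=True)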
│   ├── imageB.tif
│   └── imageC.tif
├── source1
│   ├── imageA.tif
│   └── imageB.tif
└── source2
    ├── imageA.tif
    └── imageC.tif
>>> data = RawData.from_folder(basepath='data', source_dirs=['source1','source2'], target_dir='GT', axes='YX')
>>> n_images = data.size
>>> for source_x, target_y, axes, mask in data.generator():
...     pass
"""
p = Path(basepath)
pairs = [(f, p/target_dir/f.name) for f in chain(*((p/source_dir).glob(pattern) for source_dir in source_dirs))]
len(pairs) > 0 or _raise(FileNotFoundError("Didn't find any images."))
consume(t.exists() or _raise(FileNotFoundError(t)) for s,t in pairs)
axes = axes_check_and_normalize(axes)
n_images = len(pairs)
description = "{p}: target='{o}', sources={s}, axes='{a}', pattern='{pt}'".format(p=basepath, s=list(source_dirs),
o=target_dir, a=axes, pt=pattern)
def _gen():
    for fx, fy in pairs:
        x, y = imread(str(fx)), imread(str(fy))
        len(axes) >= x.ndim or _raise(ValueError())
        yield x, y, axes[-x.ndim:], None

return RawData(_gen, n_images, description)
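# Hedged usage sketch (assumed downstream step, not part of the function above):
# a RawData object like the one returned here is typically passed to
# csbdeep.data.create_patches; patch size, patch count, and file name are illustrative.
from csbdeep.data import RawData, create_patches
raw_data = RawData.from_folder(basepath='data', source_dirs=['source1','source2'],
                               target_dir='GT', axes='YX')
X, Y, XY_axes = create_patches(raw_data, patch_size=(128,128),
                               n_patches_per_image=128, save_file='my_training_data.npz')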
model = CARE(config=None, name=args.model_name, basedir=args.model_basedir)
if args.model_weights is not None:
    print("Loading network weights from '%s'." % args.model_weights)
    model.load_weights(args.model_weights)
normalizer = PercentileNormalizer(pmin=args.norm_pmin, pmax=args.norm_pmax, do_after=args.norm_undo)

n_tiles = args.n_tiles
if n_tiles is not None and len(n_tiles) == 1:
    n_tiles = n_tiles[0]

processed = []

# process all files
for file_in in tqdm(file_list, disable=args.quiet or (n_tiles is not None and np.prod(n_tiles) > 1)):
    # construct output file name
    file_out = Path(args.output_dir) / args.output_name.format(
        file_path = str(file_in.relative_to(args.input_dir).parent),
        file_name = file_in.stem, file_ext = file_in.suffix,
        model_name = args.model_name,
        model_weights = Path(args.model_weights).stem if args.model_weights is not None else None
    )

    # checks
    (file_in.suffix.lower()  in ('.tif','.tiff') and
     file_out.suffix.lower() in ('.tif','.tiff')) or _raise(ValueError('only tiff files supported.'))

    # load and predict restored image
    img = imread(str(file_in))
    restored = model.predict(img, axes=args.input_axes, normalizer=normalizer, n_tiles=n_tiles)

    # restored image could be multi-channel even if input image is not
    axes_out = axes_check_and_normalize(args.input_axes)
    if restored.ndim > img.ndim:
        assert restored.ndim == img.ndim + 1
        assert 'C' not in axes_out
        axes_out += 'C'
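    # Hedged sketch, not the original script's continuation: one way to write the
    # restored image to file_out together with its axes metadata is csbdeep's
    # ImageJ-compatible TIFF writer; the call below is illustrative.
    from csbdeep.io import save_tiff_imagej_compatible
    file_out.parent.mkdir(parents=True, exist_ok=True)
    save_tiff_imagej_compatible(str(file_out), restored, axes=axes_out)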
def export_TF(self, name, description, authors, test_img, axes, patch_shape, fname=None):
"""
name: String
Name of the model.
description: String
A short description of the model e.g. on what data it was trained.
authors: String
Comma seperated list of author names.
patch_shape: The shape of the patches used in model.train().
"""
    if fname is None:
        fname = self.logdir / 'export.bioimage.io.zip'
    else:
        fname = Path(fname)

    input_n_dims = len(test_img.shape)
    if 'C' in axes:
        input_n_dims -= 1
    assert input_n_dims == self.config.n_dim, 'Input and network dimensions do not match.'
    assert test_img.shape[axes.index('X')] == test_img.shape[axes.index('Y')], 'X and Y dimensions are not of same length.'
    test_output = self.predict(test_img, axes)

    # Extract central slice of Z-Stack
    if 'Z' in axes:
        z_dim = axes.index('Z')
        if z_dim != 0:
            test_output = np.moveaxis(test_output, z_dim, 0)
        test_output = test_output[int(test_output.shape[0]/2)]

    # CSBDeep Export
    meta = {
def export_imagej_rois(fname, polygons, set_position=True, compression=ZIP_DEFLATED):
""" polygons assumed to be a list of arrays with shape (id,2,c) """
if isinstance(polygons,np.ndarray):
polygons = (polygons,)
fname = Path(fname)
if fname.suffix == '.zip':
        fname = fname.with_suffix('')  # strip '.zip' but keep the parent directory; the suffix is re-appended below
    with ZipFile(str(fname)+'.zip', mode='w', compression=compression) as roizip:
        for pos, polygroup in enumerate(polygons, start=1):
            for i, poly in enumerate(polygroup, start=1):
                roi = polyroi_bytearray(poly[1], poly[0], pos=(pos if set_position else None))
                roizip.writestr('{pos:03d}_{i:03d}.roi'.format(pos=pos, i=i), roi)
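# Hedged usage sketch (assumed caller, not part of the function above): polygon
# coordinates returned by a StarDist2D prediction can be written out as an ImageJ
# ROI zip; the pretrained model name and file names below are illustrative.
from tifffile import imread
from csbdeep.utils import normalize
from stardist.models import StarDist2D

img = imread('cells.tif')                                  # illustrative input image
model = StarDist2D.from_pretrained('2D_versatile_fluo')    # pretrained demo model
labels, details = model.predict_instances(normalize(img))  # details['coord'] has shape (n, 2, n_rays)
export_imagej_rois('cells_rois', details['coord'])         # writes cells_rois.zip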