perform the axes permutation of `x`, `y`, and `mask`.
"""
axes = axes_check_and_normalize(axes)
def _generator(inputs):
    for x, y, axes_in, mask in inputs:
        axes_in = axes_check_and_normalize(axes_in)
        if axes_in != axes:
            # print('permuting axes from %s to %s' % (axes_in,axes))
            x = move_image_axes(x, axes_in, axes, True)
            y = move_image_axes(y, axes_in, axes, True)
            if mask is not None:
                mask = move_image_axes(mask, axes_in, axes)
        yield x, y, axes, mask
return Transform('Permute axes to %s' % axes, _generator, 1)
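# Hedged usage sketch (not part of the function above): it assumes `permute_axes`
# is importable from `csbdeep.data` and that inputs are (x, y, axes, mask) tuples,
# as in the generator code.
import numpy as np
from csbdeep.data import permute_axes

t = permute_axes('ZYX')                          # desired axes order
x = np.zeros((128, 128, 32), dtype=np.float32)   # image currently stored as YXZ
x_p, y_p, axes_p, mask_p = next(t.generator([(x, x.copy(), 'YXZ', None)]))
print(axes_p, x_p.shape)                         # ZYX (32, 128, 128)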
"""
## images and transforms
if transforms is None:
    transforms = []
transforms = list(transforms)
if patch_axes is not None:
    transforms.append(permute_axes(patch_axes))
if len(transforms) == 0:
    transforms.append(Transform.identity())
if normalization is None:
    normalization = lambda patches_x, patches_y, x, y, mask, channel: (patches_x, patches_y)

image_pairs, n_raw_images = raw_data.generator(), raw_data.size
tf = Transform(*zip(*transforms)) # convert list of Transforms into Transform of lists
image_pairs = compose(*tf.generator)(image_pairs) # combine all transformations with raw images as input
n_transforms = np.prod(tf.size)
n_images = n_raw_images * n_transforms
n_patches = n_images * n_patches_per_image
n_required_memory_bytes = 2 * n_patches * np.prod(patch_size) * 4

## memory check
_memory_check(n_required_memory_bytes)

## summary
if verbose:
    print('='*66)
    print('%5d raw images x %4d transformations = %5d images' % (n_raw_images,n_transforms,n_images))
    print('%5d images x %4d patches per image = %5d patches in total' % (n_images,n_patches_per_image,n_patches))
    print('='*66)
    print('Input data:')
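# The memory estimate above counts two float32 arrays (X and Y), hence the factors
# 2 and 4 bytes per value. A quick sanity check with hypothetical numbers: 50 raw
# images, 1 transform, 16 patches per image, patch_size=(32,128,128).
import numpy as np
n_patches_example = 50 * 1 * 16                                        # 800 patches
n_bytes_example = 2 * n_patches_example * np.prod((32, 128, 128)) * 4  # float32 for X and Y
print('%.1f GB' % (n_bytes_example / 1e9))                             # ~3.4 GB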
Returns
-------
Transform
    Returns a :class:`Transform` object whose `generator` will
    perform image cropping of `x`, `y`, and `mask`.
"""
slices = tuple(slices)
def _generator(inputs):
    for x, y, axes, mask in inputs:
        axes = axes_check_and_normalize(axes)
        len(axes) == len(slices) or _raise(ValueError("number of slices must match the number of axes."))
        yield x[slices], y[slices], axes, (mask[slices] if mask is not None else None)
return Transform('Crop images (%s)' % str(slices), _generator, 1)
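# Hedged usage sketch, assuming `crop_images` is importable from `csbdeep.data`:
# one slice per axis, applied identically to source, target, and mask.
import numpy as np
from csbdeep.data import crop_images

t = crop_images([slice(0, 16), slice(0, 64), slice(0, 64)])
x = np.zeros((32, 128, 128), dtype=np.float32)
x_c, y_c, axes_c, mask_c = next(t.generator([(x, x.copy(), 'ZYX', None)]))
print(x_c.shape)   # (16, 64, 64)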
Returns
-------
Transform
    Returns a :class:`Transform` object whose `generator` will
    perform broadcasting of `y` to match the shape of `x`.
"""
def _generator(inputs):
    for x, y, axes_x, mask in inputs:
        if target_axes is not None:
            axes_y = axes_check_and_normalize(target_axes, length=y.ndim)
            y = move_image_axes(y, axes_y, axes_x, True)
        yield x, np.broadcast_to(y, x.shape), axes_x, mask
return Transform('Broadcast target image to the shape of source', _generator, 1)
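# Hedged usage sketch, assuming `broadcast_target` is importable from `csbdeep.data`:
# a 2D target with axes YX is expanded to match a 3D ZYX source image.
import numpy as np
from csbdeep.data import broadcast_target

t = broadcast_target(target_axes='YX')
x = np.zeros((32, 128, 128), dtype=np.float32)   # source, axes ZYX
y = np.zeros((128, 128), dtype=np.float32)       # target, axes YX
x_b, y_b, axes_b, mask_b = next(t.generator([(x, y, 'ZYX', None)]))
print(y_b.shape)   # (32, 128, 128)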
def identity():
    """
    Returns
    -------
    Transform
        Identity transformation that passes every input through unchanged.
    """
    def _gen(inputs):
        for d in inputs:
            yield d
    return Transform('Identity', _gen, 1)
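# Minimal sketch: the identity transform passes each (x, y, axes, mask) tuple
# through unchanged. It is called as `Transform.identity()` elsewhere in this file,
# so it is presumably defined as a static method of `Transform`.
t = Transform.identity()
assert next(t.generator([('x', 'y', 'YX', None)])) == ('x', 'y', 'YX', None)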
>>> X, Y, XY_axes = create_patches(raw_data, patch_size=(32,128,128), n_patches_per_image=16)
Todo
----
- Save created patches directly to disk using :class:`numpy.memmap` or similar?
  Would allow working with large data that doesn't fit into memory.
"""
        if _subsample != subsample:
            warnings.warn('changing subsample from %s to %s' % (str(_subsample), str(subsample)))
        target = _make_divisible_by_subsample(target, subsample_size)
        x = _make_divisible_by_subsample(x, subsample_size)
        x = _scale_down_up(x, subsample)
        assert x.shape == target.shape, (x.shape, target.shape)
        target = _normalize_data(target, undo=True)
        x = _normalize_data(x, undo=True)
        yield x, target, axes, mask
return Transform('Anisotropic distortion (along %s axis)' % subsample_axis, _generator, 1)
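# Hedged sketch of typical usage: the anisotropic distortion is passed to
# create_patches via its `transforms` argument. Parameter names and the folder
# layout below follow the CSBDeep documentation/examples and should be treated as
# assumptions, not a verbatim recipe.
from csbdeep.data import RawData, create_patches, anisotropic_distortions

anisotropic_transform = anisotropic_distortions(
    subsample = 4,       # simulate 4x undersampling along the subsample axis
    psf       = None,    # optionally convolve with a point spread function first
)
X, Y, XY_axes = create_patches(
    raw_data            = RawData.from_folder(basepath='data', source_dirs=['train'], target_dir='train', axes='ZYX'),
    patch_size          = (1, 128, 128),
    n_patches_per_image = 16,
    transforms          = [anisotropic_transform],
)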
if transforms is None:
    transforms = []
transforms = list(transforms)
transforms.insert(0, broadcast_target(target_axes))
kwargs['transforms'] = transforms

save_file = kwargs.pop('save_file', None)

if any(s is None for s in patch_size):
    patch_axes = kwargs.get('patch_axes')
    if patch_axes is not None:
        _transforms = list(transforms)
        _transforms.append(permute_axes(patch_axes))
    else:
        _transforms = transforms
    tf = Transform(*zip(*_transforms))
    image_pairs = compose(*tf.generator)(raw_data.generator())
    x, y, axes, mask = next(image_pairs) # get the first entry from the generator
    patch_size = list(patch_size)
    for i, (a, s) in enumerate(zip(axes, patch_size)):
        if s is not None: continue
        a in reduction_axes or _raise(ValueError("entry of patch_size is None for non-reduction axis %s." % a))
        patch_size[i] = x.shape[i]
    patch_size = tuple(patch_size)
    del x, y, axes, mask

X, Y, axes = create_patches(
    raw_data            = raw_data,
    patch_size          = patch_size,
    n_patches_per_image = n_patches_per_image,
    **kwargs
)
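# Hedged sketch of the projection use case this function targets: 3D source stacks
# and 2D target projections. Folder names are placeholders and the loader call may
# need adapting; the parameter names (reduction_axes, target_axes) appear in the
# code above.
from csbdeep.data import RawData, create_patches_reduced_target

X, Y, XY_axes = create_patches_reduced_target(
    raw_data            = RawData.from_folder(basepath='data', source_dirs=['stacks'], target_dir='projections', axes='ZYX'),
    patch_size          = (None, 128, 128),   # None along the reduced axis (filled in as shown above)
    n_patches_per_image = 16,
    reduction_axes      = 'Z',
    target_axes         = 'YX',               # axes of the 2D target images
)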