nii_files[i] = nii_files[i][2:]

for f in nii_files:
    image, _ = load(os.path.join(image_dir, f))
    label, _ = load(os.path.join(label_dir, f.replace('_0000', '')))

    print(f)

    for i in range(classes):
        class_stats[i] += np.sum(label == i)
        total += np.sum(label == i)

    # normalize images
    image = (image - image.min()) / (image.max() - image.min())

    image = pad_nd_image(image, (image.shape[0], y_shape, z_shape), "constant",
                         kwargs={'constant_values': image.min()})
    label = pad_nd_image(label, (image.shape[0], y_shape, z_shape), "constant",
                         kwargs={'constant_values': label.min()})

    result = np.stack((image, label))

    np.save(os.path.join(output_dir, f.split('.')[0] + '.npy'), result)
    print(f)

print(total)
for i in range(classes):
    print(class_stats[i], class_stats[i] / total)
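In the loop above, pad_nd_image from batchgenerators pads each volume out to a fixed (y, z) footprint while leaving the first axis alone; it only pads, so axes that already meet the target size are left unchanged. A minimal, self-contained sketch of that call, with made-up shapes and fill value:

import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image

# hypothetical volume: 32 slices of 50 x 40 voxels
image = np.random.rand(32, 50, 40).astype(np.float32)

# pad the last two axes up to 64 x 64, filling with the image minimum;
# the first axis keeps its original size
padded = pad_nd_image(image, (image.shape[0], 64, 64), "constant",
                      kwargs={'constant_values': image.min()})
print(padded.shape)  # (32, 64, 64)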
def preprocessing(self, sample, training=True):
    # Access data
    img_data = sample.img_data
    seg_data = sample.seg_data
    # Transform data from channel-last to channel-first structure
    img_data = np.moveaxis(img_data, -1, 0)
    if training: seg_data = np.moveaxis(seg_data, -1, 0)
    # Pad imaging data
    img_data, crop_coords = pad_nd_image(img_data, self.min_size,
                                         mode=self.pad_mode,
                                         kwargs={"constant_values": self.pad_value_img},
                                         return_slicer=True,
                                         shape_must_be_divisible_by=self.shape_must_be_divisible_by)
    if training:
        seg_data = pad_nd_image(seg_data, self.min_size,
                                mode=self.pad_mode,
                                kwargs={"constant_values": self.pad_value_seg},
                                return_slicer=False,
                                shape_must_be_divisible_by=self.shape_must_be_divisible_by)
    # Cache current crop coordinates for later postprocessing
    if not training: self.original_coords = crop_coords
    # Transform data from channel-first back to channel-last structure
    img_data = np.moveaxis(img_data, 0, -1)
    if training: seg_data = np.moveaxis(seg_data, 0, -1)
    # Save resampled imaging data to sample
    sample.img_data = img_data
    sample.seg_data = seg_data
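With return_slicer=True, pad_nd_image also returns crop coordinates (a slicer) that can later cut the padded array, or a prediction made on it, back down to the original field of view, which is what the cached original_coords above is for. A minimal round-trip sketch of that idea; the shapes and padding settings here are illustrative only, not the ones this class actually uses:

import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image

img = np.random.rand(30, 45, 60)  # hypothetical channel-first 3D volume

# pad every axis up to the next multiple of 16 and remember how
padded, slicer = pad_nd_image(img, mode="constant",
                              kwargs={"constant_values": 0},
                              return_slicer=True,
                              shape_must_be_divisible_by=(16, 16, 16))
print(padded.shape)  # (32, 48, 64)

# after inference on the padded array, crop back to the original shape
# (in the batchgenerators version assumed here the slicer is a list of
# slice objects, so wrap it in tuple() before indexing)
restored = padded[tuple(slicer)]
print(restored.shape == img.shape)  # True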
def pad_patch(patch, patch_shape, return_slicer=False):
    # Initialize stat length to overwrite batchgenerators default
    kwargs = {"stat_length": None}
    # Transform prediction from channel-last to channel-first structure
    patch = np.moveaxis(patch, -1, 1)
    # Run padding
    padding_results = pad_nd_image(patch, new_shape=patch_shape,
                                   mode="minimum", return_slicer=return_slicer,
                                   kwargs=kwargs)
    # Return padding results
    if return_slicer:
        # Transform data from channel-first back to channel-last structure
        padded_patch = np.moveaxis(padding_results[0], 1, -1)
        return padded_patch, padding_results[1]
    else:
        # Transform data from channel-first back to channel-last structure
        padding_results = np.moveaxis(padding_results, 1, -1)
        return padding_results
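pad_patch wraps pad_nd_image for channel-last patches: it temporarily moves the channel axis next to the batch axis, pads the trailing spatial axes up to patch_shape with numpy's "minimum" mode (stat_length=None makes the minimum be taken over the whole axis), and then moves the channels back. A hypothetical call, with invented shapes:

import numpy as np

# hypothetical batch of one patch: (batch, x, y, z, channels)
patch = np.random.rand(1, 120, 140, 60, 2).astype(np.float32)

padded = pad_patch(patch, patch_shape=(160, 160, 80))
print(padded.shape)  # (1, 160, 160, 80, 2)

# with return_slicer=True the slicer for undoing the padding comes back as well
padded, slicer = pad_patch(patch, patch_shape=(160, 160, 80), return_slicer=True)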
# y = pad_nd_image(y, self.Config.INPUT_DIM, mode='constant', kwargs={'constant_values': 0})
# x = center_crop_2D_image_batched(x, self.Config.INPUT_DIM)
# y = center_crop_2D_image_batched(y, self.Config.INPUT_DIM)

# If we want to convert e.g. a 1.25mm (HCP) image to a 2mm image (bb):
# x, y = self._zoom_x_and_y(x, y, 0.67)  # very slow -> try spatial_transform, should be fast

if self.Config.PAD_TO_SQUARE:
    # Crop and pad to input size
    x, y = crop(x, y, crop_size=self.Config.INPUT_DIM)  # does not work with images that have batch and channel dims
else:
    # Works -> are the results as good?
    # Pads each axis to a multiple of 16. (Each sample can end up with different dimensions;
    # x and y can also differ.) This is needed for the Schizo dataset.
    x = pad_nd_image(x, shape_must_be_divisible_by=(16, 16), mode='constant', kwargs={'constant_values': 0})
    y = pad_nd_image(y, shape_must_be_divisible_by=(16, 16), mode='constant', kwargs={'constant_values': 0})

# Does not make it slower
x = x.astype(np.float32)
y = y.astype(np.float32)

# possible optimization: sample slices from different patients and pad all to the same size (size of the biggest)
data_dict = {"data": x,   # (batch_size, channels, x, y, [z])
             "seg": y,    # (batch_size, channels, x, y, [z])
             "slice_dir": slice_direction}
return data_dict
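In the else branch above, shape_must_be_divisible_by=(16, 16) only constrains the trailing two axes, so the batch and channel dimensions pass through untouched while x and y are padded up to the next multiple of 16. A small sketch with invented shapes:

import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image

# hypothetical batch of 2D slices: (batch_size, channels, x, y)
x = np.random.rand(4, 9, 100, 90).astype(np.float32)

# only the axes matched by shape_must_be_divisible_by (the last two) are padded
x = pad_nd_image(x, shape_must_be_divisible_by=(16, 16), mode='constant',
                 kwargs={'constant_values': 0})
print(x.shape)  # (4, 9, 112, 96)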