import cntk as C

def create_reader(path, is_training, input_dim, label_dim):
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        features=C.io.StreamDef(field='x', shape=input_dim, is_sparse=True),
        labels=C.io.StreamDef(field='y', shape=label_dim, is_sparse=False)
    )), randomize=is_training, max_sweeps=C.io.INFINITELY_REPEAT if is_training else 1)
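The reader above only defines the input pipeline. A minimal sketch, assuming `import cntk as C` and placeholder dimensions, minibatch size, and file name, of binding its two streams to model inputs and pulling one minibatch:

input_dim, label_dim = 2000, 5                                    # placeholder dimensions
reader = create_reader("train.ctf", True, input_dim, label_dim)   # "train.ctf" is an illustrative path
x = C.input_variable(input_dim, is_sparse=True)                   # matches the sparse 'features' stream
y = C.input_variable(label_dim)
input_map = {x: reader.streams.features, y: reader.streams.labels}
data = reader.next_minibatch(64, input_map=input_map)             # dict keyed by x and y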
# (snippet begins mid-function: "transforms = []" and the "use_fill_scale" flag are
#  assumed placeholders so the if/else parses; the first branch stretches the image
#  to the target size, the second pads it instead)
transforms = []
if use_fill_scale:
    transforms += [
        xforms.scale(width=img_width, height=img_height, channels=img_channels, interpolations='linear',
                     scale_mode='fill'),
    ]
else:
    # TODO resize bounding boxes
    transforms += [
        xforms.scale(width=img_width, height=img_height, channels=img_channels, interpolations='linear',
                     scale_mode='pad', pad_value=114),
    ]
image_source = ImageDeserializer(image_file, StreamDefs(features=StreamDef(field='image', transforms=transforms)))
# read rois and labels
roi_source = CTFDeserializer(roi_file, StreamDefs(label=StreamDef(field='rois', shape=output_size)))
rc = MinibatchSource([image_source, roi_source], randomize=False, trace_level=TraceLevel.Error,
                     multithreaded_deserializer=multithreaded_deserializer, max_samples=max_samples)  # , max_epochs=max_epochs)
return rc
# read images
nrImages = len(readTable(map_file))
transforms = [scale(width=img_width, height=img_height, channels=3,
                    scale_mode="pad", pad_value=114, interpolations='linear')]
image_source = ImageDeserializer(map_file, StreamDefs(features=StreamDef(field='image', transforms=transforms)))
# read rois and labels
rois_dim = 4 * n_rois
label_dim = n_classes * n_rois
roi_source = CTFDeserializer(roi_file, StreamDefs(
    rois=StreamDef(field='rois', shape=rois_dim, is_sparse=False)))
label_source = CTFDeserializer(label_file, StreamDefs(
    roiLabels=StreamDef(field='roiLabels', shape=label_dim, is_sparse=False)))
# define a composite reader
mb = MinibatchSource([image_source, roi_source, label_source], max_samples=sys.maxsize, randomize=randomize)
return (mb, nrImages)
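Each deserializer in the composite reader above contributes one named stream, so a consumer binds all three by the names given in StreamDefs. A sketch, assuming `import cntk as C` and placeholder input variables, of pulling one sample from `mb`:

image_input = C.input_variable((3, img_height, img_width))   # channels-first, per the scale transform above
roi_input = C.input_variable(rois_dim)
label_input = C.input_variable(label_dim)
input_map = {
    image_input: mb.streams.features,    # from the ImageDeserializer
    roi_input: mb.streams.rois,          # from the roi CTFDeserializer
    label_input: mb.streams.roiLabels,   # from the label CTFDeserializer
}
data = mb.next_minibatch(1, input_map=input_map)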
def create_reader(path, is_training, input_dim, label_dim):
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        features=C.io.StreamDef(field='features', shape=input_dim),
        labels=C.io.StreamDef(field='labels', shape=label_dim)
    )), randomize=is_training, max_sweeps=C.io.INFINITELY_REPEAT if is_training else 1)
def create_reader(path, is_training, input_dim, label_dim):
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim, is_sparse=False),
        labels=StreamDef(field='labels', shape=label_dim, is_sparse=False)
    )), randomize=False, max_sweeps=INFINITELY_REPEAT if is_training else 1)
def create_reader(path, is_training, input_dim, label_dim):
    return cntk.io.MinibatchSource(cntk.io.CTFDeserializer(path, cntk.io.StreamDefs(
        features=cntk.io.StreamDef(field='features', shape=input_dim),
        labels=cntk.io.StreamDef(field='labels', shape=label_dim)
    )), randomize=is_training, max_sweeps=cntk.io.INFINITELY_REPEAT if is_training else 1)
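These create_reader variants all consume the CNTK text format (CTF), where each StreamDef's field names a |-prefixed column that must appear on every line of the file. A small sketch, with illustrative dimensions and file name, that writes such a file and opens it with the reader above:

sample_ctf = (
    "|features 1.0 0.0 3.5 2.0 |labels 0 1\n"
    "|features 0.2 4.1 0.0 1.0 |labels 1 0\n"
)
with open("sample.ctf", "w") as f:    # illustrative file name
    f.write(sample_ctf)
reader = create_reader("sample.ctf", is_training=True, input_dim=4, label_dim=2)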
def create_reader(path, is_training, input_dim, label_dim):
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim, is_sparse=False),
        labels=StreamDef(field='labels', shape=label_dim, is_sparse=False)
    )), randomize=is_training, epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
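When is_training is False, these readers stop after a single pass over the data (max_sweeps=1 above, or FULL_DATA_SWEEP with the older epoch_size argument), and next_minibatch then returns an empty dictionary once the sweep is exhausted. A sketch of an evaluation loop built on that behavior, assuming reader_test, input_map, and the batch size are already set up:

while True:
    data = reader_test.next_minibatch(512, input_map=input_map)
    if not data:    # empty dict: the single sweep over the test data is done
        break
    # evaluate the batch here, e.g. trainer.test_minibatch(data)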
if not os.path.exists(map_file):    # guard assumed; the original snippet starts at the raise below
    raise RuntimeError("File '%s' does not exist." % map_file)

# transformation pipeline for the features has jitter/crop only when training
transforms = []
if train:
    transforms += [
        xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio')  # train uses jitter
    ]
transforms += [
    xforms.scale(width=par_image_width, height=par_image_height, channels=par_num_channels, interpolations='linear')
]
# deserializer
return cntk.io.MinibatchSource(
    cntk.io.ImageDeserializer(map_file, cntk.io.StreamDefs(
        features=cntk.io.StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
        labels=cntk.io.StreamDef(field='label', shape=par_num_classes))),  # and second as 'label'
    randomize=train,
    max_samples=total_number_of_samples,
    multithreaded_deserializer=True)
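ImageDeserializer reads a plain-text map file with one image per line: the image path, a tab, and the integer class label, which back the 'image' and 'label' fields referenced above. A small sketch that writes such a file; the file name, paths, and labels are placeholders:

with open("train_map.txt", "w") as f:
    f.write("images/cat_001.jpg\t0\n")
    f.write("images/dog_017.jpg\t1\n")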
import cntk.io.transforms as xforms
from cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs

def create_mb_source(map_file, image_width, image_height, num_channels, num_classes, boTrain):
    transforms = []
    if boTrain:
        # Scale to a square image. Without this, the cropping transform would chop the larger
        # dimension of an image to make it square, and then take 0.9 crops from within the squared image.
        transforms += [xforms.scale(width=2*image_width, height=2*image_height, channels=num_channels,
                                    interpolations='linear', scale_mode='pad', pad_value=114)]
        transforms += [xforms.crop(crop_type='randomside', side_ratio=0.9, jitter_type='uniratio')]  # Randomly crop square area
    transforms += [xforms.scale(width=image_width, height=image_height, channels=num_channels,       # Scale down and pad
                                interpolations='linear', scale_mode='pad', pad_value=114)]
    if boTrain:
        transforms += [xforms.color(brightness_radius=0.2, contrast_radius=0.2, saturation_radius=0.2)]
    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms),
        labels=StreamDef(field='label', shape=num_classes))),
        randomize=boTrain,
        multithreaded_deserializer=True)