enc_node_name = "pooling_node"
input_node_name = "input_node"
output_node_name = "output_node"
# define location of output, model and data and check existence
output_path = os.path.join(abs_path, "Output")
model_file = os.path.join(model_path, model_file_name)
data_file = os.path.join(data_path, "Test-28x28_cntk_text.txt")
if not (os.path.exists(model_file) and os.path.exists(data_file)):
    print("Cannot find required data or model. "
          "Please get the MNIST data set and run 'cntk configFile=07_Deconvolution_BS.cntk' "
          "or 'python 07_Deconvolution_PY.py' to create the model.")
    exit(0)
# create minibatch source
minibatch_source = MinibatchSource(CTFDeserializer(data_file, StreamDefs(
    features=StreamDef(field='features', shape=(28*28)),
    labels=StreamDef(field='labels', shape=10)
)), randomize=False, max_sweeps=1)
# use this to print all node names in the model
# print_all_node_names(model_file, use_brain_script_model)
# load model and pick desired nodes as output
loaded_model = load_model(model_file)
output_nodes = combine(
    [loaded_model.find_by_name(input_node_name).owner,
     loaded_model.find_by_name(enc_node_name).owner,
     loaded_model.find_by_name(output_node_name).owner])
# evaluate model and save output
features_si = minibatch_source['features']
with open(os.path.join(output_path, decoder_output_file_name), 'wb') as decoder_text_file:
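    # The body of this 'with' block is not part of the snippet above; what follows is a
    # minimal sketch of an evaluation loop, assuming numpy is available and that
    # 'num_objects_to_eval' holds the number of samples to dump (both are assumptions).
    import numpy as np
    for _ in range(num_objects_to_eval):
        mb = minibatch_source.next_minibatch(1)
        outputs = output_nodes.eval(mb[features_si])  # dict: output variable -> value
        for out_var, value in outputs.items():
            # keep only the decoder output; matching on the owning node's name is an assumption
            if out_var.owner.name == output_node_name:
                np.savetxt(decoder_text_file, np.asarray(value).reshape(1, -1))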
# check that the map, roi and label files exist
if not (os.path.exists(map_file) and os.path.exists(roi_file) and os.path.exists(label_file)):
    raise RuntimeError("File '%s', '%s' or '%s' does not exist." %
                       (map_file, roi_file, label_file))
# read images
transforms = [scale(width=img_width, height=img_height, channels=img_channels,
                    scale_mode="pad", pad_value=114, interpolations='linear')]
image_source = ImageDeserializer(map_file, StreamDefs(
    features=StreamDef(field='image', transforms=transforms)))
# read rois and labels
roi_source = CTFDeserializer(roi_file, StreamDefs(
    rois=StreamDef(field=roi_stream_name, shape=rois_dim, is_sparse=False)))
label_source = CTFDeserializer(label_file, StreamDefs(
    roiLabels=StreamDef(field=label_stream_name, shape=label_dim, is_sparse=False)))
gt_source = CTFDeserializer(gt_file, StreamDefs(
    gts=StreamDef(field=gt_stream_name, shape=gt_dim)))
# define a composite reader
return MinibatchSource([image_source, roi_source, label_source, gt_source],
                       max_samples=sys.maxsize,
                       randomize=(data_set == "train"),
                       trace_level=TraceLevel.Error)
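# Hypothetical usage of the composite source returned above; 'mb_source' and 'mb_size' are
# assumed names, while the stream names come from the StreamDefs keys in this snippet.
mb = mb_source.next_minibatch(mb_size)
images     = mb[mb_source.streams.features]
rois       = mb[mb_source.streams.rois]
roi_labels = mb[mb_source.streams.roiLabels]
gt_boxes   = mb[mb_source.streams.gts]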
def create_reader(map_file, is_training, randomize=False):
    if not os.path.exists(map_file):
        raise RuntimeError("File '%s' does not exist. Please run install_cifar10.py from DataSets/CIFAR-10 to fetch them" %
                           (map_file))
    transforms = []
    if is_training:
        transforms += [
            xforms.crop(crop_type='center', crop_size=32)
        ]
    transforms += [
        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear')
    ]
    # deserializer
    return ct.io.MinibatchSource(ct.io.ImageDeserializer(map_file, ct.io.StreamDefs(
        features=ct.io.StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
        labels=ct.io.StreamDef(field='label', shape=num_classes))),      # and second as 'label'
        randomize=randomize)
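# The map file read by ImageDeserializer above is a plain-text file with one image per line:
# an image path and a numeric class label separated by a tab. A made-up example:
#
#   /data/CIFAR-10/train/00001.png<TAB>5
#   /data/CIFAR-10/train/00002.png<TAB>0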
def create_reader(path, is_training, input_dim, label_dim):
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='x', shape=input_dim, is_sparse=True),
        labels=StreamDef(field='y', shape=label_dim, is_sparse=False)
    )), randomize=is_training,
        max_sweeps=INFINITELY_REPEAT if is_training else 1)
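# A CTF text line matching the StreamDefs above carries a sparse '|x' field written as
# index:value pairs and a dense '|y' field; the concrete indices and labels below are made up:
#
#   |x 12:1 778:1 1443:1 |y 0 1 0 0 0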
def create_reader(path, is_training, input_dim, label_dim):
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features=StreamDef(field='features', shape=input_dim, is_sparse=False),
        labels=StreamDef(field='labels', shape=label_dim, is_sparse=False)
    )), randomize=is_training,
        epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
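# Note: 'epoch_size' together with FULL_DATA_SWEEP reflects an older cntk.io API; on more
# recent CNTK releases the same intent is usually written with 'max_sweeps', as in the
# previous reader:
#
#   )), randomize=is_training,
#       max_sweeps=INFINITELY_REPEAT if is_training else 1)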
        xforms.color(brightness_radius=0.4, contrast_radius=0.4, saturation_radius=0.4)
    ]
else:
    transforms += [
        C.io.transforms.crop(crop_type='center', side_ratio=0.875)  # test has no jitter
    ]
transforms += [
    xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='cubic'),
    xforms.mean(mean_file)
]
# deserializer
return C.io.MinibatchSource(
    C.io.ImageDeserializer(map_file, C.io.StreamDefs(
        features=C.io.StreamDef(field='image', transforms=transforms),  # 1st col in mapfile referred to as 'image'
        labels=C.io.StreamDef(field='label', shape=num_classes))),      # and second as 'label'
    randomize=train,
    max_samples=total_number_of_samples,
    multithreaded_deserializer=True)
        xforms.crop(crop_type='randomside', side_ratio=0.88671875, jitter_type='uniratio')  # train uses jitter
    ]
else:
    transforms += [
        xforms.crop(crop_type='center', side_ratio=0.88671875)  # test has no jitter
    ]
transforms += [
    xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
]
# deserializer
return MinibatchSource(
    ImageDeserializer(map_file, StreamDefs(
        features=StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
        labels=StreamDef(field='label', shape=num_classes))),      # and second as 'label'
    randomize=is_training,
    max_samples=total_number_of_samples,
    multithreaded_deserializer=True)
# transformation pipeline for the features has jitter/crop only when training
transforms = []
if train:
    transforms += [
        xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio')  # train uses jitter
    ]
transforms += [
    xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),
    xforms.mean(mean_file)
]
# deserializer
return cntk.io.MinibatchSource(
    cntk.io.ImageDeserializer(map_file, cntk.io.StreamDefs(
        features=cntk.io.StreamDef(field='image', transforms=transforms),  # first column in map file is referred to as 'image'
        labels=cntk.io.StreamDef(field='label', shape=num_classes))),      # and second as 'label'
    randomize=train,
    max_samples=total_number_of_samples,
    multithreaded_deserializer=True)
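# Hypothetical sketch of wiring a reader like the one above into a training loop; 'reader',
# 'input_var', 'label_var', 'trainer', 'minibatch_size' and 'num_minibatches_to_train' are
# assumed names that do not appear in the snippets themselves.
input_map = {
    input_var: reader.streams.features,
    label_var: reader.streams.labels
}
for _ in range(num_minibatches_to_train):
    data = reader.next_minibatch(minibatch_size, input_map=input_map)
    trainer.train_minibatch(data)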
if train:
    transforms += [
        xforms.crop(crop_type='randomside', side_ratio=0.8)
    ]
transforms += [
    xforms.scale(
        width=image_width,
        height=image_height,
        channels=num_channels,
        interpolations='linear'
    ),
    xforms.mean(mean_file)
]
return C.io.MinibatchSource(C.io.ImageDeserializer(map_file, C.io.StreamDefs(
    features=C.io.StreamDef(field='image', transforms=transforms),
    labels=C.io.StreamDef(field='label', shape=num_classes)
)))
def create_mb_and_map(func, data_file, polymath, randomize=True, repeat=True):
    mb_source = C.io.MinibatchSource(
        C.io.CTFDeserializer(
            data_file,
            C.io.StreamDefs(
                context_g_words  = C.io.StreamDef('cgw', shape=polymath.wg_dim, is_sparse=True),
                query_g_words    = C.io.StreamDef('qgw', shape=polymath.wg_dim, is_sparse=True),
                context_ng_words = C.io.StreamDef('cnw', shape=polymath.wn_dim, is_sparse=True),
                query_ng_words   = C.io.StreamDef('qnw', shape=polymath.wn_dim, is_sparse=True),
                answer_begin     = C.io.StreamDef('ab', shape=polymath.a_dim, is_sparse=False),
                answer_end       = C.io.StreamDef('ae', shape=polymath.a_dim, is_sparse=False),
                context_chars    = C.io.StreamDef('cc', shape=polymath.word_size, is_sparse=False),
                query_chars      = C.io.StreamDef('qc', shape=polymath.word_size, is_sparse=False))),
        randomize=randomize,
        max_sweeps=C.io.INFINITELY_REPEAT if repeat else 1)

    input_map = {
        argument_by_name(func, 'cgw'): mb_source.streams.context_g_words,
        argument_by_name(func, 'qgw'): mb_source.streams.query_g_words,
        argument_by_name(func, 'cnw'): mb_source.streams.context_ng_words,
        argument_by_name(func, 'qnw'): mb_source.streams.query_ng_words,
        argument_by_name(func, 'cc'):  mb_source.streams.context_chars,
        argument_by_name(func, 'qc'):  mb_source.streams.query_chars,
        argument_by_name(func, 'ab'):  mb_source.streams.answer_begin,
        argument_by_name(func, 'ae'):  mb_source.streams.answer_end
    }
    return mb_source, input_map
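# 'argument_by_name' is used above but not defined in this snippet. A minimal sketch of what
# such a helper typically looks like (an assumption, not the original code):
def argument_by_name(func, name):
    found = [arg for arg in func.arguments if arg.name == name]
    if len(found) != 1:
        raise ValueError("expected exactly one argument named '%s' in the function" % name)
    return found[0]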