import cntk

def create_reader(path, is_training):
    # vocab_size, num_intents and num_labels are module-level constants
    return cntk.io.MinibatchSource(cntk.io.CTFDeserializer(path, cntk.io.StreamDefs(
        query         = cntk.io.StreamDef(field='S0', shape=vocab_size,  is_sparse=True),
        intent_labels = cntk.io.StreamDef(field='S1', shape=num_intents, is_sparse=True),  # used by the intent-classification variant
        slot_labels   = cntk.io.StreamDef(field='S2', shape=num_labels,  is_sparse=True)
    )), randomize=is_training, max_sweeps=cntk.io.INFINITELY_REPEAT if is_training else 1)
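A minimal consumption sketch for a reader like the one above, assuming `query` and `slot_labels` are the `cntk.sequence.input_variable`s the model was built on and `atis.train.ctf` is the training file (both names are illustrative):

reader = create_reader('atis.train.ctf', is_training=True)
input_map = {
    query:       reader.streams.query,        # bind model inputs to reader streams
    slot_labels: reader.streams.slot_labels
}
data = reader.next_minibatch(70, input_map=input_map)  # pull ~70 samples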
def create_reader(path, is_training, input_dim, label_dim):
    return cntk.io.MinibatchSource(cntk.io.CTFDeserializer(path, cntk.io.StreamDefs(
        features = cntk.io.StreamDef(field='features', shape=input_dim),
        labels   = cntk.io.StreamDef(field='labels',   shape=label_dim)
    )), randomize=is_training, max_sweeps=cntk.io.INFINITELY_REPEAT if is_training else 1)
from cntk.io import (MinibatchSource, CTFDeserializer, StreamDefs, StreamDef,
                     INFINITELY_REPEAT, FULL_DATA_SWEEP)

def create_reader(path, is_training, input_dim, label_dim):
    # epoch_size/FULL_DATA_SWEEP is the older spelling; newer CNTK releases express
    # the same thing with max_sweeps/max_samples, as in the variants below
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features = StreamDef(field='features', shape=input_dim, is_sparse=False),
        labels   = StreamDef(field='labels',   shape=label_dim, is_sparse=False)
    )), randomize=is_training, epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP)
import cntk as C

def create_reader(path, is_training, input_dim, label_dim):
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        features = C.io.StreamDef(field='features', shape=input_dim),
        labels   = C.io.StreamDef(field='labels',   shape=label_dim)
    )), randomize=is_training, max_sweeps=C.io.INFINITELY_REPEAT if is_training else 1)
import os

def create_reader(path, randomize, size=INFINITELY_REPEAT):
    if not os.path.exists(path):
        raise RuntimeError("File '%s' does not exist." % path)
    # input_vocab_dim and label_vocab_dim are module-level constants
    return MinibatchSource(CTFDeserializer(path, StreamDefs(
        features = StreamDef(field='S0', shape=input_vocab_dim, is_sparse=True),
        labels   = StreamDef(field='S1', shape=label_vocab_dim, is_sparse=True)
    )), randomize=randomize, max_samples=size)
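For reference, the CTF text files these deserializers consume pair a sequence id with one `|field` entry per stream; sparse streams use `index:value` one-hot entries. An illustrative (made-up) fragment:

0 |S0 3:1   |S1 3:1
0 |S0 334:1 |S1 17:1
1 |S0 12:1  |S1 3:1

Lines sharing a sequence id form one sequence, which is how the readers above recover variable-length inputs.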
import sys
from cntk.io import ImageDeserializer
from cntk.io.transforms import scale
from cntk.logging import TraceLevel

# Fragment of a test-set reader factory; the enclosing signature is not shown in the
# original, so the head below is hypothetical. `p` is a config object holding paths.
def create_test_mb_source(p, map_file, img_width, img_height, img_channels, gt_dim, rois_dim):
    gt_file   = os.path.join(p.imgDir, "mappings", "test2007_rois_abs-xyxy_noPad_skipDif.txt")
    size_file = os.path.join(p.imgDir, "mappings", "test_size_file2007.txt")
    rois_file = os.path.join(p.cntkFilesDir, "test.rois.ds.txt")
    # read images, letterboxed to a fixed size with gray (114) padding
    transforms = [scale(width=img_width, height=img_height, channels=img_channels,
                        scale_mode="pad", pad_value=114, interpolations='linear')]
    image_source = ImageDeserializer(map_file, StreamDefs(
        features = StreamDef(field='image', transforms=transforms)))
    # read ground-truth boxes and labels, original image sizes, and proposal ROIs
    gt_source = CTFDeserializer(gt_file, StreamDefs(
        gts = StreamDef(field='roiAndLabel', shape=gt_dim, is_sparse=False)))
    size_source = CTFDeserializer(size_file, StreamDefs(
        size = StreamDef(field='size', shape=2, is_sparse=False)))
    rois_source = CTFDeserializer(rois_file, StreamDefs(
        rois = StreamDef(field='rois', shape=rois_dim, is_sparse=False)))
    # define a composite reader over all four deserializers; no randomization at test time
    return MinibatchSource([image_source, gt_source, size_source, rois_source],
                           max_samples=sys.maxsize, randomize=False,
                           trace_level=TraceLevel.Error)
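A hedged consumption sketch for the composite reader above (the factory name and argument values are illustrative):

reader = create_test_mb_source(p, map_file, 850, 850, 3, gt_dim, rois_dim)
data  = reader.next_minibatch(1)          # detection readers typically feed one image at a time
img   = data[reader.streams.features]     # without an input_map, results are keyed by stream info
boxes = data[reader.streams.gts]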
def create_reader(path, is_training, input_dim, label_dim, total_number_of_samples):
    """Define the reader for both the training and evaluation actions."""
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        features = C.io.StreamDef(field='features', shape=input_dim),
        labels   = C.io.StreamDef(field='labels',   shape=label_dim)
    )), randomize=is_training, max_samples=total_number_of_samples)
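A sketch of driving such a capped reader to completion, assuming input variables `x`/`y` and a configured `trainer` (all names and the file path are illustrative); once `max_samples` is exhausted, `next_minibatch` returns an empty dict:

reader = create_reader('Train_cntk_text.txt', True, input_dim=784, label_dim=10,
                       total_number_of_samples=60000 * 10)  # e.g. 10 sweeps of the data
input_map = {x: reader.streams.features, y: reader.streams.labels}
while True:
    data = reader.next_minibatch(64, input_map=input_map)
    if not data:
        break                              # reader hit its max_samples cap
    trainer.train_minibatch(data)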
def create_mb_and_map(func, data_file, polymath, randomize=True, repeat=True):
    mb_source = C.io.MinibatchSource(
        C.io.CTFDeserializer(
            data_file,
            C.io.StreamDefs(
                context_g_words  = C.io.StreamDef('cgw', shape=polymath.wg_dim,    is_sparse=True),
                query_g_words    = C.io.StreamDef('qgw', shape=polymath.wg_dim,    is_sparse=True),
                context_ng_words = C.io.StreamDef('cnw', shape=polymath.wn_dim,    is_sparse=True),
                query_ng_words   = C.io.StreamDef('qnw', shape=polymath.wn_dim,    is_sparse=True),
                answer_begin     = C.io.StreamDef('ab',  shape=polymath.a_dim,     is_sparse=False),
                answer_end       = C.io.StreamDef('ae',  shape=polymath.a_dim,     is_sparse=False),
                context_chars    = C.io.StreamDef('cc',  shape=polymath.word_size, is_sparse=False),
                query_chars      = C.io.StreamDef('qc',  shape=polymath.word_size, is_sparse=False))),
        randomize=randomize,
        epoch_size=C.io.INFINITELY_REPEAT if repeat else C.io.FULL_DATA_SWEEP)
    input_map = {
        argument_by_name(func, 'cgw'): mb_source.streams.context_g_words,
        argument_by_name(func, 'qgw'): mb_source.streams.query_g_words,
        argument_by_name(func, 'cnw'): mb_source.streams.context_ng_words,
        argument_by_name(func, 'qnw'): mb_source.streams.query_ng_words,
        argument_by_name(func, 'cc'):  mb_source.streams.context_chars,
        argument_by_name(func, 'qc'):  mb_source.streams.query_chars,
        argument_by_name(func, 'ab'):  mb_source.streams.answer_begin,
        argument_by_name(func, 'ae'):  mb_source.streams.answer_end
    }
    return mb_source, input_map  # return shape assumed from the helper's name
"Please run install_fastrcnn.py from Examples/Image/Detection/FastRCNN to fetch them" %
(map_file, roi_file, label_file))
    # read images, letterboxed to a fixed size with gray (114) padding
    transforms = [scale(width=img_width, height=img_height, channels=img_channels,
                        scale_mode="pad", pad_value=114, interpolations='linear')]
    image_source = ImageDeserializer(map_file, StreamDefs(
        features = StreamDef(field='image', transforms=transforms)))
    # read rois and labels
    roi_source = CTFDeserializer(roi_file, StreamDefs(
        rois = StreamDef(field=roi_stream_name, shape=rois_dim, is_sparse=False)))
    label_source = CTFDeserializer(label_file, StreamDefs(
        roiLabels = StreamDef(field=label_stream_name, shape=label_dim, is_sparse=False)))
    gt_source = CTFDeserializer(gt_file, StreamDefs(
        gts = StreamDef(field=gt_stream_name, shape=gt_dim)))
    # define a composite reader; randomize only when reading the training set
    return MinibatchSource([image_source, roi_source, label_source, gt_source],
                           max_samples=sys.maxsize, randomize=(data_set == "train"),
                           trace_level=TraceLevel.Error)