@under_name_scope()
def channel_shuffle(l, group):
    in_shape = l.get_shape().as_list()
    in_channel = in_shape[1]
    assert in_channel % group == 0, in_channel
    l = tf.reshape(l, [-1, in_channel // group, group] + in_shape[-2:])
    l = tf.transpose(l, [0, 2, 1, 3, 4])
    l = tf.reshape(l, [-1, in_channel] + in_shape[-2:])
    return l
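The shuffle above is nothing more than a reshape/transpose/reshape over the channel axis of an NCHW tensor. A NumPy sketch (illustrative only, not from the source) that mirrors the same three ops and shows the resulting channel order:

import numpy as np

x = np.arange(6).reshape(1, 6, 1, 1)       # N=1, C=6, H=W=1; channel values 0..5
group = 3
n, c, h, w = x.shape
y = x.reshape(n, c // group, group, h, w)  # split channels into c//group blocks of size `group`
y = y.transpose(0, 2, 1, 3, 4)             # swap the block axis and the within-block axis
y = y.reshape(n, c, h, w)
print(y.ravel())                           # [0 3 1 4 2 5] -- channels interleaved across groups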
###############################################################################
@under_name_scope()
def context_module(self, x, channels, name):
    # see Figure 4 (SSH Context Module)
    with tf.variable_scope(name):
        with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
            c1 = tf.layers.conv2d(x, channels // 2, name='conv1')
            # upper path
            c2 = tf.layers.conv2d(c1, channels // 2, name='conv2')
            # lower path
            c3 = tf.layers.conv2d(c1, channels // 2, name='conv3a')
            c3 = tf.layers.conv2d(c3, channels // 2, name='conv3b')
            return tf.concat([c2, c3], axis=-1)
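For reference, a minimal standalone sketch of the same SSH context module without tensorpack's argscope, written against TF1-compat layers; the function name and the channels-last layout are assumptions, not part of the original code:

import tensorflow as tf
tfv1 = tf.compat.v1

def ssh_context_module(x, channels, name):
    # every conv: 3x3, channels // 2 filters, ReLU, 'same' padding (what argscope supplied above)
    def conv(t, scope):
        return tfv1.layers.conv2d(t, channels // 2, kernel_size=3,
                                  padding='same', activation=tf.nn.relu, name=scope)
    with tfv1.variable_scope(name):
        c1 = conv(x, 'conv1')
        c2 = conv(c1, 'conv2')                    # upper path: one extra 3x3 conv
        c3 = conv(conv(c1, 'conv3a'), 'conv3b')   # lower path: two extra 3x3 convs
        return tf.concat([c2, c3], axis=-1)       # channels//2 + channels//2 = channels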
###############################################################################
def eval_model_multithread(pred, nr_eval, get_player_fn):
    """
    Args:
        pred (OfflinePredictor): state -> Qvalue
    """
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    with pred.sess.as_default():
        mean_score, max_score, mean_dist, max_dist = eval_with_funcs(
            [pred] * NR_PROC, nr_eval, get_player_fn)
    logger.info("Average Score: {}; Max Score: {}; Average Distance: {}; Max Distance: {}".format(
        mean_score, max_score, mean_dist, max_dist))
###############################################################################
class Evaluator(Callback):
    def __init__(self, nr_eval, input_names, output_names,
                 get_player_fn, directory, files_list=None):
        self.directory = directory
        self.files_list = files_list
        self.eval_episode = nr_eval
        self.input_names = input_names
        self.output_names = output_names
        self.get_player_fn = get_player_fn

    def _setup_graph(self):
        NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
        self.pred_funcs = [self.trainer.get_predictor(
            self.input_names, self.output_names)] * NR_PROC

    def _trigger(self):
###############################################################################
        ds = MapDataComponent(ds, pose_flip)
        ds = MapDataComponent(ds, pose_resize_shortestedge_random)
        ds = MapDataComponent(ds, pose_crop_random)
        ds = MapData(ds, pose_to_img)
        # augs = [
        #     imgaug.RandomApplyAug(imgaug.RandomChooseAug([
        #         imgaug.GaussianBlur(max_size=3)
        #     ]), 0.7)
        # ]
        # ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 1000, multiprocessing.cpu_count() - 1)
    else:
        ds = MultiThreadMapData(ds, nr_thread=16, map_func=read_image_url, buffer_size=1000)
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count() // 4)
    return ds
###############################################################################
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    # parallel = 1
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, meta_dir=meta_dir, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds
###############################################################################
                dtype='float32')[::-1, ::-1]
            )]),
            imgaug.Clip(),
            imgaug.Flip(horiz=True),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256),
            imgaug.CenterCrop((input_size, input_size)),
            imgaug.ToUint8()
        ]
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    if do_multiprocess:
        ds = PrefetchDataZMQ(ds, min(24, multiprocessing.cpu_count()))
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not isTrain)
    return ds
###############################################################################
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds
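A hypothetical call of the surrounding function, assuming it follows the get_imagenet_dataflow(datadir, name, batch_size, augmentors=None, parallel=None) signature used in the tensorpack ImageNet examples; the path and batch size below are placeholders:

df = get_imagenet_dataflow('/path/to/ILSVRC12', 'train', batch_size=64)
df.reset_state()                      # required before iterating any tensorpack DataFlow
for images, labels in df:
    # with fbresnet_augmentor(True): images is a (64, 224, 224, 3) uint8 BGR batch, labels is (64,)
    break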
###############################################################################
def get_augmented_speech_commands_data(subset, options,
                                       do_multiprocess=True, shuffle=True):
    isTrain = subset == 'train' and do_multiprocess
    shuffle = shuffle if shuffle is not None else isTrain
    ds = SpeechCommandsDataFlow(os.path.join(options.data_dir, 'speech_commands_v0.02'),
                                subset, shuffle, None)
    if isTrain:
        add_noise_func = functools.partial(_add_noise, noises=ds.noises)
    ds = MapDataComponent(ds, _pad_or_clip_to_desired_sample, index=0)
    ds = MapDataComponent(ds, _to_float, index=0)
    if isTrain:
        ds = MapDataComponent(ds, _time_shift, index=0)
        ds = MapData(ds, add_noise_func)
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not isTrain)
    if do_multiprocess:
        ds = PrefetchData(ds, 4, 4)
    return ds
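A hypothetical usage sketch: `options` only needs the attributes read above (data_dir, batch_size, nr_gpu), so a plain namespace is enough; the path and numbers are placeholders:

import argparse

options = argparse.Namespace(data_dir='/path/to/data', batch_size=256, nr_gpu=2)
train_ds = get_augmented_speech_commands_data('train', options)
train_ds.reset_state()
for dp in train_ds:                   # dp is one batched datapoint (e.g. waveforms and labels)
    break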
###############################################################################
    Returns: A DataFlow which produces BGR images and labels.
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
###############################################################################
        # we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
        sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
        self.config.session_init._run_init(sess)

        builder = tfv1.saved_model.builder.SavedModelBuilder(filename)
        prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs_signatures,
            outputs=outputs_signatures,
            method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME)
        builder.add_meta_graph_and_variables(
            sess, tags,
            signature_def_map={signature_name: prediction_signature})
        builder.save()
        logger.info("SavedModel created at {}.".format(filename))