def _get_batch(self):
    """
    Load data/label from dataset
    """
    batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))
    batch_label = []
    for i in range(self.batch_size):
        if (self._current + i) >= self._size:
            if not self.is_train:
                continue
            # use padding from middle in each epoch
            idx = (self._current + i + self._size // 2) % self._size
            index = self._index[idx]
        else:
            index = self._index[self._current + i]
        im_path = self._imdb.image_path_from_index(index)
        with open(im_path, 'rb') as fp:
            img_content = fp.read()
        img = mx.img.imdecode(img_content)
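The snippet stops right after decoding. A minimal sketch of the continuation, assuming the image is resized to the configured data shape and copied into the preallocated buffer (the resize step and layout conversion are assumptions, not the original code):

        # hypothetical continuation: resize, HWC -> CHW, write into the batch buffer
        img = mx.img.imresize(img, self._data_shape[1], self._data_shape[0])
        batch_data[i] = mx.nd.transpose(img, axes=(2, 0, 1)).astype('float32')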
def test_sync_push_pull():
    nrepeat = 2
    for i in range(nrepeat):
        kv.push(3, mx.nd.ones(shape) * (my_rank + 1))
        kv.push(99, mx.nd.ones(big_shape) * (my_rank + 1))
    kv._wait([3, 99])

    # expected value: the initial ones plus the rate-scaled sum of all workers' pushes
    num = (nworker + 1) * nworker * rate / 2 * nrepeat + 1
    val = mx.nd.zeros(shape)
    kv.pull(3, out=val)
    check_diff_to_scalar(val, num)

    val2 = mx.nd.zeros(big_shape)
    kv.pull(99, out=val2)
    check_diff_to_scalar(val2, num)
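The test above relies on harness globals (kv, my_rank, nworker, rate, shape, big_shape) created elsewhere in the script. For a self-contained view of the same push/pull API, a minimal single-process sketch using only standard MXNet calls:

import mxnet as mx

kv_local = mx.kv.create('local')          # in-process key-value store
kv_local.init(3, mx.nd.ones((2, 3)))      # key 3 starts as ones
kv_local.push(3, mx.nd.ones((2, 3)) * 8)
out = mx.nd.zeros((2, 3))
kv_local.pull(3, out=out)                 # default updater assigns the pushed value
print(out.asnumpy())                      # all 8s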
def test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:
    """
    Test to check that maximizing the likelihood recovers the parameters
    """
    # test instance
    mu, b = mu_b

    # generate samples
    mus = mx.nd.zeros((NUM_SAMPLES,)) + mu
    bs = mx.nd.zeros((NUM_SAMPLES,)) + b

    laplace_distr = Laplace(mu=mus, b=bs)
    samples = laplace_distr.sample()

    init_biases = [
        mu - START_TOL_MULTIPLE * TOL * mu,
        inv_softplus(b + START_TOL_MULTIPLE * TOL * b),
    ]

    mu_hat, b_hat = maximum_likelihood_estimate_sgd(
        LaplaceOutput(), samples, hybridize=hybridize, init_biases=init_biases
    )

    assert (
        np.abs(mu_hat - mu) < TOL * mu
    ), f"mu did not match: mu = {mu}, mu_hat = {mu_hat}"
def test_module_initializer():
    def regression_model(m):
        x = mx.symbol.var("data", stype='csr')
        v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1),
                          stype='row_sparse')
        model = mx.symbol.dot(lhs=x, rhs=v)
        y = mx.symbol.Variable("label")
        model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out")
        return model

    n, m = 128, 100
    model = regression_model(m)

    data = mx.nd.zeros(shape=(n, m), stype='csr')
    label = mx.nd.zeros((n, 1))
    iterator = mx.io.NDArrayIter(data=data, label={'label': label},
                                 batch_size=n, last_batch_handle='discard')

    # create module
    mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
    mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
    mod.init_params()
    v = mod._arg_params['v']
    assert v.stype == 'row_sparse'
    assert np.sum(v.asnumpy()) != 0
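mx.nd.zeros takes an stype argument for sparse storage, which is what lets the test above allocate CSR data directly; a quick standalone illustration of the two sparse storage types:

import mxnet as mx

csr = mx.nd.zeros((4, 5), stype='csr')         # compressed sparse row
rsp = mx.nd.zeros((4, 5), stype='row_sparse')  # row-sparse, as used for 'v' above
print(csr.stype, rsp.stype)                    # csr row_sparse
dense = rsp.tostype('default')                 # convert back to a dense NDArray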
def _init2(shape, dtype, ctx, ids):
    # default initializer: fill missing node features with 2s
    return 2 + mx.nd.zeros(shape, dtype=dtype, ctx=ctx)

g.set_n_initializer(_init2, 'h')
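set_n_initializer registers a default filler for node field 'h', so nodes added without that feature get 2s from _init2. For the common all-zeros case, DGL ships a built-in initializer (API as in older DGL releases; treat the exact call as an assumption):

g.set_n_initializer(dgl.init.zero_initializer, 'h')  # zeros instead of 2s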
    weight_count = 0
    for param in net.collect_params().values():  # per-parameter loop (reconstructed: the snippet begins mid-loop)
        if param.shape is None:
            continue
        weight_count += np.prod(param.shape)
    print("m={}, {}".format(model.__name__, weight_count))

    assert (model != shufflenet_g1_w1 or weight_count == 1531936)
    assert (model != shufflenet_g2_w1 or weight_count == 1733848)
    assert (model != shufflenet_g3_w1 or weight_count == 1865728)
    assert (model != shufflenet_g4_w1 or weight_count == 1968344)
    assert (model != shufflenet_g8_w1 or weight_count == 2434768)
    assert (model != shufflenet_g1_w3d4 or weight_count == 975214)
    assert (model != shufflenet_g3_w3d4 or weight_count == 1238266)
    assert (model != shufflenet_g1_wd2 or weight_count == 534484)
    assert (model != shufflenet_g3_wd2 or weight_count == 718324)
    assert (model != shufflenet_g1_wd4 or weight_count == 209746)
    assert (model != shufflenet_g3_wd4 or weight_count == 305902)

    x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
    y = net(x)
    assert (y.shape == (1, 1000))
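The same count-then-forward smoke test works for any Gluon network; a self-contained version against a model-zoo classifier (the specific network is illustrative, not from the original test):

import numpy as np
import mxnet as mx

net = mx.gluon.model_zoo.vision.mobilenet_v2_0_25(pretrained=False)
net.initialize()
y = net(mx.nd.zeros((1, 3, 224, 224)))  # forward pass fixes deferred shapes
assert y.shape == (1, 1000)
weight_count = sum(int(np.prod(p.shape)) for p in net.collect_params().values())
print("mobilenet_v2_0_25:", weight_count)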
def _loss_mask_LP(self, label_batch, gpu_index):
    """Generate training targets given predictions and label_batch.
    label_batch: bs*object*[class, cent_y, cent_x, box_h, box_w, rotate]
    """
    bs = label_batch.shape[0]
    ctx = self.ctx[gpu_index]
    # integer division so the feature-map sizes below are valid ints
    h_ = self.size[0] // 2**self.num_downsample
    w_ = self.size[1] // 2**self.num_downsample

    score = nd.zeros((bs, h_, w_, 1), ctx=ctx)
    mask = nd.zeros((bs, h_, w_, 1), ctx=ctx)
    pose_xy = nd.zeros((bs, h_, w_, 2), ctx=ctx)
    pose_z = nd.zeros((bs, h_, w_, 1), ctx=ctx)
    pose_r = nd.zeros((bs, h_, w_, 3), ctx=ctx)
    LP_class = nd.zeros((bs, h_, w_, self.LP_num_class), ctx=ctx)

    for b in range(bs):
        for L in label_batch[b]:  # all objects in the image
            if L[0] < 0:
                continue
            (h_f, w_f), p_6D = self._find_best_LP(L, gpu_index)
            score[b, h_f, w_f, :] = 1.0  # all other cells stay zero
            mask[b, h_f, w_f, :] = 1.0
            pose_xy[b, h_f, w_f, :] = p_6D[:2]
            pose_z[b, h_f, w_f, :] = p_6D[2]
            pose_r[b, h_f, w_f, :] = p_6D[3:]
            LP_class[b, h_f, w_f, L[-1]] = 1
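The method builds dense, zero-filled target maps and writes the sparse ground truth into single cells; the core pattern, stripped of the model specifics (all shapes illustrative):

from mxnet import nd

bs, h, w, num_class = 2, 8, 8, 10
score = nd.zeros((bs, h, w, 1))
cls_map = nd.zeros((bs, h, w, num_class))
score[0, 3, 5, :] = 1.0   # one positive cell for image 0 at (3, 5)
cls_map[0, 3, 5, 7] = 1   # its one-hot class (class 7)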
aux_params = None
arg_shape_dict = dict(zip(arg_name, arg_shape))
aux_shape_dict = dict(zip(aux_name, aux_shape))
for k, v in arg_shape_dict.items():  # iteritems() -> items() for Python 3
    if k.startswith('conv') and k.endswith('_weight'):
        if '_1_' not in k:
            if num_layers < 100:
                arg_params[k] = mx.random.normal(0, 0.01, shape=v)
                print('init', k)
    if k.endswith('_bias'):
        arg_params[k] = mx.nd.zeros(shape=v)
        print('init', k)
return arg_params, aux_params
def eval(data_source, ctx):
    total_L = 0.0
    ntotal = 0
    hidden_states = [
        model.begin_state(func=mx.nd.zeros, batch_size=int(args.batch_size/len(ctx)), ctx=ctx[i])
        for i in range(len(ctx))
    ]
    for i in range(0, data_source.shape[0] - 1, args.bptt):
        data_batch, target_batch = get_batch(data_source, i)
        data = gluon.utils.split_and_load(data_batch, ctx_list=ctx, batch_axis=1)
        target = gluon.utils.split_and_load(target_batch, ctx_list=ctx, batch_axis=1)
        for (d, t) in zip(data, target):
            hidden = hidden_states[d.context.device_id]
            output, hidden = model(d, hidden)
            L = loss(output, t.reshape((-1,)))
            total_L += mx.nd.sum(L).asscalar()
            ntotal += L.size
    return total_L / ntotal
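begin_state(func=mx.nd.zeros, ...) asks the model for zero-filled initial states sized for the given batch; a minimal standalone example with a Gluon LSTM:

import mxnet as mx
from mxnet import gluon

lstm = gluon.rnn.LSTM(hidden_size=20)
lstm.initialize()
state = lstm.begin_state(func=mx.nd.zeros, batch_size=4)  # [h, c], both zeros
x = mx.nd.random.uniform(shape=(10, 4, 8))                # (seq_len, batch, features)
out, state = lstm(x, state)
print(out.shape)                                          # (10, 4, 20)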
def init_weight_rcnn(self, cfg, arg_params, aux_params):
    # zero-init the deformable-conv offsets so each op starts as a plain convolution
    arg_params['conv5_x__1_c3x3-b__offset_weight'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__1_c3x3-b__offset_weight'])
    arg_params['conv5_x__1_c3x3-b__offset_bias'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__1_c3x3-b__offset_bias'])
    arg_params['conv5_x__2_c3x3-b__offset_weight'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__2_c3x3-b__offset_weight'])
    arg_params['conv5_x__2_c3x3-b__offset_bias'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__2_c3x3-b__offset_bias'])
    arg_params['conv5_x__3_c3x3-b__offset_weight'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__3_c3x3-b__offset_weight'])
    arg_params['conv5_x__3_c3x3-b__offset_bias'] = mx.nd.zeros(
        shape=self.arg_shape_dict['conv5_x__3_c3x3-b__offset_bias'])
    # new layers: small Gaussian weights, zero biases
    arg_params['conv_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['conv_new_1_weight'])
    arg_params['conv_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['conv_new_1_bias'])
    arg_params['offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['offset_weight'])
    arg_params['offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['offset_bias'])
    arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
    arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
    arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
    arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
    arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
    arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
    arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
    arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
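The block above repeats one convention: zeros for offsets and biases, small Gaussians for newly added weights. A compact helper capturing that convention (hypothetical, not part of the original code):

def _init_new_params(arg_params, shape_dict, names, sigma=0.01):
    # hypothetical helper: Gaussian for plain weights, zeros for biases/offsets
    for name in names:
        if name.endswith('_weight') and 'offset' not in name:
            arg_params[name] = mx.random.normal(0, sigma, shape=shape_dict[name])
        else:
            arg_params[name] = mx.nd.zeros(shape=shape_dict[name])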