chainer.Variable: Subsampled vector of xs.
chainer.Variable: Subsampled vector of ilens.
"""
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
# x: utt x frame x dim
xs = F.pad_sequence(xs)
# x: utt x 1 (input channel num) x frame x dim
xs = F.swapaxes(
xs.reshape(xs.shape[0], xs.shape[1], self.in_channel, xs.shape[2] // self.in_channel), 1, 2)
xs = F.relu(self.conv1_1(xs))
xs = F.relu(self.conv1_2(xs))
xs = F.max_pooling_2d(xs, 2, stride=2)
xs = F.relu(self.conv2_1(xs))
xs = F.relu(self.conv2_2(xs))
xs = F.max_pooling_2d(xs, 2, stride=2)
# update ilens accordingly: each of the two max-pooling layers halves the frame length (rounding up)
ilens = self.xp.array(self.xp.ceil(self.xp.array(
ilens, dtype=np.float32) / 2), dtype=np.int32)
ilens = self.xp.array(self.xp.ceil(self.xp.array(
ilens, dtype=np.float32) / 2), dtype=np.int32)
# x: utt_list of frame (remove zero-padded frames) x (input channel num x dim)
xs = F.swapaxes(xs, 1, 2)
xs = xs.reshape(xs.shape[0], xs.shape[1], xs.shape[2] * xs.shape[3])
xs = [xs[i, :ilens[i], :] for i in range(len(ilens))]
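# A minimal standalone sketch (not from the original code) of the length
# arithmetic above: each of the two max-pooling layers halves the frame
# axis with ceil rounding, so e.g. 100 frames -> 50 -> 25.
import numpy as np

def subsampled_lengths(ilens):
    ilens = np.ceil(np.asarray(ilens, dtype=np.float32) / 2).astype(np.int32)
    return np.ceil(ilens.astype(np.float32) / 2).astype(np.int32)

print(subsampled_lengths([100, 97]))  # -> [25 25]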
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x = inputs[0]
n, c, h, w = x.shape
kh = h // self.ksize
kw = w // self.ksize
kc = c // (self.ksize**2)
y = xp.zeros((n, kc, h, w), dtype=np.float32)
for j in range(self.ksize):
for i in range(self.ksize):
y1 = kh * j
y2 = y1 + kh
x1 = kw * i
x2 = x1 + kw
c1 = kc * (j * self.ksize + i)
c2 = c1 + kc
y[:, :, y1:y2, x1:x2] = x[:, c1:c2, y1:y2, x1:x2]
return y,
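# Quick shape check (hypothetical data, not from the original code): with
# ksize=2, an input x = np.arange(16).reshape(1, 4, 2, 2) maps to a
# (1, 1, 2, 2) output y with y[0, 0] == [[0, 5], [10, 15]]; each spatial
# tile of y is copied from a different channel group of x.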
def __init__(self, size, first_pooling_size=1):
last_size = size // (2 ** 4) // first_pooling_size
super(Discriminator, self).__init__(
c0=chainer.links.Convolution2D(3, 64, 4, stride=2, pad=1),
c1=chainer.links.Convolution2D(64, 128, 4, stride=2, pad=1),
c2=chainer.links.Convolution2D(128, 256, 4, stride=2, pad=1),
c3=chainer.links.Convolution2D(256, 512, 4, stride=2, pad=1),
bn0=chainer.links.BatchNormalization(64),
bn1=chainer.links.BatchNormalization(128),
bn2=chainer.links.BatchNormalization(256),
bn3=chainer.links.BatchNormalization(512),
l0z=chainer.links.Linear(last_size ** 2 * 512, 1, wscale=0.02 * math.sqrt(last_size ** 2 * 512)),
)
if first_pooling_size > 1:
self.first_pooling = chainer.functions.AveragePooling2D(first_pooling_size, stride=first_pooling_size)
else:
self.first_pooling = lambda x: x # through pass
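# Example (hypothetical numbers, not from the original code): with size=64
# and first_pooling_size=1, last_size = 64 // 2**4 = 4, so l0z maps
# 4 * 4 * 512 = 8192 features to a single real/fake logit.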
Parameters
----------
x : chainer.Variable or numpy.ndarray or cupy.ndarray
Input variable.
groups : int
Number of groups.
Returns
-------
chainer.Variable or numpy.ndarray or cupy.ndarray
Resulting variable.
"""
batch, channels, height, width = x.shape
channels_per_group = channels // groups
x = F.reshape(x, shape=(batch, groups, channels_per_group, height, width))
x = F.swapaxes(x, axis1=1, axis2=2)
x = F.reshape(x, shape=(batch, channels, height, width))
return x
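# A minimal runnable sketch (assuming the function above is exposed as
# channel_shuffle(x, groups)): with groups=2, the channel order
# [0 1 2 3] becomes [0 2 1 3].
import numpy as np
x = np.arange(4, dtype=np.float32).reshape(1, 4, 1, 1)
y = channel_shuffle(x, groups=2)
print(y.data.reshape(-1))  # -> [0. 2. 1. 3.]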
if stage is None: stage = self.max_stage
stage = min(stage, self.max_stage)
alpha = stage - math.floor(stage)
stage = math.floor(stage)
z_shape = (len(z), self.n_hidden, 1, 1)
if (skip_hs is not None) and (skip_hs[-1].shape == z_shape):
h = skip_hs.pop(-1)
else:
h = chainer.functions.reshape(z, z_shape)
h = chainer.functions.leaky_relu(feature_vector_normalization(self.c0(h)))
h = chainer.functions.leaky_relu(feature_vector_normalization(self.c1(h)))
for i in range(1, int(stage // 2 + 1)):
if skip_hs is not None: # conditional
h = chainer.functions.concat([h, skip_hs[- i]], axis=1)
h = self.bs[i](h)
if int(stage) % 2 == 0:
out = self.outs[int(stage // 2)]
# print(h.shape)
x = out(h)
else:
out_prev = self.outs[stage // 2]
out_curr = self.outs[stage // 2 + 1]
b_curr = self.bs[stage // 2 + 1]
x_0 = out_prev(chainer.functions.unpooling_2d(h, 2, 2, 0, outsize=(2 * h.shape[2], 2 * h.shape[3])))
if skip_hs is not None: # conditional
skip_hs_original = skip_hs[- int(stage // 2 + 1)]
h = chainer.functions.concat([h, skip_hs_original], axis=1)
h = b_curr(h)
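# (Snippet truncated here; in progressive growing the usual next step,
# assumed rather than shown, is x_1 = out_curr(h) followed by the fade-in
# blend x = (1 - alpha) * x_0 + alpha * x_1.)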
def forward_one(x, target, label):
# make input window vector
distance = window // 2
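# e.g. window=5 gives distance=2: the classifier sees x[target-2] .. x[target+2]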
char_vecs = list()
x = list(x)
for i in range(distance):
x.append('</s>')
x.insert(0, '<s>')
for i in range(-distance, distance + 1):
char = x[target + i]
char_id = char2id[char]
char_vec = model.embed(get_onehot(char_id))
char_vecs.append(char_vec)
concat = F.concat(tuple(char_vecs))
hidden = model.hidden1(F.sigmoid(concat))
pred = model.output(hidden)
correct = get_onehot(label)
return np.argmax(pred.data), F.softmax_cross_entropy(pred, correct)
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divisible by group_size * group_size: '
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
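# argmax_data keeps, for each pooled output element, the index of the input
# element selected by the max, presumably so backward can route gradients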
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channels,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
def TrainUNet(Xlist, Ylist, epoch=40, savefile="unet.model"):
assert len(Xlist) == len(Ylist)
unet = UNet()
model = UNetTrainmodel(unet)
model.to_gpu(0)
opt = optimizers.Adam()
opt.setup(model)
config.train = True
config.enable_backprop = True
itemcnt = len(Xlist)
itemlength = [x.shape[1] for x in Xlist]
subepoch = sum(itemlength) // const.PATCH_LENGTH // const.BATCH_SIZE * 4
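# heuristic: enough batches per epoch for roughly four passes over the data,
# counted in PATCH_LENGTH-sized, BATCH_SIZE-wide patches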
for ep in range(epoch):
sum_loss = 0.0
for subep in range(subepoch):
X = np.zeros((const.BATCH_SIZE, 1, 512, const.PATCH_LENGTH),
dtype="float32")
Y = np.zeros((const.BATCH_SIZE, 1, 512, const.PATCH_LENGTH),
dtype="float32")
idx_item = np.random.randint(0, itemcnt, const.BATCH_SIZE)
for i in range(const.BATCH_SIZE):
randidx = np.random.randint(
def __init__(self, in_channels, out_channels,
stride=1, splits_left=2, initialW=None):
super(ShuffleNetV2Block, self).__init__()
with self.init_scope():
if stride == 2:
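# stride-2 (downsampling) unit of ShuffleNetV2: two parallel branches, each
# ending in out_channels // 2 channels, later concatenated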
self.conv1 = L.Convolution2D(
in_channels, in_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn1 = L.BatchNormalization(in_channels)
self.conv2 = L.DepthwiseConvolution2D(
in_channels, 1, 3, 1, 1,
initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(in_channels)
self.conv3 = L.Convolution2D(
in_channels, out_channels // 2, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn3 = L.BatchNormalization(out_channels // 2)
self.conv4 = L.DepthwiseConvolution2D(
in_channels, 1, 3, 1, 1,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(in_channels)
self.conv5 = L.Convolution2D(
in_channels, out_channels // 2, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn5 = L.BatchNormalization(out_channels // 2)
elif stride == 1:
self.in_channels = in_channels - in_channels // splits_left
self.conv1 = L.Convolution2D(
self.in_channels, self.in_channels, 1, 1, 0,
def test_forward_gpu_test(self):
self.rnn.to_gpu()
with chainer.using_config('use_cudnn', 'always'), \
chainer.using_config('train', False):
self.check_forward(
cuda.to_gpu(self.h),
[cuda.to_gpu(x) for x in self.xs])