def BinaryConvolution(operand,
                      filter_shape,
                      num_filters=1,
                      channels=1,
                      init=C.glorot_uniform(),
                      pad=False,
                      strides=1,
                      bias=True,
                      init_bias=0,
                      op_name='BinaryConvolution', name=''):
    """ arguments:
        operand: tensor to convolve
        filter_shape: tuple indicating filter size
        num_filters: number of filters to use
        channels: number of incoming channels
        init: type of initialization to use for weights
    """
    kernel_shape = (num_filters, channels) + filter_shape
    W = C.parameter(shape=kernel_shape, init=init, name="filter")

    # binarize both the weights and the input (1 bit) with the CustomMultibit user op
    binary_convolve_operand_p = C.placeholder(operand.shape, operand.dynamic_axes, name="operand")
    binary_convolve = C.convolution(CustomMultibit(W, 1), CustomMultibit(binary_convolve_operand_p, 1), auto_padding=[False, pad, pad], strides=[strides])
    r = C.as_block(binary_convolve, [(binary_convolve_operand_p, operand)], 'binary_convolve')

    # add per-filter bias
    bias_shape = (num_filters, 1, 1)
    b = C.parameter(shape=bias_shape, init=init_bias, name="bias")
    r = r + b

    # apply learnable param relu
    P = C.parameter(shape=r.shape, init=init, name="prelu")
    r = C.param_relu(P, r)
    return r
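For orientation, here is a hypothetical usage sketch, not from the original code. It assumes the CustomMultibit native user op has already been registered, that cntk is imported as C, and the input shape, kernel size, and filter count are illustrative only.

import cntk as C

# Illustrative only: a CHW image input run through the binarized convolution above.
features = C.input_variable((3, 32, 32), name='features')
z = BinaryConvolution(features,
                      filter_shape=(3, 3),
                      num_filters=32,
                      channels=3,
                      pad=True)   # binarized conv + bias + learnable PReLU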
def input_layer(self, cgw, cc, qgw, qc, qnw, cnw):
    cgw_ph = C.placeholder()
    cnw_ph = C.placeholder()
    cc_ph = C.placeholder()
    qgw_ph = C.placeholder()
    qnw_ph = C.placeholder()
    qc_ph = C.placeholder()

    input_chars = C.placeholder(shape=(1, self.word_size, self.c_dim))
    input_glove_words = C.placeholder(shape=(self.wg_dim,))
    input_nonglove_words = C.placeholder(shape=(self.wn_dim,))

    embedded = C.splice(
        C.reshape(self.charcnn(input_chars), self.convs),
        self.embed()(input_glove_words, input_nonglove_words), name='splice_embed')
    highway = HighwayNetwork(dim=self.elmo_dim + self.hidden_dim + self.convs,
                             highway_layers=self.highway_layers)(embedded)
    highway_drop = C.layers.Dropout(self.dropout)(highway)
    processed = OptimizedRnnStack(self.hidden_dim,
                                  num_layers=1,
                                  bidirectional=True,
                                  use_cudnn=self.use_cudnn,
                                  name='input_rnn')(highway_drop)
def input_layer(self, cgw, cnw, cc, qgw, qnw, qc):
    cgw_ph = C.placeholder()
    cnw_ph = C.placeholder()
    cc_ph = C.placeholder()
    qgw_ph = C.placeholder()
    qnw_ph = C.placeholder()
    qc_ph = C.placeholder()

    input_chars = C.placeholder(shape=(1, self.word_size, self.c_dim))
    input_glove_words = C.placeholder(shape=(self.wg_dim,))
    input_nonglove_words = C.placeholder(shape=(self.wn_dim,))

    # we need to reshape because GlobalMaxPooling/reduce_max is retaining a trailing singleton dimension
    # todo: GlobalPooling/reduce_max should have a keepdims default of False
    embedded = C.splice(
        C.reshape(self.charcnn(input_chars), self.convs),
        self.embed()(input_glove_words, input_nonglove_words), name='splice_embed')
    highway = HighwayNetwork(dim=2*self.hidden_dim, highway_layers=self.highway_layers)(embedded)
    highway_drop = C.layers.Dropout(self.dropout)(highway)
    processed = OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='input_rnn')(highway_drop)

    qce = C.one_hot(qc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
    cce = C.one_hot(cc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
    q_processed = processed.clone(C.CloneMethod.share, {input_chars: qce, input_glove_words: qgw_ph, input_nonglove_words: qnw_ph})
    c_processed = processed.clone(C.CloneMethod.share, {input_chars: cce, input_glove_words: cgw_ph, input_nonglove_words: cnw_ph})
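The snippet above builds the embedding/encoder graph once over placeholders and then clones it onto the question and context inputs so both branches share weights. A minimal, self-contained sketch of that placeholder + clone(share) pattern follows; all names and dimensions here are illustrative, not taken from the original model.

import cntk as C

# Build a small graph over a placeholder, then clone it with shared parameters.
ph = C.placeholder(shape=(300,), name='shared_input')
shared_encoder = C.layers.Dense(128, activation=C.relu)(ph)

question = C.sequence.input_variable(300)
context = C.sequence.input_variable(300)

# CloneMethod.share keeps a single set of Dense parameters for both branches.
q_enc = shared_encoder.clone(C.CloneMethod.share, {ph: question})
c_enc = shared_encoder.clone(C.CloneMethod.share, {ph: context})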
def lightlstm(input_dim, cell_dim):
    x = C.placeholder(name='x')
    dh = C.placeholder(name='dh')
    dc = C.placeholder(name='dc')

    # split the input into the two halves fed to the chained cells
    x1 = C.slice(x, -1, input_dim * 0, input_dim * 1)
    x2 = C.slice(x, -1, input_dim * 1, input_dim * 2)

    def LSTMCell(x, y, dh, dc):
        '''LightLSTM Cell'''
        b = C.parameter(shape=(4 * cell_dim), init=0)
        W = C.parameter(shape=(input_dim, 4 * cell_dim), init=glorot_uniform())
        H = C.parameter(shape=(cell_dim, 4 * cell_dim), init=glorot_uniform())

        # projected contribution from input x, hidden, and bias
        proj4 = b + C.times(x, W) + C.times(dh, H)

        # split the stacked projection into the per-gate pieces
        it_proj = C.slice(proj4, -1, 0 * cell_dim, 1 * cell_dim)   # input gate
        bit_proj = C.slice(proj4, -1, 1 * cell_dim, 2 * cell_dim)  # candidate cell input
        ft_proj = C.slice(proj4, -1, 2 * cell_dim, 3 * cell_dim)   # forget gate
def LocalResponseNormalization(k, n, alpha, beta, name=''):
    x = C.placeholder(name='lrn_arg')
    x2 = C.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3rd axis (channel axis); note that Python and BrainScript axis orders are reversed
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha/(2*n+1), (1, 2*n+1, 1, 1), name='W')
    # 3D convolution with a filter that has a non-1 size only in the 3rd axis; it does not reduce, since the reduction dimension is fake and of size 1
    y = C.convolution(W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(k + b))
    apply_x = C.element_divide(x, den)
    return apply_x
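Because the factory closes over a placeholder and returns the applied graph, it can be dropped straight into a layers pipeline. A hypothetical sketch follows; the layer sizes and LRN constants are illustrative only, not from the original code.

import cntk as C

# Illustrative AlexNet-style block using the LRN factory defined above.
model = C.layers.Sequential([
    C.layers.Convolution2D((5, 5), 32, pad=True, activation=C.relu),
    LocalResponseNormalization(k=1.0, n=2, alpha=1e-4, beta=0.75),
    C.layers.MaxPooling((3, 3), strides=(2, 2)),
])
features = C.input_variable((3, 32, 32))
z = model(features)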
def modeling_layer(self, attention_context):
    att_context = C.placeholder(shape=(8*self.hidden_dim,))

    # modeling layer
    # todo: use dropout in optimized_rnn_stack from cudnn once the API exposes it
    mod_context = C.layers.Sequential([
        C.layers.Dropout(self.dropout),
        OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='model_rnn0'),
        C.layers.Dropout(self.dropout),
        OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='model_rnn1')])(att_context)

    return C.as_block(
        mod_context,
        [(att_context, attention_context)],
        'modeling_layer',
        'modeling_layer')
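The as_block call above maps the internal placeholder onto the actual argument and groups the subgraph under a single named node. A minimal sketch of that idiom with made-up names and dimensions:

import cntk as C

x = C.input_variable(10)
ph = C.placeholder(name='block_arg')
body = C.layers.Dense(4, activation=C.tanh)(ph)

# Wrap the subgraph so it appears as one named block in the model graph.
blocked = C.as_block(body, [(ph, x)], 'dense_block', 'dense_block')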