# Hierarchical RNN: one RNN (and optional self-attention) per hierarchy level
self.n_hierarchy = n_hierarchy
self.mode = mode
self.bi_direction_num = 2 if self.bi_direction else 1
self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
self.drop_out = nn.Dropout(self.drop_prob)
rnn_params = (self.n_hidden, self.n_layer, self.drop_prob, self.bi_direction, self.rnn_type)
# The lowest level reads word embeddings; higher levels read the previous level's output
self.rnn = nn.ModuleList([layer.RNN_layer(self.emb_dim, *rnn_params)])
self.att = nn.ModuleList([layer.self_attention_layer(self.bi_direction_num * self.n_hidden)])
for _ in range(self.n_hierarchy - 1):
    self.rnn.append(layer.RNN_layer(self.bi_direction_num * self.n_hidden, *rnn_params))
    if self.use_attention:
        self.att.append(layer.self_attention_layer(self.bi_direction_num * self.n_hidden))
# Final classifier over the top-level representation
self.predict = layer.softmax_layer(self.n_hidden * self.bi_direction_num, self.n_class)
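
# A minimal torch-only sketch (not the repo's layer.RNN_layer) of the dimension rule used
# above: a bidirectional RNN emits bi_direction_num * n_hidden features, so every level
# after the first, the attention layers, and the classifier are all sized to that width.
# The concrete values below are made up for illustration.
import torch
import torch.nn as nn

n_hidden, bi_direction = 64, True
bi_num = 2 if bi_direction else 1
low = nn.LSTM(input_size=300, hidden_size=n_hidden, bidirectional=bi_direction, batch_first=True)
high = nn.LSTM(input_size=bi_num * n_hidden, hidden_size=n_hidden,
               bidirectional=bi_direction, batch_first=True)

x = torch.randn(8, 20, 300)                 # (batch, seq_len, emb_dim)
low_out, _ = low(x)                         # (8, 20, bi_num * n_hidden)
high_out, _ = high(low_out)                 # next hierarchy level consumes the widened features
assert low_out.shape[-1] == bi_num * n_hidden
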
def __init__(self, emb_matrix, args):
    """
    Initialize the model data and layers
    * emb_matrix [np.array]: word embedding matrix
    * args [dict]: all model arguments
    """
    super(LSTM_model, self).__init__()
    # Embedding layer
    self.emb_mat = layer.embedding_layer(emb_matrix, 'const')
    # Dropout layer
    self.drop_out = nn.Dropout(args.drop_prob)
    # LSTM layer
    self.lstm = layer.RNN_layer(args.emb_dim, args.n_hidden, args.n_layer,
                                args.drop_prob, args.bi_direction, mode="LSTM")
    # Softmax prediction layer
    bi_direction_num = 2 if args.bi_direction else 1
    self.predictor = layer.softmax_layer(bi_direction_num * args.n_hidden, args.n_class)
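
# A rough torch-only equivalent of the wiring above (a sketch, not the repo's layer.* API,
# whose internals are not shown here): pretrained embeddings -> dropout -> (bi)LSTM ->
# linear classifier over the last time step. Hyperparameter values are invented.
import numpy as np
import torch
import torch.nn as nn

emb_matrix = np.random.rand(5000, 300).astype("float32")     # (vocab_size, emb_dim)
emb = nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True)  # 'const' embeddings
drop = nn.Dropout(0.5)
lstm = nn.LSTM(300, 128, num_layers=2, dropout=0.5, bidirectional=True, batch_first=True)
clf = nn.Linear(2 * 128, 4)                                   # bi_direction_num * n_hidden -> n_class

tokens = torch.randint(0, 5000, (8, 30))                      # (batch, seq_len)
out, _ = lstm(drop(emb(tokens)))
logits = clf(out[:, -1, :])                                   # last hidden state as sentence vector
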
# Multi-time-step model: one CNN feature extractor, attention layer and classifier per
# time step, with gated connections between consecutive steps
self.n_time = n_time
self.bi_direction_num = 2 if self.bi_direction else 1
out_n_hidden = self.n_hidden * self.bi_direction_num
self.drop_out = nn.Dropout(self.drop_prob)
self.embedding_layer(emb_matrix)
self.extractors = nn.ModuleList()
self.attentions = nn.ModuleList()
self.predictors = nn.ModuleList()
for _ in range(n_time):
    self.extractors.append(
        nn.ModuleList([layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw) for kw in range(1, 3)])
    )  # index 0 -> (nt-1)
    self.attentions.append(layer.self_attention_layer(out_n_hidden))
    self.predictors.append(layer.softmax_layer(out_n_hidden, self.n_class))  # index 0 -> (nt-1)
# Gated connections between consecutive steps; the first step has no incoming connection
self.connections = nn.ModuleList()
self.connections.append(None)
for _ in range(n_time - 1):
    self.connections.append(
        nn.Sequential(
            nn.Linear(2 * out_n_hidden, out_n_hidden, bias=False),
            nn.Sigmoid()
        )
    )
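
# How such a connection might be applied in forward (an assumption -- the forward pass is
# not part of this snippet): the Linear+Sigmoid block maps the concatenation of the previous
# step's features and the current step's features to a gate in (0, 1), which then blends them.
import torch
import torch.nn as nn

out_n_hidden = 256
conn = nn.Sequential(nn.Linear(2 * out_n_hidden, out_n_hidden, bias=False), nn.Sigmoid())

prev_feat = torch.randn(8, out_n_hidden)                  # features from time step t-1
cur_feat = torch.randn(8, out_n_hidden)                   # features from time step t
gate = conn(torch.cat([prev_feat, cur_feat], dim=-1))     # (8, out_n_hidden), values in (0, 1)
fused = gate * cur_feat + (1 - gate) * prev_feat          # one plausible gating scheme
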
def __init__(self, emb_matrix, args, kernel_widths):
    """
    Initialize the model data and layers
    * emb_matrix [np.array]: word embedding matrix
    * args [dict]: all model arguments
    * kernel_widths [list]: kernel widths, one per CNN layer
    """
    nn.Module.__init__(self)
    base.base.__init__(self, args)
    self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
    self.drop_out = nn.Dropout(self.drop_prob)
    # One CNN feature extractor per kernel width; their outputs are concatenated
    self.cnn = nn.ModuleList()
    for kw in kernel_widths:
        self.cnn.append(layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw))
    self.predict = layer.softmax_layer(self.n_hidden * len(kernel_widths), self.n_class)
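
# Why the classifier takes n_hidden * len(kernel_widths) inputs: each CNN branch contributes
# an n_hidden-dimensional feature vector and the branches are concatenated. A torch-only
# illustration with nn.Conv1d and max-over-time pooling (a stand-in for layer.CNN_layer,
# whose internals are not shown here); all values are made up.
import torch
import torch.nn as nn

emb_dim, n_hidden, kernel_widths = 300, 128, [2, 3, 4]
convs = nn.ModuleList(nn.Conv1d(emb_dim, n_hidden, kw) for kw in kernel_widths)
clf = nn.Linear(n_hidden * len(kernel_widths), 4)             # n_hidden * len(kernel_widths) -> n_class

x = torch.randn(8, 50, emb_dim).transpose(1, 2)               # (batch, emb_dim, seq_len) for Conv1d
feats = [torch.relu(c(x)).max(dim=-1).values for c in convs]  # max-over-time pooling per branch
logits = clf(torch.cat(feats, dim=-1))                        # (batch, n_class)
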
def __init__(self, emb_matrix, args):
    """
    Initialize the model data and layers
    * emb_matrix [np.array]: word embedding matrix
    * args [dict]: all model arguments
    """
    nn.Module.__init__(self)
    base.base.__init__(self, args)
    self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
    self.pos_emb_mat = layer.positional_embedding_layer(self.n_hidden)
    self.drop_out = nn.Dropout(self.drop_prob)
    # Stack of n_layer transformer encoder blocks
    self.transformer = nn.ModuleList([
        layer.transformer_layer(self.emb_dim, self.n_hidden, self.n_head) for _ in range(self.n_layer)
    ])
    self.predict = layer.softmax_layer(self.emb_dim, self.n_class)
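
# The overall pattern above (embeddings plus positional information, n_layer stacked encoder
# blocks that keep the emb_dim width, then a classifier) sketched with torch's built-in
# TransformerEncoderLayer. This is an illustration only, not the repo's layer.transformer_layer;
# the hyperparameters are invented.
import torch
import torch.nn as nn

emb_dim, n_head, n_layer, n_class = 300, 6, 2, 4
blocks = nn.ModuleList(nn.TransformerEncoderLayer(d_model=emb_dim, nhead=n_head,
                                                  dim_feedforward=512, batch_first=True)
                       for _ in range(n_layer))
clf = nn.Linear(emb_dim, n_class)

x = torch.randn(8, 40, emb_dim)            # embedded tokens (positional encoding added in practice)
for block in blocks:
    x = block(x)                           # each block preserves the emb_dim feature width
logits = clf(x.mean(dim=1))                # pool over tokens, then classify
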