def init_weights(self):
    """Recursively (re)initialize every custom sub-layer of the model."""
    for m in self.modules():
        if isinstance(m, (layer.RNN_layer, layer.self_attention_layer, layer.softmax_layer)):
            m.init_weights()
base.base.__init__(self, args)
self.n_time = n_time
self.bi_direction_num = 2 if self.bi_direction else 1
out_n_hidden = self.n_hidden * self.bi_direction_num

self.drop_out = nn.Dropout(self.drop_prob)
self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
self.extractors = nn.ModuleList()
self.attentions = nn.ModuleList()
self.predictors = nn.ModuleList()
for _ in range(n_time):
    self.extractors.append(
        nn.ModuleList([layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw) for kw in range(1, 3)])
    )  # index 0 -> (nt-1)
    self.attentions.append(layer.self_attention_layer(out_n_hidden))
    self.predictors.append(layer.softmax_layer(out_n_hidden, self.n_class))  # index 0 -> (nt-1)
self.connections = nn.ModuleList()
self.connections.append(None)  # the first time step has no incoming connection
for _ in range(n_time - 1):
    self.connections.append(
        nn.Sequential(
            nn.Linear(2 * out_n_hidden, out_n_hidden, bias=False),
            nn.Sigmoid()
        )
    )
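
# Illustrative sketch (not part of the original source): the `connections`
# entries above are Linear(2*H -> H) + Sigmoid gates, one per time step after
# the first. A plausible way to apply such a gate in forward() is shown below;
# the helper name `gate_step`, the feature names and the blending rule are
# assumptions for illustration only.
import torch


def gate_step(connection, prev_feat, cur_feat):
    """Blend current-step features with previous-step features via the learned gate."""
    if connection is None:  # first time step: nothing to connect to
        return cur_feat
    # (batch, 2 * out_n_hidden) -> gate values in (0, 1) of shape (batch, out_n_hidden)
    gate = connection(torch.cat([prev_feat, cur_feat], dim=-1))
    return gate * cur_feat + (1.0 - gate) * prev_feat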
* args [dict]: all model arguments
* mode [str]: 'classify' or 'sequence', selects which kind of result is returned
"""
nn.Module.__init__(self)
base.base.__init__(self, args)
self.n_hierarchy = n_hierarchy
self.mode = mode
self.bi_direction_num = 2 if self.bi_direction else 1
self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
self.drop_out = nn.Dropout(self.drop_prob)

# one RNN layer (plus an optional self-attention layer) per hierarchy level
rnn_params = (self.n_hidden, self.n_layer, self.drop_prob, self.bi_direction, self.rnn_type)
self.rnn = nn.ModuleList([layer.RNN_layer(self.emb_dim, *rnn_params)])
self.att = nn.ModuleList([layer.self_attention_layer(self.bi_direction_num * self.n_hidden)])
for _ in range(self.n_hierarchy - 1):
    self.rnn.append(layer.RNN_layer(self.bi_direction_num * self.n_hidden, *rnn_params))
    if self.use_attention:
        self.att.append(layer.self_attention_layer(self.bi_direction_num * self.n_hidden))
self.predict = layer.softmax_layer(self.n_hidden * self.bi_direction_num, self.n_class)
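
# Illustrative usage (not from the original source): the class name
# `HierarchicalRNN` is a placeholder for whichever class this constructor
# belongs to, and the values in `args` are dummies; the keys mirror the
# attributes the constructor reads (presumably unpacked by base.base.__init__).
import numpy as np

emb_matrix = np.random.rand(5000, 300).astype("float32")  # dummy (vocab_size, emb_dim) table
args = {
    "emb_type": "const",   # value is a guess; any mode layer.embedding_layer accepts
    "emb_dim": 300,
    "n_hidden": 128,
    "n_layer": 1,
    "n_class": 2,
    "drop_prob": 0.1,
    "bi_direction": True,
    "rnn_type": "LSTM",    # value is a guess; any type layer.RNN_layer accepts
    "use_attention": True,
}
# model = HierarchicalRNN(emb_matrix, args, n_hierarchy=2, mode="classify")
# model.init_weights()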