#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Some extended models written by contributors
Ubuntu 16.04 & PyTorch 1.0.0
Last update: KzXuan, 2019.04.09
"""
import torch
import numpy as np
import torch.nn as nn
import easy_function as ef
import torch.nn.functional as F
from copy import deepcopy
from step_print import table_print
from dnn.pytorch import base, layer, exec
from predict_analysis import predict_analysis
class RNN_diachronic_model(nn.Module, base.base):
    def __init__(self, emb_matrix, args, n_time):
        """
        Initialize the model data and layers
        * emb_matrix [np.array]: word embedding matrix
        * args [dict]: all model arguments
        * n_time [int]: number of time steps for the diachronic model
        """
        nn.Module.__init__(self)
        base.base.__init__(self, args)
self.n_time = n_time
self.bi_direction_num = 2 if self.bi_direction else 1
out_n_hidden = self.n_hidden * self.bi_direction_num
self.drop_out = nn.Dropout(self.drop_prob)
self.embedding_layer(emb_matrix)
        self.extractors = nn.ModuleList()
        self.attentions = nn.ModuleList()
        self.predictors = nn.ModuleList()
        for _ in range(n_time):
            # one group of CNN feature extractors per time step, kernel widths 1 and 2
            self.extractors.append(
                nn.ModuleList([layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw) for kw in range(1, 3)])
            )
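        # Usage sketch (assumption, not from the original source): a hypothetical
        # instantiation of this diachronic model, with `args` supplying the fields
        # used above (emb_dim, n_hidden, bi_direction, drop_prob, ...):
        #   model = RNN_diachronic_model(emb_matrix, args, n_time=5)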
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Some common models for deep neural networks
Ubuntu 16.04 & PyTorch 1.0
Last update: KzXuan, 2019.04.09
"""
import torch
import numpy as np
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
from dnn.pytorch import base, layer
class CNN_model(nn.Module, base.base):
def __init__(self, emb_matrix, args, kernel_widths):
"""
        Initialize the model data and layers
* emb_matrix [np.array]: word embedding matrix
* args [dict]: all model arguments
        * kernel_widths [list]: list of kernel widths for the CNN layers
"""
nn.Module.__init__(self)
base.base.__init__(self, args)
self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
self.drop_out = nn.Dropout(self.drop_prob)
self.cnn = nn.ModuleList()
for kw in kernel_widths:
self.cnn.append(layer.CNN_layer(self.emb_dim, 1, self.n_hidden, kw))
self.predict = layer.softmax_layer(self.n_hidden * len(kernel_widths), self.n_class)
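        # Usage sketch (assumption, not part of the original source): with `args`
        # providing emb_dim, n_hidden, drop_prob, n_class and emb_type as used above,
        # a model covering kernel widths 1-3 could be built as
        #   model = CNN_model(emb_matrix, args, kernel_widths=[1, 2, 3])
        # giving one CNN_layer per width and a softmax classifier over the
        # n_hidden * len(kernel_widths) concatenated features.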
outputs = torch.reshape(outputs, [-1, max_seq_len[hi], outputs.size(-1)])
now_seq_len = torch.reshape(seq_len[hi], [-1])
if self.mode == 'classify':
if self.use_attention:
outputs = self.rnn[hi](outputs, now_seq_len, out_type='all')
outputs = self.att[hi](outputs, now_seq_len)
else:
outputs = self.rnn[hi](outputs, now_seq_len, out_type='last') # batch_size * (2)n_hidden
elif self.mode == 'sequence':
outputs = self.rnn[hi](outputs, now_seq_len, out_type='all') # batch_size * max_seq_len * (2)n_hidden
pred = self.predict(outputs)
return pred
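        # Note (assumption, following the softmax_layer predictors used by the other
        # models in this file): `pred` would be batch_size * n_class in 'classify'
        # mode and batch_size * max_seq_len * n_class in 'sequence' mode.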
class transformer_model(nn.Module, base.base):
def __init__(self, emb_matrix, args):
"""
        Initialize the model data and layers
* emb_matrix [np.array]: word embedding matrix
* args [dict]: all model arguments
"""
nn.Module.__init__(self)
base.base.__init__(self, args)
self.emb_mat = layer.embedding_layer(emb_matrix, self.emb_type)
self.pos_emb_mat = layer.positional_embedding_layer(self.n_hidden)
self.drop_out = nn.Dropout(self.drop_prob)
self.transformer = nn.ModuleList([
layer.transformer_layer(self.emb_dim, self.n_hidden, self.n_head) for _ in range(self.n_layer)
])
self.predict = layer.softmax_layer(self.emb_dim, self.n_class)
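        # Usage sketch (assumption, not part of the original source): a typical
        # instantiation, with `args` providing emb_dim, n_hidden, n_head, n_layer,
        # drop_prob, n_class and emb_type as used above:
        #   model = transformer_model(emb_matrix, args)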