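# --- CNN classifier on single-post index data (4 stance classes: support / deny / query / comment) ---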
emb_mat = w2v.get_matrix()
print("- Embedding matrix size:", emb_mat.shape)
sl.start("* Load data")
data_dict = {
    'x': np.load(dir.TRAIN + "index(4519,30).npy"),
    'y': np.load(dir.TRAIN + "y(4519,4).npy"),
    'len': [np.load(dir.TRAIN + "len(4519,).npy")],
    'tx': np.load(dir.TEST + "index(1049,30).npy"),
    'ty': np.load(dir.TEST + "y(1049,4).npy"),
    'tlen': [np.load(dir.TEST + "len(1049,).npy")],
}
sl.stop()
ef.print_shape(data_dict)
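# Hyperparameters for the CNN run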
args = default_args()
args.emb_type = 'const'
args.emb_dim = w2v.vector_size
args.n_class = 4
args.n_hidden = 20
args.learning_rate = 0.001
args.l2_reg = 0.0
args.batch_size = 64
args.iter_times = 30
args.display_step = 1
args.drop_porb = 0.1
class_name = ['support', 'deny', 'query', 'comment']
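# Build and train/evaluate the CNN with convolution kernel widths 1, 2 and 3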
nn = CNN_classify(data_dict, emb_mat, args, kernel_widths=[1, 2, 3], class_name=class_name)
nn.train_test()
rt.stop()
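# --- Transformer classifier on the same single-post data ---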
emb_mat = w2v.get_matrix()
print("- Embedding matrix size:", emb_mat.shape)
sl.start("* Load data")
data_dict = {
    'x': np.load(dir.TRAIN + "index(4519,30).npy"),
    'y': np.load(dir.TRAIN + "y(4519,4).npy"),
    'len': [np.load(dir.TRAIN + "len(4519,).npy")],
    'tx': np.load(dir.TEST + "index(1049,30).npy"),
    'ty': np.load(dir.TEST + "y(1049,4).npy"),
    'tlen': [np.load(dir.TEST + "len(1049,).npy")],
}
sl.stop()
ef.print_shape(data_dict)
args = default_args()
args.emb_type = 'const'
args.emb_dim = w2v.vector_size
args.n_class = 4
args.n_hidden = 50
args.learning_rate = 0.0001
args.l2_reg = 0.0
args.batch_size = 32
args.iter_times = 30
args.display_step = 1
args.drop_porb = 0.1
args.n_layer = 2
args.n_head = 8
class_name = ['support', 'deny', 'query', 'comment']
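# Build and train/evaluate the 2-layer, 8-head Transformer classifier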
nn = transformer_classify(data_dict, emb_mat, args, class_name=class_name)
nn.train_test()
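# --- RNN sequence model over 3-post threads (one label per post, combined with vote=True) ---
emb_mat = w2v.get_matrix()
print("- Embedding matrix size:", emb_mat.shape)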
sl.start("* Load data")
data_dict = {
    'x': np.load(dir.TRAIN + "index(4519,3,30).npy"),
    'y': np.load(dir.TRAIN + "y(4519,3,4).npy"),
    'len': [np.load(dir.TRAIN + "len_sen(4519,3).npy"), np.load(dir.TRAIN + "len_seq(4519,).npy")],
    'id': ef.load_list(dir.TRAIN + "id.txt"),
    'tx': np.load(dir.TEST + "index(1049,3,30).npy"),
    'ty': np.load(dir.TEST + "y(1049,3,4).npy"),
    'tlen': [np.load(dir.TEST + "len_sen(1049,3).npy"), np.load(dir.TEST + "len_seq(1049,).npy")],
    'tid': ef.load_list(dir.TEST + "id.txt"),
}
sl.stop()
ef.print_shape(data_dict)
args = default_args()
args.emb_type = 'const'
args.emb_dim = w2v.vector_size
args.n_class = 4
args.n_hidden = 50
args.learning_rate = 0.001
args.l2_reg = 0.0
args.batch_size = 64
args.iter_times = 20
args.display_step = 1
args.drop_porb = 0.1
args.rnn_type = 'LSTM'
args.use_attention = True
class_name = ['support', 'deny', 'query', 'comment']
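# Build and train/evaluate the attention-augmented LSTM sequence model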
nn = RNN_sequence(data_dict, emb_mat, args, vote=True, class_name=class_name)
nn.train_test()
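# --- RNN classifier over 3-post threads (a single label per thread) ---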
emb_mat = w2v.get_matrix()
print("- Embedding matrix size:", emb_mat.shape)
sl.start("* Load data")
data_dict = {
    'x': np.load(dir.TRAIN + "index(4519,3,30).npy"),
    'y': np.load(dir.TRAIN + "y(4519,4).npy"),
    'len': [np.load(dir.TRAIN + "len_sen(4519,3).npy"), np.load(dir.TRAIN + "len_seq(4519,).npy")],
    'tx': np.load(dir.TEST + "index(1049,3,30).npy"),
    'ty': np.load(dir.TEST + "y(1049,4).npy"),
    'tlen': [np.load(dir.TEST + "len_sen(1049,3).npy"), np.load(dir.TEST + "len_seq(1049,).npy")],
}
sl.stop()
ef.print_shape(data_dict)
args = default_args()
args.emb_type = 'const'
args.emb_dim = w2v.vector_size
args.n_class = 4
args.n_hidden = 50
args.learning_rate = 0.001
args.l2_reg = 0.0
args.batch_size = 64
args.iter_times = 20
args.display_step = 1
args.drop_porb = 0.1
args.rnn_type = 'GRU'
args.use_attention = True
class_name = ['support', 'deny', 'query', 'comment']
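# Build and train/evaluate the attention-augmented GRU classifier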
nn = RNN_classify(data_dict, emb_mat, args, class_name=class_name)
nn.train_test()
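# --- Standalone PyTorch LSTM baseline ---
# Assumes `import numpy as np`, `import torch`, `import torch.nn as nn`, and the
# project's wv / dir / base helper modules are already imported.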
w2v = wv.load_word2vec(dir.W2V_GOOGLE, type='txt')
emb_mat = w2v.get_matrix()
print("- Embedding matrix size:", emb_mat.shape)
""" Load data """
device = torch.device(0)  # first CUDA device; all tensors are created directly on it
# train data (word indices need an integer dtype for the embedding lookup)
x = torch.tensor(np.load(dir.TRAIN + "index(4519,30).npy"), dtype=torch.long, device=device)
y = torch.tensor(np.load(dir.TRAIN + "y(4519,4).npy"), dtype=torch.int, device=device)
l = torch.tensor(np.load(dir.TRAIN + "len(4519,).npy"), dtype=torch.int, device=device)
# test data
tx = torch.tensor(np.load(dir.TEST + "index(1049,30).npy"), dtype=torch.long, device=device)
ty = torch.tensor(np.load(dir.TEST + "y(1049,4).npy"), dtype=torch.int, device=device)
tl = torch.tensor(np.load(dir.TEST + "len(1049,).npy"), dtype=torch.int, device=device)
""" Init arguments """
args = base.default_args()
args.emb_type = 'const'
args.emb_dim = w2v.vector_size
args.n_class = 4
args.n_hidden = 50
args.learning_rate = 0.001
args.l2_reg = 0.0
args.batch_size = 128
args.iter_times = 20
args.display_step = 1
args.drop_porb = 0.2
""" Build Model """
class LSTM_model(nn.Module):
    def __init__(self, emb_matrix, args):
        super(LSTM_model, self).__init__()
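        # The original snippet is truncated at this point. What follows is a minimal,
        # hypothetical sketch of the rest of the model, assuming it looks up the
        # pre-trained embedding matrix, runs a single-layer LSTM, and classifies the
        # final hidden state. Names follow the args set above ('drop_porb' is the
        # attribute name used throughout this code).
        self.emb = nn.Embedding.from_pretrained(
            torch.tensor(emb_matrix, dtype=torch.float),
            freeze=(args.emb_type == 'const'))               # 'const': keep embeddings fixed
        self.lstm = nn.LSTM(args.emb_dim, args.n_hidden, batch_first=True)
        self.dropout = nn.Dropout(args.drop_porb)
        self.fc = nn.Linear(args.n_hidden, args.n_class)

    def forward(self, x):
        e = self.dropout(self.emb(x))         # (batch, 30, emb_dim)
        _, (h, _) = self.lstm(e)              # h: (1, batch, n_hidden)
        return self.fc(self.dropout(h[-1]))   # (batch, n_class) logits

# Usage sketch: model = LSTM_model(emb_mat, args).to(device); logits = model(x)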