def __init__(self, s_date, n_frame):
    self.n_epoch = 20
    # Derive the previous window's yyyymm bounds from s_date ("YYYYMM01_YYYYMM01" format)
    # so an earlier model can be warm-started below; wrap when the month field underflows.
    prev_bd = int(s_date[:6]) - 1
    prev_ed = int(s_date[9:15]) - 1
    if prev_bd % 100 == 0: prev_bd -= 98
    if prev_ed % 100 == 0: prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
    self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date
    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    # Three fully connected layers with batch normalization, regressing a single value.
    input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
    dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
    dense1n = tflearn.batch_normalization(dense1, name='BN1')
    dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
    dense2n = tflearn.batch_normalization(dense2, name='BN2')
    dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
    output = tflearn.single_unit(dense3)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    # Warm-start from the previous window's model if it exists and train for fewer epochs.
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)
        self.n_epoch = 10
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)
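
# Hedged usage sketch, not part of the original class: after construction, the
# surrounding code presumably trains with the TFLearn DNN API and saves into
# self.model_dir. X and Y below are hypothetical arrays shaped [N, 23*n_frame]
# and [N, 1], and the method name is hypothetical.
def train(self, X, Y):
    self.estimators.fit(X, Y, n_epoch=self.n_epoch, show_metric=True,
                        snapshot_epoch=False, run_id='reg_l3_bn')
    self.estimators.save('%s/model.tfl' % self.model_dir)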
net = tflearn.conv_2d(net, 256, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.fully_connected(net, 4096, activation='relu')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 4096, activation='relu')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 17, activation='softmax')
net = tflearn.regression(net, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
m = tflearn.DNN(net, checkpoint_path='models/vgg_net',
                max_checkpoints=1, tensorboard_verbose=3)
m.fit(X, Y, n_epoch=500, shuffle=True,
      show_metric=True, batch_size=32, snapshot_step=500,
      snapshot_epoch=False, run_id='vgg_net')
m.save('models/vgg_net.tfl')
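
# Hedged inference sketch, not in the original snippet: the saved weights can be
# reloaded into the same DNN object and used for prediction. X_new is a
# hypothetical batch shaped like the training images.
m.load('models/vgg_net.tfl')
predictions = m.predict(X_new)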
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper(n_history=n_history, epoch=10, squeeze=True, use_sparse_labels=True)
rnn.fit(28, 10, generator, n_iter=28)
print("Time Cost: {}".format(time.time() - t))
if draw:
    rnn.draw_err_logs()
print("=" * 60, "\n" + "Tflearn", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 28, 28])
net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', batch_size=64,
                         loss='categorical_crossentropy', name="output1")
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)
print("Time Cost: {}".format(time.time() - t))
def addOutputs(self):
    '''
    Adds output layers to the graph.
    '''
    # Outputs: one value per action; helpers expose the max value, indexed lookups,
    # and the greedy action.
    w_init = tflearn.initializations.xavier()
    self.outputs = tflearn.fully_connected(self.features, self.c.outputs, weights_init=w_init)
    self.maxOutputs = tf.reduce_max(self.outputs, axis=1)
    self.outputsIndices = tf.placeholder('int32', [None, None], 'outputsIndices')
    self.outputsUsingIndices = tf.gather_nd(self.outputs, self.outputsIndices)
    self.actions = tf.argmax(self.outputs, axis=1)
    # SimHash add-on: a random projection A maps the 1024-d features to K signs per state.
    self.A = tf.get_variable('A', [self.K, 1024], tf.float32,
                             tf.random_normal_initializer(stddev=1.0))
    self.simHash = tf.sign(tf.matmul(self.A, self.features, transpose_b=True))
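
# Hedged follow-up sketch, not part of the original method: for count-based
# exploration the [K, batch] sign matrix is typically collapsed into one
# discrete hash code per state, e.g. by reading each column of K signs as bits.
# The method name is hypothetical.
def addHashCodes(self):
    bits = tf.transpose((self.simHash + 1.0) / 2.0)           # [batch, K] in {0, 1}
    powers = tf.constant([2.0 ** i for i in range(self.K)])   # assumes K is small enough for exact float sums
    self.hashCodes = tf.reduce_sum(bits * powers, axis=1)     # one scalar code per state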
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
        return x
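
# Hedged companion sketch (assumption, not from the original file): in the
# standard TFLearn GAN example this generator is paired with a discriminator
# built the same way, with reuse=True sharing weights across the real and fake
# branches.
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, 1, activation='sigmoid')
        return x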
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
x = tflearn.dropout(x, 0.5, name='dropout1')
x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
x = tflearn.dropout(x, 0.5, name='dropout2')
#x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8')
x = tflearn.fully_connected(x, 100, activation='softmax', scope='fc8', restore=False)
return x
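
# Hedged fine-tuning sketch, not from the original file: with restore=False on
# fc8, pretrained VGG16 weights can be loaded for every other layer while the
# new 100-way classifier is trained from scratch. The function name and
# checkpoint path below are hypothetical.
def finetune_vgg16(x, checkpoint_path='vgg16.tflearn'):
    net = tflearn.regression(x, optimizer='adam', learning_rate=0.0001,
                             loss='categorical_crossentropy', restore=False)
    model = tflearn.DNN(net)
    # weights_only=True skips optimizer and training-state variables in the checkpoint.
    model.load(checkpoint_path, weights_only=True)
    return model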
def create_discriminator_network(self):
    with tf.variable_scope(self.scope + '-gan_d'):
        inputs = tflearn.input_data(shape=[None, FEATURE_NUM])
        net = tflearn.fully_connected(inputs, 128, activation='relu')
        net = tflearn.fully_connected(net, 64, activation='relu')
        out = tflearn.fully_connected(net, 1, activation='linear')
        return inputs, out
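
# Hedged usage sketch, not from the original class: the linear discriminator
# output above would typically feed a sigmoid cross-entropy loss, with labels of
# 1.0 for real samples and 0.0 for generated ones. The method name is hypothetical.
def create_discriminator_loss(self, out):
    labels = tf.placeholder(tf.float32, [None, 1])
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=out, labels=labels))
    return labels, loss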
lambda: deconv_value_est_reward)
tf_aux_pc_nstep_rewards = tf_util.nstep_rewards_nd(reward_pixel_difference, estimated_reward,
                                                   self.aux_pc_q_discount, rewards_shape=[20, 20])
aux_pixel_loss_not_agg = tf_flatten(tf_aux_pc_nstep_rewards - deconv_q_s_a)
# not sure if original paper uses mse or mse * 0.5
# TODO: not sure if gradients are summed or meaned
aux_pixel_loss_weight_placeholder = tf.placeholder(tf.float32)
aux_pixel_loss = tf.reduce_sum(tf.reduce_mean(tf.square(aux_pixel_loss_not_agg), axis=1))
aux_pixel_summaries = tf.summary.merge([reward_pixel_diff_summary, deconv_value_summary,
                                        deconv_advantage_summary,
                                        tf.summary.scalar('aux-pixel-loss', aux_pixel_loss)])
with tf.name_scope('reward-prediction'):
    cnn_encoding = tf.reshape(cnn_encoding, (1, 3*32*9*9))
    rp_fc4 = tflearn.fully_connected(cnn_encoding, 128, activation='relu', scope='rp-fc4')
    reward_prediction = tflearn.fully_connected(rp_fc4, 3, activation='softmax', scope='reward-pred-output')
    # TODO: this is a hack because rewards are clipped to -1 and 1
    one_hot_reward_classes = tf.one_hot(tf.cast(x_rewards, tf.int32) + 1, 3, on_value=1.0, off_value=0.0, dtype=tf.float32)
    rp_loss = tf.reduce_sum(tflearn.categorical_crossentropy(reward_prediction, one_hot_reward_classes))
    reward_prediction_loss_summary = tf.summary.scalar('reward-prediction-loss', rp_loss)
# optimizer
with tf.name_scope('shared-optimizer'):
    tf_learning_rate = tf.placeholder(tf.float32)
    optimizer = self._optimizer_fn(learning_rate=tf_learning_rate)
    # only train the network vars
    with tf.name_scope('compute-clip-grads'):
        gradients = optimizer.compute_gradients(total_loss)
        clipped_grads_tensors = tf_util.global_norm_clip_grads_vars(gradients, self.global_norm_clipping)
        tf_train_step = optimizer.apply_gradients(clipped_grads_tensors)
        # summarizer.summarize_gradients(clipped_grads_tensors)
    # TODO: it's unknown whether we keep the same rmsprop vars for auxiliary tasks
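
    # Hedged sketch of what the project-specific helper
    # tf_util.global_norm_clip_grads_vars is assumed to do, expressed with the
    # standard TensorFlow op (not the actual helper):
    #
    #   grads, variables = zip(*gradients)
    #   clipped, _ = tf.clip_by_global_norm(grads, self.global_norm_clipping)
    #   clipped_grads_tensors = list(zip(clipped, variables))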
# BasicLSTMCell lists its state size as a tuple, so we need to pass a tuple into dynamic_rnn
lstm_state_size = tuple([[1, x] for x in l_lstm.state_size])
# has to be exactly the type tf.python.ops.rnn_cell.LSTMStateTuple
from tensorflow.python.ops.nn import rnn_cell as _rnn_cell
initial_lstm_state = _rnn_cell.LSTMStateTuple(
    tf.placeholder(tf.float32, shape=lstm_state_size[0], name='initial_lstm_state1'),
    tf.placeholder(tf.float32, shape=lstm_state_size[1], name='initial_lstm_state2'))
# dynamically get the sequence length
sequence_length = tf.reshape(tf.shape(l_hid3)[0], [1])
l_lstm4, new_lstm_state = tf.nn.dynamic_rnn(l_lstm, l_hid3_reshape,
                                            initial_state=initial_lstm_state,
                                            sequence_length=sequence_length,
                                            time_major=False, scope='lstm4')
# reshape the LSTM output back to (batch_size, 256)
l_lstm4_reshape = tf.reshape(l_lstm4, [-1, 256])
actor_out = tflearn.fully_connected(l_lstm4_reshape, output_num, activation='softmax', scope='actorout')
critic_out = tflearn.fully_connected(l_lstm4_reshape, 1, activation='linear', scope='criticout')
return actor_out, critic_out, initial_lstm_state, new_lstm_state
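
# Hedged runtime sketch, not part of the original function: at rollout time the
# caller would feed zeros for the initial LSTM state and carry the returned
# state forward between steps. sess, state_input, and obs are hypothetical
# names, and the size 256 assumes the cell width used in the reshape above.
#
#   lstm_state = (np.zeros((1, 256), dtype=np.float32),
#                 np.zeros((1, 256), dtype=np.float32))
#   action_probs, lstm_state = sess.run(
#       [actor_out, new_lstm_state],
#       feed_dict={state_input: obs, initial_lstm_state: lstm_state})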