def test_feed_dict_no_None(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4], name="X_in")
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)

        def do_fit():
            m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)

        self.assertRaisesRegexp(
            Exception,
            "Feed dict asks for variable named 'non_existent' but no such variable is known to exist",
            do_fit)
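        # Feed dict keys resolve against layer names, so a feed keyed only by
        # "X_in" (the name given to input_data above) would succeed here.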
def test_conv_layers(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2, activation='relu')
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
        # TODO: Fix test
        #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

    # Bulk Tests
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
        g = tflearn.max_pool_2d(g, 2)
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])
# Trim to a handful of samples so the run stays fast
X = X[:20, :, :, :]
Y = Y[:20, :]
testX = testX[:10, :, :, :]
testY = testY[:10, :]

# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 10, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')

# Training
model = tflearn.DNN(network, tensorboard_verbose=3)
model.fit({'input': X}, {'target': Y}, n_epoch=1,
          batch_size=10,
          validation_set=({'input': testX}, {'target': testY}),
          validation_batch_size=5,
          snapshot_step=10, show_metric=True, run_id='convnet_mnist_vbs')
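# A short follow-up sketch: tflearn.DNN also exposes evaluate() and predict(),
# which can sanity-check the one-epoch run above (batch size chosen arbitrarily).
score = model.evaluate(testX, testY, batch_size=5)
print("Test accuracy: %s" % score[0])
probs = model.predict(testX[:1])  # 10-way softmax output for the first test image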
def test_regression_placeholder(self):
    '''
    Check that regression does not duplicate placeholders
    '''
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 2])
        g_nand = tflearn.fully_connected(g, 1, activation='linear')
        with tf.name_scope("Y"):
            Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
        tflearn.regression(g_nand, optimizer='sgd',
                           placeholder=Y_in,
                           learning_rate=2.,
                           loss='binary_crossentropy',
                           op_name="regression1",
                           name="Y")
        # For this test, just use the same default trainable_vars;
        # in practice, these should be different for the two regressions.
        tflearn.regression(g_nand, optimizer='adam',
                           placeholder=Y_in,
                           learning_rate=2.,
                           loss='binary_crossentropy',
                           op_name="regression2",
                           name="Y")
        self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
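        # A single entry lands in tf.GraphKeys.TARGETS because both regression
        # ops reuse the same Y_in placeholder instead of creating their own.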
import os
import numpy as np
import tensorflow as tf
import tflearn

data_dir = 'path_to_test_data'
datanp = []  # images
truenp = []  # labels
for file in os.listdir(data_dir):
    data = np.load(os.path.join(data_dir, file))
    datanp.append(data[0][0])
    truenp.append(data[0][1])
datanp = np.asarray(datanp)  # a plain Python list has no .shape; stack into an array first
sh = datanp.shape

tf.reset_default_graph()
net = tflearn.input_data(shape=[None, sh[1], sh[2], sh[3], sh[4]])
net = tflearn.conv_3d(net, 16, 5, strides=2, activation='leaky_relu', padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net, 32, 3, strides=2, padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 1024, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6)
net = tflearn.fully_connected(net, 512, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 128, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.4, beta=0.4)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 3, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.3, beta=0.3)
net = tflearn.activations.softmax(net)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net, checkpoint_path='drive/model/model.tfl.ckpt',
                    max_checkpoints=3)  # model definition
ckpt = 'path_to_latest_checkpoint'
model.load(ckpt)  # load the latest checkpoint
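# A minimal sketch of what could follow the restore: resume training or predict.
# Assumes truenp holds one-hot labels matching the 3-way softmax head above;
# 'conv3d_resume' is an illustrative run name.
model.fit(datanp, truenp, n_epoch=5, show_metric=True, run_id='conv3d_resume')
probs = model.predict(datanp[:1])  # softmax probabilities for a single volume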
def __init__(self, s_date, n_frame):
    self.n_epoch = 20
    prev_bd = int(s_date[:6]) - 1
    prev_ed = int(s_date[9:15]) - 1
    if prev_bd % 100 == 0: prev_bd -= 98
    if prev_ed % 100 == 0: prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
    self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    input_layer = tflearn.input_data(shape=[None, 23 * n_frame], name='input')
    dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
    dense1n = tflearn.batch_normalization(dense1, name='BN1')
    dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
    dense2n = tflearn.batch_normalization(dense2, name='BN2')
    dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
    output = tflearn.single_unit(dense3)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)
        self.n_epoch = 10
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)
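    # A typical continuation for this class (hypothetical names, for illustration):
    #   self.estimators.fit(train_X, train_Y, n_epoch=self.n_epoch)
    #   self.estimators.save('%s/model.tfl' % self.model_dir)
    # i.e. warm-start from the prior period's weights, then fit and persist.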
net = tflearn.conv_2d(net, 256, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.conv_2d(net, 512, 3, activation='relu')
net = tflearn.max_pool_2d(net, 2)
net = tflearn.fully_connected(net, 4096, activation='relu')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 4096, activation='relu')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 17, activation='softmax')
net = tflearn.regression(net, optimizer='rmsprop',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
m = tflearn.DNN(net, checkpoint_path='models/vgg_net',
                max_checkpoints=1, tensorboard_verbose=3)
m.fit(X, Y, n_epoch=500, shuffle=True,
      show_metric=True, batch_size=32, snapshot_step=500,
      snapshot_epoch=False, run_id='vgg_net')
m.save('models/vgg_net.tfl')
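# The saved weights can later be restored into the same graph definition via
# the matching DNN handle, e.g. m.load('models/vgg_net.tfl').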
generator = MnistGenerator(one_hot=False)
t = time.time()
tf.reset_default_graph()
rnn = RNNWrapper(n_history=n_history, epoch=10, squeeze=True, use_sparse_labels=True)
rnn.fit(28, 10, generator, n_iter=28)
print("Time Cost: {}".format(time.time() - t))
if draw:
rnn.draw_err_logs()
print("=" * 60, "\n" + "Tflearn", "\n" + "-" * 60)
generator = MnistGenerator()
t = time.time()
tf.reset_default_graph()
net = tflearn.input_data(shape=[None, 28, 28])
net = tf.concat(tflearn.lstm(net, 128, return_seq=True)[-n_history:], axis=1)
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', batch_size=64,
                         loss='categorical_crossentropy', name="output1")
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(*generator.gen(0), n_epoch=10, validation_set=generator.gen(0, True), show_metric=True)
print("Time Cost: {}".format(time.time() - t))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
# Build training ops for both the Generator and the Discriminator.
# Each network's optimization should only update its own variables, so we
# retrieve each network's variables (with get_layer_variables_by_scope) and
# set 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
                               loss=gen_loss, trainable_vars=gen_vars,
                               batch_size=64, name='target_gen', op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
                                loss=disc_loss, trainable_vars=disc_vars,
                                batch_size=64, name='target_disc', op_name='DISC')
# Define the GAN model, which outputs the generated images.
gan = tflearn.DNN(gen_model)
# Training
# Generate noise to feed to the generator
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
# Start training, feed both noise and real images.
gan.fit(X_inputs={gen_input: z, disc_input: X},
        Y_targets=None,
        n_epoch=100)
# Generate images from noise, using the generator network.
f, a = plt.subplots(2, 10, figsize=(10, 4))
for i in range(10):
    for j in range(2):
        # Noise input.
        z = np.random.uniform(-1., 1., size=[1, z_dim])
        # Generate image from noise. Extend to 3 channels for matplot figure.
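        # A plausible continuation, assuming the generator emits flat 28*28
        # grayscale images as in the stock tflearn GAN example: replicate the
        # values into 3 channels and plot.
        temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])]
        a[j][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()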