How to use the tflearn.dropout function in tflearn

To help you get started, we've selected a few tflearn.dropout examples based on popular ways the function is used in public projects.
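
Before diving into the project examples, here is a minimal, self-contained sketch of basic usage. The second argument to tflearn.dropout is a keep probability (the fraction of activations retained during training), not a drop rate; the network around it is illustrative.

import tflearn

# A tiny feed-forward classifier: dropout(incoming, keep_prob) keeps each
# activation with probability keep_prob, and only while training.
net = tflearn.input_data(shape=[None, 784])
net = tflearn.fully_connected(net, 64, activation='relu')
net = tflearn.dropout(net, 0.5)  # keep 50% of activations
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)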

github RishalAggarwal / 3D-Convnet-for-Alzheimer-s-Detection / 3D Convolutional Network for Alzheimer's Detection / test cnn.py
import tensorflow as tf
import tflearn

sh = datanp.shape  # datanp: the 5-D scan array loaded earlier in the script

tf.reset_default_graph()

net = tflearn.input_data(shape=[None, sh[1], sh[2], sh[3], sh[4]])
net = tflearn.conv_3d(net, 16, 5, strides=2, activation='leaky_relu', padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.max_pool_3d(net, kernel_size=3, strides=2, padding='VALID')
net = tflearn.conv_3d(net, 32, 3, strides=2, padding='VALID',
                      weights_init='xavier', regularizer='L2', weight_decay=0.01)
net = tflearn.normalization.batch_normalization(net)
net = tflearn.activations.leaky_relu(net)
net = tflearn.max_pool_3d(net, kernel_size=2, strides=2, padding='VALID')
net = tflearn.dropout(net, 0.5)  # keep_prob=0.5: keep half the activations while training
net = tflearn.fully_connected(net, 1024, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.1, beta=0.1)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.6)
net = tflearn.fully_connected(net, 512, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.2, beta=0.2)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 128, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.4, beta=0.4)
net = tflearn.activations.leaky_relu(net)
net = tflearn.dropout(net, 0.7)
net = tflearn.fully_connected(net, 3, weights_init='xavier', regularizer='L2')
net = tflearn.normalization.batch_normalization(net, gamma=1.3, beta=0.3)
net = tflearn.activations.softmax(net)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001, loss='categorical_crossentropy')

# Model definition
model = tflearn.DNN(net, checkpoint_path='drive/model/model.tfl.ckpt', max_checkpoints=3)

# Load the latest checkpoint
ckpt = 'path_to_latest_checkpoint'
model.load(ckpt)
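
tflearn applies dropout only while its training mode is on, so inference on the restored model runs with all activations kept and no layers need to be removed. A minimal sketch, assuming a hypothetical held-out array test_scans with the same shape as the training data:

preds = model.predict(test_scans)  # dropout layers are bypassed at inference time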
github kengz / openai_lab / nn / gen_cityname_lstm.py
def run():
    # as in a CNN, the third dimension acts like the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
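
This excerpt computes a fresh seed every epoch but cuts off before using it. With tflearn's SequenceGenerator, generation from the seed looks roughly like the line below (the sequence length and temperature are illustrative choices, not from the original script):

        print(m.generate(30, temperature=1.0, seq_seed=seed))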
github tflearn / tflearn / examples / nlp / lstm_generator_textfile.py
maxlen = args['length']
model_name = path.split('.')[0]  # create model name from textfile input

if not os.path.isfile(path):
    print("Couldn't find the text file. Are you sure the you passed is correct?")

X, Y, char_idx = \
    textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3)

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 512)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                       learning_rate=0.001)

m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_'+ model_name)

for i in range(50):
    seed = random_sequence_from_textfile(path, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=1, run_id=model_name)
    print("-- TESTING...")
    if args['temp'] is not None:
        temp = args['temp'][0]
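
The excerpt ends mid-branch. A plausible continuation, assuming temp is then passed to the generator together with the seed (a sketch, not the original file's exact code):

        print(m.generate(600, temperature=temp, seq_seed=seed))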
github Naresh1318 / DiagnosisPredictor / Predictor_word2vec / load_dense_fully_connected_1.py
import numpy as np
import tflearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tflearn.data_utils import to_categorical

# X, y and the loop variable d come from the enclosing script
y = to_categorical(y, nb_classes=2)  # convert labels to one-hot for tflearn

# Train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)

# Standardize the data
sc = StandardScaler()
sc.fit(X_train)
X_test_sd = sc.transform(X_test)

# Model
input_layer = tflearn.input_data(shape=[None, 100], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
dropout1 = tflearn.dropout(dense1, 0.8)  # keep 80% of activations
dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
dropout2 = tflearn.dropout(dense2, 0.8)
output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
                                learning_rate=0.001)

# Define model with checkpoint (autosave)
model = tflearn.DNN(regression, tensorboard_verbose=3)

# Load the previously trained model
model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))

# Find the probability of outputs
y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
# Find the predicted class
y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
# The true class is the 2nd column of Y_test
Y_test_dia = Y_test[:, 1]
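
With true labels and predictions in hand, one plausible way to score the model is via scikit-learn (an assumption on our part; the original script computes its own metrics):

from sklearn.metrics import accuracy_score, roc_auc_score

print('Accuracy: %.3f' % accuracy_score(Y_test_dia, y_pred))
print('ROC AUC:  %.3f' % roc_auc_score(Y_test_dia, y_pred_prob))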
github pannous / tensorflow-speech-recognition / speaker_classifier_tflearn.py
quit()  # why? works on Mac? (the excerpt starts mid-script; this guard's condition is cut off)

speakers = data.get_speakers()
number_classes = len(speakers)
print("speakers", speakers)

batch = data.wave_batch_generator(batch_size=1000, source=data.Source.DIGIT_WAVES, target=data.Target.speaker)
X, Y = next(batch)


# Classification
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

net = tflearn.input_data(shape=[None, 8192])  # two wave chunks
net = tflearn.fully_connected(net, 64)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, number_classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

# demo_file = "8_Vicki_260.wav"
demo_file = "8_Bruce_260.wav"
demo = data.load_wav_file(data.path + demo_file)
result = model.predict([demo])
result = data.one_hot_to_item(result, speakers)
print("predicted speaker for %s : result = %s" % (demo_file, result))  # ~ 97% correct
github kengz / openai_lab / nn / gen_shakespeare_lstm.py
def run():
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_shakespeare')

    for i in range(50):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='shakespeare')
github tflearn / tflearn / examples / basics / weights_loading_scope.py
def make_core_network(network):
    dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense1")
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense2")
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
    return softmax
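
In the surrounding example this core network is reused under different variable scopes; a hedged sketch of wiring it into a trainable model (the 784-dim input matches the MNIST-style softmax head above and is our assumption):

network = tflearn.input_data(shape=[None, 784], name="input")
network = make_core_network(network)
network = tflearn.regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network)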
github tflearn / tflearn / tflearn / layers / conv.py
        # 1x1 Transition Conv
        if batch_norm:
            densenet = tflearn.batch_normalization(densenet)
        densenet = tflearn.activation(densenet, activation)
        densenet = conv_2d(densenet, nb_filter=growth,
                           filter_size=1,
                           bias=bias,
                           weights_init=weights_init,
                           bias_init=bias_init,
                           regularizer=regularizer,
                           weight_decay=weight_decay,
                           trainable=trainable,
                           restore=restore)
        if dropout:
            densenet = tflearn.dropout(densenet, keep_prob=dropout_keep_prob)

        # Downsampling
        if downsample:
            densenet = tflearn.avg_pool_2d(densenet, kernel_size=2,
                                           strides=downsample_strides)

    return densenet
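
Note the keyword form tflearn.dropout(densenet, keep_prob=dropout_keep_prob) used above: keep_prob is the same (second) parameter that the other examples pass positionally, so these two calls are equivalent:

net = tflearn.dropout(net, 0.8)
net = tflearn.dropout(net, keep_prob=0.8)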
github kengz / openai_lab / nn / extending_tensorflow_layers_cnn.py
# for "encourage some kind of inhibition and boost the neurons with
    # relatively larger activations"
    net = tflearn.local_response_normalization(net)
    # The dropout method is introduced to prevent overfitting. At each training stage, individual nodes are either "dropped out" of the net with probability {\displaystyle 1-p} 1-p or kept with probability {\displaystyle p} p, so that a reduced network is left
    # keep_prob=0.8
    net = tflearn.dropout(net, 0.8)

    # 64 filters
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.local_response_normalization(net)
    net = tflearn.dropout(net, 0.8)

    # FC
    net = tflearn.fully_connected(net, 128, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 256, activation='tanh')
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 10, activation='softmax')

    # --------------------------------------
    # really manual tf way
    # # Defining other ops using Tensorflow
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(net, Y))
    # optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # optimizer_minop = optimizer.minimize(loss)

    # # start
    # init = tf.initialize_all_variables()

    # with tf.Session() as sess:
    #     sess.run(init)
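
The commented-out block above sketches the fully manual TensorFlow training route and is cut off in this excerpt. For contrast, a hedged sketch of the idiomatic tflearn equivalent (assuming training arrays X and Y from the surrounding script):

    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=10, show_metric=True)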
github eleurent / make-lstm-great-again / lstm.py
def build_model(maxlen, char_idx, checkpoint_path):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)

    return tflearn.SequenceGenerator(g, dictionary=char_idx,
                                     seq_maxlen=maxlen,
                                     clip_gradients=5.0,
                                     checkpoint_path=checkpoint_path)
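
A hedged usage sketch for build_model (the checkpoint path, run id, and generation settings are illustrative; X, Y, path, maxlen, and char_idx are assumed to be prepared as in the text-file examples above):

m = build_model(maxlen, char_idx, 'models/model_lstm')
for i in range(50):
    seed = random_sequence_from_textfile(path, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=1, run_id='lstm_demo')
    print(m.generate(600, temperature=1.0, seq_seed=seed))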