How to use the tflearn.regression function in tflearn

To help you get started, we’ve selected a few tflearn.regression examples based on popular ways the function is used in public projects.

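Before the project examples, here is a minimal, self-contained sketch of the usual pattern: build a network with the layers API, attach a training setup with tflearn.regression, then wrap everything in tflearn.DNN. The toy data and hyperparameters are illustrative only, not taken from the projects below.

import tflearn

# Toy XOR-style dataset (illustrative values)
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]

# input -> hidden -> softmax output
net = tflearn.input_data(shape=[None, 2])
net = tflearn.fully_connected(net, 8, activation='tanh')
net = tflearn.fully_connected(net, 2, activation='softmax')

# regression() attaches the optimizer, loss and learning rate to the graph
net = tflearn.regression(net, optimizer='sgd',
                         loss='categorical_crossentropy',
                         learning_rate=0.5)

# DNN wraps the graph with a trainer; fit() runs the training loop
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=500, snapshot_epoch=False)
print(model.predict([[1., 0.]]))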

From tflearn/tflearn: tests/test_layers.py
    def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
From tflearn/tflearn: tests/test_layers.py
    def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
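
Design note: passing the same placeholder=Y_in to both regression calls is what keeps tf.GraphKeys.TARGETS at length 1. If the placeholder argument were omitted, each regression() call would create its own target placeholder, which is exactly the duplication this test guards against.
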
From fabriciojoc/malware-machinelearning: classification_year_gw.py
    elif classifier == "linearsvc":
        clf = LinearSVC()
    elif classifier == "knn":
        clf = KNeighborsClassifier(5)
    elif classifier == 'decisiontree':
        clf = DecisionTreeClassifier()
    elif classifier == 'randomforest':
        clf = RandomForestClassifier()
    elif classifier == 'mlp':
        # one-hot encode labels for the softmax output
        labels_train = hot_encondig(labels_train)
        # Building deep neural network
        net = tflearn.input_data(shape=[None, X_train.shape[1]])
        net = tflearn.fully_connected(net, X_train.shape[1] // 2, activation='relu')
        net = tflearn.fully_connected(net, X_train.shape[1] // 3, activation='relu')
        net = tflearn.fully_connected(net, 2, activation='softmax')
        net = tflearn.regression(net)
        # Training
        clf = tflearn.DNN(net, tensorboard_verbose=0)

    clf.fit(X_train, labels_train)

    fprs = []
    fnrs = []
    thresholds = []
    if THRESHOLD:
        if classifier == 'mlp':
            pred = clf.predict(X_test)
        else:
            pred = clf.predict_proba(X_test)
        for t in np.arange(0,1,0.0005):
            pred_t = prob_to_class_threshold(pred, t)
            print "Threshold:",t
From matthewswogger/Surge-Forecast-with-RNN: run_model.py
for i, _ in enumerate(X):
    time_chunk = X[i:i+STEPS_OF_HISTORY,:]
    if time_chunk.shape == (STEPS_OF_HISTORY, FEATURES):
        my_x[i,:,:] = time_chunk
    else:
        my_x[i,:,:] = np.ones((STEPS_OF_HISTORY,FEATURES))

trainX, testX = my_x[:SPLIT,:], my_x[SPLIT:,:]
trainY, testY = y[:SPLIT,:], y[SPLIT:,:]

# Build my neural net (note: this project imports tflearn as `tf`)
net = tf.input_data(shape=[None, STEPS_OF_HISTORY, FEATURES])
net = tf.lstm(net, n_units=128, activation='softsign', return_seq=False)
net = tf.fully_connected(net, FEATURES, activation='linear')
net = tf.regression(net, optimizer='sgd', loss='mean_square', learning_rate=0.3)

# Define model
model = tf.DNN(net, clip_gradients=0.0, tensorboard_verbose=0)

# Training
# EPOCHS = 10
# epochs_performed = 0
# for _ in xrange(20):
#     # Fit model
#     model.fit(trainX, trainY, n_epoch=EPOCHS, validation_set=0.1, batch_size=128)
#     # Save model
#     epochs_performed += 10
#     model.save("saved_model/{}_epoch_act_softsign_nunits_128.tfl".format(epochs_performed))

# Load a model
model.load("saved_model/150_epoch_act_softsign_nunits_128.tfl")
From kengz/openai_lab: nn/gen_shakespeare_lstm.py
def run():
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_shakespeare')

    for i in range(50):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='shakespeare')
        print("-- TESTING...")
        print("-- Test with temperature of 1.0 --")
        print(m.generate(600, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
From kengz/openai_lab: nn/gen_cityname_lstm.py
def run():
    # as in a CNN input, the third dimension acts like the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')

    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
From Sentdex/pygta5: models.py
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, output, activation='softmax')
    network = tflearn.regression(network, optimizer='adam', learning_rate=lr,
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
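
A hypothetical call, with frame dimensions and training-array names chosen purely for illustration:

# Hypothetical usage; width/height/lr and the training arrays are illustrative
model = sentnet_LSTM_gray(width=160, height=120, frame_count=1, lr=1e-3)
model.fit(train_X, train_Y, n_epoch=10, validation_set=0.1,
          show_metric=True, run_id='sentnet_lstm_gray')
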
From trailbehind/DeepOSM: src/single_layer_network.py
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9,
        lr_decay=0.0002, name='Momentum')

    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')

    return tflearn.DNN(net, tensorboard_verbose=0)
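
Unlike the earlier examples, which name the optimizer with a string, this one hands tflearn.regression a configured tflearn.optimizers.Momentum instance. Passing an optimizer object is the way to set options the string shorthand cannot express, such as the lr_decay used here.
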
From nimeshabuddhika/Tensorflow-Chatbot: Bot/ChatBot.py
        if self.instance is not None:
            raise ValueError("Did you forget to call the getBot function?")

        self.stemmer = LancasterStemmer()
        data = pickle.load(open(path.getPath('trained_data'), "rb"))
        self.words = data['words']
        self.classes = data['classes']
        train_x = data['train_x']
        train_y = data['train_y']
        with open(path.getJsonPath()) as json_data:
            self.intents = json.load(json_data)
        net = tflearn.input_data(shape=[None, len(train_x[0])])
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
        net = tflearn.regression(net)
        self.model = tflearn.DNN(net, tensorboard_dir=path.getPath('train_logs'))
        self.model.load(path.getPath('model.tflearn'))
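
Here tflearn.regression is called with no arguments beyond the network, so it falls back to its defaults (adam optimizer, categorical_crossentropy loss, learning rate 0.001). That is sufficient because the model is only loaded for inference, not retrained.
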
From tflearn/tflearn: examples/others/recommender_wide_and_deep.py
        trainable_vars = tf.trainable_variables()
        tv_deep = [v for v in trainable_vars if v.name.startswith('deep_')]
        tv_wide = [v for v in trainable_vars if v.name.startswith('wide_')]

        if self.verbose:
            print ("DEEP trainable_vars")
            for v in tv_deep:
                print ("  Variable %s: %s" % (v.name, v))
            print ("WIDE trainable_vars")
            for v in tv_wide:
                print ("  Variable %s: %s" % (v.name, v))

        if 'wide' in self.model_type:
            if not 'deep' in self.model_type:
                tv_wide.append(central_bias)
            tflearn.regression(wide_network_with_bias, 
                               placeholder=Y_in,
                               optimizer='sgd', 
                               #loss='roc_auc_score',
                               loss='binary_crossentropy',
                               metric="accuracy",
                               learning_rate=learning_rate[0],
                               validation_monitors=vmset,
                               trainable_vars=tv_wide,
                               op_name="wide_regression",
                               name="Y")

        if 'deep' in self.model_type:
            if not 'wide' in self.model_type:
                tv_deep.append(central_bias)
            tflearn.regression(deep_network_with_bias, 
                               placeholder=Y_in,