How to use keras - 10 common examples

To help you get started, we’ve selected a few keras examples based on popular ways the library is used in public projects.
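
Before the project snippets, here is a minimal, self-contained sketch of the most common Keras workflow (build a model, compile it, fit it, predict with it). The layer sizes and the random data are illustrative assumptions, not taken from any of the projects below.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Toy data: 32 samples with 10 features each, binary labels (illustrative only).
x_train = np.random.random((32, 10))
y_train = np.random.randint(2, size=(32, 1))

model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(10,)))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=8, epochs=2, verbose=0)
predictions = model.predict(x_train)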



Example 1: brjathu / deepcaps / train.py (view on GitHub)
    tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs', batch_size=args.batch_size, histogram_freq=int(args.debug), write_grads=False)
    checkpoint1 = CustomModelCheckpoint(model, args.save_dir + '/best_weights_1' + appendix + '.h5', monitor='val_capsnet_acc',
                                        save_best_only=False, save_weights_only=True, verbose=1)

    checkpoint2 = CustomModelCheckpoint(model, args.save_dir + '/best_weights_2' + appendix + '.h5', monitor='val_capsnet_acc',
                                        save_best_only=True, save_weights_only=True, verbose=1)

    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * 0.5**(epoch // 10))

    if(args.numGPU > 1):
        parallel_model = multi_gpu_model(model, gpus=args.numGPU)
    else:
        parallel_model = model

    if(not hard_training):
        parallel_model.compile(optimizer=optimizers.Adam(lr=args.lr), loss=[margin_loss, 'mse'], loss_weights=[1, 0.4], metrics={'capsnet': "accuracy"})
    else:
        parallel_model.compile(optimizer=optimizers.Adam(lr=args.lr), loss=[margin_loss_hard, 'mse'], loss_weights=[1, 0.4], metrics={'capsnet': "accuracy"})

    # Begin: Training with data augmentation
    def train_generator(x, y, batch_size, shift_fraction=args.shift_fraction):
        train_datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False,
                                           samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0.1,
                                           width_shift_range=0.1, height_shift_range=0.1, shear_range=0.0,
                                           zoom_range=0.1, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=True,
                                           vertical_flip=False, rescale=None, preprocessing_function=None,
                                           data_format=None)  # shift up to 2 pixels for MNIST
        train_datagen.fit(x)
        generator = train_datagen.flow(x, y, batch_size=batch_size, shuffle=True)
        while True:
            x_batch, y_batch = next(generator)  # Python 3: next() instead of generator.next()
            yield ([x_batch, y_batch], [y_batch, x_batch])
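
The generator above yields ([images, labels], [labels, images]) pairs for the two-input, two-output CapsNet. A hedged sketch of how the script would typically consume it, reusing the callbacks and model defined above (x_train and y_train are assumptions standing in for the repo's data loading, which the snippet does not show):

# x_train / y_train are placeholders for the repo's data loading.
parallel_model.fit_generator(
    generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
    steps_per_epoch=int(y_train.shape[0] / args.batch_size),
    epochs=args.epochs,
    callbacks=[tb, checkpoint1, checkpoint2, lr_decay])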

Example 2: Arkidillo / generic-incremental-classifier / generic-incremental-classifier / src / svm_classifier / train-classifier.py (view on GitHub)
	labels_list = labels.tolist()

	# Create ImageDataGenerator
	aug = ImageDataGenerator(rotation_range=rotation_range, width_shift_range=width_shift_range,
							 height_shift_range=height_shift_range, shear_range=shear_range, zoom_range=zoom_range,
							 horizontal_flip=horizontal_flip, fill_mode=fill_mode)

	# Setup or Load model
	try:
		net = load_model("model.net")
	except Exception:  # no saved model on disk yet; build one from scratch
		net = Net.build(width=scale_size[0], height=scale_size[1], depth=3, classes=2)
	
	opt = Adam(lr=init_lr, decay=init_lr / epochs)
	net.compile(loss="binary_crossentropy", optimizer=opt)

	# Train
	# print("Training a Linear SVM Classifier")
	net.fit_generator(aug.flow(x=np.asarray(ims_list), y=labels, batch_size=bs), steps_per_epoch=len(ims) // bs, epochs=epochs)
	# net.fit(x=np.asarray(ims_list), y=labels,epochs=epochs)

	# If feature directories don't exist, create them
	if not os.path.isdir(model_path):
		os.makedirs(model_path)


	# im = cv2.imread("mac.jpeg")
	# im = cv2.resize(im, scale_size)
	# im = img_to_array(im)
	# im = np.expand_dims(im, axis=0)
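
The try/except near the top falls back to building a fresh network when no saved model exists, so for load_model("model.net") to succeed on the next run, the trained network has to be written back to disk. A minimal sketch of that step (the assumption here is that the script saves after training; only the file name is taken from the snippet):

# Persist the trained network so the next run's load_model("model.net") finds it.
net.save("model.net")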

Example 3: tatsy / keras-generative / models / cvae.py (view on GitHub)
    def build_decoder(self):
        z_inputs = Input(shape=(self.z_dims,))
        c_inputs = Input(shape=(self.num_attrs,))
        z = Concatenate()([z_inputs, c_inputs])

        w = self.input_shape[0] // (2 ** 3)
        x = Dense(w * w * 256)(z)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        x = Reshape((w, w, 256))(x)

        x = BasicDeconvLayer(filters=256, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=128, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=64, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=3, strides=(1, 1), bnorm=False, activation='tanh')(x)

        return Model([z_inputs, c_inputs], x)
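
Since the decoder is a two-input Model, generating an image means feeding a latent vector and an attribute vector together. A hedged usage sketch (z_dims and num_attrs are assumed to match the values the decoder was built with; the random inputs are illustrative):

import numpy as np

# Assumes `decoder` is the Model returned by build_decoder() above.
z = np.random.normal(size=(1, z_dims))   # latent sample
c = np.zeros((1, num_attrs))             # conditioning attributes (one-hot here)
c[0, 0] = 1.0
image = decoder.predict([z, c])          # tanh output in [-1, 1]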

Example 4: titu1994 / neural-image-assessment / utils / nasnet.py (view on GitHub)
                p1 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_1_%s' % id, kernel_initializer='he_normal')(p1)

                p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % id)(p2)
                p2 = Conv2D(filters // 2, (1, 1), padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                            name='adjust_conv_2_%s' % id, kernel_initializer='he_normal')(p2)

                p = concatenate([p1, p2], axis=channel_dim)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)

        elif p._keras_shape[channel_dim] != filters:
            with K.name_scope('adjust_projection_block_%s' % id):
                p = Activation('relu')(p)
                p = Conv2D(filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % id,
                           use_bias=False, kernel_regularizer=l2(weight_decay), kernel_initializer='he_normal')(p)
                p = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                                       name='adjust_bn_%s' % id)(p)
    return p
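
The pad/crop/pool sequence in the p2 branch is NASNet's factorized-reduction trick: a (1, 1) average pool with stride 2 just subsamples the grid, and the one-pixel pad-then-crop shifts that grid so the two branches sample complementary pixels before they are concatenated. A small standalone sketch of the shape behaviour (the 8x8x16 input is an illustrative assumption):

from keras.layers import Input, ZeroPadding2D, Cropping2D, AveragePooling2D

p = Input(shape=(8, 8, 16))
p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)   # pad bottom/right: 9x9
p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)    # crop top/left: 8x8 again, shifted one pixel
p2 = AveragePooling2D((1, 1), strides=(2, 2), padding='valid')(p2)
# p2 now has spatial shape 4x4: halved, sampled one pixel off the p1 grid.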

Example 5: OlafenwaMoses / ImageAI / imageai / Detection / __init__.py (view on GitHub)
                self.__yolo_model_image_size = (416, 416)
            elif (detection_speed == "fastest"):
                self.__yolo_model_image_size = (320, 320)
            elif (detection_speed == "flash"):
                self.__yolo_model_image_size = (272, 272)

        if (self.__modelLoaded == False):
            if (self.__modelType == ""):
                raise ValueError("You must set a valid model type before loading the model.")
            elif (self.__modelType == "retinanet"):
                model = resnet50_retinanet(num_classes=80)
                model.load_weights(self.modelPath)
                self.__model_collection.append(model)
                self.__modelLoaded = True
            elif (self.__modelType == "yolov3"):
                model = yolo_main(Input(shape=(None, None, 3)), len(self.__yolo_anchors) // 3,
                                  len(self.numbers_to_names))
                model.load_weights(self.modelPath)

                hsv_tuples = [(x / len(self.numbers_to_names), 1., 1.)
                              for x in range(len(self.numbers_to_names))]
                self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
                self.colors = list(
                    map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                        self.colors))
                np.random.seed(10101)
                np.random.shuffle(self.colors)
                np.random.seed(None)

                self.__yolo_input_image_shape = K.placeholder(shape=(2,))
                self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes = yolo_eval(model.output,
                                                                                       self.__yolo_anchors,
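
This fragment is internal to ImageAI's loadModel(); as a library user you reach it through the public detector API instead. A hedged sketch of that call path (the file names are placeholders; detection_speed selects one of the input sizes seen above):

from imageai.Detection import ObjectDetection

detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolo.h5")             # placeholder weights file
detector.loadModel(detection_speed="fast")   # picks the YOLO input size above
detections = detector.detectObjectsFromImage(input_image="in.jpg",
                                             output_image_path="out.jpg")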

Example 6: SunQinghui / T-CONV / T-CONV / src / Models / conn-local.py (view on GitHub)
        e_taxi_id = Embedding(448, 10, embeddings_initializer='glorot_uniform')(input_5)

        mlp_input0 = concatenate([flatten, Flatten()(e_week_of_year)])
        mlp_input1 = concatenate([mlp_input0, Flatten()(e_day_of_week)])
        mlp_input2 = concatenate([mlp_input1, Flatten()(e_qhour_of_day)])
        mlp_input = concatenate([mlp_input2, Flatten()(e_taxi_id)])

        # mlp_input = Dropout(0.2)(mlp_input)
        hidden_layer = Dense(500, activation='relu', kernel_initializer='glorot_uniform')(mlp_input)

        #hidden_layer = Dropout(0.1)(hidden_layer)

        output_layer = Dense(config.tgtcls.shape[0], activation='softmax', kernel_initializer='glorot_uniform')(
            hidden_layer)

        output_1 = Lambda(dot, name='output_1')(output_layer)
        # model=Model(inputs=[inputs,inputs_e_week_of_year,inputs_e_day_of_week,inputs_e_qhour_of_day,inputs_e_taxi_id], outputs=output)
        model = Model(inputs=[input_1, input_2, input_3, input_4, input_5], outputs=output_1)
        model.compile(loss=my_loss_train, optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
        result1 = model.predict([snapshot_train, week_of_year, day_of_week, qhour_of_day, taxi_id])
        train_loss = my_loss(result1, train_dest)
        print("train reault is %s" % train_loss)
        model.fit_generator(
            train_data_generator(taxi_id, week_of_year, day_of_week, qhour_of_day, snapshot_train, train_dest,
                                 batch_size), steps_per_epoch=(train_dest.shape[0] // batch_size), epochs=3,
            validation_data=(
            [snapshot_valid, week_of_year_valid, day_of_week_valid, qhour_of_day_valid, taxi_id__valid], [valid_dest]))
        result = model.predict(
            [snapshot_valid, week_of_year_valid, day_of_week_valid, qhour_of_day_valid, taxi_id__valid])
        loss = my_loss(result, valid_dest)
        print("result is %s" % loss)
        if (math.isnan(loss)):

Example 7: deepfakes / faceswap / lib / model / nn_blocks.py (view on GitHub)
def self_attn_block(inp, n_c, squeeze_factor=8):
    """ GAN Self Attention Block
    Code borrows from https://github.com/taki0112/Self-Attention-GAN-Tensorflow
    """
    msg = "Input channels must be >= {}, recieved nc={}".format(squeeze_factor, n_c)
    assert n_c // squeeze_factor > 0, msg
    var_x = inp
    shape_x = var_x.get_shape().as_list()

    var_f = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_g = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_h = Conv2D(n_c, 1, kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)

    shape_f = var_f.get_shape().as_list()
    shape_g = var_g.get_shape().as_list()
    shape_h = var_h.get_shape().as_list()
    flat_f = Reshape((-1, shape_f[-1]))(var_f)
    flat_g = Reshape((-1, shape_g[-1]))(var_g)
    flat_h = Reshape((-1, shape_h[-1]))(var_h)

    var_s = Lambda(lambda var_x: K.batch_dot(var_x[0],
                                             Permute((2, 1))(var_x[1])))([flat_g, flat_f])

    beta = Softmax(axis=-1)(var_s)
    var_o = Lambda(lambda var_x: K.batch_dot(var_x[0], var_x[1]))([beta, flat_h])
    var_o = Reshape(shape_x[1:])(var_o)
    var_o = Scale()(var_o)
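
The snippet is cut off before the function's return statement, but a block like this is meant to be dropped into a functional-API graph wherever self-attention over a feature map is wanted. A hedged usage sketch (the 64-channel feature map is an illustrative assumption; running it requires the repo's own imports and Scale layer):

from keras.layers import Input, Conv2D

inp = Input(shape=(32, 32, 3))
feat = Conv2D(64, 3, padding='same')(inp)   # illustrative feature map, n_c = 64
feat = self_attn_block(feat, n_c=64)        # attention over the 64-channel map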

Example 8: farizrahman4u / recurrentshop / tests / test_recurrent_model.py (view on GitHub)
def test_model():
    x = Input((5,))
    h_tm1 = Input((10,))
    h = add([Dense(10)(x), Dense(10, use_bias=False)(h_tm1)])
    h = Activation('tanh')(h)
    a = Input((7, 5))

    rnn = RecurrentModel(input=x, output=h, initial_states=h_tm1, final_states=h)
    b = rnn(a)
    model = Model(a, b)

    model.compile(loss='mse', optimizer='sgd')
    model.fit(np.random.random((32, 7, 5)), np.random.random((32, 10)))
    model.predict(np.zeros((32, 7, 5)))

Example 9: keras-team / keras / tests / keras / test_sequential_model.py (view on GitHub)
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
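
This test exercises the Keras 1 API: the Merge layer and the nb_epoch argument were both removed in Keras 2. A hedged sketch of the same overlap expressed with the Keras 2 functional API (input_dim, nb_hidden, and nb_class are defined elsewhere in the test file):

from keras.layers import Input, Dense, Activation, add
from keras.models import Model

inputs = Input(shape=(input_dim,))
left = Activation('relu')(Dense(nb_hidden)(inputs))
merged = add([left, left])                  # the same branch summed with itself
outputs = Activation('softmax')(Dense(nb_class)(merged))
model = Model(inputs, outputs)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')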

Example 10: onnx / onnxmltools / tests / end2end / test_single_operator_with_cntk_backend.py (view on GitHub)
        N, C, D = 2, 3, 3
        x = create_tensor(N, C)

        sub_input1 = Input(shape=(C,))
        sub_mapped1 = Dense(D)(sub_input1)
        sub_output1 = Activation('sigmoid')(sub_mapped1)
        sub_model1 = Model(inputs=sub_input1, outputs=sub_output1)

        sub_input2 = Input(shape=(C,))
        sub_mapped2 = sub_model1(sub_input2)
        sub_output2 = Activation('tanh')(sub_mapped2)
        sub_model2 = Model(inputs=sub_input2, outputs=sub_output2)

        input1 = Input(shape=(D,))
        input2 = Input(shape=(D,))
        mapped1_1 = Activation('tanh')(input1)
        mapped2_1 = Activation('sigmoid')(input2)
        mapped1_2 = sub_model1(mapped1_1)
        mapped1_3 = sub_model1(mapped1_2)
        mapped2_2 = sub_model2(mapped2_1)
        sub_sum = Add()([mapped1_3, mapped2_2])
        model = Model(inputs=[input1, input2], outputs=sub_sum)
        # coremltools can't convert this kind of model.
        self._test_one_to_one_operator_keras(model, [x, 2 * x])