if train_epochs:
  train_t_begin = time.time()
  model.fit(xs, ys, batch_size=batch_size, epochs=train_epochs)
  train_t_end = time.time()

# Perform predict() burn-in.
for _ in range(_PREDICT_BURNINS):
  model.predict(xs)
# Time predict() by averaging.
predict_t_begin = time.time()
for _ in range(_PREDICT_RUNS):
  model.predict(xs)
predict_t_end = time.time()

# Save the model and weights.
tfjs.converters.save_keras_model(model, artifacts_dir)

# Save data about the model and benchmark results.
if train_epochs:
  train_time = (train_t_end - train_t_begin) / train_epochs
else:
  train_time = None
predict_time = (predict_t_end - predict_t_begin) / _PREDICT_RUNS
data = {
    'name': model_name,
    'description': description,
    'optimizer': optimizer.__class__.__name__,
    'loss': loss,
    'input_shape': input_shape,
    'target_shape': target_shape,
    'batch_size': batch_size,
    'train_epochs': train_epochs,
    'train_time': train_time,
    'predict_time': predict_time,
}
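The snippet above computes train_time and predict_time but stops before persisting the record. A minimal sketch of writing the collected data dict next to the saved artifacts; the save_benchmark_data helper and the data.json filename are assumptions for illustration, not part of the original benchmark script:

import json
import os

def save_benchmark_data(data, artifacts_dir):
  # Hypothetical helper: persist the benchmark record alongside the
  # converted model artifacts. The 'data.json' name is illustrative.
  with open(os.path.join(artifacts_dir, 'data.json'), 'wt') as f:
    json.dump(data, f)

save_benchmark_data(data, artifacts_dir)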
def testSaveKerasModel(self):
  with self.test_session():
    # First create a toy keras model.
    model = _createKerasModel('MergedDense')
    tfjs.converters.save_keras_model(model, self._tmp_dir)

    # Briefly check the model topology.
    with open(os.path.join(self._tmp_dir, 'model.json')) as f:
      json_content = json.load(f)
    model_json = json_content['modelTopology']
    self.assertIsInstance(model_json['model_config'], dict)
    self.assertIsInstance(model_json['model_config']['config'], dict)
    self.assertIn('layers', model_json['model_config']['config'])

    weights_manifest = json_content['weightsManifest']
    self.assertIsInstance(weights_manifest, list)

    # Briefly check the weights manifest.
    weight_shapes = dict()
    weight_dtypes = dict()
    for manifest_item in weights_manifest:
      # Collect the shape and dtype of every weight listed in the manifest.
      for weight in manifest_item['weights']:
        weight_shapes[weight['name']] = weight['shape']
        weight_dtypes[weight['name']] = weight['dtype']
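For orientation, each entry of weightsManifest pairs a list of binary shard paths with per-weight metadata, which is what the loop above collects. The layer names and shapes below are illustrative, not taken from _createKerasModel:

example_manifest_entry = {
    'paths': ['group1-shard1of1'],
    'weights': [
        {'name': 'Dense1/kernel', 'shape': [4, 3], 'dtype': 'float32'},
        {'name': 'Dense1/bias', 'shape': [3], 'dtype': 'float32'},
    ],
}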
def testLoadKerasModel(self):
  # Use separate tf.Graph and tf.compat.v1.Session contexts to prevent
  # name collision.
  with tf.Graph().as_default(), tf.compat.v1.Session():
    # First create a toy keras model.
    model1 = _createKerasModel('MergedDense')
    tfjs.converters.save_keras_model(model1, self._tmp_dir)
    model1_weight_values = model1.get_weights()

  with tf.Graph().as_default(), tf.compat.v1.Session():
    # Load the model from saved artifacts.
    model2 = tfjs.converters.load_keras_model(
        os.path.join(self._tmp_dir, 'model.json'))

    # Compare the loaded model with the original one.
    model2_weight_values = model2.get_weights()
    self.assertEqual(len(model1_weight_values), len(model2_weight_values))
    for model1_weight_value, model2_weight_value in zip(
        model1_weight_values, model2_weight_values):
      self.assertAllClose(model1_weight_value, model2_weight_value)

  # Check the content of the output directory.
  self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*')))
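The two tests above exercise the save/load round trip inside a test harness. A self-contained sketch of the same round trip in plain Python; the toy architecture and the temporary directory are arbitrary choices for illustration:

import os
import tempfile

import numpy as np
import tensorflow as tf
import tensorflowjs as tfjs

# Build a small throwaway model; the architecture is arbitrary.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(2, activation='softmax'),
])

artifacts_dir = tempfile.mkdtemp()
tfjs.converters.save_keras_model(model, artifacts_dir)

# Load the artifacts back and confirm the weights survived the round trip.
reloaded = tfjs.converters.load_keras_model(
    os.path.join(artifacts_dir, 'model.json'))
for w1, w2 in zip(model.get_weights(), reloaded.get_weights()):
  np.testing.assert_allclose(w1, w2)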
dense2 = keras.layers.Dense(
    3, use_bias=True, name='Dense2', activation='softmax')(dense1)
# pylint:disable=redefined-variable-type
model = keras.models.Model(inputs=[iris_x], outputs=[dense2])
# pylint:enable=redefined-variable-type
model.compile(loss='categorical_crossentropy', optimizer='adam')

model.fit(data_x, data_y, batch_size=8, epochs=epochs)

# Run prediction on the training set.
pred_ys = np.argmax(model.predict(data_x), axis=1)
true_ys = np.argmax(data_y, axis=1)
final_train_accuracy = np.mean((pred_ys == true_ys).astype(np.float32))
print('Accuracy on the training set: %g' % final_train_accuracy)

tfjs.converters.save_keras_model(model, artifacts_dir)
return final_train_accuracy
(num_encoder_tokens, num_decoder_tokens,
 __, target_token_index,
 encoder_input_data, decoder_input_data, decoder_target_data) = read_data()
(encoder_inputs, encoder_states, decoder_inputs, decoder_lstm,
 decoder_dense, model) = seq2seq_model(
     num_encoder_tokens, num_decoder_tokens, FLAGS.latent_dim)

# Run training.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=FLAGS.batch_size,
          epochs=FLAGS.epochs,
          validation_split=0.2)
tfjs.converters.save_keras_model(model, FLAGS.artifacts_dir)

# Next: inference mode (sampling).
# Here's the drill:
# 1) Encode the input and retrieve the initial decoder state.
# 2) Run one step of the decoder with this initial state and a
#    "start of sequence" token as the target; the output is the next
#    target token.
# 3) Repeat with the current target token and the current states.

# Define the sampling models.
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(FLAGS.latent_dim,))
decoder_state_input_c = Input(shape=(FLAGS.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
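The snippet stops midway through wiring the decoder's sampling model. Following the standard Keras seq2seq sampling recipe, the remaining wiring would look roughly like this; a sketch assuming the decoder_dense layer and the inputs defined above:

decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)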
metadata = {
    'index_from': INDEX_FROM,
    'max_len': FLAGS.max_len,
    'model_type': FLAGS.model_type,
    'vocabulary_size': FLAGS.vocabulary_size,
    'embedding_size': FLAGS.embedding_size,
    'epochs': FLAGS.epochs,
    'batch_size': FLAGS.batch_size,
}

if not os.path.isdir(FLAGS.artifacts_dir):
  os.makedirs(FLAGS.artifacts_dir)
metadata_json_path = os.path.join(FLAGS.artifacts_dir, 'imdb.metadata.json')
with open(metadata_json_path, 'wt') as f:
  json.dump(metadata, f)
print('\nSaved model metadata at: %s' % metadata_json_path)

tfjs.converters.save_keras_model(model, FLAGS.artifacts_dir)
print('\nSaved model artifacts in directory: %s' % FLAGS.artifacts_dir)
def _keras_2_tfjs(h5_model_path, path_to_save):
  """Converts a Keras h5 model into a tf.js model and saves it on disk."""
  model = keras.models.load_model(h5_model_path)
  tfjs.converters.save_keras_model(model, path_to_save, np.uint16)
  K.clear_session()
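The third positional argument to save_keras_model here is a weight quantization dtype: np.uint16 asks the converter to store weights as 16-bit integers, roughly halving artifact size at some cost in precision (newer tensorflowjs releases expose this through a quantization_dtype_map instead). A hedged usage sketch with placeholder paths:

import numpy as np

# Placeholder paths; substitute real locations.
_keras_2_tfjs('/path/to/model.h5', '/path/to/tfjs_artifacts')

# Omitting the dtype keeps the weights as float32:
# tfjs.converters.save_keras_model(model, '/path/to/tfjs_artifacts')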
]

classification_layers = [
    keras.layers.Dense(128),
    keras.layers.Activation('relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(NUM_CLASSES),
    keras.layers.Activation('softmax')
]

model = keras.models.Sequential(feature_layers + classification_layers)

train_model(model,
            optimizer,
            (x_train_lt5, y_train_lt5),
            (x_test_lt5, y_test_lt5),
            NUM_CLASSES, batch_size=batch_size, epochs=epochs)

tfjs.converters.save_keras_model(model, artifacts_dir)