def graph_fn(mode, features):
    # Bidirectional-LSTM text classifier: embed the token ids, encode the
    # sequence, regularize with dropout, and return 2-class logits.
    x = plx.layers.Embedding(input_dim=10000, output_dim=128)(features['source_token'])
    x = plx.layers.Bidirectional(plx.layers.LSTM(units=128, dropout=0.2, recurrent_dropout=0.2))(x)
    x = plx.layers.Dropout(rate=0.5)(x)
    x = plx.layers.Dense(units=2)(x)
    return x
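For orientation, a rough shape walk-through of the classifier above with dummy data; the batch size of 4 and sequence length of 50 are arbitrary placeholders, not values from the original example.

import tensorflow as tf

# Dummy batch of padded token-id sequences: 4 examples, 50 timesteps,
# ids in [0, 10000) to match Embedding(input_dim=10000).
source_token = tf.random.uniform([4, 50], maxval=10000, dtype=tf.int32)
# After Embedding(input_dim=10000, output_dim=128): (4, 50, 128)
# After Bidirectional(LSTM(units=128)):             (4, 256)  forward/backward states concatenated
# After Dropout(0.5):                               (4, 256)
# After Dense(units=2):                             (4, 2)    one logit per class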
def graph_fn(mode, features):
    # Stacked-LSTM graph; num_layers, num_units and output_units are defined
    # in the surrounding scope of the original example.
    x = features['x']
    for i in range(num_layers):
        x = plx.layers.LSTM(units=num_units)(x)
    return plx.layers.Dense(units=output_units)(x)
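Because num_layers, num_units, and output_units come from the surrounding scope, the fragment above is not runnable on its own. A self-contained plain tf.keras sketch of the same stacked-LSTM pattern (with placeholder sizes, not values from the original) might look like this; note that every LSTM except the last keeps return_sequences=True so the layers can be chained:

import tensorflow as tf

def build_stacked_lstm(num_layers=2, num_units=128, output_units=10,
                       timesteps=20, feature_dim=64):
    # Placeholder sizes; the original example takes these from its own config.
    inputs = tf.keras.Input(shape=(timesteps, feature_dim))
    x = inputs
    for i in range(num_layers):
        # All but the last LSTM must return the full sequence so the next
        # LSTM still sees a (batch, time, features) tensor.
        x = tf.keras.layers.LSTM(num_units,
                                 return_sequences=(i < num_layers - 1))(x)
    outputs = tf.keras.layers.Dense(output_units)(x)
    return tf.keras.Model(inputs, outputs)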
                'image': [
                    (plx.processing.image.Standardization, {}),
                    (plx.layers.Reshape, {'new_shape': [28 * 28]})
                ]
            }
        },
    },
    'estimator_config': {'output_dir': output_dir},
    'model_config': {
        'module': 'Generator',
        'summaries': ['loss'],
        'loss_config': {'module': 'mean_squared_error'},
        'optimizer_config': {'module': 'adadelta', 'learning_rate': 0.9},
        'encoder_config': {
            'definition': [
                (plx.layers.FullyConnected, {'num_units': 128}),
                (plx.layers.FullyConnected, {'num_units': 256}),
            ]
        },
        'decoder_config': {
            'definition': [
                (plx.layers.FullyConnected, {'num_units': 256}),
                (plx.layers.FullyConnected, {'num_units': 28 * 28}),
            ]
        }
    }
}
experiment_config = plx.configs.ExperimentConfig.read_configs(config)
return plx.experiments.create_experiment(experiment_config)
        'n_classes': 10,
        'graph_config': {
            'name': 'mnist',
            'features': ['image'],
            'definition': [
                (plx.layers.HighwayConv2d,
                 {'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'elu'}),
                (plx.layers.HighwayConv2d,
                 {'num_filter': 16, 'filter_size': 2, 'strides': 1, 'activation': 'elu'}),
                (plx.layers.HighwayConv2d,
                 {'num_filter': 16, 'filter_size': 1, 'strides': 1, 'activation': 'elu'}),
                (plx.layers.MaxPool2d, {'kernel_size': 2}),
                (plx.layers.BatchNormalization, {}),
                (plx.layers.FullyConnected, {'num_units': 128, 'activation': 'elu'}),
                (plx.layers.FullyConnected, {'num_units': 256, 'activation': 'elu'}),
                (plx.layers.FullyConnected, {'num_units': 10}),
            ]
        }
    }
}
experiment_config = plx.configs.ExperimentConfig.read_configs(config)
return plx.experiments.create_experiment(experiment_config)
                       'meta_data_file': meta_data_file},
            'definition': {
                'image': [
                    (plx.processing.image.Standardization, {}),
                ]
            }
        },
    },
    'estimator_config': {'output_dir': output_dir},
    'model_config': {
        'summaries': ['loss', 'image_input', 'image_result'],
        'module': 'Generator',
        'optimizer_config': {'module': 'adadelta', 'learning_rate': 0.9},
        'encoder_config': {
            'definition': [
                (plx.layers.Conv2d,
                 {'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'relu',
                  'regularizer': 'l2_regularizer'}),
                (plx.layers.MaxPool2d, {'kernel_size': 2}),
                (plx.layers.Conv2d, {'num_filter': 32, 'filter_size': 3, 'activation': 'relu',
                                     'regularizer': 'l2_regularizer'}),
                (plx.layers.MaxPool2d, {'kernel_size': 2}),
            ]
        },
        'decoder_config': {
            'definition': [
                (plx.layers.Conv2d,
                 {'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'relu',
                  'regularizer': 'l2_regularizer'}),
                (plx.layers.Upsample2d, {'kernel_size': 2}),
                (plx.layers.Conv2d, {'num_filter': 32, 'filter_size': 3, 'activation': 'relu',
                                     'regularizer': 'l2_regularizer'}),
def graph_fn(mode, features):
    # Dropout-regularized regression MLP over features['x'].
    x = plx.layers.Dense(units=32, activation='relu')(features['x'])
    x = plx.layers.Dropout(rate=0.3)(x)
    x = plx.layers.Dense(units=32, activation='relu')(x)
    x = plx.layers.Dropout(rate=0.3)(x)
    # Single regression output; dropout is not applied after the output layer.
    return plx.layers.Dense(units=1)(x)
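For comparison, a self-contained plain tf.keras version of the same dropout-regularized regression MLP; the input width of 13 is an arbitrary placeholder, not part of the original snippet:

import tensorflow as tf

def build_regressor(input_dim=13):
    inputs = tf.keras.Input(shape=(input_dim,))
    x = tf.keras.layers.Dense(32, activation='relu')(inputs)
    x = tf.keras.layers.Dropout(0.3)(x)
    x = tf.keras.layers.Dense(32, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.3)(x)
    outputs = tf.keras.layers.Dense(1)(x)  # single regression value, no dropout after the output
    return tf.keras.Model(inputs, outputs)

model = build_regressor()
model.compile(optimizer='adam', loss='mse')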
def encoder_fn(mode, features):
    x = plx.layers.Dense(units=128)(features)
    return plx.layers.Dense(units=256)(x)
def graph_fn(mode, features):
    return plx.layers.Dense(units=512)(features['state'])
def graph_fn(mode, features):
    # AlexNet-style convolutional classifier over features['image'];
    # `l2` is assumed to be imported elsewhere (a weight regularizer such as
    # tf.keras.regularizers.l2).
    x = plx.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu',
                          kernel_regularizer=l2(0.02))(features['image'])
    x = plx.layers.MaxPooling2D(pool_size=3, strides=2)(x)
    x = plx.layers.Conv2D(filters=156, kernel_size=5, activation='relu',
                          kernel_regularizer=l2(0.02))(x)
    x = plx.layers.MaxPooling2D(pool_size=3, strides=2)(x)
    x = plx.layers.Conv2D(filters=384, kernel_size=3, activation='relu')(x)
    x = plx.layers.Conv2D(filters=384, kernel_size=3, activation='relu')(x)
    x = plx.layers.Conv2D(filters=256, kernel_size=3, activation='relu')(x)
    x = plx.layers.MaxPooling2D(pool_size=3, strides=2)(x)
    x = plx.layers.Flatten()(x)
    x = plx.layers.Dense(units=4096, activation='tanh')(x)
    x = plx.layers.Dropout(rate=0.5)(x)
    x = plx.layers.Dense(units=4096, activation='tanh')(x)
    x = plx.layers.Dropout(rate=0.5)(x)
    x = plx.layers.Dense(units=17)(x)  # 17-way logits, one per class
    return x
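The network above ends in 17 raw logits. A minimal sketch of turning such logits into a classification loss with plain TensorFlow, using dummy tensors in place of real model outputs and labels (none of these names come from the plx API):

import tensorflow as tf

logits = tf.random.normal([8, 17])                          # stand-in for graph_fn output
labels = tf.random.uniform([8], maxval=17, dtype=tf.int32)  # dummy integer class labels
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
probabilities = tf.nn.softmax(logits)                       # per-class probabilities at inference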