def test_import_object():
    imported_dummy = import_object(__name__ + '.Dummy')

    assert Dummy is imported_dummy
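The test above exercises `import_object`, which resolves a fully qualified dotted name into the object it names. A minimal sketch of such a helper (the real implementation may handle extra cases, such as nested attributes):

import importlib


def import_object(object_name):
    """Resolve a dotted path like 'package.module.Name' into the named object."""
    module_name, attribute = object_name.rsplit('.', 1)
    return getattr(importlib.import_module(module_name), attribute)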
# Constructor of a GAN-style model (encoder / generator / two critics).
# The signature below is reconstructed from the attribute assignments;
# defaults and argument order in the original implementation may differ.
def __init__(self, shape, latent_dim, batch_size, iterations_critic, epochs,
             encoder_input_shape, generator_input_shape, critic_x_input_shape,
             critic_z_input_shape, layers_encoder, layers_generator,
             layers_critic_x, layers_critic_z, optimizer, learning_rate,
             **hyperparameters):
    self.shape = shape
    self.latent_dim = latent_dim
    self.batch_size = batch_size
    self.iterations_critic = iterations_critic
    self.epochs = epochs
    self.hyperparameters = hyperparameters
    self.encoder_input_shape = encoder_input_shape
    self.generator_input_shape = generator_input_shape
    self.critic_x_input_shape = critic_x_input_shape
    self.critic_z_input_shape = critic_z_input_shape
    self.layers_encoder, self.layers_generator = layers_encoder, layers_generator
    self.layers_critic_x, self.layers_critic_z = layers_critic_x, layers_critic_z

    # Resolve the optimizer class from its dotted path and instantiate it.
    self.optimizer = import_object(optimizer)(learning_rate)
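The last line above resolves an optimizer class from its dotted path and immediately instantiates it with the learning rate. A small illustration of the same pattern, assuming TensorFlow's Keras is installed:

# Resolve the class first, then call it like any other class.
Adam = import_object('tensorflow.keras.optimizers.Adam')
optimizer = Adam(learning_rate=0.001)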
import keras


def build_layer(layer, hyperparameters):
    layer_class = import_object(layer['class'])
    layer_kwargs = layer['parameters'].copy()

    # Wrapper layers (e.g. Bidirectional, TimeDistributed) carry a nested spec.
    if issubclass(layer_class, keras.layers.wrappers.Wrapper):
        layer_kwargs['layer'] = build_layer(layer_kwargs['layer'], hyperparameters)

    # String values are treated as hyperparameter names when one matches;
    # otherwise the string is kept as the literal parameter value.
    for key, value in layer_kwargs.items():
        if isinstance(value, str):
            layer_kwargs[key] = hyperparameters.get(value, value)

    return layer_class(**layer_kwargs)
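A hypothetical layer specification showing both substitution paths in `build_layer`; the `dense_units` hyperparameter name is an assumption for illustration:

hyperparameters = {'dense_units': 64}
layer_spec = {
    'class': 'keras.layers.Dense',
    'parameters': {
        'units': 'dense_units',  # replaced by hyperparameters['dense_units']
        'activation': 'relu',    # no matching hyperparameter, kept literally
    },
}
dense = build_layer(layer_spec, hyperparameters)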
import warnings

# Assumed value of the module constant naming pandas' native aggregations.
_RESAMPLE_AGGS = ['mean', 'median', 'prod', 'quantile', 'std', 'sum', 'var']


def resample(df, rule, on=None, groupby=(), aggregation='mean',
             reset_index=True, time_index=None):
    # Signature and message opening reconstructed from the surviving fragment.
    if time_index is not None:
        message = (
            'resample `time_index` argument deprecated and will be removed'
            ' in future versions of MLPrimitives. Please use `on` instead.'
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        on = time_index

    if groupby:
        df = df.groupby(groupby)

    if isinstance(rule, int):
        rule = '{}s'.format(rule)

    dtir = df.resample(rule, on=on)

    # Unknown non-callable aggregations are resolved as dotted import paths.
    if not callable(aggregation) and aggregation not in _RESAMPLE_AGGS:
        try:
            aggregation = import_object(aggregation)
        except (AttributeError, ImportError, ValueError):
            pass

    df = dtir.aggregate(aggregation)

    # Drop index levels that also exist as columns to avoid duplicate names.
    for name in df.index.names:
        if name in df:
            del df[name]

    if reset_index:
        df.reset_index(inplace=True)

    return df
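A usage sketch, assuming a DataFrame with a `timestamp` column (names are illustrative):

import pandas as pd

df = pd.DataFrame({
    'timestamp': pd.date_range('2024-01-01', periods=6, freq='30min'),
    'value': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
})

# Integer rules become seconds: 3600 -> '3600s', i.e. hourly buckets.
hourly = resample(df, 3600, on='timestamp', aggregation='mean')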
def __init__(self, layers, loss, optimizer, classification, callbacks=tuple(),
             metrics=None, epochs=10, verbose=False, validation_split=0,
             batch_size=32, shuffle=True, **hyperparameters):
    self.layers = layers
    self.optimizer = import_object(optimizer)
    self.loss = import_object(loss)
    self.metrics = metrics
    self.epochs = epochs
    self.verbose = verbose
    self.classification = classification
    self.hyperparameters = hyperparameters
    self.validation_split = validation_split
    self.batch_size = batch_size
    self.shuffle = shuffle

    # Resolve each callback class from its dotted path (specs mutate in place).
    for callback in callbacks:
        callback['class'] = import_object(callback['class'])

    self.callbacks = callbacks
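Assuming this constructor belongs to a Keras `Sequential`-style adapter class, a call could look like the following; all dotted paths are ordinary import names:

model = Sequential(
    layers=[{'class': 'keras.layers.Dense', 'parameters': {'units': 1}}],
    loss='keras.losses.mean_squared_error',
    optimizer='keras.optimizers.Adam',
    classification=False,
    callbacks=[{'class': 'keras.callbacks.EarlyStopping'}],
)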
import logging
import numpy as np

LOGGER = logging.getLogger(__name__)


def graph_pairs_feature_extraction(X, functions, node_columns, graph=None):
    functions = [import_object(function) for function in functions]

    X = X.copy()
    pairs = X[node_columns].values

    # for i, graph in enumerate(graphs):
    def apply(function):
        try:
            values = function(graph, pairs)
            # Link-prediction functions yield (u, v, score) triples; keep the score.
            return np.array(list(values))[:, 2]
        except ZeroDivisionError:
            LOGGER.warning("ZeroDivisionError captured running %s", function)
            return np.zeros(len(pairs))

    # Loop body reconstructed by analogy with graph_feature_extraction below.
    for function in functions:
        X[function.__name__] = apply(function)

    return X
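A usage sketch with networkx link-prediction functions, which yield the (u, v, score) triples that the `[:, 2]` indexing expects; the column names are illustrative:

import networkx as nx
import pandas as pd

graph = nx.karate_club_graph()
X = pd.DataFrame({'source': [0, 1, 2], 'target': [33, 32, 31]})

X = graph_pairs_feature_extraction(
    X,
    functions=['networkx.jaccard_coefficient', 'networkx.adamic_adar_index'],
    node_columns=['source', 'target'],
    graph=graph,
)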
import numpy as np
import pandas as pd


def graph_feature_extraction(X, functions, graphs):
    functions = [import_object(function) for function in functions]

    for node_column, graph in graphs.items():
        # Align the feature index dtype with the column it is merged on.
        index_type = type(X[node_column].values[0])
        features = pd.DataFrame(index=graph.nodes)
        features.index = features.index.astype(index_type)

        def apply(function):
            # Per-node metrics return a {node: value} mapping.
            values = function(graph)
            return np.array(list(values.values()))

        for function in functions:
            name = '{}_{}'.format(function.__name__, node_column)
            features[name] = apply(function)

        X = X.merge(features, left_on=node_column, right_index=True, how='left')

    # Return added: the merged result would otherwise be lost.
    return X
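A usage sketch with per-node networkx metrics, which return the {node: value} mapping that `apply` assumes:

import networkx as nx
import pandas as pd

graph = nx.karate_club_graph()
X = pd.DataFrame({'member': [0, 1, 33]})

X = graph_feature_extraction(
    X,
    functions=['networkx.degree_centrality', 'networkx.pagerank'],
    graphs={'member': graph},
)
# X gains 'degree_centrality_member' and 'pagerank_member' columns.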