# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): extracted fragment of a test method -- the enclosing test class
# and the original indentation were lost, and the final `pipeline.fit(...)`
# call is cut off mid-statement.  Only comments are added below; recover the
# full body from the upstream test suite before running.
def test_fit_output(self):
# Setup variables
primitives = [
'sklearn.preprocessing.StandardScaler',
'sklearn.linear_model.LogisticRegression'
]
pipeline = MLPipeline(primitives)
# `output_` specifications exercised below: a named output set, a mixed
# list, an integer block index, a block name, and a `block.variable` name.
named = 'default'
list_ = ['default', 0]
int_block = 0
# presumably used further down (not visible) to assert that invalid
# specifications raise -- TODO confirm against the full test
invalid_int = 10
str_block = 'sklearn.preprocessing.StandardScaler#1'
invalid_block = 'InvalidBlockName'
str_block_variable = 'sklearn.preprocessing.StandardScaler#1.X'
invalid_variable = 'sklearn.preprocessing.StandardScaler#1.invalid'
# Run
named_out = pipeline.fit(self.X, self.y, output_=named)
list_out = pipeline.fit(self.X, self.y, output_=list_)
int_out = pipeline.fit(self.X, self.y, output_=int_block)
str_out = pipeline.fit(self.X, self.y, output_=str_block)
# NOTE(review): statement truncated here in the source fragment
str_out_variable = pipeline.fit(self.X, self.y,
def test_predict_start(self):
    """Check that ``predict`` can be started from a later block.

    The pipeline is fitted normally, then the first block is swapped for a
    mock and ``predict`` is invoked with ``start_`` given both as an integer
    index and as a block name, so execution begins at the second block.
    """
    pipeline = MLPipeline([
        'sklearn.preprocessing.StandardScaler',
        'sklearn.linear_model.LogisticRegression'
    ])
    pipeline.fit(self.X, self.y)

    # Swap the first block for a mock so any call to it can be detected.
    first_block = Mock()
    pipeline.blocks['sklearn.preprocessing.StandardScaler#1'] = first_block

    # Start from the second block, addressed by index and then by name.
    kwargs = {
        'X': self.X,
    }
    pipeline.predict(start_=1, **kwargs)
    pipeline.predict(start_='sklearn.linear_model.LogisticRegression#1', **kwargs)
# NOTE(review): headless fragment -- the `def` line of this test (likely a
# `get_outputs` test) was lost in extraction, and the `expected` literal at
# the end is truncated.  Only comments are added below.
outputs = {
'default': [
{
'name': 'a_name',
'variable': 'a_variable',
'type': 'a_type',
}
],
# The 'debug' entry omits 'type', exercising outputs with partial metadata.
'debug': [
{
'name': 'another_name',
'variable': 'another_variable',
}
]
}
pipeline = MLPipeline(['a_primitive', 'another_primitive'], outputs=outputs)
# Give each block an explicit produce_output spec so block-level output
# variables can be resolved by name.
pipeline.blocks['a_primitive#1'].produce_output = [
{
'name': 'output',
'type': 'whatever'
}
]
pipeline.blocks['another_primitive#1'].produce_output = [
{
'name': 'something',
}
]
# Request outputs by named set, by integer block index (negative presumably
# counts from the last block -- confirm), and by `block#count.variable` name.
returned = pipeline.get_outputs(['default', 'debug', -1, 'a_primitive#1.output'])
# NOTE(review): truncated here -- the expected value and assertions are missing.
expected = [
def test_get_tunable_hyperparameters(self):
    """``get_tunable_hyperparameters`` must return an equal but distinct copy."""
    pipeline = MLPipeline(['a_primitive'])
    hyperparameters = {}
    pipeline._tunable_hyperparameters = hyperparameters

    result = pipeline.get_tunable_hyperparameters()

    # Same content, but a defensive copy rather than the internal dict itself.
    assert result == hyperparameters
    assert result is not hyperparameters
# NOTE(review): fragment of a test method -- indentation was lost and the
# `block_args` list (and the rest of the test) is truncated after the
# 'arg_4' entry.  Only comments are added below.
def test__get_block_args(self):
# Map pipeline-context names to block argument names for 'a_block':
# context variable 'arg_3_alt' should feed block argument 'arg_3'.
input_names = {
'a_block': {
'arg_3': 'arg_3_alt'
}
}
pipeline = MLPipeline(['a_primitive'], input_names=input_names)
# Argument specs covering the interesting cases: a plain argument, one
# with a default, one that is renamed via input_names, and one that is
# explicitly not required.
block_args = [
{
'name': 'arg_1',
},
{
'name': 'arg_2',
'default': 'arg_2_value'
},
{
'name': 'arg_3',
},
{
'name': 'arg_4',
'required': False
},
# NOTE(review): fragment of a test method -- truncated inside the 'block_2'
# hyperparameter 'values' list; the remainder of the test is missing.
def test_get_tunable_hyperparameters_flat(self):
mlpipeline = MLPipeline(['a_primitive'])
# Inject a two-block tunable spec directly; presumably the flattened form
# of get_tunable_hyperparameters is asserted further down -- not visible.
mlpipeline._tunable_hyperparameters = {
'block_1': {
'hp_1': {
'type': 'int',
'range': [
1,
10
],
}
},
'block_2': {
'hp_1': {
'type': 'str',
'default': 'a',
'values': [
'a',
def test_get_output_variables(self):
    """``get_output_variables`` returns the ``variable`` of each default output."""
    spec = {
        'default': [
            {
                'name': 'a_name',
                'variable': 'a_variable',
                'type': 'a_type',
            }
        ]
    }
    pipeline = MLPipeline(['a_primitive'], outputs=spec)

    variables = pipeline.get_output_variables()

    assert variables == ['a_variable']
def test_get_output_names(self):
    """``get_output_names`` returns the ``name`` of each default output."""
    spec = {
        'default': [
            {
                'name': 'a_name',
                'variable': 'a_variable',
                'type': 'a_type',
            }
        ]
    }
    pipeline = MLPipeline(['a_primitive'], outputs=spec)

    result = pipeline.get_output_names()

    assert result == ['a_name']
# NOTE(review): indentation of this helper was lost in extraction and it ends
# on the last visible line, so it may continue past this fragment (e.g. with a
# trailing else clause).  Only comments are added below.
def _get_pipeline_dict(pipeline, primitives):
# Normalize the several accepted `pipeline` representations into a plain dict.
if isinstance(pipeline, dict):
# Already a pipeline specification dict: used as-is.
return pipeline
elif isinstance(pipeline, str):
# A string is handed to load_pipeline; presumably a stored pipeline
# name or path -- confirm against load_pipeline's contract.
return load_pipeline(pipeline)
elif isinstance(pipeline, MLPipeline):
return pipeline.to_dict()
elif isinstance(pipeline, list):
# A list is interpreted as the primitives themselves, so a separate
# `primitives` argument would be ambiguous.
if primitives is not None:
raise ValueError('if `pipeline` is a `list`, `primitives` must be `None`')
return {'primitives': pipeline}
elif pipeline is None:
if primitives is None:
raise ValueError('Either `pipeline` or `primitives` must be not `None`.')
# Only `primitives` was given: start from an empty specification dict.
return dict()