Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_two_estimators_predict(self):
    """Fit a pipeline containing two estimators and predict on the test split.

    Three parallel branches (PCA, Nystroem, and a LogisticRegression used as
    a feature generator) are concatenated before the final classifier.
    """
    branches = PCA() & Nystroem() & LogisticRegression()
    pipeline = (
        StandardScaler()
        >> branches
        >> ConcatFeatures()
        >> NoOp()
        >> LogisticRegression()
    )
    fitted = pipeline.fit(self.X_train, self.y_train)
    fitted.predict(self.X_test)
def test_concat_with_hyperopt2(self):
    """Optimize a make_union >> LogisticRegression pipeline with Hyperopt.

    Builds the union via lale's make_union (which inserts its own
    ConcatFeatures), wraps the pipeline in Hyperopt with a small budget,
    then fits and predicts on Iris to confirm the round trip works.
    """
    from lale.lib.lale import Hyperopt
    from lale.operators import make_pipeline, make_union
    from sklearn.datasets import load_iris

    pca = PCA(n_components=3)
    nys = Nystroem(n_components=10)
    lr = LogisticRegression(random_state=42, C=0.1)
    # NOTE(review): the original also created an unused ConcatFeatures()
    # local; make_union already concatenates, so it was dropped.
    trainable = make_pipeline(make_union(pca, nys), lr)
    clf = Hyperopt(estimator=trainable, max_evals=2)

    iris_data = load_iris()
    clf.fit(iris_data.data, iris_data.target)
    clf.predict(iris_data.data)
def test_make_choice_with_instance(self):
    """Planned (choice) operators cannot be fit directly but can be composed.

    A `|` choice is a planned operator: calling fit on it must raise
    AttributeError. The three planned pipelines are constructed only to
    verify that both the `|`/`>>` combinators and make_choice build
    without error; they are intentionally never fit.
    """
    from lale.operators import make_choice  # make_union/make_pipeline were unused
    from sklearn.datasets import load_iris

    iris = load_iris()
    X, y = iris.data, iris.target
    tfm = PCA() | Nystroem() | NoOp()
    with self.assertRaises(AttributeError):
        trained = tfm.fit(X, y)
    planned_pipeline1 = (OneHotEncoder | NoOp) >> tfm >> (LogisticRegression | KNeighborsClassifier)
    planned_pipeline2 = (OneHotEncoder | NoOp) >> (PCA | Nystroem) >> (LogisticRegression | KNeighborsClassifier)
    planned_pipeline3 = make_choice(OneHotEncoder, NoOp) >> make_choice(PCA, Nystroem) >> make_choice(LogisticRegression, KNeighborsClassifier)
def test_fit_predict(self):
    """Fit a TopKVotingClassifier over a planned pipeline and predict.

    The estimator is a planned choice of preprocessor (PCA | Nystroem)
    followed by a choice of classifier; TopKVotingClassifier optimizes it
    (3 evals) and ensembles the top 2 candidates.
    """
    from lale.lib.lale import TopKVotingClassifier
    from lale.lib.sklearn import Nystroem
    # NOTE(review): unused imports of load_iris and accuracy_score removed;
    # this test fits on self.X_train/self.y_train, not on Iris.

    planned = (PCA() | Nystroem()) >> (LogisticRegression() | KNeighborsClassifier())
    ensemble = TopKVotingClassifier(
        estimator=planned,
        args_to_optimizer={'max_evals': 3},
        k=2,
    )
    trained = ensemble.fit(self.X_train, self.y_train)
    trained.predict(self.X_test)
def test_two_estimators_predict_proba(self):
    """Fit a pipeline containing two estimators and call predict_proba.

    Same topology as test_two_estimators_predict: three parallel branches
    concatenated ahead of the final LogisticRegression, but exercising the
    probability interface instead of plain predict.
    """
    branches = PCA() & Nystroem() & LogisticRegression()
    pipeline = (
        StandardScaler()
        >> branches
        >> ConcatFeatures()
        >> NoOp()
        >> LogisticRegression()
    )
    fitted = pipeline.fit(self.X_train, self.y_train)
    fitted.predict_proba(self.X_test)
def dont_test_smac_choice(self):
    """Disabled smoke test: build a SMAC optimizer over a planned pipeline.

    Prefixed dont_ so the runner skips it. Constructs a lale search space
    with get_smac_space, wraps it in a SMAC Scenario, and instantiates the
    SMAC facade with an iris-based objective (iris_fmin_tae). The optimizer
    is only constructed, never run.
    """
    import numpy as np
    # Import ConfigSpace and different types of parameters
    from smac.configspace import ConfigurationSpace
    # Import SMAC-utilities
    from smac.scenario.scenario import Scenario
    from smac.facade.smac_facade import SMAC as orig_SMAC
    # NOTE(review): unused imports removed (sklearn svm/datasets,
    # cross_val_score, ExecuteTAFuncDict) — none were referenced below.

    tfm = PCA() | Nystroem() | NoOp()
    planned_pipeline1 = (OneHotEncoder(handle_unknown = 'ignore', sparse = False) | NoOp()) >> tfm >> (LogisticRegression() | KNeighborsClassifier())
    # lale_num_grids=1 keeps the generated configuration space small.
    cs: ConfigurationSpace = get_smac_space(planned_pipeline1, lale_num_grids=1)

    # Scenario object
    scenario = Scenario({"run_obj": "quality",  # we optimize quality (alternatively runtime)
                         "runcount-limit": 1,  # maximum function evaluations
                         "cs": cs,  # configuration space
                         "deterministic": "true"
                         })
    # Optimize, using a SMAC-object
    tae = iris_fmin_tae(planned_pipeline1, num_folds=2)
    print("Optimizing! Depending on your machine, this might take a few minutes.")
    smac = orig_SMAC(scenario=scenario, rng=np.random.RandomState(42),
                     tae_runner=tae)
def test_remove_last4(self):
    """remove_last(inplace=True) drops the final step from the pipeline itself."""
    branches = PCA() & Nystroem() & PassiveAggressiveClassifier()
    pipeline = (
        StandardScaler()
        >> branches
        >> ConcatFeatures()
        >> NoOp()
        >> PassiveAggressiveClassifier()
    )
    trimmed = pipeline.remove_last(inplace=True)
    # In-place removal: the returned object and the original reference
    # both see the shortened (6-step) pipeline.
    self.assertEqual(len(trimmed._steps), 6)
    self.assertEqual(len(pipeline._steps), 6)
def test_remove_last2(self):
    """remove_last must reject a pipeline whose last layer has multiple sinks."""
    branches = PCA() & Nystroem() & PassiveAggressiveClassifier()
    tail = PassiveAggressiveClassifier() & LogisticRegression()
    pipeline = (
        StandardScaler()
        >> branches
        >> ConcatFeatures()
        >> NoOp()
        >> tail
    )
    # Two parallel final estimators: there is no single "last" step to remove.
    with self.assertRaises(ValueError):
        pipeline.remove_last()