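# Shared setup assumed by the test functions below (a minimal sketch; the real
# test module defines these names at module level). X, y and search_config
# mirror the iris / RandomForestClassifier example at the end of this section;
# the remaining names (n_iter_0, n_iter_1, random_state, cv, n_jobs) are
# placeholder values chosen for illustration only.
from sklearn.datasets import load_iris

iris_data = load_iris()
X, y = iris_data.data, iris_data.target

search_config = {
    "sklearn.ensemble.RandomForestClassifier": {"n_estimators": range(10, 100, 10)}
}

n_iter_0 = 10
n_iter_1 = 100
random_state = 0
cv = 3
n_jobs = 1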
def test_xgboost_n_iter():
    from hyperactive import RandomSearchOptimizer

    n_iter_list = [0, 1, 1, 10]
    for n_iter in n_iter_list:
        opt = RandomSearchOptimizer(search_config, n_iter)
        opt.fit(X, y)
        opt.predict(X)
        opt.score(X, y)
def test_RandomSearchOptimizer():
    from hyperactive import RandomSearchOptimizer

    opt0 = RandomSearchOptimizer(
        search_config, n_iter_0, random_state=random_state, verbosity=0, cv=cv, n_jobs=1
    )
    opt0.fit(X, y)

    opt1 = RandomSearchOptimizer(
        search_config,
        n_iter_1,
        random_state=random_state,
        verbosity=0,
        cv=cv,
        n_jobs=n_jobs,
    )
    opt1.fit(X, y)

    # the longer search is expected to find a better best score
    assert opt0.score_best < opt1.score_best
def test_catboost_memory():
    from hyperactive import RandomSearchOptimizer

    memory_list = [False, True]
    for memory in memory_list:
        opt = RandomSearchOptimizer(search_config, 1, memory=memory)
        opt.fit(X, y)
        opt.predict(X)
        opt.score(X, y)
def test_catboost():
    from hyperactive import RandomSearchOptimizer

    opt = RandomSearchOptimizer(search_config, 1)
    opt.fit(X, y)
    opt.predict(X)
    opt.score(X, y)

    ml_scores = [
        "accuracy_score",
        "balanced_accuracy_score",
        "average_precision_score",
        "brier_score_loss",
        "f1_score",
        "log_loss",
        "precision_score",
        "recall_score",
        "jaccard_score",
        "roc_auc_score",
    ]

    # the chosen metric should be stored on the optimizer config and stay
    # unchanged through fit, predict and score
    for score in ml_scores:
        opt = RandomSearchOptimizer(search_config, 1, metric=score)
        assert opt._config_.metric == score
        opt.fit(X, y)
        assert opt._config_.metric == score
        opt.predict(X)
        assert opt._config_.metric == score
        opt.score(X, y)
        assert opt._config_.metric == score
def test_random_state():
    from hyperactive import RandomSearchOptimizer

    opt0 = RandomSearchOptimizer(search_config, 1, random_state=False)
    opt0.fit(X, y)

    opt1 = RandomSearchOptimizer(search_config, 1, random_state=0)
    opt1.fit(X, y)

    opt2 = RandomSearchOptimizer(search_config, 1, random_state=1)
    opt2.fit(X, y)
def test_catboost_warm_start():
    from hyperactive import RandomSearchOptimizer

    warm_start = {
        "catboost.CatBoostClassifier": {
            "iterations": [3],
            "learning_rate": [1],
            "depth": [3],
            "verbose": [0],
        }
    }

    warm_start_list = [None, warm_start]
    for warm_start in warm_start_list:
        opt = RandomSearchOptimizer(search_config, 1, warm_start=warm_start)
        opt.fit(X, y)
        opt.predict(X)
        opt.score(X, y)
def test_n_jobs_4():
    from hyperactive import RandomSearchOptimizer

    opt = RandomSearchOptimizer(search_config, 1, n_jobs=4)
    opt.fit(X, y)
def test_n_jobs_1():
    from hyperactive import RandomSearchOptimizer

    opt = RandomSearchOptimizer(search_config, 1, n_jobs=1)
    opt.fit(X, y)
from sklearn.datasets import load_iris
from hyperactive import RandomSearchOptimizer

iris_data = load_iris()
X, y = iris_data.data, iris_data.target

search_config = {
    "sklearn.ensemble.RandomForestClassifier": {"n_estimators": range(10, 100, 10)}
}

opt = RandomSearchOptimizer(search_config, n_iter=10)
opt.fit(X, y)
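# After fitting, the optimizer can be used the same way the tests above use it
# (a sketch based only on the calls shown in this section): produce predictions
# and a score for the training data.
opt.predict(X)
opt.score(X, y)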