import logging
import os
import shlex
import sys
import unittest

from smac.utils.merge_foreign_data import merge_foreign_data
from smac.utils.io.cmd_reader import truthy as _is_truthy
from smac.utils.io.input_reader import InputReader
from smac.runhistory.runhistory import RunHistory
from smac.optimizer.objective import average_cost
from smac.tae.execute_ta_run import StatusType

in_reader = InputReader()

# `mock` lives in the standard library from Python 3.3 on
if sys.version_info[0] == 2:
    import mock
else:
    from unittest import mock
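
# Additional classes used by the snippets below; the module paths follow SMAC's
# layout and are assumed here; adjust them if your SMAC version differs.
from ConfigSpace import ConfigurationSpace
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats
from smac.tae.execute_ta_run_aclib import ExecuteTARunAClib
from smac.tae.execute_ta_run_hydra import ExecuteTARunHydra
from smac.tae.execute_ta_run_old import ExecuteTARunOld
from smac.utils.io.traj_logging import TrajLogger
from smac.utils.validate import Validator, _Run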
class InitFreeScenario(Scenario):
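    '''Scenario subclass with a no-op __init__ so tests can populate attributes by hand.'''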
def __init__(self):
pass
class ScenarioTest(unittest.TestCase):
def setUp(self):
logging.basicConfig()
self.logger = logging.getLogger(
self.__module__ + '.' + self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
base_directory = os.path.split(__file__)[0]
base_directory = os.path.abspath(
os.path.join(base_directory, '..', '..'))
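
        # ExecuteTARunHydra wraps another TAE (here ExecuteTARunAClib) and combines its
        # result with a per-instance cost oracle; `self.oracle` and `stats` are assumed
        # to be prepared by the surrounding test's setup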
eta = ExecuteTARunHydra(
cost_oracle=self.oracle, tae=ExecuteTARunAClib,
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 0
print(status, cost, runtime)
eta = ExecuteTARunHydra(cost_oracle=self.oracle, tae=ExecuteTARunAClib,
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 0
print(status, cost, runtime)
eta = ExecuteTARunHydra(cost_oracle=self.oracle, tae=ExecuteTARunAClib,
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"), stats=stats,
run_obj="quality")
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 3.0
print(status, cost, runtime, ar_info)
def test_run(self):
        '''
        run a simple dummy algorithm in AClib 2.0 style
        '''
scen = Scenario(scenario={'cs': ConfigurationSpace(),
'run_obj': 'quality',
'output_dir': ''}, cmd_options=None)
stats = Stats(scen)
eta = ExecuteTARunHydra(
cost_oracle=self.oracle, tae=ExecuteTARunAClib,
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 0
print(status, cost, runtime)
        # the old-format runner parses "Result of this algorithm run: ..." output lines
        eta = ExecuteTARunOld(
ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={})
assert status == StatusType.SUCCESS
assert cost == 2.0
assert runtime == 2.0
print(status, cost, runtime)
eta = ExecuteTARunOld(
ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
stats=stats, run_obj="quality")
status, cost, runtime, ar_info = eta.run(config={},)
assert status == StatusType.SUCCESS
assert cost == 4.0
assert runtime == 2.0
print(status, cost, runtime, ar_info)
def test_init(self):
scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs,
'output_dir': ''})
stats = Stats(scen)
TrajLogger(output_dir='./tmp_test_folder', stats=stats)
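        # the logger must create the requested output directory itself and must not fall
        # back to the default 'smac3-output' location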
self.assertFalse(os.path.exists('smac3-output'))
self.assertTrue(os.path.exists('tmp_test_folder'))
        # assumed: the runner under test here is ExecuteTARunOld, which parses the
        # old-style "Result of this algorithm run: ..." lines returned by the stubs
        # below; the command line is empty because _call_ta gets monkey-patched
        eta = ExecuteTARunOld(ta=shlex.split(""),
                              stats=stats)
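
        # a well-formed status string is mapped to StatusType.SUCCESS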
def test_success(**kwargs):
return "Result of this algorithm run: SUCCESS,1,1,1,12354", ""
eta._call_ta = test_success
status, cost, runtime, ar_info = eta.run(config={},)
self.assertEqual(status, StatusType.SUCCESS)
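
        # a status string that cannot be parsed (misspelled "SUCESS") is reported as CRASHED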
def test_success(**kwargs):
return "Result of this algorithm run: SUCESS,1,1,1,12354", ""
eta._call_ta = test_success
status, cost, runtime, ar_info = eta.run(config={},)
self.assertEqual(status, StatusType.CRASHED)
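
        # status parsing also accepts lowercase spellings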
def test_success(**kwargs):
return "Result of this algorithm run: success,1,1,1,12354", ""
eta._call_ta = test_success
status, cost, runtime, ar_info = eta.run(config={},)
self.assertEqual(status, StatusType.SUCCESS)
def test_run(self):
        '''
        run a simple dummy algorithm in AClib 2.0 style
        '''
scen = Scenario(scenario={'cs': ConfigurationSpace(),
'run_obj': 'quality',
'output_dir': ''}, cmd_options=None)
stats = Stats(scen)
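        # the dummy AClib wrapper reports fixed results: argument 1 yields a TIMEOUT,
        # argument 2 a successful run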
eta = ExecuteTARunAClib(
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={})
assert status == StatusType.TIMEOUT
assert cost == 2.0
assert runtime == 2.0
print(status, cost, runtime)
eta = ExecuteTARunAClib(
ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
stats=stats)
status, cost, runtime, ar_info = eta.run(config={})
assert status == StatusType.SUCCESS
assert cost == 3.0
assert runtime == 3.0
print(status, cost, runtime)
def test_get_runs(self):
''' test if the runs are generated as expected '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts' : self.train_insts,
'test_insts': self.test_insts})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Get multiple configs
        self.maxDiff = None  # show the full diff if the expected-runs comparison fails
        # each _Run pairs a configuration with an instance, a seed and the
        # instance-specific string
expected = [_Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
_Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
_Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
_Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
_Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
_Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
_Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
_Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
'''
@author: Aaron Klein
'''
import logging
import numpy as np
from robo.task.branin import Branin
from robo.initial_design.init_random_uniform import init_random_uniform
from smac.smbo.smbo import SMBO
from smac.scenario.scenario import Scenario
logging.basicConfig(level=logging.DEBUG)
scenario = Scenario()
task = Branin()
# draw 20 uniformly random points within the Branin bounds and evaluate them
X = init_random_uniform(task.X_lower, task.X_upper, 20)
Y = task.evaluate(X)
instance_features = np.array([[1]])
smac = SMBO(scenario)
# ask the SMBO loop for the next candidate point given the observations so far
new_x = smac.choose_next(X, Y)
print(new_x)
def test_passed_runhistory_deterministic(self):
''' test if passed runhistory is in resulting runhistory '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts' : self.train_insts,
'deterministic' : True})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory(average_cost)
        for config in old_configs[:len(old_configs) // 2]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
configs = validator._get_configs('all')
insts = validator._get_instances('train')
runs_w_rh = validator._get_runs(configs, insts, repetitions=2,
runhistory=old_rh)
runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
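        # runs already covered by old_rh should not have to be executed again when the
        # runhistory is passed in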