def __init__(self, optimize_mode="maximize", population_size=32):
"""
Parameters
----------
optimize_mode : str, default 'maximize'
population_size : int
initial population size. The larger population size,
the better evolution performance.
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.population_size = population_size
self.trial_result = []
self.searchspace_json = None
self.total_data = {}
self.random_state = None
self.population = None
self.space = None
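# --- Illustrative sketch (not from the original source): one plausible evolution
# step, showing how the population above can be used. Tournament selection of
# size 2 and the simplified {key: [candidate values]} search space are
# assumptions made for this example only.
import copy
import random

def mutate_sketch(population, search_space):
    """Pick the fitter of two random individuals and re-sample one parameter."""
    parent = max(random.sample(population, 2), key=lambda ind: ind.result)
    child = copy.deepcopy(parent.config)
    key = random.choice(list(child.keys()))
    child[key] = random.choice(search_space[key])  # perturb a single "gene"
    return child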
ent_coef : float
    Policy entropy coefficient in the optimization objective.
lr : float
    Learning rate of the model (LSTM network), constant.
vf_coef : float
    Value function loss coefficient in the optimization objective.
max_grad_norm : float
    Gradient norm clipping coefficient.
gamma : float
    Discount factor.
lam : float
    Advantage estimation discounting factor (lambda in the paper).
cliprange : float
    Clip range in the PPO algorithm, constant.
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.model_config = ModelConfig()
self.model = None
self.search_space = None
self.running_trials = {} # key: parameter_id, value: actions/states/etc.
self.inf_batch_size = trials_per_update # number of trials to generate in one inference
self.first_inf = True  # whether this is the first time we run inference to generate new trials
self.trials_result = [None for _ in range(self.inf_batch_size)] # results of finished trials
self.credit = 0  # number of trial requests received but not yet satisfied
self.param_ids = []
self.finished_trials = 0
self.chosen_arch_template = {}
self.actions_spaces = None
self.actions_to_config = None
self.full_act_space = None
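# --- Illustrative sketch (assumption): ModelConfig above is not shown in this
# snippet; a minimal container for the PPO hyperparameters documented in the
# docstring could look like this. The defaults below are common PPO settings,
# not necessarily the ones this tuner ships with.
from dataclasses import dataclass

@dataclass
class ModelConfigSketch:
    ent_coef: float = 0.0        # policy entropy coefficient in the objective
    lr: float = 3e-4             # constant learning rate of the LSTM model
    vf_coef: float = 0.5         # value function loss coefficient
    max_grad_norm: float = 0.5   # gradient norm clipping coefficient
    gamma: float = 0.99          # discount factor
    lam: float = 0.95            # advantage estimation (GAE) lambda
    cliprange: float = 0.2       # PPO clip range, constant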
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Method invoked when a trial reports its final result.
    Override of the abstract method in :class:`~nni.tuner.Tuner`.
    """
    value = extract_scalar_reward(value)
    if self._optimize_mode == OptimizeMode.Minimize:
        value = -value
    logger.info("Received trial result.")
    logger.info("value: %s", value)
    logger.info("parameter: %s", parameters)
    self._space.register(parameters, value)
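# --- Illustrative sketch: extract_scalar_reward (imported from nni.utils)
# accepts either a bare number or a dict carrying a "default" key. A minimal
# re-implementation for reference; the real helper also validates types and
# raises on malformed values.
def extract_scalar_reward_sketch(value, scalar_key='default'):
    if isinstance(value, dict):
        return value[scalar_key]
    return value

# e.g. extract_scalar_reward_sketch(0.93) == extract_scalar_reward_sketch({'default': 0.93})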
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Receive a trial's final result.

    Parameters
    ----------
    parameter_id : int
        The id of the parameters, generated by the NNI manager.
    parameters : dict
        The group of parameters that the trial tried.
    value : dict/float
        If value is a dict, it should have a "default" key.
    """
    value = extract_scalar_reward(value)
    # the underlying optimizer minimizes, so negate the reward when maximizing
    if self.optimize_mode == OptimizeMode.Maximize:
        value = -value
    logger.info("Received trial result.")
    logger.info("value is: %s", str(value))
    logger.info("parameter is: %s", str(parameters))

    # parse parameter to sample_x
    sample_x = [0 for _ in range(len(self.key_order))]
    for key in parameters:
        idx = self.key_order.index(key)
        sample_x[idx] = parameters[key]

    # parse value to sample_y: append to the history of an already-seen
    # sample, otherwise record a new (sample, value) pair
    temp_y = []
    if sample_x in self.samples_x:
        idx = self.samples_x.index(sample_x)
        temp_y = self.samples_y[idx]
        temp_y.append(value)
        self.samples_y[idx] = temp_y
    else:
        self.samples_x.append(sample_x)
        self.samples_y.append([value])

    # update the constant-liar estimate of the optimum; for the 'mean'
    # type, optimal_y is kept as a running [sum, count] pair
    if self.optimal_y is None:
        self.optimal_y = [value, 1] if self.constant_liar_type == 'mean' else value
    elif self.constant_liar_type == 'mean':
        _sum = self.optimal_y[0] + value
        _number = self.optimal_y[1] + 1
        self.optimal_y = [_sum, _number]
    elif self.constant_liar_type == 'min':
        self.optimal_y = min(self.optimal_y, value)
    elif self.constant_liar_type == 'max':
        self.optimal_y = max(self.optimal_y, value)
    logger.debug("Update optimal_y with reward, optimal_y = %s", self.optimal_y)

    # hand the (already sign-adjusted) result back to the underlying hyperopt state
    reward = value
    rval = self.rval
    domain = rval.domain
    trials = rval.trials
    new_id = len(trials)
    rval_specs = [None]
    rval_results = [domain.new_result()]
    rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)]
    vals = self.total_data[parameter_id]  # restore the parameters containing "_index"
    idxs = dict()
    out_y = dict()
    json2vals(self.json, vals, out_y)
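# --- Illustrative sketch (assumption): when parameters are generated for trials
# that are still running, a constant-liar strategy substitutes a fake
# observation derived from optimal_y above. For the 'mean' type, optimal_y is
# the running [sum, count] pair maintained in receive_trial_result.
def constant_liar_sketch(optimal_y, constant_liar_type):
    if constant_liar_type == 'mean':
        _sum, _number = optimal_y
        return _sum / _number if _number else 0.0
    # for 'min' / 'max', optimal_y already stores the scalar lie
    return optimal_y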
def __init__(self, optimize_mode='maximize', feature_percent=0.6):
    """Initialization function.

    count : int
        Number of trials generated so far.
    optimize_mode : str
        Contains "maximize" or "minimize" mode.
    search_space : dict
        Defines which features the tuner needs to search.
    feature_percent : float
        Fraction of the candidate features sampled for each trial.
    default_space : list
        The default feature space to start the search from.
    epoch_importance : list
        Feature-importance results recorded across epochs.
    estimate_sample_prob : list or None
        Estimated sampling probability for each feature.
    """
    self.count = -1
    self.optimize_mode = OptimizeMode(optimize_mode)
    self.search_space = None
    self.feature_percent = feature_percent
    self.default_space = []
    self.epoch_importance = []
    self.estimate_sample_prob = None
    logger.debug('init auto-fe done.')
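# --- Illustrative sketch (assumption): one plausible way feature_percent and
# estimate_sample_prob could be used: sample a fixed fraction of the candidate
# features, optionally weighted by the estimated sampling probability. The
# helper below is hypothetical and only illustrates the parameters' intent.
import numpy as np

def sample_features_sketch(candidate_features, feature_percent, sample_prob=None):
    sample_size = max(1, int(len(candidate_features) * feature_percent))
    # sample_prob, if given, must be non-negative and sum to 1
    return list(np.random.choice(candidate_features, size=sample_size,
                                 replace=False, p=sample_prob))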
If new trials have been generated after this trial ends, the parameters of a new trial are returned.
Otherwise, None is returned.
"""
global _KEY
self.num_finished_configs[i] += 1
logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d',
             self.s, self.i, i, self.num_finished_configs[i], self.num_configs_to_run[i])
if self.num_finished_configs[i] >= self.num_configs_to_run[i] and self.no_more_trial is False:
    # choose candidate configs from finished configs to run in the next round
    assert self.i == i + 1
    # finish this bracket
    if self.i > self.s:
        self.no_more_trial = True
        return None
    this_round_perf = self.configs_perf[i]
    if self.optimize_mode is OptimizeMode.Maximize:
        sorted_perf = sorted(this_round_perf.items(),
                             key=lambda kv: kv[1][1], reverse=True)  # best first when maximizing
    else:
        sorted_perf = sorted(this_round_perf.items(), key=lambda kv: kv[1][1])
    logger.debug('bracket %s next round %s, sorted hyper configs: %s',
                 self.s, self.i, sorted_perf)
    next_n, next_r = self.get_n_r()
    logger.debug('bracket %s next round %s, next_n=%d, next_r=%d',
                 self.s, self.i, next_n, next_r)
    hyper_configs = dict()
    for k in range(next_n):
        params_id = sorted_perf[k][0]
        params = self.hyper_configs[i][params_id]
        params[_KEY] = next_r  # update the budget r for the promoted config
        # generate new id
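# --- Illustrative sketch (assumption): get_n_r above follows Hyperband's
# successive-halving schedule. With reduction factor eta, round i of a bracket
# keeps roughly n * eta**(-i) configs and gives each a budget of r * eta**i:
import math

def get_n_r_sketch(n, r, i, eta=3):
    next_n = math.floor(n * eta ** (-i))  # configs promoted into round i
    next_r = r * eta ** i                 # budget (e.g. epochs) per config
    return next_n, next_r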
def __init__(self, optimize_mode="maximize", config_dedup=False):
"""
Parameters
----------
optimize_mode : str
Optimize mode, 'maximize' or 'minimize', by default 'maximize'
config_dedup : bool
If True, the tuner will not generate a configuration that has been already generated.
If False, a configuration may be generated twice, but it is rare for relatively large search space.
"""
self.logger = logger
self.optimize_mode = OptimizeMode(optimize_mode)
self.total_data = {}
self.optimizer = None
self.smbo_solver = None
self.first_one = True
self.update_ss_done = False
self.loguniform_key = set()
self.categorical_dict = {}
self.cs = None
self.dedup = config_dedup
    for key, value in _params.items():
        if key in self.loguniform_key:
            # the search space was converted to log scale, so convert the value too
            _params[key] = np.log(value)
        elif key in self.categorical_dict:
            if value in self.categorical_dict[key]:
                _params[key] = self.categorical_dict[key].index(value)
            else:
                self.logger.info("The value %s of key %s is not in the search space.", str(value), key)
                valid_entry = False
                break
    if not valid_entry:
        continue
    # start importing this data entry
    _completed_num += 1
    config = Configuration(self.cs, values=_params)
    if self.optimize_mode is OptimizeMode.Maximize:
        _value = -_value
    if self.first_one:
        self.smbo_solver.nni_smac_receive_first_run(config, _value)
        self.first_one = False
    else:
        self.smbo_solver.nni_smac_receive_runs(config, _value)
self.logger.info("Successfully imported data to the SMAC tuner, total data: %d, imported data: %d.", len(data), _completed_num)
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
If value is a dict, it should have a "default" key.
value is the final metric of the trial.
"""
reward = extract_scalar_reward(value)
if parameter_id not in self.total_data:
    raise RuntimeError('Received parameter_id not in total_data.')
# restore the parameters containing "_index"
params = self.total_data[parameter_id]
# the tuner maximizes internally, so negate the reward when minimizing
if self.optimize_mode == OptimizeMode.Minimize:
    reward = -reward
indiv = Individual(config=params, result=reward)
self.population.append(indiv)
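# --- Illustrative sketch (assumption): a minimal Individual record matching the
# usage above; the real class may carry extra bookkeeping such as generation
# or mutation info.
class IndividualSketch:
    def __init__(self, config=None, result=None):
        self.config = config  # the hyperparameter dict that was evaluated
        self.result = result  # sign-adjusted reward (higher is better internally)

    def __repr__(self):
        return "Individual(config=%s, result=%s)" % (self.config, self.result)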