Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a metric-report handler — the enclosing "def" and the
# "if" matching the "else:" below are outside this view, and the original
# indentation has been lost; code lines are kept byte-identical.
# Branch: the trial is requesting new parameters (trial_job_id / parameter_index set).
assert data['trial_job_id'] is not None
assert data['parameter_index'] is not None
assert data['trial_job_id'] in self.job_id_para_id_map
# Close out the parameter slot this job was running before giving it new work.
self._handle_trial_end(self.job_id_para_id_map[data['trial_job_id']])
ret = self._get_one_trial_job()
if ret is None:
# No parameters available right now — remember the request to satisfy later.
self.unsatisfied_jobs.append({'trial_job_id': data['trial_job_id'], 'parameter_index': data['parameter_index']})
else:
ret['trial_job_id'] = data['trial_job_id']
ret['parameter_index'] = data['parameter_index']
# update parameter_id in self.job_id_para_id_map
self.job_id_para_id_map[data['trial_job_id']] = ret['parameter_id']
send(CommandType.SendTrialJobParameter, json_tricks.dumps(ret))
else:
# Branch: the trial reported a metric value.
assert 'value' in data
value = extract_scalar_reward(data['value'])
# Sign is flipped when maximizing — presumably the underlying optimizer
# minimizes internally; TODO confirm.
if self.optimize_mode is OptimizeMode.Maximize:
reward = -value
else:
reward = value
assert 'parameter_id' in data
# parameter_id encodes the bracket id as its first '_'-separated field
# (see the "bracket id" debug log below).
s, i, _ = data['parameter_id'].split('_')
logger.debug('bracket id = %s, metrics value = %s, type = %s', s, value, data['type'])
s = int(s)
# add to self.job_id_para_id_map here,
# because when the first parameter_id is created, trial_job_id is not known yet.
if data['trial_job_id'] in self.job_id_para_id_map:
assert self.job_id_para_id_map[data['trial_job_id']] == data['parameter_id']
else:
self.job_id_para_id_map[data['trial_job_id']] = data['parameter_id']
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Register a finished trial's final metric with the underlying search space.

    Override of the abstract method in :class:`~nni.tuner.Tuner`.
    The scalar reward is extracted from *value*; when minimizing, its sign is
    flipped so the optimizer can always work with a maximization target.
    """
    reward = extract_scalar_reward(value)
    # Negate so that "smaller is better" becomes "larger is better" internally.
    reward = -reward if self._optimize_mode == OptimizeMode.Minimize else reward
    logger.info("Received trial result.")
    logger.info("value :%s", reward)
    logger.info("parameter : %s", parameters)
    self._space.register(parameters, reward)
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
Tuner receive result from trial.
Parameters
----------
parameter_id : int
The id of parameters, generated by nni manager.
parameters : dict
A group of parameters that trial has tried.
value : dict/float
if value is dict, it should have "default" key.
"""
value = extract_scalar_reward(value)
# Sign is flipped when maximizing — presumably the underlying model
# minimizes internally; TODO confirm.
if self.optimize_mode == OptimizeMode.Maximize:
value = -value
logger.info("Received trial result.")
logger.info("value is : %s", str(value))
logger.info("parameter is : %s", str(parameters))
# parse parameter to sample_x: map the named parameters into a
# fixed-order feature vector using self.key_order.
sample_x = [0 for i in range(len(self.key_order))]
for key in parameters:
idx = self.key_order.index(key)
sample_x[idx] = parameters[key]
# parse value to sample_y
temp_y = []
# NOTE(review): block is truncated here — the body of this "if"
# (handling an already-seen sample_x) continues beyond this view.
if sample_x in self.samples_x:
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Record the final metric of a finished trial as a new population member.

    Parameters
    ----------
    parameter_id : int
        Identifier previously handed out for this trial.
    parameters : dict
        The hyper-parameters the trial ran with.
    value : dict/float
        Final metrics of the trial; if a dict, it should have a "default" key.

    Raises
    ------
    RuntimeError
        If *parameter_id* was never recorded in ``self.total_data``.
    """
    reward = extract_scalar_reward(value)
    if parameter_id not in self.total_data:
        raise RuntimeError('Received parameter_id not in total_data.')
    # Restore the stored parameters (these still contain the "_index" keys).
    params = self.total_data[parameter_id]
    # The evolutionary selection maximizes, so negate rewards when minimizing.
    if self.optimize_mode == OptimizeMode.Minimize:
        reward = -reward
    self.population.append(Individual(config=params, result=reward))
Receive trial's result. if the number of finished trials equals self.inf_batch_size, start the next update to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
"""
# NOTE(review): the enclosing "def" and the opening '"""' are outside this
# view; the lines above are the tail of the method's docstring and must stay
# verbatim. Original indentation has been lost in this view.
trial_info_idx = self.running_trials.pop(parameter_id, None)
# Every reported result must belong to a trial we previously handed out.
assert trial_info_idx is not None
value = extract_scalar_reward(value)
# Sign is flipped when minimizing — presumably rewards are treated as
# maximization targets internally; TODO confirm.
if self.optimize_mode == OptimizeMode.Minimize:
value = -value
self.trials_result[trial_info_idx] = value
self.finished_trials += 1
logger.debug('receive_trial_result, parameter_id %d, trial_info_idx %d, finished_trials %d, inf_batch_size %d',
parameter_id, trial_info_idx, self.finished_trials, self.inf_batch_size)
# Once a full inference batch has finished, kick off the next model update.
if self.finished_trials == self.inf_batch_size:
logger.debug('Start next round inference in receive_trial_result')
self._next_round_inference()
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Record an observation of the objective function.

    Parameters
    ----------
    parameter_id : int
        Identifier of the reporting trial's parameters.
    parameters : dict
        The parameters the trial ran with.
    value : dict
        Final metrics of the trial, including the reward and a
        'feature_importance' entry.
    """
    # Lazily adopt the trial-reported feature importance as the search space.
    if self.search_space is None:
        self.search_space = value['feature_importance']
    sign = -1 if self.optimize_mode is OptimizeMode.Minimize else 1
    reward = sign * extract_scalar_reward(value)
    logger.debug('receive trial result is:\n')
    for item in (parameters, reward):
        logger.debug(str(item))
    return
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
Record an observation of the objective function
Parameters
----------
parameter_id : int
parameters : dict
value : dict/float
if value is dict, it should have "default" key.
value is final metrics of the trial.
"""
reward = extract_scalar_reward(value)
# restore the parameters that contain '_index'
if parameter_id not in self.total_data:
raise RuntimeError('Received parameter_id not in total_data.')
params = self.total_data[parameter_id]
# code for parallel: results may be folded into the "constant liar" record
# (self.CL_rval) instead of the real one (self.rval) — presumably to keep
# concurrent suggestions diverse; TODO confirm.
if self.parallel:
constant_liar = kwargs.get('constant_liar', False)
if constant_liar:
rval = self.CL_rval
else:
rval = self.rval
# ignore duplicated reported final result (due to aware of intermedate result)
if parameter_id not in self.running_data:
logger.info("Received duplicated final result with parameter id: %s", parameter_id)
# NOTE(review): block is truncated here — the duplicate-result handling and
# the remainder of this method continue beyond this view.
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
    """
    Record an observation of the objective function.

    Parameters
    ----------
    parameter_id : int
        Identifier of the reporting trial's parameters.
    parameters : dict
        The parameters the trial ran with.
    value : dict
        Final metrics of the trial, including the reward and a
        'feature_importance' entry.
    """
    # First result seeds the search space from the reported feature importance.
    if self.search_space is None:
        self.search_space = value['feature_importance']
    raw = extract_scalar_reward(value)
    reward = -raw if self.optimize_mode is OptimizeMode.Minimize else raw
    logger.debug('receive trial result is:\n')
    logger.debug(str(parameters))
    logger.debug(str(reward))
    return