import os
import re
from datetime import timedelta

from opentuner import resultsdb

# (args, session, and run(...) are defined by the surrounding import script,
# elided in this fragment)
tuningrun = resultsdb.models.TuningRun(
    # ... (earlier constructor fields elided in this fragment) ...
    state='COMPLETE',
)
session.add(tuningrun)

for gen, line in enumerate(open(args.candidatelog)):
  if line[0] != '#':
    line = re.split('\t', line)
    date = tuningrun.start_date + timedelta(seconds=float(line[0]))
    cfg = os.path.normpath(
        os.path.join(os.path.dirname(args.candidatelog), '..', line[5]))
    result = run(args, cfg)
    result.was_new_best = True
    result.tuning_run = tuningrun
    result.collection_date = date
    session.add(result)
    desired_result = resultsdb.models.DesiredResult(
        limit=args.limit,
        tuning_run=tuningrun,
        generation=gen,
        requestor='Imported',
        request_date=date,
        start_date=date,
        result=result,
        state='COMPLETE')
    session.add(desired_result)
    tuningrun.end_date = date
    print(gen, date, result.time)
session.commit()
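For reference, the importer above reads only two columns from the tab-separated candidate log: column 0 (elapsed seconds since the run started) and column 5 (a config path resolved relative to the log's parent directory); lines starting with `#` are skipped. A minimal sketch with a hypothetical row:

import re

# Hypothetical candidatelog row; the values in columns other than 0 and 5
# are placeholders that the importer ignores.
row = "12.5\tA\tB\tC\tD\tcandidates/0042.cfg\tE\n"
cols = re.split('\t', row)
assert float(cols[0]) == 12.5
assert cols[5] == "candidates/0042.cfg"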
# Inside the search driver's per-generation loop: request the next
# configuration to test, preferring user-supplied seed configurations.
if self.seed_cfgs:
  config = self.get_configuration(self.seed_cfgs.pop())
  dr = DesiredResult(configuration=config,
                     requestor='seed',
                     generation=self.generation,
                     request_date=datetime.now(),
                     tuning_run=self.tuning_run)
else:
  dr = self.root_technique.desired_result()
if dr is None or dr is False:
  log.debug("no desired result, skipping to testing phase")
  break

self.session.flush()  # populate configuration_id
duplicates = (self.session.query(DesiredResult)
              .filter_by(tuning_run=self.tuning_run,
                         configuration_id=dr.configuration_id)
              .filter(DesiredResult.id != dr.id)
              .order_by(DesiredResult.request_date)
              .limit(1).all())
self.session.add(dr)
if len(duplicates):
  if not self.args.no_dups:
    log.warning("duplicate configuration request #%d %s/%s %s",
                self.test_count,
                dr.requestor,
                duplicates[0].requestor,
                'OLD' if duplicates[0].result else 'PENDING')
self.session.flush()
desired_result_id = dr.id

def callback(result):
  dr = self.session.query(DesiredResult).get(desired_result_id)
  dr.result = result
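Note that `callback` closes over `desired_result_id` rather than over `dr` itself: re-querying by primary key keeps the callback valid even if the ORM instance has been expired or detached by the time a result arrives. A minimal sketch of the same pattern (names hypothetical, assuming a SQLAlchemy session):

def make_result_callback(session, desired_result_id):
  def callback(result):
    # Re-fetch by primary key at call time instead of capturing the ORM
    # object; the original instance may be stale by the time this runs.
    dr = session.query(DesiredResult).get(desired_result_id)
    dr.result = result
    dr.state = 'COMPLETE'
  return callback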
def stats_over_time(session,
                    run,
                    extract_fn,
                    combine_fn,
                    no_data=None):
  """
  return reduce(combine_fn, map(extract_fn, data)) for each quantum of the
  tuning run
  """
  value_by_quanta = [no_data]
  start_date = run.start_date

  # Only count results that were a new best when they were recorded.
  subq = (session.query(resultsdb.models.Result.id)
          .filter_by(tuning_run=run, was_new_best=True, state='OK'))

  q = (session.query(resultsdb.models.DesiredResult)
       .join(resultsdb.models.Result)
       .filter(resultsdb.models.DesiredResult.state == 'COMPLETE',
               resultsdb.models.DesiredResult.tuning_run == run,
               resultsdb.models.DesiredResult.result_id.in_(subq.subquery()))
       .order_by(resultsdb.models.DesiredResult.request_date))

  first_id = None
  for dr in q:
    if first_id is None:
      first_id = dr.id
    td = dr.request_date - start_date
    duration = td.seconds + (td.days * 24 * 3600.0)
    # TODO: make these configurable
    by_request_count = True
    stats_quanta = 10
    if by_request_count:
      quanta = dr.id - first_id
    else:
      quanta = int(duration / stats_quanta)
    # ... (remainder truncated in the source; presumably it folds
    # extract_fn(dr) into value_by_quanta[quanta] via combine_fn) ...
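As a usage sketch (assuming a connected resultsdb `session` and a finished `TuningRun` row `run`), tracking the best running time per quantum would look like:

# Fold each quantum's new-best results down to the single best time seen;
# quanta with no completed new-best results keep the no_data placeholder.
best_time_by_quanta = stats_over_time(session, run,
                                      extract_fn=lambda dr: dr.result.time,
                                      combine_fn=min,
                                      no_data=None)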
# (SQLAlchemy imports and the declarative Base are elided in this fragment)
class DesiredResult(Base):
  # ... (earlier columns elided in this fragment) ...
  state = Column(Enum('UNKNOWN', 'REQUESTED', 'RUNNING',
                      'COMPLETE', 'ABORTED',
                      name="t_dr_state"),
                 default='UNKNOWN')
  result_id = Column(ForeignKey(Result.id), index=True)
  result = relationship(Result, backref='desired_results')
  start_date = Column(DateTime)

  # input_id = Column(ForeignKey(Input.id))
  # input = relationship(Input, backref='desired_results')

Index('ix_desired_result_custom1', DesiredResult.tuning_run_id,
      DesiredResult.generation)
Index('ix_desired_result_custom2', DesiredResult.tuning_run_id,
      DesiredResult.configuration_id)


# Track bandit meta-technique information when a bandit meta-technique is
# used for a tuning run.
class BanditInfo(Base):
  tuning_run_id = Column(ForeignKey(TuningRun.id))
  tuning_run = relationship(TuningRun, backref='bandit_info')
  # the bandit exploration/exploitation tradeoff
  c = Column(Float)
  # the bandit window
  window = Column(Integer)


class BanditSubTechnique(Base):
  bandit_info_id = Column(ForeignKey(BanditInfo.id))
  bandit_info = relationship(BanditInfo, backref='subtechniques')
  name = Column(String(128))
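As a sketch of how these rows might be written (assuming an open session and an existing `tuning_run`; the constant, window size, and technique names are illustrative only):

# Record that this run used a bandit meta-technique with exploration
# tradeoff c and a sliding window, plus the sub-techniques it arbitrated.
info = BanditInfo(tuning_run=tuning_run, c=0.05, window=500)
session.add(info)
for name in ('DifferentialEvolutionAlt', 'UniformGreedyMutation'):
  session.add(BanditSubTechnique(bandit_info=info, name=name))
session.commit()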
def suggest(self, n_suggestions=1):
  # ... (setup elided in this fragment: X = [], using_dummy_suggest = False,
  # and desired_results fetched from the opentuner search driver) ...
  for ii in range(n_suggestions):
    if len(desired_results) <= ii:
      assert self.dummy_suggest is not None, "opentuner gave up on the first call!"
      # Use the dummy suggestion in this case.
      X.append(self.dummy_suggest)
      using_dummy_suggest = True
      continue

    # Get the simple dict equivalent to suggestion.
    x_guess = desired_results[ii].configuration.data
    X.append(x_guess)

    # Now save the desired result for future use in observe.
    x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)
    assert x_guess_ not in self.x_to_dr, "the suggestions should not already be in the x_to_dr dict"
    self.x_to_dr[x_guess_] = desired_results[ii]
    # This will also catch None from opentuner.
    assert isinstance(self.x_to_dr[x_guess_], DesiredResult)

  assert len(X) == n_suggestions, "incorrect number of suggestions provided by opentuner"

  # Log the suggestion for repeating if opentuner gives up next time. We can
  # only do this when it is not already in use, since we will be checking
  # guesses against dummy_suggest in observe.
  if not using_dummy_suggest:
    self.dummy_suggest = X[-1]
  return X
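`OpentunerOptimizer.hashable_dict` is not shown in this fragment; any injective mapping from a configuration dict to a hashable value works as the `x_to_dr` key. A minimal sketch of such a method on the wrapper class, under that assumption (it requires the dict's values to themselves be hashable):

@staticmethod
def hashable_dict(d):
  # Plain dicts are unhashable; a frozenset of items is hashable and
  # compares equal for equal dicts, so it can serve as a lookup key.
  return frozenset(d.items())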
def observe(self, X, y):
  """...
  y : array-like
      Corresponding values where objective has been evaluated.
  """
  assert len(X) == len(y)

  for x_guess, y_ in zip(X, y):
    x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)

    # If we can't find the dr object then it must be the dummy guess.
    if x_guess_ not in self.x_to_dr:
      assert x_guess == self.dummy_suggest, "Appears to be a guess that did not originate from suggest"
      continue

    # Get the corresponding DesiredResult object.
    dr = self.x_to_dr.pop(x_guess_, None)
    # This will also catch None from opentuner.
    assert isinstance(dr, DesiredResult), "DesiredResult object not available in x_to_dr"

    # Opentuner's arg names assume we are minimizing execution time,
    # so to minimize an arbitrary objective we pretend y is a 'time'.
    result = Result(time=y_)
    self.api.report_result(dr, result)
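Since opentuner only knows how to minimize a `time`, maximizing some score `y_` is just a sign flip before reporting, e.g.:

# To maximize, report the negated score as the 'time' to be minimized.
result = Result(time=-y_)
self.api.report_result(dr, result)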