def execute(self):
    # import the algorithm module
    try:
        importStr = 'from algorithm.rating.' + self.config['recommender'] + ' import ' + self.config['recommender']
        exec(importStr)
    except ImportError:
        importStr = 'from algorithm.ranking.' + self.config['recommender'] + ' import ' + self.config['recommender']
        exec(importStr)
    if self.evaluation.contains('-cv'):
        k = int(self.evaluation['-cv'])
        if k <= 1 or k > 10:
            k = 3
        # split the MKL thread budget evenly across the k folds
        # (integer division, so the result stays a valid thread count)
        mkl.set_num_threads(max(1, mkl.get_max_threads() // k))
        # create the manager
        manager = Manager()
        m = manager.dict()
        i = 1
        tasks = []
        binarized = False
        if self.evaluation.contains('-b'):
            binarized = True
        for train, test in DataSplit.crossValidation(self.trainingData, k, binarized=binarized):
            fold = '[' + str(i) + ']'
            if self.config.contains('social'):
                recommender = self.config['recommender'] + "(self.config,train,test,self.relation,fold)"
            else:
                recommender = self.config['recommender'] + "(self.config,train,test,fold)"
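Both execute() variants in this listing assemble an import statement as a string and run it through exec(). A minimal sketch of the same lookup using importlib instead, assuming the algorithm.rating / algorithm.ranking package layout shown above; this is illustrative, not part of the original project:

import importlib

def load_recommender_class(name):
    # try the rating package first, then fall back to ranking,
    # mirroring the try/except ImportError chain above
    for package in ('algorithm.rating', 'algorithm.ranking'):
        try:
            module = importlib.import_module(package + '.' + name)
            return getattr(module, name)
        except ImportError:
            continue
    raise ImportError('recommender %s not found' % name)

With the class object in hand, each fold can instantiate the recommender directly instead of building a constructor call as a string.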
def get_max_threads(self):
    """Returns the maximum number of threads the solver will use"""
    return mkl.get_max_threads()
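Several snippets on this page read mkl.get_max_threads() before lowering the thread count and restore the old value later. A small context manager makes that save/restore pattern explicit; a sketch, assuming the mkl-service package is importable as mkl:

from contextlib import contextmanager

@contextmanager
def mkl_thread_limit(n):
    # remember the current limit, apply the cap, restore on exit
    previous = mkl.get_max_threads()
    mkl.set_num_threads(max(1, int(n)))
    try:
        yield
    finally:
        mkl.set_num_threads(previous)

Wrapping a block in with mkl_thread_limit(1) pins MKL-backed numpy calls to a single thread for the duration of that block.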
try:
    importStr = 'from recommender.baseline.' + self.config['recommender'] + ' import ' + self.config['recommender']
    exec(importStr)
except ImportError:
    importStr = 'from recommender.cf.' + self.config['recommender'] + ' import ' + self.config['recommender']
    try:
        exec(importStr)
    except ImportError:
        importStr = 'from recommender.advanced.' + self.config['recommender'] + ' import ' + self.config['recommender']
        exec(importStr)
if self.evaluation.contains('-cv'):
    k = int(self.evaluation['-cv'])
    if k <= 1 or k > 10:
        k = 3
    mkl.set_num_threads(max(1, mkl.get_max_threads() // k))
    # create the manager used for communication between processes
    manager = Manager()
    m = manager.dict()
    i = 1
    tasks = []
    binarized = False
    if self.evaluation.contains('-b'):
        binarized = True
    for train, test in DataSplit.crossValidation(self.trainingData, k):
        fold = '[' + str(i) + ']'
        # if self.config.contains('social'):
        #     recommender = self.config['recommender'] + "(self.config,train,test,self.relation,fold)"
        # else:
        recommender = self.config['recommender'] + "(self.config,train,test,fold)"
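The fold loop above again builds the constructor call as a string, presumably handed to exec() further down. With the imported class object available, the same dispatch can be written directly; a hedged sketch, where RecommenderClass is a hypothetical name for whatever the dynamic import bound:

# RecommenderClass is hypothetical: the class object produced by the
# dynamic import, e.g. via the importlib sketch earlier on this page
if self.config.contains('social'):
    recommender = RecommenderClass(self.config, train, test, self.relation, fold)
else:
    recommender = RecommenderClass(self.config, train, test, fold)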
    print('Trying super dangerous workaround to make dlfilter work')
elif o == "--notchwidth":
    linkchar = '='
    notchpct = float(a)
    print('Setting notchwidth to', notchpct, '%')
elif o == "--nprocs":
    linkchar = '='
    nprocs = int(a)
    if nprocs < 1:
        nprocs = tide_multiproc.maxcpus()
    print('Will use', nprocs, 'processors for long calculations')
elif o == '--mklthreads':
    mklthreads = int(a)
    linkchar = '='
    if mklexists:
        mklmaxthreads = mkl.get_max_threads()
        if mklthreads > mklmaxthreads:
            print('mkl max threads =', mklmaxthreads, ' - using max')
            mklthreads = mklmaxthreads
        print('Will use', mklthreads, 'MKL threads for accelerated numpy processing.')
    else:
        print('MKL not present - ignoring --mklthreads')
elif o == "--stdfreq":
    linkchar = '='
    stdfreq = float(a)
    print('Setting common output frequency to', stdfreq)
elif o == "--envcutoff":
    linkchar = '='
    envcutoff = float(a)
    print('Will set top of cardiac envelope band to', envcutoff)
elif o == "--envthresh":
if workers is None:
    # default number of processes to have simultaneously running
    workers = cpu_count() // 2 + 1
if workers < 0:
    raise ValueError('number of worker processes must be 0 or greater')
if workers == 0:
    # perform the map on the parent process
    return [f(i) for i in args]
# attempt to prevent lower level functions from running in parallel
if _HAS_MKL:
    starting_threads = mkl.get_max_threads()
    mkl.set_num_threads(1)
# q_in has a max size of 1 so that args is not copied over to
# the next process until absolutely necessary
q_in = Queue(1)
q_out = Queue()
# any exceptions found by the child processes are put in this queue
# and then raised by the parent
q_err = Queue()
# spawn worker processes
procs = []
for i in range(workers):
    p = Process(target=_f, args=(f, q_in, q_out, q_err))
    # process is starting and waiting for something to be put on q_in
    p.start()
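The worker target _f is referenced but not shown in this snippet. A plausible shape for it, consistent with the three queues created above, is sketched below; the (index, value) item layout and the None shutdown sentinel are assumptions, not the project's actual protocol:

def _f(f, q_in, q_out, q_err):
    # hypothetical worker loop: pull (index, arg) pairs off q_in until a
    # None sentinel arrives, pushing results to q_out and errors to q_err
    while True:
        item = q_in.get()
        if item is None:
            break
        i, arg = item
        try:
            q_out.put((i, f(arg)))
        except Exception as e:
            q_err.put(e)

After the workers are joined, the saved starting_threads value would presumably be restored with mkl.set_num_threads(starting_threads).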
from trainer import Trainer
from utils import (update_task, get_max_of_db_column,
                   get_a_task, ExploitationNeeded,
                   LossIsNaN, get_task_ids_and_scores, PopulationFinished,
                   get_col_from_populations, RemainingTasksTaken,
                   print_with_time, ExploitationOcurring,
                   create_new_population)
from config import (get_optimizer, DATA_DIR, MODEL_CLASS, LOSS_FN,
                    HYPERPARAM_NAMES, EPOCHS, BATCH_SIZE, POPULATION_SIZE,
                    EXPLOIT_INTERVAL, USE_SQLITE)

if __name__ == "__main__":
    # TODO: Does this help?
    nproc = mkl.get_max_threads()  # e.g. 12
    mkl.set_num_threads(nproc)
    parser = argparse.ArgumentParser(description="Population Based Training")
    parser.add_argument("-g", "--gpu", type=int, default=0, help="Selects GPU with the given ID. IDs are those shown in nvidia-smi.")  # noqa
    parser.add_argument("-p", "--population_id", type=int, default=None, help="Resumes work on the population with the given ID. Use -1 to select the most recently created population. Without this flag, a new population will be created.")  # noqa
    parser.add_argument("-e", "--exploiter", action="store_true", help="Set this process as the exploiter. It will be responsible for running the exploit step over the entire population at the end of each interval.")  # noqa
    args = parser.parse_args()
    gpu = args.gpu
    population_id = args.population_id
    exploiter = args.exploiter
    inputs = bcolz.open(osp.join(DATA_DIR, "trn_inputs.bcolz"), 'r')
    targets = bcolz.open(osp.join(DATA_DIR, "trn_targets.bcolz"), 'r')
    pathlib.Path('checkpoints').mkdir(exist_ok=True)
    checkpoint_str = "checkpoints/pop-%03d_task-%03d.pth"
    interval_limit = int(np.ceil(EPOCHS / EXPLOIT_INTERVAL))
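For concreteness: with EPOCHS = 100 and EXPLOIT_INTERVAL = 5 (illustrative values, not from the source), interval_limit = ceil(100 / 5) = 20, i.e. the exploiter would run at most 20 times over a full training run.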