def main():
    random.seed(64)

    NGEN = 50
    MU = 50
    LAMBDA = 100
    CXPB = 0.7
    MUTPB = 0.2

    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,
                              halloffame=hof)

    return pop, stats, hof
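# A minimal driver for the main() above; it assumes the toolbox, creator types,
# and evaluation function are registered elsewhere (they are not shown in this snippet).
if __name__ == "__main__":
    pop, stats, hof = main()
    print("Pareto front size: %d" % len(hof))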
## register evaluation, selection, crossover, and mutation functions
print('toolbox reg')
print("register evaluation")
toolbox.register("evaluate", maxProfit)
print("register selection")
toolbox.register("select", tools.selTournament, tournsize=3)
print("register mate")
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxStrategyDepth))
toolbox.decorate("expr_mut", gp.staticLimit(key=operator.attrgetter("height"), max_value=maxStrategyDepth))
print("register statistics")
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", np.mean)
mstats.register("std", np.std)
mstats.register("min", np.min)
mstats.register("max", np.max)
def trainTestFitness(trainScore, testScore):
    """Combine train and test scores (each mapped through exp(-x)) into a single fitness."""
    train = np.exp(-1 * trainScore)
    test = np.exp(-1 * testScore)
    score = np.abs(train - test) + 1 - np.divide(np.sqrt((1 - train)**2 + (1 - test)**2), np.sqrt(2))
    if train < 0 or train > 1:
        score = -1
    if test < 0 or test > 1:
        score = -1
    return score
pareto_logo.close()
pareto_log_fileo.close()

if len(pareto_front) < MU:
    diff = MU - len(pareto_front)
    pop_tar = pareto_front + toolbox.population(n=diff)
else:
    assert len(pareto_front) == MU
    pop_tar = pareto_front

# reiterating
CXPB = 0.9
toolbox.register("evaluate", minimize_tar_approach2)
pareto_front = fronts[0]

stats = tools.Statistics(lambda ind: ind.fitness.values[1])
# stats.register("avg", numpy.mean, axis=0)
# stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)

logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"

# toolbox.register("evaluate", minimize_tar)
# pop_tar = toolbox.population(n=MU)
print(type(pop_tar))

for item in pop_tar:
    del item.fitness.values

# print("population initialized")
# network_obj = Neterr(indim, outdim, n_hidden, np.random)

# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop_tar if not ind.fitness.valid]
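# A likely continuation of this fragment (the usual DEAP pattern, mirroring the
# evaluation loop that appears in a later snippet of this collection): evaluate
# the invalid individuals and write their fitness values back.
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
    ind.fitness.values = fit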
def main(verbose=True):
    random.seed(318)

    NGEN = 40
    CXPB = 0.5
    MUTPB = 0.1
    ALPHA = 0.05
    BETA = 10
    GAMMA = 0.25
    RHO = 0.9

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (mstats.fields if mstats else [])

    for gen, state in enumerate(gp.HARM(pop, toolbox, CXPB, MUTPB, ALPHA, BETA, GAMMA, RHO)):
        hof.update(state.population)
        record = mstats.compile(state.population)
        logbook.record(gen=gen, nevals=state.nevals, **record)
        if verbose:
            print(logbook.stream)
def main():
    # random.seed(64)

    MU, LAMBDA = 100, 200
    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
                                             cxpb=0.7, mutpb=0.3, ngen=40,
                                             stats=stats, halloffame=hof)

    return pop, logbook, hof
def main():
    pop = toolbox.population(n=cfg.ga.pop)
    if cfg.ga.history:
        history.update(pop)

    hof = deap.tools.ParetoFront(similarity) if cfg.ga.pareto \
        else deap.tools.HallOfFame(cfg.default.results, similarity)

    stats = deap.tools.Statistics(lambda ind: ind.fitness.values)
    numpy.set_printoptions(precision=cfg.default.precision)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    pop, log = deap.algorithms.eaMuPlusLambda(pop, toolbox,
        mu=int(cfg.ga.mu * cfg.ga.pop), lambda_=int(cfg.ga.lambda_ * cfg.ga.pop),
        cxpb=cfg.ga.cx_pb, mutpb=cfg.ga.mut_pb,
        ngen=cfg.ga.gens, stats=stats, halloffame=hof)

    return pop, log, hof
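# Hypothetical shape of the `cfg` object and the `similarity` callable used by the
# main() above; the real project presumably loads these from a configuration file,
# so the values and field names below are placeholders only.
from types import SimpleNamespace

cfg = SimpleNamespace(
    ga=SimpleNamespace(pop=200, history=False, pareto=True,
                       mu=0.5, lambda_=1.0, cx_pb=0.6, mut_pb=0.3, gens=50),
    default=SimpleNamespace(results=10, precision=4),
)

def similarity(ind1, ind2):
    # Treat individuals with identical genomes as duplicates in the hall of fame.
    return list(ind1) == list(ind2)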
possible_gene_values = list(sceobj.bmps_params.keys())
if 0 not in possible_gene_values:
possible_gene_values.append(0)
units_info = sceobj.cfg.units_infos
suit_bmps = sceobj.suit_bmps
gene_to_unit = sceobj.cfg.gene_to_unit
unit_to_gene = sceobj.cfg.unit_to_gene
updown_units = sceobj.cfg.updown_units
scoop_log('Population: %d, Generation: %d' % (pop_size, gen_num))
scoop_log('BMPs configure unit: %s, configuration method: %s' % (cfg_unit, cfg_method))
# create reference point for hypervolume
ref_pt = numpy.array([worst_econ, worst_env]) * multi_weight * -1
stats = tools.Statistics(lambda sind: sind.fitness.values)
stats.register('min', numpy.min, axis=0)
stats.register('max', numpy.max, axis=0)
stats.register('avg', numpy.mean, axis=0)
stats.register('std', numpy.std, axis=0)
logbook = tools.Logbook()
logbook.header = 'gen', 'evals', 'min', 'max', 'avg', 'std'
# Initialize population
pop = toolbox.population(sceobj.cfg, n=pop_size) # type: List
init_time = time.time() - stime
def delete_fitness(new_ind):
    """Delete the fitness and other information of a new individual."""
    del new_ind.fitness.values
    new_ind.gen = -1
def main():
    # The cma module uses the numpy random number generator
    numpy.random.seed(128)

    # The CMA-ES algorithm takes a population of one individual as argument.
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the Rastrigin and other test functions for CMA-ES.
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # logger = tools.EvolutionLogger(stats.functions.keys())

    # The CMA-ES algorithm converges with good probability with these settings
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)

    # print("Best individual is %s, %s" % (hof[0], hof[0].fitness.values))
    return hof[0].fitness.values[0]
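# Hypothetical setup assumed by the CMA-ES main() above: a problem size N, a
# minimizing fitness, and the Rastrigin benchmark as the objective (the names
# mirror the DEAP CMA-ES example; the original module's definitions are not shown).
import numpy
from deap import algorithms, base, benchmarks, cma, creator, tools

N = 5  # dimensionality of the Rastrigin problem

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)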
def main():
    random.seed(10)

    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    algorithms.eaSimple(pop, toolbox, 0.5, 0.2, 40, stats, halloffame=hof)

    return pop, stats, hof
def main(seed=None, play=0, NGEN=40, MU=4 * 10):
    random.seed(seed)

    # MU has to be a multiple of 4, period (DEAP's selTournamentDCD rejects sizes that are not).
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values[1])
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)
    # network_obj = Neterr(indim, outdim, n_hidden, np.random)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
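    # In DEAP's reference NSGA-II example, the comment above is followed by selecting
    # the whole population (selNSGA2 computes the crowding distance as a side effect)
    # and recording the first logbook entry; a sketch of that continuation, assuming
    # selNSGA2 is the registered selector:
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)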