How to use the pypet.cartesian_product function in pypet

To help you get started, we’ve selected a few pypet examples based on popular ways it is used in public projects.
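Before looking at the project excerpts, here is a minimal sketch (not taken from any of the projects below) of what cartesian_product actually builds: it turns a dict of candidate value lists into a dict of equally long lists covering every parameter combination, ready to be handed to f_explore.

from pypet import cartesian_product

# Two parameters with 4 and 3 candidate values -> 4 * 3 = 12 combinations
explored = cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]})

# Every list in the result has length 12 and position i describes run number i,
# so traj.f_explore(explored) would add 12 runs to a trajectory.
assert len(explored['x']) == len(explored['y']) == 12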


github SmokinCaterpillar / pypet / examples / example_12_sharing_data_between_processes.py View on Github
                      file_title='Example_12_Sharing_Data',
                      comment='The first example!',
                      continuable=False, # We have shared data in the form of a multiprocessing list,
                      # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)

    # The environment has created a trajectory container for us
    traj = env.v_trajectory

    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')

    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

    # We want a shared list where we can put all our results. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] = [0 for _dummy in range(len(traj))]

    # Run the simulation
    env.f_run(multiply, result_list)

    # Now we want to store the final list as a numpy array
    traj.f_add_result('z', np.array(result_list))

    # Finally let's print the result to see that it worked
    print(traj.z)

    # Disable logging and close all log-files
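The multiply function handed to env.f_run above is not part of this excerpt. In the original example it is a plain run function along the following lines (a sketch; the exact body may differ slightly):

def multiply(traj, result_list):
    """Run function: pypet passes the trajectory first, extra arguments after it."""
    z = traj.x * traj.y
    # traj.v_idx is the index of the current run, so every run fills its own slot
    result_list[traj.v_idx] = z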
github raphaelholca / hebbianCNN / pypet_launch.py View on Github
print_dict = parameter_dict.copy()
print_dict.update(explore_dict)

""" create pypet environment """
env = pypet.Environment(trajectory 		= 'explore_perf',
						log_stdout		= False,
						add_time 		= False,
						multiproc 		= True,
						ncores 			= 6,
						filename		=  os.path.join(save_path, 'explore_perf.hdf5'))


traj = env.v_trajectory
pp.add_parameters(traj, parameter_dict)

explore_dict = pypet.cartesian_product(explore_dict, tuple(explore_dict.keys())) # if not all entries of the dict need to be explored through the cartesian product, replace tuple(...) with a tuple containing only the relevant dict keys

explore_dict['name'] = pp.set_run_names(explore_dict, parameter_dict['name'])
traj.f_explore(explore_dict)

""" launch simulation with pypet for parameter exploration """
tic = time.time()
env.f_run(pp.launch_exploration, images_train, labels_train, images_test, labels_test, save_path)
toc = time.time()

""" save parameters to file """
helper.print_params(print_dict, save_path, runtime=toc-tic)

""" plot results """
name_best = pp.plot_results(folder_path=save_path)
pp.faceting(save_path)
github SmokinCaterpillar / pypet / examples / example_17_wrapping_an_existing_project / pypetwrap.py View on Github
    traj.par.ncells = 400, 'Number of cells'
    traj.par.steps = 250, 'Number of timesteps'
    traj.par.rule_number = 30, 'The ca rule'
    traj.par.initial_name = 'random', 'The type of initial state'
    traj.par.seed = 100042, 'RNG Seed'


    # Explore
    exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
                'initial_name' : ['single', 'random'],}
    # # You can uncomment the ``exp_dict`` below to see that changing the
    # # exploration scheme is now really easy:
    # exp_dict = {'rule_number' : [10, 30, 90, 110, 184],
    #             'ncells' : [100, 200, 300],
    #             'seed': [333444555, 123456]}
    exp_dict = cartesian_product(exp_dict)
    traj.f_explore(exp_dict)

    # Run the simulation
    logger.info('Starting Simulation')
    env.run(wrap_automaton)

    # Load all data
    traj.f_load(load_data=2)

    logger.info('Printing data')
    for idx, run_name in enumerate(traj.f_iter_runs()):
        # Plot all patterns
        filename = os.path.join(folder, make_filename(traj))
        plot_pattern(traj.crun.pattern, traj.rule_number, filename)
        progressbar(idx, len(traj), logger=logger)
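For the exploration defined above, cartesian_product yields 5 * 2 = 10 runs; the commented-out alternative would yield 5 * 3 * 2 = 30. A quick, standalone way to sanity-check the run count before calling f_explore (a sketch):

exp_dict = cartesian_product({'rule_number': [10, 30, 90, 110, 184],
                              'initial_name': ['single', 'random']})
print(len(exp_dict['rule_number']))  # -> 10, the number of runs f_explore will add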
github SmokinCaterpillar / pypet / examples / example_19b_using_deap_less_overhead.py View on Github
    for g in range(traj.NGEN):

        # ------- Evaluate current generation -------- #
        print("-- Generation %i --" % g)

        # Determine individuals that need to be evaluated
        eval_pop = [ind for ind in pop if not ind.fitness.valid]

        # Add as many explored runs as individuals that need to be evaluated.
        # Furthermore, add the individuals as explored parameters.
        # We need to convert them to lists or write our own custom IndividualParameter ;-)
        # Note the second argument to `cartesian_product`:
        # This is for only having the cartesian product
        # between ``generation x (ind_idx AND individual)``, so that every individual has just one
        # unique index within a generation.
        traj.f_expand(cartesian_product({'generation': [g],
                                         'ind_idx': range(len(eval_pop)),
                                         'individual':[list(x) for x in eval_pop]},
                                            [('ind_idx', 'individual'),'generation']))

        fitnesses_results = toolbox.map(toolbox.evaluate)  # evaluate using our fitness function

        # fitnesses_results is a list of
        # nested tuples: [(run_idx, (fitness,)), ...]
        for idx, result in enumerate(fitnesses_results):
            # Update fitnesses
            _, fitness = result  # The environment returns tuples: [(run_idx, run), ...]
            eval_pop[idx].fitness.values = fitness

        # Append all fitnesses (note that DEAP fitnesses are tuples of length 1
        # but we are only interested in the value)
        traj.fitnesses.extend([x.fitness.values[0] for x in eval_pop])
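The second argument to cartesian_product used above is worth spelling out: parameter names grouped in an inner tuple are combined element-wise (their value lists must have equal length), and only the groups are crossed with each other. A small standalone sketch with made-up values:

from pypet import cartesian_product

# 'ind_idx' and 'individual' are zipped together, then crossed with 'generation'
grouped = cartesian_product(
    {'generation': [0, 1],
     'ind_idx': [0, 1],
     'individual': [[0.1, 0.2], [0.3, 0.4]]},
    [('ind_idx', 'individual'), 'generation'])

# Result: 2 generations * 2 (ind_idx, individual) pairs = 4 runs, and individual k
# always keeps ind_idx k, which is exactly what the comment above relies on.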
github SmokinCaterpillar / pypet / examples / example_03_trajectory_merging.py View on Github
comment = 'I am going to be merged into some other trajectory!')

# Get the trajectories from the environment
traj1 = env1.v_trajectory
traj2 = env2.v_trajectory

# Add both parameters
traj1.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj1.f_add_parameter('y', 1.0, comment='I am the second dimension!')
traj2.f_add_parameter('x', 1.0, comment='I am the first dimension!')
traj2.f_add_parameter('y', 1.0, comment='I am the second dimension!')

# Explore the parameters with a cartesian product for the first trajectory:
traj1.f_explore(cartesian_product({'x':[1.0,2.0,3.0,4.0], 'y':[6.0,7.0,8.0]}))
# Let's explore slightly differently for the second:
traj2.f_explore(cartesian_product({'x':[3.0,4.0,5.0,6.0], 'y':[7.0,8.0,9.0]}))


# Run the simulations with all parameter combinations
env1.f_run(multiply)
env2.f_run(multiply)

# Now we merge them together into traj1
# We want to remove duplicate entries
# like the parameter space point x=3.0, y=7.0.
# Several points have been explored by both trajectories and we need them only once.
# Therefore, we set remove_duplicates=True (Note this takes O(N1*N2)!).
# We also want to backup both trajectories, but we let the system choose the filename.
# Accordingly we choose backup_filename=True instead of providing a filename.
# We want to move the hdf5 nodes from one trajectory to the other.
# Thus we set move_nodes=True.
# Finally, we want to delete the other trajectory afterwards since we already have a backup.
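The excerpt ends just before the merge itself. Based on the comments above, the call looks roughly like the following sketch (keyword names can differ between pypet versions, so check the f_merge signature of your installation):

traj1.f_merge(traj2,
              remove_duplicates=True,        # keep points explored by both trajectories only once
              backup_filename=True,          # let pypet choose the backup file names
              move_nodes=True,               # move the hdf5 nodes instead of copying them
              delete_other_trajectory=True)  # traj2 is no longer needed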
github SmokinCaterpillar / pypet / examples / example_19_using_deap.py View on Github
    pop = toolbox.population(n=traj.popsize)
    CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN


    print("Start of evolution")
    for g in range(traj.NGEN):

        # ------- Evaluate current generation -------- #
        print("-- Generation %i --" % g)

        # Determine individuals that need to be evaluated
        eval_pop = [ind for ind in pop if not ind.fitness.valid]

        # Add as many explored runs as individuals that need to be evaluated
        traj.f_expand(cartesian_product({'generation': [g], 'ind_idx': range(len(eval_pop))}))

        fitnesses_results = toolbox.map(toolbox.evaluate, eval_pop)  # evaluate using our fitness function
        # fitnesses_results is a list of
        # nested tuples: [(run_idx, (fitness,)), ...]
        for idx, result in enumerate(fitnesses_results):
            # Update fitnesses
            _, fitness = result  # The environment returns tuples: [(run_idx, run), ...]
            eval_pop[idx].fitness.values = fitness

        print("  Evaluated %i individuals" % len(fitnesses_results))

        # Gather all the fitnesses_results in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
github SmokinCaterpillar / pypet / examples / example_08_f_find_idx.py View on Github
# Create an environment that handles running
filename = os.path.join('hdf5', 'example_08.hdf5')
env = Environment(trajectory='Example08',filename=filename,
                  file_title='Example08',
                  comment='Another example!')

# Get the trajectory from the environment
traj = env.v_trajectory

# Add both parameters
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
traj.f_add_parameter('y', 1, comment='I am the second dimension!')

# Explore the parameters with a cartesian product:
traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))

# Run the simulation
env.f_run(multiply)

# We load all results
traj.f_load(load_results=pypetconstants.LOAD_DATA)

# And now we want to find some particular results, the ones where x was 2 or y was 8.
# Therefore, we use a lambda function
my_filter_predicate = lambda x, y: x == 2 or y == 8

# We can now use this lambda function to search for the run indexes associated with x==2 OR y==8.
# We need a list specifying the names of the parameters and the predicate to do this.
# Note that the names need to be in the same order as the arguments of the lambda function, here 'x' and 'y':
idx_iterator = traj.f_find_idx(['x','y'], my_filter_predicate)
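f_find_idx returns an iterator over the indices of all runs that satisfy the predicate. A sketch of how it is typically consumed, mirroring the rest of the original example:

print('The following runs satisfy x == 2 or y == 8:')
for idx in idx_iterator:
    # Setting v_idx makes the trajectory behave as if we were inside that single run
    traj.v_idx = idx
    print('Run %d with x=%d and y=%d' % (idx, traj.x, traj.y))

# Reset the trajectory to its default state afterwards
traj.f_restore_default()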
github SmokinCaterpillar / pypet / examples / example_20_using_deap_manual_runs.py View on Github
    pop = toolbox.population(n=traj.popsize)
    CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN


    start_idx = 0  # We need to count executed runs

    print("Start of evolution")
    for g in range(traj.NGEN):
        print("-- Generation %i --" % g)

        # Determine individuals that need to be evaluated
        eval_pop = [ind for ind in pop if not ind.fitness.valid]

        # Add as many explored runs as individuals that need to be evaluated
        traj.f_expand(cartesian_product({'generation': [g], 'ind_idx': range(len(eval_pop))}))

        # We need to make the storage service multiprocessing safe
        mc = MultiprocContext(traj, wrap_mode='QUEUE')
        mc.f_start()

        # Create a single iterable to be passed to our fitness function (wrapper).
        # `yields='copy'` is important, because the pool's `map` function will
        # go over the whole iterator at once and store it in memory.
        # So for every run we need a copy of the trajectory.
        # Alternatively, you could use `yields='self'` and use the pool's `imap` function.
        zip_iterable = izip(traj.f_iter_runs(start_idx, yields='copy'), eval_pop)

        fitnesses = toolbox.map(eval_wrapper, zip_iterable)
        # fitnesses is just a list of tuples [(fitness,), ...]
        for idx, fitness in enumerate(fitnesses):
            # Update fitnesses