import os

from pypet import Trajectory, Environment
# `BrianMonitorResult`, `BrianParameter`, `FunctionParameter`, `euler_scheme`,
# `diff_lorenz` and `trajectory_name` are defined or imported elsewhere in the
# original pypet examples these snippets were taken from.

def main():
    filename = os.path.join('hdf5', 'Clustered_Network.hdf5')
    # If we pass a filename to the trajectory, a new HDF5StorageService will
    # be created automatically
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=[BrianMonitorResult,
                                                    BrianParameter])
    # Let's create a fake environment to enable logging:
    env = Environment(traj, do_single_runs=False)
    # Load the trajectory, but only load the skeleton of the results
    traj.f_load(index=-1, load_parameters=2, load_derived_parameters=2, load_results=1)
    # Find the result instances related to the Fano factor
    fano_dict = traj.f_get_from_runs('mean_fano_factor', fast_access=False)
    # Load the data of the Fano factor results
    ffs = fano_dict.values()
    traj.f_load_items(ffs)
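    # Sketch (assuming each result stores a single value under its own name):
    # once loaded, the Fano factors can be read off the result instances, e.g.
    #     for run_name, ff_result in fano_dict.items():
    #         print('%s: %s' % (run_name, ff_result.f_get()))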
    # 2nd phase: let's run the experiment.
    # We pass `euler_scheme` as our top-level simulation function and
    # the Lorenz equation `diff_lorenz` as an additional argument
    env.f_run(euler_scheme, diff_lorenz)
    # We don't have a 3rd phase of post-processing here
    # 4th phase: analysis.
    # I would recommend doing post-processing completely independently of the simulation,
    # but for simplicity let's do it here.
    # Let's assume that we start all over again and load the entire trajectory anew.
    # Yet, there is an error within this approach, do you spot it?
    del traj
    traj = Trajectory(filename=filename)
    # We will only fully load parameters and derived parameters.
    # Results will be loaded manually later on.
    try:
        # However, this will fail because our trajectory does not know how to
        # build the FunctionParameter. You have seen this coming, right?
        traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                    load_results=1)
    except ImportError as e:
        print("That didn't work, I am sorry: %s" % str(e))
    # Ok, let's try again, but this time with our custom parameter added to the imports
    traj = Trajectory(filename=filename,
                      dynamically_imported_classes=FunctionParameter)
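    # With the custom class known to the trajectory, the same load now succeeds.
    # (Sketch; `trajectory_name` is assumed to be defined elsewhere, as above.)
    traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
                load_results=1)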
__author__ = 'Robert Meyer'
import os # To allow pathnames under Windows and Linux
from pypet import Trajectory, NotUniqueNodeError
# We first generate a new Trajectory
filename = os.path.join('hdf5', 'example_02.hdf5')
traj = Trajectory('Example', filename=filename,
comment='Access and Storage!')
# We add our first parameter with the data 'Harrison Ford'
traj.f_add_parameter('starwars.characters.han_solo', 'Harrison Ford')
# This automatically added the group 'starwars' and the subgroup 'characters'.
# Let's get the characters subgroup
characters = traj.parameters.starwars.characters
# Since 'characters' is unique, we could also use shortcuts
characters = traj.characters
# Or the get method
characters = traj.f_get('characters')
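# `NotUniqueNodeError` (imported above) fires when a natural-naming shortcut is
# ambiguous. A sketch: after adding a second 'characters' group, the shortcut
# no longer resolves uniquely:
traj.f_add_parameter('startrek.characters.jean_luc', 'Patrick Stewart')
try:
    characters = traj.characters
except NotUniqueNodeError as error:
    print('The shortcut is ambiguous now: %s' % str(error))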
__author__ = 'Robert Meyer'
from pypet import Trajectory, Result, Parameter
traj = Trajectory()
# There are several ways to add data.
# 1st, the standard way:
traj.f_add_parameter('x', 1, comment='I am the first dimension!')
# 2nd, by providing a new parameter/result instance; be aware that the data is
# added where you specify it. There are no such things as shortcuts for
# parameter creation:
traj.parameters.y = Parameter('y', 1, comment='I am the second dimension!')
# 3rd, as before, but if our new leaf has NO name it will be renamed accordingly:
traj.parameters.t = Parameter('', 1, comment='Third dimension')
# See:
print('t=' + str(traj.t))
# What happens if our new parameter's name does not match the name passed to the constructor?
traj.parameters.subgroup = Parameter('v', 2, comment='Fourth dimension')
# Well, since 'subgroup' != 'v', 'subgroup' becomes just another group node created on the fly
print(traj.parameters.subgroup)
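# Sketch: for the leaf to land directly under 'parameters', the attribute name
# and the constructor name must agree, just as with 'y' above:
traj.parameters.w = Parameter('w', 2, comment='Fourth dimension, done right')
print('w=' + str(traj.w))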
from pypet import Trajectory, pypetconstants
# `logger` and the `timed` context manager are helpers defined elsewhere in the
# LTL code base these functions come from.

def get_empty_traj(filename, name_or_index=-1):
    """
    This takes an hdf5 filename (which should have been generated by pypet/LTL in
    the first place) and returns a :class:`~pypet.trajectory.Trajectory` instance
    loaded from the trajectory referred to by `name_or_index` stored in the file.
    In this function, nothing is loaded for the results and derived parameters,
    whereas the parameters are fully loaded.
    This is recommended when the file size is REALLY LARGE (e.g. > 20 GB).

    :param filename: filename of an HDF5 file created by LTL
    :param name_or_index: The name or index of the trajectory to load from the file;
        if unspecified, the LAST trajectory is loaded.
    """
    traj = Trajectory(filename=filename)
    load_params_dict = {
        'load_parameters': pypetconstants.LOAD_DATA,
        'load_results': pypetconstants.LOAD_NOTHING,
        'load_derived_parameters': pypetconstants.LOAD_NOTHING,
        'force': True
    }
    if isinstance(name_or_index, str):
        load_params_dict['name'] = name_or_index
    else:
        index = int(name_or_index)
        load_params_dict['index'] = index
    # Loading Trajectory from file.
    with timed(logger, "Primary Loading of The HDF File"):
        traj.f_load(**load_params_dict)
    return traj
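# Usage sketch (the filename is a hypothetical example, assuming it was written
# by pypet/LTL beforehand):
#     traj = get_empty_traj('ltl_results.hdf5')              # last trajectory
#     traj = get_empty_traj('ltl_results.hdf5', 'run_no_0')  # by name (hypothetical)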
def get_skeleton_traj(filename, name_or_index=-1):
    """
    This takes an hdf5 filename (which should have been generated by pypet/LTL in
    the first place) and returns a :class:`~pypet.trajectory.Trajectory` instance
    loaded from the trajectory referred to by the `name_or_index` parameter stored
    in the file. In this function, only the tree structure, a.k.a. skeleton, is
    loaded for the results and derived parameters, whereas the parameters are
    fully loaded. This is recommended for moderately sized files (< 20 GB).

    :param filename: filename of an HDF5 file created by LTL
    :param name_or_index: The name or index of the trajectory to load from the file;
        if unspecified, the LAST trajectory is loaded.
    """
    traj = Trajectory(filename=filename)
    load_params_dict = {
        'load_parameters': pypetconstants.LOAD_DATA,
        'load_results': pypetconstants.LOAD_SKELETON,
        'load_derived_parameters': pypetconstants.LOAD_SKELETON,
        'force': True
    }
    if isinstance(name_or_index, str):
        load_params_dict['name'] = name_or_index
    else:
        index = int(name_or_index)
        load_params_dict['index'] = index
    # Loading Trajectory from file.
    with timed(logger, "Primary Loading of The HDF File"):
        traj.f_load(**load_params_dict)
    logger.info("Finished Primary Loading")
    return traj
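# Usage sketch: with only the skeleton loaded, result data can be filled in on
# demand later, e.g. via `traj.f_load_items(...)` or by setting
# `traj.v_auto_load = True`:
#     traj = get_skeleton_traj('ltl_results.hdf5')  # hypothetical filename
#     traj.v_auto_load = True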
import logging
import multiprocessing as mp
import os

from pypet import Trajectory
from pypet.environment import MultiprocContext

def main():
    # We don't use an environment, so we enable logging manually
    logging.basicConfig(level=logging.INFO)
    filename = os.path.join('hdf5', 'example_16.hdf5')
    traj = Trajectory(filename=filename, overwrite_file=True)
    # The result that will be manipulated
    traj.f_add_result('last_process_name', 'N/A',
                      comment='Name of the last process that manipulated the trajectory')
    with MultiprocContext(trajectory=traj, wrap_mode='LOCK') as mc:
        # The multiprocessing context manager wraps the storage service of the
        # trajectory and passes the wrapped service to the trajectory.
        # It also restores the original storage service in the end.
        # Moreover, we need the lock wrapping because the locks are pickled and
        # sent to the pool for all function executions.
        # Start a pool of processes manipulating the trajectory
        iterable = (traj for x in range(50))
        pool = mp.Pool(processes=4)
        # Pass the trajectory and the function to the pool and execute it 50 times
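        # Sketch of how the pool would typically be driven from here; the
        # worker function name is hypothetical:
        #     pool.map_async(manipulate_trajectory, iterable)
        #     pool.close()
        #     pool.join()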
import matplotlib.pyplot as plt

from pypet import Trajectory

def main():
    # This time we don't need an environment since we're just going to look
    # at data in the trajectory
    traj = Trajectory('FiringRate', add_time=False)
    # Let's load the trajectory from the file.
    # Only load the parameters; we will load the results on the fly as we need them
    traj.f_load(filename='./hdf5/FiringRate.hdf5', load_parameters=2,
                load_results=0, load_derived_parameters=0)
    # We'll simply use auto loading so all data will be loaded when needed
    traj.v_auto_load = True
    rates_frame = traj.res.summary.firing_rates.rates_frame
    # Here we load the data automatically on the fly
    plt.figure()
    plt.subplot(2, 1, 1)
    # Let's iterate through the columns and plot the different firing rates
    # (`items` replaces `iteritems`, which was removed in pandas 2.0):
    for tau_ref, I_col in rates_frame.items():
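        # (Sketch of a plausible loop body; the original snippet is truncated
        # here and the label format is an assumption.)
        plt.plot(I_col.index, I_col, label='tau_ref=%s' % str(tau_ref))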