@docval({'name': 'name', 'type': str, 'doc': 'the name of the attribute'},
        {'name': 'value', 'type': None, 'doc': 'the attribute value'})
def set_attribute(self, **kwargs):
    """
    Set an attribute for this group.
    """
    name, value = getargs('name', 'value', kwargs)
    super().__getitem__(GroupBuilder.__attribute)[name] = value
    self.obj_type[name] = GroupBuilder.__attribute
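docval matches positional and keyword arguments against the declared argument specs before the method body runs, so a decorated method like set_attribute above can be called either way. A minimal usage sketch, assuming builder is an existing GroupBuilder:

# Hypothetical usage; builder stands in for a GroupBuilder instance.
builder.set_attribute('neurodata_type', 'Module')             # positional form
builder.set_attribute(name='neurodata_type', value='Module')  # keyword form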
@docval({'name': 'obj_type', 'type': (str, type), 'doc': 'a class name or type object'},
        {'name': 'spec', 'type': 'Spec', 'doc': 'a Spec object'})
def register_spec(cls, **kwargs):
    '''
    Associate a specified object type with an HDF5 specification
    '''
    obj_type, spec = getargs('obj_type', 'spec', kwargs)
    type_name = obj_type.__name__ if isinstance(obj_type, type) else obj_type
    if type_name in cls.__specs:
        raise ValueError("'%s' - cannot overwrite existing specification" % type_name)
    cls.__specs[type_name] = spec
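A hedged usage sketch of register_spec; SpecRegistry stands in for whichever class owns the method, and my_spec/other_spec for already-built Spec objects (all placeholder names, not taken from the code above):

# Hypothetical illustration: either a class or a class name can be passed as obj_type.
SpecRegistry.register_spec(TimeSeries, my_spec)        # stored under the key 'TimeSeries'
SpecRegistry.register_spec('TimeSeries', other_spec)   # raises ValueError: cannot overwrite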
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
        {'name': 'source', 'type': str, 'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
                                                'contained here. It can also be the name of a device, for stimulus or '
                                                'acquisition data')},
        {'name': 'data', 'type': (list, np.ndarray), 'doc': 'The data this TimeSeries dataset stores. Can also store binary data, e.g. image frames'},
        {'name': 'reference_frame', 'type': str, 'doc': 'description defining what the zero-position is'},
        {'name': 'conversion', 'type': float, 'doc': 'Scalar to multiply each element by to convert to volts', 'default': _default_conversion},
        {'name': 'resolution', 'type': float, 'doc': 'The smallest meaningful difference (in specified unit) between values in data', 'default': _default_resolution},
        {'name': 'timestamps', 'type': (list, np.ndarray), 'doc': 'Timestamps for samples stored in data', 'default': None},
        {'name': 'starting_time', 'type': float, 'doc': 'The timestamp of the first sample', 'default': None},
        {'name': 'rate', 'type': float, 'doc': 'Sampling rate in Hz', 'default': None},
        {'name': 'comments', 'type': str, 'doc': 'Human-readable comments about this TimeSeries dataset', 'default': None},
        {'name': 'description', 'type': str, 'doc': 'Description of this TimeSeries dataset', 'default': None},
        {'name': 'parent', 'type': 'NWBContainer', 'doc': 'The parent NWBContainer for this NWBContainer', 'default': None},
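In a docval argument list, entries without a 'default' key are required and entries with one are optional. A minimal, self-contained sketch of that convention (a toy class, not part of the TimeSeries code above):

class Toy:
    @docval({'name': 'name', 'type': str, 'doc': 'required: no default given'},
            {'name': 'rate', 'type': float, 'doc': 'optional: the default is used when omitted', 'default': 1.0})
    def describe(self, **kwargs):
        name, rate = getargs('name', 'rate', kwargs)
        return '%s sampled at %g Hz' % (name, rate)

Toy().describe('lfp')            # falls back to the default rate of 1.0
Toy().describe('lfp', rate=2.5)  # overrides the optional argument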
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
        {'name': 'source', 'type': str, 'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
                                                'contained here. It can also be the name of a device, for stimulus or '
                                                'acquisition data')},
        {'name': 'data', 'type': (list, np.ndarray, TimeSeries), 'doc': 'The data this TimeSeries dataset stores. Can also store binary data, e.g. image frames'},
        {'name': 'electrodes', 'type': (list, tuple), 'doc': 'the names of the electrode groups, or the ElectrodeGroup objects that each channel corresponds to'},
        {'name': 'resolution', 'type': float, 'doc': 'The smallest meaningful difference (in specified unit) between values in data', 'default': _default_resolution},
        {'name': 'conversion', 'type': float, 'doc': 'Scalar to multiply each element by to convert to volts', 'default': _default_conversion},
        {'name': 'timestamps', 'type': (list, np.ndarray, TimeSeries), 'doc': 'Timestamps for samples stored in data', 'default': None},
        {'name': 'starting_time', 'type': float, 'doc': 'The timestamp of the first sample', 'default': None},
        {'name': 'rate', 'type': float, 'doc': 'Sampling rate in Hz', 'default': None},
        {'name': 'comments', 'type': str, 'doc': 'Human-readable comments about this TimeSeries dataset', 'default': None},
        {'name': 'description', 'type': str, 'doc': 'Description of this TimeSeries dataset', 'default': None},
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
        {'name': 'source', 'type': str, 'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
                                                'contained here. It can also be the name of a device, for stimulus or '
                                                'acquisition data')},
        {'name': 'data', 'type': (list, np.ndarray, 'TimeSeries'), 'doc': 'The data this TimeSeries dataset stores. Can also store binary data, e.g. image frames'},
        {'name': 'site', 'type': str, 'doc': 'Name of the site description'},
        {'name': 'resolution', 'type': float, 'doc': 'The smallest meaningful difference (in specified unit) between values in data', 'default': _default_resolution},
        # Optional arguments:
        {'name': 'conversion', 'type': float, 'doc': 'Scalar to multiply each element in data to convert it to the specified unit', 'default': _default_conversion},
        ## time related data is optional, but one is required -- this will have to be enforced in the constructor (see the sketch after this block)
        {'name': 'timestamps', 'type': (list, np.ndarray, 'TimeSeries'), 'doc': 'Timestamps for samples stored in data', 'default': None},
        {'name': 'starting_time', 'type': float, 'doc': 'The timestamp of the first sample', 'default': None},
        {'name': 'rate', 'type': float, 'doc': 'Sampling rate in Hz', 'default': None},
        {'name': 'comments', 'type': str, 'doc': 'Human-readable comments about this TimeSeries dataset', 'default': None},
        {'name': 'description', 'type': str, 'doc': 'Description of this TimeSeries dataset', 'default': None},
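The comment above notes that the constructor has to enforce that either timestamps or a rate (with starting_time) is supplied. A hedged sketch of what such a check might look like inside the docval-decorated __init__ (an illustration, not the library's actual implementation):

# Hypothetical constructor-body check; assumes kwargs from the docval-decorated __init__.
timestamps, starting_time, rate = getargs('timestamps', 'starting_time', 'rate', kwargs)
if timestamps is None and rate is None:
    raise ValueError('either timestamps or rate (with starting_time) must be specified')
if timestamps is not None and rate is not None:
    raise ValueError('specify timestamps or rate, not both')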
@docval({'name': 'name', 'type': str, 'doc': 'the name of this link'},
        {'name': 'builder', 'type': 'LinkBuilder', 'doc': 'the LinkBuilder that represents this link'})
def set_link(self, **kwargs):
    """
    Add a link to this group
    """
    name, builder = getargs('name', 'builder', kwargs)
    self.__set_builder(name, builder, GroupBuilder.__link)
@docval(*deepcopy(_attr_args))
def add_attribute(self, **kwargs):
    """ Add an attribute to this object
    """
    doc, name = kwargs.pop('doc'), kwargs.pop('name')
    spec = AttributeSpec(doc, name, **kwargs)
    #spec.set_parent(self)
    self['attributes'].append(spec)
    return spec
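@docval(*deepcopy(_attr_args)) reuses a shared list of argument specs, and the deepcopy keeps each decorator's copy independent. A sketch of the shape such a list might have (an assumption for illustration, not the library's actual definition):

# Hypothetical shape of _attr_args: an ordinary list of docval argument specs
# that several methods can share.
_attr_args = [
    {'name': 'name', 'type': str, 'doc': 'the name of the attribute'},
    {'name': 'doc', 'type': str, 'doc': 'a description of the attribute'},
]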
@docval({'name': 'dataset', 'type': 'DatasetBuilder', 'doc': 'the DatasetBuilder to merge into this DatasetBuilder'})
def deep_update(self, **kwargs):
    """Merge data and attributes from the given DatasetBuilder into this DatasetBuilder"""
    dataset = getargs('dataset', kwargs)
    if dataset.data:
        self['data'] = dataset.data  # TODO: figure out if we want to add a check for overwrite
    self['attributes'].update(dataset.attributes)
@docval({'name': 'name', 'type': str, 'doc': 'the name of this link'},
        {'name': 'file_path', 'type': str, 'doc': 'the file path of this external link'},
        {'name': 'path', 'type': str, 'doc': 'the absolute path within the external HDF5 file'},
        returns='the builder object for the external link', rtype='ExternalLinkBuilder')
def add_external_link(self, **kwargs):
    """
    Create an external link and add it to this group.
    """
    name, file_path, path = getargs('name', 'file_path', 'path', kwargs)
    builder = ExternalLinkBuilder(path, file_path)
    self.set_link(name, builder)
    return builder
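A usage sketch for add_external_link; the group variable and both paths are placeholder values:

# Hypothetical usage; grp stands in for a GroupBuilder instance.
link = grp.add_external_link('raw_data',
                             '/data/session1.h5',            # file containing the target
                             '/acquisition/timeseries/raw')  # absolute path inside that file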
@docval({'name': 'name', 'type': str, 'doc': 'the name of this subgroup'},
        {'name': 'groups', 'type': dict, 'doc': 'a dictionary of subgroups to create in this subgroup', 'default': dict()},
        {'name': 'datasets', 'type': dict, 'doc': 'a dictionary of datasets to create in this subgroup', 'default': dict()},
        {'name': 'attributes', 'type': dict, 'doc': 'a dictionary of attributes to create in this subgroup', 'default': dict()},
        {'name': 'links', 'type': dict, 'doc': 'a dictionary of links to create in this subgroup', 'default': dict()},
        returns='the GroupBuilder object for the subgroup', rtype='GroupBuilder')
def add_group(self, **kwargs):
    """
    Add a subgroup with the given data to this group
    """
    name = kwargs.pop('name')
    builder = GroupBuilder(**kwargs)
    self.set_group(name, builder)
    return builder
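A usage sketch combining add_group with set_attribute from the first snippet; root and the attribute values are placeholders:

# Hypothetical usage; root stands in for a GroupBuilder instance.
processing = root.add_group('processing')  # remaining arguments fall back to their empty-dict defaults
processing.set_attribute('help', 'a group for processed data')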