# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# -- Fragment: write a signal array into an HDF5 file (PyTables 2 camelCase API) --
# NOTE(review): free-standing snippet; `_open_file`, `fname`, `sp_dict`,
# `dataset`, `overwrite` and `tables` are defined outside this view — confirm.
h5f = _open_file(fname, 'a')
sp = sp_dict['data']
# `dataset` is a '/'-separated HDF5 path; split into parent group + leaf name.
parts = dataset.split('/')
group = '/'.join(parts[:-1])
node_name = parts[-1]
if overwrite:
    try:
        h5f.removeNode(group, node_name)
    except tables.exceptions.NodeError:
        # Node did not exist yet — nothing to remove.
        pass
atom = tables.Atom.from_dtype(sp.dtype)
shape = sp.shape
# complevel=0 disables compression even though zlib is named.
filters = tables.Filters(complevel=0, complib='zlib')
arr_node = h5f.createCArray(group, node_name, atom, shape,
                            filters=filters,
                            createparents=True)
arr_node[:] = sp
# Record the sampling frequency as a node attribute.
arr_node.attrs['sampfreq']=sp_dict['FS']
#attrs = sp_dict.copy()
"""Write signal"""
h5f = self.h5file
sp = sp_dict['data']
parts = dataset.split('/')
group = '/'.join(parts[:-1])
node_name = parts[-1]
if overwrite:
try:
h5f.removeNode(group, node_name)
except tables.exceptions.NodeError:
pass
atom = tables.Atom.from_dtype(sp.dtype)
shape = sp.shape
filters = tables.Filters(complevel=0, complib='zlib')
arr_node = h5f.createCArray(group, node_name, atom, shape,
filters=filters,
createparents=True)
arr_node[:] = sp
arr_node.attrs['sampfreq'] = sp_dict['FS']
"""
filters = tb.Filters(complevel=9, complib='blosc')
group=self.getGroup(chr,groupName)
#check to see if 0 has been added to the start of indptr
if len(indptr)==len(rownames):
indptr=[0]+indptr
for par in ('data', 'indices', 'indptr', 'shape',"rownames","colnames"):
arr = None
atom=tb.Atom.from_dtype(np.dtype(np.int32))
if (par=='data'):
arr=np.array(data)
if groupName=="AD_geno" or groupName=="PL_geno":
atom=tb.Atom.from_dtype(np.dtype('S20'))
else:
atom=tb.Atom.from_dtype(np.dtype(np.float64))
elif (par=='indices'):
arr=np.array(indices)
elif (par=='indptr'):
arr=np.array(indptr)
elif (par=='shape'):
arr=np.array(shape)
elif(par=="rownames"):
arr=np.array(rownames)
elif(par=="colnames"):
arr=np.array(colnames)
ds = self.file.create_earray(group, par, atom, (0,),filters=filters)
ds.append(arr)
def save_arr_as_mat(fname, arrs, complib='blosc'):
    """
    Given 1D and/or 2D arrays save as a column matrix (2D array).

    fname : name of file to be saved.
    arrs : a list with 1D/2D arrays with *same first dimension*.
    complib : PyTables compression library name (default 'blosc').
    """
    # Total width is the sum of each array's column count (1 for 1D arrays);
    # the row count is taken from the arrays (first dims assumed identical).
    nrow, ncol = 0, 0
    for a in arrs:
        if a.ndim > 1:
            ncol += a.shape[1]
        else:
            ncol += 1
        nrow = a.shape[0]
    f = tb.openFile(fname, 'w')
    try:
        atom = tb.Atom.from_dtype(np.dtype('f8'))
        shape = (nrow, ncol)
        filters = tb.Filters(complib=complib, complevel=9)
        d = f.createCArray('/', 'data', atom=atom, shape=shape,
                           filters=filters)
        # Copy each array into its horizontal slot [j1:j2).
        j1, j2 = 0, 0
        for a in arrs:
            if a.ndim == 1:
                # Promote 1D arrays to a single column.
                a = a.reshape(nrow, 1)
            j2 += a.shape[1]
            d[:, j1:j2] = a
            j1 = j2
        # Bug fix: original used the Python-2-only print statement, which is a
        # syntax error under Python 3.
        print("file with new array:", f)
    finally:
        # Bug fix: close the file even if a write above raises; the original
        # leaked the handle on any exception.
        f.close()
- groupName (string): the group name, for example gene name
"""
# NOTE(review): method-body fragment — the `def`, its docstring head, and the
# origin of `hmatrix`, `chr`, `groupName`, `tb`, `np` are outside this view.
filters = tb.Filters(complevel=9, complib='blosc')
group=self.getGroup(chr,groupName)
#check to see if 0 has been added to the start of indptr
if len(hmatrix.indptr)==len(hmatrix.rownames):
    hmatrix.indptr=[0]+hmatrix.indptr
# Persist each CSR-style component of hmatrix as its own appendable EArray.
for par in ('data', 'indices', 'indptr', 'shape',"rownames","colnames"):
    arr = None
    # Default atom: 32-bit integers (indices/indptr/shape/labels).
    atom=tb.Atom.from_dtype(np.dtype(np.int32))
    if (par=='data'):
        arr=np.array(hmatrix.data)
        if groupName=="AD_geno" or groupName=="PL_geno":
            # Genotype payloads are fixed-width 20-byte strings.
            atom=tb.Atom.from_dtype(np.dtype('S20'))
        else:
            atom=tb.Atom.from_dtype(np.dtype(np.float64))
    elif (par=='indices'):
        arr=np.array(hmatrix.indices)
    elif (par=='indptr'):
        arr=np.array(hmatrix.indptr)
    elif (par=='shape'):
        arr=np.array(hmatrix.shape)
    elif(par=="rownames"):
        arr=np.array(hmatrix.rownames)
    elif(par=="colnames"):
        arr=np.array(hmatrix.colnames)
    # Extendable array with growable first dimension (0,), then append once.
    ds = self.file.create_earray(group, par, atom, (0,),filters=filters)
    ds.append(arr)
def write_to(self, fname):
    """Persist the baseline matrix and its grid metadata to an HDF5 file.

    A zlib-compressed CArray named 'baseline' is created at the file root,
    filled from ``self.mtx``, and annotated with the extent/spacing fields.
    """
    # (node-attribute name, value) pairs mirrored onto the stored array.
    metadata = (
        ('resolution', self.resolution),
        ('fuzz', self.fuzz),
        ('north', self.north),
        ('south', self.south),
        ('east', self.east),
        ('west', self.west),
        ('lon_spacing', self.lon_spacing),
        ('lat_spacing', self.lat_spacing),
        ('longitudes', self.lon),
        ('latitudes', self.lat),
        ('raster_name', self.raster_name),
    )
    with tables.open_file(fname, 'w') as f:
        node = f.create_carray(
            f.root, 'baseline',
            tables.Atom.from_dtype(self.mtx.dtype),
            self.mtx.shape,
            filters=tables.Filters(complevel=6, complib='zlib'))
        node[:, :] = self.mtx[:, :]
        for attr_name, value in metadata:
            setattr(node.attrs, attr_name, value)
# NOTE(review): mangled fragment — the `if` matching the `else:` below, the
# enclosing function, and the origin of `VERSION`, `version`, `VERSION_KEY`,
# `f`, `group`, `key`, `namedtuple`, `six` are outside this view.
raise ValueError("Attempted to write analysis HDF5 version %d data to a version %d file" % (VERSION, version))
else:
    ds = f.create_array(f.root, VERSION_KEY, np.int64(VERSION))
subgroup = f.create_group(group, '_'+key)
# Store each field of the namedtuple as its own node under `subgroup`.
for field in namedtuple._fields:
    arr = getattr(namedtuple, field)
    # XML encode strings so we can store them as HDF5 ASCII
    if isinstance(arr, six.string_types):
        arr = np.string_(arr.encode('ascii', 'xmlcharrefreplace'))
    if not hasattr(arr, 'dtype'):
        raise ValueError('%s/%s must be a numpy array or scalar' % (group,key))
    atom = tables.Atom.from_dtype(arr.dtype)
    if len(arr.shape) > 0:
        if arr.size > 0:
            # Non-empty array: fixed-shape CArray.
            ds = f.create_carray(subgroup, field, atom, arr.shape)
        else:
            # Empty array: EArray so a zero-sized node can still be created.
            ds = f.create_earray(subgroup, field, atom, arr.shape)
        # NOTE(review): source indentation was lost; this fill may belong
        # only to the CArray branch — confirm against the original project.
        ds[:] = arr
    else:
        # 0-d scalar: plain Array node.
        ds = f.create_array(subgroup, field, arr)
def saveArray(array, title, group, tables_file):
    """Store ``array`` under ``group`` in ``tables_file`` as a CArray.

    The node name is the normalised ``title``; plain lists are converted to
    numpy arrays (and the name gains a "_was_list" suffix), and unicode
    arrays are re-encoded to UTF-8 byte strings so HDF5 can hold them.
    """
    node_name = normaliseString(title)
    if isinstance(array, list):
        # Remember that the caller passed a list, not an ndarray.
        array = numpy.array(array)
        node_name += "_was_list"
    if array.dtype.char == "U":
        # HDF5 has no native unicode dtype: encode each element to bytes.
        to_bytes = numpy.vectorize(lambda text: text.encode("UTF-8"))
        array = to_bytes(array).astype("S")
    node = tables_file.create_carray(
        group,
        node_name,
        tables.Atom.from_dtype(array.dtype),
        array.shape,
        title
    )
    node[:] = array
def _make_pt2_carray(hdf5_file, *args, **kwargs):
    """Create a CArray through the PyTables 2 API, optionally pre-filled.

    If the keyword ``obj`` holds an array, its dtype and shape supply the
    ``atom`` and ``shape`` keywords (unless the caller set them explicitly)
    and the new node is filled with its contents.
    """
    payload = kwargs.pop('obj', None)
    if payload is not None:
        # Derive defaults from the payload; explicit kwargs win.
        kwargs.setdefault('atom', pt.Atom.from_dtype(payload.dtype))
        kwargs.setdefault('shape', payload.shape)
    node = hdf5_file.createCArray(*args, **kwargs)
    if payload is not None:
        node[:] = payload[:]
    return node
def save_vecs(vecs, fout):
    """Write an array of vectors to an HDF5 file as a compressed CArray.

    Parameters
    ----------
    vecs : numpy.ndarray
        Array to store; its dtype and shape define the on-disk node 'vecs'.
    fout : str
        Path of the HDF5 file to create (truncated if it already exists).
    """
    # Bug fix: the original left the file handle open if any call between
    # open_file and close raised; the context manager guarantees closure.
    with tables.open_file(fout, 'w') as fvec:
        atom = tables.Atom.from_dtype(vecs.dtype)
        filters = tables.Filters(complib='blosc', complevel=5)
        ds = fvec.create_carray(fvec.root, 'vecs', atom, vecs.shape,
                                filters=filters)
        ds[:] = vecs
    print('done')