# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def getRAW (p, time=False):
  """Fetch a cutout in raw binary format and return it as a 1-D numpy array.

  Builds the ``/sd/.../raw/`` cutout URL from the request parameters in *p*,
  issues the HTTP request via getURL, and reinterprets the response bytes
  using the numpy dtype that corresponds to the channel datatype.

  Args:
    p: request-parameter object carrying token, channels, resolution, args
       (the cutout coordinate pairs) and datatype. Project-specific type.
    time: when True, p.args carries one extra (tstart, tstop) pair that is
       appended to the URL path for a timeseries cutout.

  Returns:
    A 1-D numpy.ndarray viewing the raw response payload.
  """
  # Build the url and then create a raw object. The only difference for the
  # timeseries case is one extra '{},{}' segment consumed from p.args.
  if time:
    url = 'https://{}/sd/{}/{}/raw/{}/{},{}/{},{}/{},{}/{},{}/'.format(SITE_HOST, p.token, ','.join(p.channels), p.resolution, *p.args)
  else:
    url = 'https://{}/sd/{}/{}/raw/{}/{},{}/{},{}/{},{}/'.format(SITE_HOST, p.token, ','.join(p.channels), p.resolution, *p.args)
  # Get the data back and reinterpret the payload bytes as the channel dtype.
  resp = getURL(url)
  rawdata = resp.content
  return np.frombuffer(rawdata, dtype=ND_dtypetonp[p.datatype])
# NOTE(review): fragment -- the enclosing def (and the try paired with the
# except at the bottom) begins before this excerpt; channels, imageargs,
# proj and db are parameters/locals of that enclosing scope.
channel_list = channels.split(',')
ch = proj.getChannelObj(channel_list[0])
# call cutout for first channel
channel_data = cutout( imageargs, ch, proj, db ).data
# allocate one cube per requested channel, shaped and typed after the first
cubedata = np.zeros ( (len(channel_list),)+channel_data.shape[:], dtype=channel_data.dtype )
# NOTE(review): the first channel is cut out a second time here even though
# channel_data above holds the same result -- redundant work.
cubedata[0,:] = cutout(imageargs, ch, proj, db).data
# iterate from second to nth channel
for idx,channel_name in enumerate(channel_list[1:]):
if channel_name == '0':
continue
else:
ch = proj.getChannelObj(channel_name)
# all channels in one cutout must share the numpy dtype of the first
if ND_dtypetonp[ch.channel_datatype] == cubedata.dtype:
cubedata[idx+1,:] = cutout(imageargs, ch, proj, db).data
else:
# BUG(review): "{}".format() is called with no argument, which raises
# IndexError and masks the intended error message -- needs an argument.
logger.error("The cutout {} can only contain cutouts of one single Channel Type.".format())
raise NDWSError("The cutout {} can only contain cutouts of one single Channel Type.".format())
return cubedata
# the matching try: is above this excerpt; any failure is logged and
# re-raised as the service's NDWSError
except Exception as e:
logger.error("{}".format(e))
raise NDWSError("{}".format(e))
# NOTE(review): fragment of an ingest method -- its def line (and the class
# providing self.channel / self.resolution / self.path) is above this excerpt.
ch = proj.getChannelObj(self.channel)
# get the dataset configuration
[ximagesz, yimagesz, zimagesz] = proj.datasetcfg.dataset_dim(self.resolution)
[starttime, endtime] = ch.time_range
[xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.get_cubedim(self.resolution)
[xsupercubedim, ysupercubedim, zsupercubedim] = supercubedim = proj.datasetcfg.get_supercubedim(self.resolution)
[xoffset, yoffset, zoffset] = proj.datasetcfg.get_offset(self.resolution)
# a timeseries channel with the degenerate (0,0) time range has no data
if ch.channel_type in TIMESERIES_CHANNELS and (starttime == 0 and endtime == 0):
logger.error("Timeseries Data cannot have timerange (0,0)")
raise NDWSError("Timeseries Data cannot have timerange (0,0)")
# Get a list of the files in the directories
for timestamp in range(starttime, endtime+1):
# walk the z dimension one supercube-deep slab at a time
for slice_number in range (zoffset, zimagesz, zsupercubedim):
slab = np.zeros([zsupercubedim, yimagesz, ximagesz ], dtype=ND_dtypetonp.get(ch.channel_datatype))
# fetch 16 slices at a time
if ch.channel_type in TIMESERIES_CHANNELS:
time_value = timestamp
else:
time_value = None
# prefetch the slice files for this slab, clamping at the top of the volume
self.fetchData(range(slice_number, slice_number+zsupercubedim) if slice_number+zsupercubedim<=zimagesz else range(slice_number, zimagesz), time_value=time_value)
for b in range(zsupercubedim):
if (slice_number + b < zimagesz):
try:
# reading the raw data
file_name = "{}{}".format(self.path, self.generateFileName(slice_number+b))
# print "Open filename {}".format(file_name)
logger.info("Open filename {}".format(file_name))
if ch.channel_datatype in [UINT8, UINT16] and ch.channel_type in IMAGE_CHANNELS:
try:
# NOTE(review): from here the code switches to a nifti upload path
# (niftifname is not defined anywhere in this excerpt) -- this looks
# like a fragment of a different routine spliced into the slice loop;
# verify against the original sources.
nifti_img = nibabel.load(niftifname)
nifti_data = np.array(nifti_img.get_data())
# Don't write to readonly channels
if ch.readonly == READONLY_TRUE:
logger.warning("Attempt to write to read only channel {} in project {}".format(ch.channel_name, proj.project_name))
raise NDWSError("Attempt to write to read only channel {} in project {}".format(ch.channel_name, proj.project_name))
# check that the data is the right shape: either the spatial volume at
# resolution 0, or the spatial volume extended by the channel time range
if nifti_data.shape != tuple(proj.datasetcfg.dataset_dim(0)) and nifti_data.shape != tuple(proj.datasetcfg.dataset_dim(0) + [ch.time_range[1]-ch.time_range[0]+1]):
logger.warning("Not correct shape")
raise NDWSError("Not correct shape")
# reverse the axis order, then coerce to the channel's numpy dtype
nifti_data = nifti_data.transpose()
nifti_data = np.array(nifti_data,ND_dtypetonp[ch.channel_datatype])
# create the nifti header
nh = NDNiftiHeader.fromImage(ch, nifti_img)
try:
if len(nifti_data.shape) == 3:
# make 4-d for time cube
nifti_data = nifti_data.reshape([1]+list(nifti_data.shape))
db.writeCuboid ( ch, (0,0,0), 0, nifti_data, timerange=[0,0], blind=True )
elif len(nifti_data.shape) == 4:
db.writeCuboid(ch, (0,0,0), 0, nifti_data, (0, nifti_data.shape[0]-1), blind=True )
# save the header if the data was written
nh.save()
# NOTE(review): write failures are logged but swallowed (no re-raise),
# and the caught exception e is unused in the message -- confirm this
# best-effort behavior is intended.
except Exception as e:
logger.warning("Writing to a channel with an incompatible data type. {}".format(ch.channel_type))
# NOTE(review): this repeats the slab-ingest loop earlier in the file -- the
# source appears to be a concatenation of overlapping excerpts.
time_value = None
# prefetch the slice files for this slab, clamping at the top of the volume
self.fetchData(range(slice_number, slice_number+zsupercubedim) if slice_number+zsupercubedim<=zimagesz else range(slice_number, zimagesz), time_value=time_value)
for b in range(zsupercubedim):
if (slice_number + b < zimagesz):
try:
# reading the raw data
file_name = "{}{}".format(self.path, self.generateFileName(slice_number+b))
# print "Open filename {}".format(file_name)
logger.info("Open filename {}".format(file_name))
if ch.channel_datatype in [UINT8, UINT16] and ch.channel_type in IMAGE_CHANNELS:
try:
image_data = np.asarray(Image.open(file_name, 'r'))
slab[b,:,:] = image_data
# a corrupt slice becomes a zero plane instead of aborting the ingest
except Exception as e:
slab[b,:,:] = np.zeros((yimagesz, ximagesz), dtype=ND_dtypetonp.get(ch.channel_datatype))
logger.warning("File corrupted. Cannot open file. {}".format(e))
elif ch.channel_datatype in [UINT32] and ch.channel_type in IMAGE_CHANNELS:
# pack the RGBA planes into one uint32 per pixel: A<<24 | B<<16 | G<<8 | R
image_data = np.asarray(Image.open(file_name, 'r').convert('RGBA'))
slab[b,:,:] = np.left_shift(image_data[:,:,3], 24, dtype=np.uint32) | np.left_shift(image_data[:,:,2], 16, dtype=np.uint32) | np.left_shift(image_data[:,:,1], 8, dtype=np.uint32) | np.uint32(image_data[:,:,0])
elif ch.channel_type in ANNOTATION_CHANNELS:
image_data = np.asarray(Image.open(file_name, 'r'))
slab[b,:,:] = image_data
else:
logger.error("Cannot ingest this data yet")
raise NDWSError("Cannot ingest this data yet")
# Python 2 except syntax -- this file predates Python 3
except IOError, e:
logger.warning("IOError {}.".format(e))
slab[b,:,:] = np.zeros((yimagesz, ximagesz), dtype=ND_dtypetonp.get(ch.channel_datatype))
# tile the xy extent in supercube-sized steps (loop bodies lie beyond the
# end of this excerpt)
for y in range ( 0, yimagesz+1, ysupercubedim ):
for x in range ( 0, ximagesz+1, xsupercubedim ):