def test_mixed_chunk_sizes(self):
    source = {"a": zarr.zeros(10, chunks=(1,)), "b": zarr.zeros(10, chunks=(2,))}
    self.assertRaises(ValueError, formats.BufferedItemWriter, source)
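For context, a minimal sketch of the arrays this test builds, assuming the zarr v2-style API: zarr.zeros returns a chunked, zero-filled array whose chunks attribute records the block size per dimension, which is presumably what BufferedItemWriter compares when it rejects mixed chunk sizes (the names a and b below are just illustrative).

import zarr

a = zarr.zeros(10, chunks=(1,))   # one element per chunk
b = zarr.zeros(10, chunks=(2,))   # two elements per chunk
print(a.chunks, b.chunks)         # (1,) (2,) -> the chunk layouts disagree
print(a[:])                       # reading materializes a NumPy array of zeros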
def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
    initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))
    # Zarr-backed cache with the same spatial extent as the ground-truth volume.
    gt_vol_zarr = zarr.zeros(
        self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint64)
    gt_blocks_seen = set()
    block_size = np.array((64, 64, 64), np.int64)

    def fetch_gt_block(block):
        # Copy one 64^3 block from the CloudVolume into the zarr cache.
        spos = block * block_size
        epos = spos + block_size
        slice_expr = np.s_[int(spos[0]):int(epos[0]),
                           int(spos[1]):int(epos[1]),
                           int(spos[2]):int(epos[2])]
        gt_data = self.gt_vol[slice_expr][..., 0]  # drop the trailing channel axis
        gt_vol_zarr[slice_expr] = gt_data

    def get_patch(spos, epos):
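The block-to-slice arithmetic in fetch_gt_block is the heart of the caching step. Here is a standalone sketch of the same computation, with small made-up in-memory arrays standing in for self.gt_vol and gt_vol_zarr (all shapes and values below are illustrative assumptions):

import numpy as np
import zarr

block_size = np.array((64, 64, 64), np.int64)
cache = zarr.zeros((256, 256, 256), chunks=(64, 64, 64), dtype=np.uint64)
source = np.ones((256, 256, 256), dtype=np.uint64)   # stand-in for the CloudVolume

block = np.array((1, 2, 3))                # block index, in units of blocks
spos = block * block_size                  # inclusive start voxel
epos = spos + block_size                   # exclusive end voxel
slice_expr = np.s_[spos[0]:epos[0], spos[1]:epos[1], spos[2]:epos[2]]
cache[slice_expr] = source[slice_expr]     # fills exactly one 64^3 chunk
print(cache[64, 128, 192])                 # 1 -> the copied block is now cached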
def _start_flood_fill(self, pos):
    self._stop_flood_fill()
    inf_results = zarr.zeros(
        self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
    inf_volume = neuroglancer.LocalVolume(
        data=inf_results, dimensions=self.dimensions)
    with self.viewer.txn() as s:
        s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)
        s.layers['inference'] = neuroglancer.ImageLayer(
            source=inf_volume,
            shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
        )
"""
Display a zarr array
"""
try:
import zarr
except ImportError:
raise ImportError("""This example uses a zarr array but zarr is not
installed. To install try 'pip install zarr'.""")
import napari
with napari.gui_qt():
data = zarr.zeros((102_0, 200, 210), chunks=(100, 200, 210))
data[53_0:53_1, 100:110, 110:120] = 1
print(data.shape)
# For big data, we should specify the contrast_limits range, or napari will try
# to find the min and max of the full image.
viewer = napari.view_image(data, contrast_limits=[0, 1], rgb=False)
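A note on why the example above stays cheap, sketched against the zarr v2 API (nchunks_initialized is assumed to be available on in-memory arrays): zarr.zeros does not allocate the full array up front, and a small write only materializes the chunks it overlaps.

import zarr

data = zarr.zeros((1020, 200, 210), chunks=(100, 200, 210))
print(data.nchunks_initialized)   # 0 -> no chunks stored yet (zarr v2 attribute)
data[530:531, 100:110, 110:120] = 1
print(data.nchunks_initialized)   # 1 -> only the chunk containing the write
print(data.nchunks)               # 11 chunks in total along the first axis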
def __init__(self):
    viewer = self.viewer = neuroglancer.Viewer()
    viewer.actions.add('inference', self._do_inference)
    self.gt_vol = cloudvolume.CloudVolume(
        'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
        mip=0,
        bounded=True,
        progress=False,
        provenance={})
    self.dimensions = neuroglancer.CoordinateSpace(
        names=['x', 'y', 'z'],
        units='nm',
        scales=self.gt_vol.resolution,
    )
    # Zarr array holding inference results, served to the viewer as a LocalVolume.
    self.inf_results = zarr.zeros(
        self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
    self.inf_volume = neuroglancer.LocalVolume(
        data=self.inf_results, dimensions=self.dimensions)
    # Bind shift+left-click to the 'inference' action registered above.
    with viewer.config_state.txn() as s:
        s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'
    with viewer.txn() as s:
        s.layers['image'] = neuroglancer.ImageLayer(
            source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
        )
        s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
            source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
        )
        s.layers['ground_truth'].visible = False
        s.layers['inference'] = neuroglancer.ImageLayer(
            source=self.inf_volume,