How to use the neuroglancer.LocalVolume class in neuroglancer

To help you get started, we've selected a few neuroglancer.LocalVolume examples based on popular ways it is used in public projects.
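The common pattern in all of the excerpts below is the same: build a neuroglancer.CoordinateSpace describing the axes of an in-memory array, wrap the array in neuroglancer.LocalVolume, and attach the volume as a layer source. Here is a minimal sketch of that pattern using made-up data (the array contents and sizes are illustrative; the calls mirror the excerpts that follow):

import numpy as np
import neuroglancer

# Illustrative data: a small grayscale volume held in memory.
data = (np.random.rand(64, 64, 64) * 255).astype(np.uint8)

# Describe the axes of the array so neuroglancer knows how to display it.
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10],
)

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    # LocalVolume serves `data` from this Python process to the viewer.
    s.layers['image'] = neuroglancer.ImageLayer(
        source=neuroglancer.LocalVolume(data=data, dimensions=dimensions),
    )

print(viewer)  # URL of the locally hosted viewer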


From google/neuroglancer: python/examples/flood_filling_simulation.py (view on GitHub)
def _start_flood_fill(self, pos):
    self._stop_flood_fill()
    # zarr array that will hold the flood-fill predictions.
    inf_results = zarr.zeros(
        self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
    # Serve that array to the viewer from this process.
    inf_volume = neuroglancer.LocalVolume(
        data=inf_results, dimensions=self.dimensions)

    with self.viewer.txn() as s:
        s.layers['points'] = neuroglancer.LocalAnnotationLayer(self.dimensions)
        s.layers['inference'] = neuroglancer.ImageLayer(
            source=inf_volume,
            shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
        )
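The flood-fill loop in this example keeps writing new predictions into the zarr array that backs the LocalVolume, and calls invalidate() on the volume after writing so the viewer re-fetches the changed chunks. A hedged sketch of that update step, with names following the excerpt above and the slicing left abstract:

def write_block(inf_results, inf_volume, block, slices):
    # Update the backing zarr array in place.
    inf_results[slices] = block
    # Tell the viewer that locally served data changed so it re-fetches chunks.
    inf_volume.invalidate()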
From google/neuroglancer: python/examples/interactive_inference.py (view on GitHub)
viewer = self.viewer = neuroglancer.Viewer()
viewer.actions.add('inference', self._do_inference)
self.gt_vol = cloudvolume.CloudVolume(
    'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
    mip=0,
    bounded=True,
    progress=False,
    provenance={})
self.dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=self.gt_vol.resolution,
)
# Local zarr array for inference results, served to the viewer as a LocalVolume.
self.inf_results = zarr.zeros(
    self.gt_vol.bounds.to_list()[3:], chunks=(64, 64, 64), dtype=np.uint8)
self.inf_volume = neuroglancer.LocalVolume(
    data=self.inf_results, dimensions=self.dimensions)
with viewer.config_state.txn() as s:
    s.input_event_bindings.data_view['shift+mousedown0'] = 'inference'

with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
    )
    s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
    )
    s.layers['ground_truth'].visible = False
    s.layers['inference'] = neuroglancer.ImageLayer(
        source=self.inf_volume,
        shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
    )
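Here shift+mousedown0 is bound to the 'inference' action registered with viewer.actions.add. The handler receives an action state whose mouse_voxel_coordinates give the clicked position; a hedged sketch of what such a handler can look like (the body is illustrative, not the example's actual model code):

def _do_inference(self, action_state):
    pos = action_state.mouse_voxel_coordinates
    if pos is None:
        return  # pointer was not over any data
    # ... run the model around `pos` and write the result into self.inf_results ...
    self.inf_volume.invalidate()  # make the viewer re-fetch the updated volume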
From google/neuroglancer: python/examples/example_skeletons.py (view on GitHub)
edges=edges,
    vertex_attributes=dict(affinity=np.random.rand(2), affinity2=np.random.rand(2)))


viewer = neuroglancer.Viewer()
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10],
)
with viewer.txn() as s:
    s.layers.append(
        name='a',
        layer=neuroglancer.SegmentationLayer(
            source=[
                neuroglancer.LocalVolume(
                    data=segmentation,
                    dimensions=dimensions,
                ),
                SkeletonSource(dimensions),
            ],
            skeleton_shader='void main() { emitRGB(colormapJet(affinity)); }',
            selected_alpha=0,
            not_selected_alpha=0,
        ))

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument('--static-content-url')
    ap.add_argument('-a', '--bind-address')
    args = ap.parse_args()
    neuroglancer.server.debug = True
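The excerpt stops before the parsed arguments are applied. In the version of the examples quoted here they feed the global server configuration, roughly as below (a sketch continuing the __main__ block; newer neuroglancer releases bundle the client and no longer need a static content URL):

    if args.bind_address:
        neuroglancer.set_server_bind_address(args.bind_address)
    if args.static_content_url:
        neuroglancer.set_static_content_source(url=args.static_content_url)
    print(viewer)  # URL of the locally hosted viewer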
From google/neuroglancer: python/examples/example.py (view on GitHub)
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

# Concentric shells around the volume center, used as segment ids.
b = np.floor(np.sqrt((ix - 0.5)**2 + (iy - 0.5)**2 + (iz - 0.5)**2) * 10).astype(np.uint32)
b = np.pad(b, 1, 'constant')

viewer = neuroglancer.Viewer()
dimensions = neuroglancer.CoordinateSpace(
    names=['x', 'y', 'z'],
    units='nm',
    scales=[10, 10, 10])
with viewer.txn() as s:
    s.dimensions = dimensions
    s.layers.append(
        name='a',
        layer=neuroglancer.LocalVolume(
            data=a,
            dimensions=neuroglancer.CoordinateSpace(
                names=['c^', 'x', 'y', 'z'],
                units=['', 'nm', 'nm', 'nm'],
                scales=[1, 10, 10, 10]),
            voxel_offset=(0, 20, 30, 15),
        ),
        shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""")
    s.layers.append(
        name='b', layer=neuroglancer.LocalVolume(
From google/neuroglancer: python/examples/example_overlay.py (view on GitHub)
a = np.zeros((3, 100, 100, 100), dtype=np.uint8)
ix, iy, iz = np.meshgrid(*[np.linspace(0, 1, n) for n in a.shape[1:]], indexing='ij')
a[0, :, :, :] = np.abs(np.sin(4 * (ix + iy))) * 255
a[1, :, :, :] = np.abs(np.sin(4 * (iy + iz))) * 255
a[2, :, :, :] = np.abs(np.sin(4 * (ix + iz))) * 255

viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.layers['image'] = neuroglancer.ImageLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
    )
    s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
        source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
    )
    s.layers['overlay'] = neuroglancer.ImageLayer(
        source=neuroglancer.LocalVolume(
            a,
            dimensions=neuroglancer.CoordinateSpace(
                scales=[1, 8, 8, 8],
                units=['', 'nm', 'nm', 'nm'],
                names=['c^', 'x', 'y', 'z']),
            voxel_offset=[0, 3000, 3000, 3000]),
        shader="""
void main() {
  emitRGB(vec3(toNormalized(getDataValue(0)),
               toNormalized(getDataValue(1)),
               toNormalized(getDataValue(2))));
}
""",
    )
    s.voxel_coordinates = [3000, 3000, 3000]
print(viewer.state)
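Both example.py and example_overlay.py pass LocalVolume a 4-d array whose first axis is named 'c^'. The trailing '^' marks a channel dimension, so the three values per voxel reach the shader through getDataValue(0..2) instead of being shown as a spatial axis, and voxel_offset places the local array inside the shared coordinate space. A condensed sketch of that pattern (the data itself is illustrative):

import numpy as np
import neuroglancer

rgb = np.zeros((3, 64, 64, 64), dtype=np.uint8)  # channel-first RGB volume (illustrative)

overlay = neuroglancer.LocalVolume(
    data=rgb,
    dimensions=neuroglancer.CoordinateSpace(
        names=['c^', 'x', 'y', 'z'],   # 'c^': channel dimension, not a spatial axis
        units=['', 'nm', 'nm', 'nm'],
        scales=[1, 8, 8, 8]),
    voxel_offset=[0, 3000, 3000, 3000],  # position the overlay at x = y = z = 3000 voxels
)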
From google/neuroglancer: python/examples/example.py, continued from the excerpt above (view on GitHub)
    s.layers.append(
        name='b', layer=neuroglancer.LocalVolume(
            data=b,
            dimensions=dimensions,
        ))

print(viewer)
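print(viewer) prints the URL of the viewer state served by this Python process; the process has to stay alive for LocalVolume data to remain reachable. A small sketch for opening the link and keeping the script running (the webbrowser usage is an assumption; get_viewer_url() is part of the Viewer API):

import webbrowser

webbrowser.open_new(viewer.get_viewer_url())  # same URL that print(viewer) shows
input('Press Enter to quit; the local data server stops when this process exits\n')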