How to use the tables.Float32Atom class in tables

To help you get started, we’ve selected a few tables.Float32Atom examples, based on popular ways it is used in public projects.

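tables.Float32Atom() declares the element type (32-bit float) of a chunked PyTables array and is typically passed as the atom argument of create_earray or create_carray, as the examples below show. A minimal standalone sketch (the file name and shapes are invented for illustration):

import numpy as np
import tables

# Create an extendable array of float32 rows; the 0 in the shape marks
# the dimension that grows on append.
with tables.open_file('example.h5', mode='w') as f:
    arr = f.create_earray(f.root, 'data', atom=tables.Float32Atom(),
                          shape=(0, 8))
    arr.append(np.zeros((4, 8), dtype=np.float32))  # append four rows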

github klusta-team / kwiklib / kwiklib / dataio / kwik.py View on Github external
        file.createGroup('/channel_groups',
                         '{0:d}'.format(ichannel_group))

        # Determine a sensible chunk shape.
        chunkrows = 500 * 1024 // (nfeatures_ * 4 * (2 if has_masks else 1))

        # Create the arrays.
        if has_masks:
            # Features + masks.
            file.createEArray(channel_group_path, 'features_masks',
                              tb.Float32Atom(), (0, nfeatures_, 2),
                              chunkshape=(chunkrows, nfeatures_, 2))
        else:
            file.createEArray(channel_group_path, 'features_masks',
                              tb.Float32Atom(), (0, nfeatures_),
                              chunkshape=(chunkrows, nfeatures_))

        # Determine a sensible chunk shape.
        chunkrows = 500 * 1024 // (waveforms_nsamples_ * nchannels_ * 2)

        file.createEArray(channel_group_path, 'waveforms_raw',
                          tb.Int16Atom(), (0, waveforms_nsamples_, nchannels_),
                          chunkshape=(chunkrows, waveforms_nsamples_, nchannels_))
        file.createEArray(channel_group_path, 'waveforms_filtered',
                          tb.Int16Atom(), (0, waveforms_nsamples_, nchannels_),
                          chunkshape=(chunkrows, waveforms_nsamples_, nchannels_))

    file.close()
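The chunkrows arithmetic above targets chunks of roughly 500 KiB: 500 * 1024 bytes divided by the size of one row (4 bytes per float32 value, doubled when masks are interleaved; 2 bytes per int16 value for the waveforms). A generic helper in the same spirit might look like this (hypothetical, not part of kwiklib):

def rows_per_chunk(row_bytes, target_bytes=500 * 1024):
    # Aim for chunks of about target_bytes, but never fewer than one row.
    return max(1, target_bytes // row_bytes)

Note also that createGroup and createEArray are the pre-3.0 PyTables spellings; PyTables 3.x names them create_group and create_earray.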
github Britefury / self-ensemble-visual-domain-adapt-photo / experiment_selfens_meanteacher.py View on Github external
        source_indices, target_indices, n_src, n_tgt = image_dataset.subset_indices(
            d_source, d_target, subsetsize, subsetseed
        )


        #
        # Result file
        #

        if result_file != '':
            cmdline_helpers.ensure_containing_dir_exists(result_file)
            h5_filters = tables.Filters(complevel=9, complib='blosc')
            f_target_pred = tables.open_file(result_file, mode='w')
            g_tgt_pred = f_target_pred.create_group(f_target_pred.root, 'target_pred_y', 'Target prediction')
            if record_history:
                arr_tgt_pred_history = f_target_pred.create_earray(g_tgt_pred, 'y_prob_history', tables.Float32Atom(),
                                                           (0, n_tgt, d_target.n_classes),
                                                           filters=h5_filters)
            else:
                arr_tgt_pred_history = None
        else:
            arr_tgt_pred_history = None
            f_target_pred = None
            g_tgt_pred = None


        n_classes = d_source.n_classes

        print('Loaded data')

        net_class = network_architectures.get_build_fn_for_architecture(arch)
github ellisdg / 3DUnetCNN / unet3d / data.py View on Github external
def create_data_file(out_file, n_channels, n_samples, image_shape):
    hdf5_file = tables.open_file(out_file, mode='w')
    filters = tables.Filters(complevel=5, complib='blosc')
    data_shape = tuple([0, n_channels] + list(image_shape))
    truth_shape = tuple([0, 1] + list(image_shape))
    data_storage = hdf5_file.create_earray(hdf5_file.root, 'data', tables.Float32Atom(), shape=data_shape,
                                           filters=filters, expectedrows=n_samples)
    truth_storage = hdf5_file.create_earray(hdf5_file.root, 'truth', tables.UInt8Atom(), shape=truth_shape,
                                            filters=filters, expectedrows=n_samples)
    affine_storage = hdf5_file.create_earray(hdf5_file.root, 'affine', tables.Float32Atom(), shape=(0, 4, 4),
                                             filters=filters, expectedrows=n_samples)
    return hdf5_file, data_storage, truth_storage, affine_storage
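The returned EArray handles all have a zero-length first dimension, so samples are written by appending along that axis. A hypothetical write of one sample (image, mask and affine are assumed to match the shapes declared above) could be:

import numpy as np

data_storage.append(image[np.newaxis])     # shape (1, n_channels, *image_shape)
truth_storage.append(mask[np.newaxis])     # shape (1, 1, *image_shape)
affine_storage.append(affine[np.newaxis])  # shape (1, 4, 4)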
github neurokernel / neurokernel / neurokernel / LPU / utils / simpleio.py View on Github external
    write memory to an h5 file
    The h5 file contains root.real and root.imag (if A is complex);
    best for transferring data with Matlab.

    A: an ndarray, GPUArray or PitchArray
    filename: name of the file to store to
    mode: 'w' to start a new file
          'a' to append; the leading dimension of A must be the
              same as in the existing file

    The file can be read by read_file, or in Matlab using h5read.m
    """
    h5file = tables.openFile(filename, mode, title)

    if (A.dtype == np.float32) or (A.dtype == np.complex64):
        tb = tables.Float32Atom
    elif (A.dtype == np.float64) or (A.dtype == np.complex128):
        tb = tables.Float64Atom
    elif A.dtype == np.int32:
        tb = tables.Int32Atom
    elif A.dtype == np.int64:
        tb = tables.Int64Atom
    else:
        raise TypeError("Write file error: unknown input dtype")

    if PYCUDA:
        if A.__class__.__name__ in ["GPUArray", "PitchArray"]:
            B = A.get()
        elif A.__class__.__name__ == "ndarray":
            B = A
        else:
            raise TypeError("Write file error: unknown input")
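For the non-complex dtypes, the manual dtype-to-atom mapping above can also be expressed with PyTables' built-in helper, which derives an atom directly from a NumPy dtype (a standalone sketch, not code from the project):

import numpy as np
import tables

atom = tables.Atom.from_dtype(np.dtype(np.float32))  # equivalent to tables.Float32Atom()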
github IntelAI / models / models / image_segmentation / tensorflow / 3d_unet / inference / fp32 / unet3d / data.py View on Github external
(The create_data_file function here is a verbatim copy of the ellisdg/3DUnetCNN version shown above.)
github PyTables / PyTables / bench / blosc.py View on Github external
def create_synth(kind, prec):

    prefix_orig = 'cellzome/cellzome-'
    iname = dirname+prefix_orig+'none-'+prec+'.h5'
    f = tb.open_file(iname, "r")

    if prec == "single":
        type_ = tb.Float32Atom()
    else:
        type_ = tb.Float64Atom()

    prefix = 'synth/synth-'
    for clevel in range(10):
        oname = '%s/%s-%s%d-%s.h5' % (dirname, prefix, kind, clevel, prec)
        #print "creating...", iname
        f2 = tb.open_file(oname, "w")

        if kind in ["none", "numpy"]:
            filters = None
        else:
            filters = tb.Filters(complib=kind, complevel=clevel, shuffle=shuffle)

        for name in ['maxarea', 'mascotscore']:
            col = f.get_node('/', name)
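The snippet is truncated just after fetching each input column; a plausible continuation (a sketch under that assumption, not the benchmark's actual code) would create a matching output array and stream the data across in blocks:

            out = f2.create_earray(f2.root, name, atom=type_, shape=(0,),
                                   filters=filters, expectedrows=len(col))
            blocksize = 1024 * 1024
            # Copy in fixed-size blocks to keep memory use bounded.
            for i in range(0, len(col), blocksize):
                out.append(col[i:i + blocksize])
        f2.close()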
github data61 / landshark / landshark / importers / targetwrite.py View on Github external
    Parameters
    ----------
    sf : ShapefileTargets
        The shapefile object to output.
    filename : str
        The output filename of the HDF5 file.

    """
    title = "Landshark Targets"
    log.info("Creating HDF5 target file")
    h5file = tables.open_file(filename, mode="w", title=title)

    n = sf.n
    # ncols_ord = len(sf.fields)
    ord_atom = tables.Float32Atom()
    filters = tables.Filters(complevel=1, complib="blosc:lz4")

    # log.info("Creating data arrays")
    # target_array = h5file.create_carray(h5file.root, name="targets",
    #                                     atom=ord_atom, shape=(n, ncols_ord),
    #                                     filters=filters)
    # target_array.attrs.labels = sf.fields

    coord_array = h5file.create_carray(h5file.root, name="coordinates",
                                       atom=ord_atom, shape=(n, 2),
                                       filters=filters)
    coord_array.attrs.labels = ["x", "y"]

    # log.info("Writing target data")
    # for i, r in enumerate(sf.ordinal_data()):
    #     target_array[i] = r
github tritemio / PyBroMo / pybromo / storage.py View on Github external
    def add_emission_tot(self, chunksize=2**19, comp_filter=default_compression,
                         overwrite=False, params=dict(),
                         chunkslice='bytes'):
        """Add the `emission_tot` array in '/trajectories'.
        """
        kwargs = dict(overwrite=overwrite, chunksize=chunksize, params=params,
                      comp_filter=comp_filter, atom=tables.Float32Atom(),
                      title='Summed emission trace of all the particles')
        return self.add_trajectory('emission_tot', **kwargs)
github data61 / landshark / landshark / importers / featurewrite.py View on Github external
    # write the attributes to root
    log.info("Writing global attributes")
    attributes = h5file.root._v_attrs
    attributes.height = image_stack.height
    attributes.width = image_stack.width
    attributes.crs = image_stack.crs
    coords_x = image_stack.coordinates_x
    coords_y = image_stack.coordinates_y
    h5file.create_array(h5file.root, name="x_coordinates", obj=coords_x)
    h5file.create_array(h5file.root, name="y_coordinates", obj=coords_y)

    nbands_cat = len(image_stack.categorical_bands)
    nbands_ord = len(image_stack.ordinal_bands)
    cat_atom = tables.Int32Atom(shape=(nbands_cat,))
    ord_atom = tables.Float32Atom(shape=(nbands_ord,))
    filters = tables.Filters(complevel=1, complib="blosc:lz4")

    log.info("Creating data arrays")
    im_shape = (image_stack.height, image_stack.width)
    cat_array = h5file.create_carray(h5file.root, name="categorical_data",
                                     atom=cat_atom, shape=im_shape,
                                     filters=filters)
    cat_array.attrs.labels = image_stack.categorical_names
    ord_array = h5file.create_carray(h5file.root, name="ordinal_data",
                                     atom=ord_atom, shape=im_shape,
                                     filters=filters)
    ord_array.attrs.labels = image_stack.ordinal_names
    ord_array.attrs.missing_values = image_stack.ordinal_missing
    log.info("Categorical HDF5 block shape: {}".format(cat_array.chunkshape))
    log.info("Ordinal HDF5 block shape: {}".format(ord_array.chunkshape))
github neurokernel / neurokernel / neurokernel / LPU / neurons / photoreceptor.py View on Github external
    def _setup_output(self):
        outputfile = self.LPU_id + '_out'
        if self.record_neuron:
            self.outputfile_I = tables.openFile(outputfile+'I.h5', 'w')
            self.outputfile_I.createEArray(
                "/", "array",
                tables.Float64Atom() if self.dtype == np.double else tables.Float32Atom(),
                (0, self.num_neurons))

            self.outputfile_V = tables.openFile(outputfile+'V.h5', 'w')
            self.outputfile_V.createEArray(
                "/", "array",
                tables.Float64Atom() if self.dtype == np.double else tables.Float32Atom(),
                (0, self.num_neurons))

        if self.record_microvilli:
            self.outputfile_X0 = tables.openFile(outputfile+'X0.h5', 'w')
            self.outputfile_X0.createEArray(
                "/", "array",
                tables.Int16Atom(),
                (0, self.num_neurons))

            self.outputfile_X1 = tables.openFile(outputfile+'X1.h5', 'w')
            self.outputfile_X1.createEArray(
                "/", "array",
                tables.Int16Atom(),
                (0, self.num_neurons))

            self.outputfile_X2 = tables.openFile(outputfile+'X2.h5', 'w')