hdf5_compress = params.getboolean('data', 'hdf5_compress')
N_total = params.nb_channels
N_t = params.getint('detection', 'N_t')
dist_peaks = params.getint('detection', 'dist_peaks')
template_shift = params.getint('detection', 'template_shift')
file_out_suff = params.get('data', 'file_out_suff')
spike_thresh = params.getfloat('detection', 'spike_thresh')
spike_width = params.getfloat('detection', 'spike_width')
matched_filter = params.getboolean('detection', 'matched-filter')
matched_thresh = params.getfloat('detection', 'matched_thresh')
sign_peaks = params.get('detection', 'peaks')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
chunk_size = detect_memory(params, whitening=True)
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
nodes, edges = get_nodes_and_edges(params)
safety_time = params.getint('whitening', 'safety_time')
safety_space = params.getboolean('whitening', 'safety_space')
nb_temp_white = min(max(20, comm.size), N_e)
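# Presumably the number of electrodes used to estimate the temporal whitening
# filter: at least 20, growing with the number of MPI ranks, capped at N_e.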
max_silence_1 = int(20*params.rate // comm.size)
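# Caps the silent samples kept per process: 20 * params.rate samples
# (i.e. 20 s of recording) shared evenly across the comm.size MPI ranks.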
max_silence_2 = 5000
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.arange(len(nodes))
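# Sketch (illustrative, not from the pipeline): inv_nodes inverts the channel
# mapping, turning a global channel id into its local row index. E.g. with a
# hypothetical nodes = numpy.array([0, 2, 5]):
#     inv = numpy.zeros(6, dtype=numpy.int32)
#     inv[nodes] = numpy.arange(len(nodes))
#     inv[5]  # -> 2, the row of global channel 5 in a loaded chunk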
jitter_range = params.getint('detection', 'jitter_range')
template_shift_2 = template_shift + jitter_range
use_hanning = params.getboolean('detection', 'hanning')
rejection_threshold = params.getfloat('detection', 'rejection_threshold')
data_file.open()
#################################################################
if use_hanning:
    hanning_filter = numpy.hanning(N_t)
N_e = params.getint('data', 'N_e')
N_total = params.nb_channels
N_t = params.getint('detection', 'N_t')
template_shift = params.getint('detection', 'template_shift')
file_out = params.get('data', 'file_out')
file_out_suff = params.get('data', 'file_out_suff')
sign_peaks = params.get('detection', 'peaks')
matched_filter = params.getboolean('detection', 'matched-filter')
spike_thresh = params.getfloat('detection', 'spike_thresh')
spike_width = params.getfloat('detection', 'spike_width')
dist_peaks = params.getint('detection', 'dist_peaks')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
chunk_size = detect_memory(params, fitting=True)
gpu_only = params.getboolean('fitting', 'gpu_only')
nodes, edges = get_nodes_and_edges(params)
tmp_limits = params.get('fitting', 'amp_limits').replace('(', '').replace(')', '').split(',')
tmp_limits = list(map(float, tmp_limits))
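# E.g. a hypothetical amp_limits entry of "(0.3, 5)" becomes [0.3, 5.0]
# after the strip/split/map above.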
amp_auto = params.getboolean('fitting', 'amp_auto')
nb_chances = params.getint('fitting', 'nb_chances')
max_chunk = params.getfloat('fitting', 'max_chunk')
noise_thr = params.getfloat('clustering', 'noise_thr')
collect_all = params.getboolean('fitting', 'collect_all')
debug = params.getboolean('fitting', 'debug')
ignore_dead_times = params.getboolean('triggers', 'ignore_times')
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.arange(len(nodes))
data_file.open()
#################################################################
if use_gpu:
    import cudamat as cmt
spike_thresh = params.getfloat('detection', 'spike_thresh')
spike_width = params.getfloat('detection', 'spike_width')
if params.getboolean('data', 'global_tmp'):
    tmp_path_loc = os.path.join(os.path.abspath(params.get('data', 'file_out_suff')), 'tmp')
else:
    tmp_path_loc = tempfile.gettempdir()
plot_path = os.path.join(params.get('data', 'file_out_suff'), 'plots')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
safety_time = params.getint('clustering', 'safety_time')
safety_space = params.getboolean('clustering', 'safety_space')
comp_templates = params.getboolean('clustering', 'compress')
dispersion = params.get('clustering', 'dispersion').replace('(', '').replace(')', '').split(',')
dispersion = list(map(float, dispersion))
nodes, edges = get_nodes_and_edges(params)
chunk_size = detect_memory(params)
max_elts_elec = params.getint('clustering', 'max_elts')
if sign_peaks == 'both':
    max_elts_elec *= 2
nb_elts = int(params.getfloat('clustering', 'nb_elts')*N_e*max_elts_elec)
nb_repeats = params.getint('clustering', 'nb_repeats')
make_plots = params.get('clustering', 'make_plots')
debug_plots = params.get('clustering', 'debug_plots')
merging_param = params.getfloat('clustering', 'merging_param')
merging_method = params.get('clustering', 'merging_method')
remove_mixture = params.getboolean('clustering', 'remove_mixture')
extraction = params.get('clustering', 'extraction')
smart_search = params.getboolean('clustering', 'smart_search')
n_abs_min = params.getint('clustering', 'n_abs_min')
sensitivity = params.getfloat('clustering', 'sensitivity')
hdf5_compress = params.getboolean('data', 'hdf5_compress')
#params = detect_memory(params)
parallel_hdf5 = get_parallel_hdf5_flag(params)
logger = init_logging(params.logfile)
logger = logging.getLogger('circus.extracting')
#################################################################
data_file = params.data_file
N_e = params.getint('data', 'N_e')
N_t = params.getint('detection', 'N_t')
N_total = params.nb_channels
template_shift = params.getint('detection', 'template_shift')
chunk_size = detect_memory(params)
file_out = params.get('data', 'file_out')
file_out_suff = params.get('data', 'file_out_suff')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
nodes, edges = get_nodes_and_edges(params)
safety_time = params.getint('extracting', 'safety_time')
max_elts_temp = params.getint('extracting', 'max_elts')
output_dim = params.getfloat('extracting', 'output_dim')
noise_thr = params.getfloat('extracting', 'noise_thr')
hdf5_compress = params.getboolean('data', 'hdf5_compress')
blosc_compress = params.getboolean('data', 'blosc_compress')
tmp_limits = params.get('fitting', 'amp_limits').replace('(', '').replace(')', '').split(',')
amp_limits = list(map(float, tmp_limits))
elt_count = 0
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.arange(len(nodes))
data_file.open()
#################################################################
if comm.rank == 0:
    print_and_log(["Extracting templates from already found clusters..."], 'default', logger)
def get_max_loc_channel(params):
    nodes, edges = get_nodes_and_edges(params)
    max_loc_channel = 0
    for key in edges.keys():
        if len(edges[key]) > max_loc_channel:
            max_loc_channel = len(edges[key])
    return max_loc_channel
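# Usage sketch (hypothetical adjacency): with edges = {0: [0, 1], 1: [0, 1, 2], 2: [1, 2]},
# get_max_loc_channel(params) would return 3, the size of the largest
# channel neighborhood on the probe.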
if maxoverlap:
    if SHARED_MEMORY:
        templates = load_data_memshared(params, 'templates', extension=extension, normalize=normalize)
    else:
        templates = load_data(params, 'templates', extension=extension)
else:
    if SHARED_MEMORY:
        templates = load_data_memshared(params, 'templates', normalize=normalize)
    else:
        templates = load_data(params, 'templates')
if extension == '-merged':
    best_elec = load_data(params, 'electrodes', extension)
else:
    best_elec = load_data(params, 'electrodes')
nodes, edges = get_nodes_and_edges(params)
N, N_tm = templates.shape
if not SHARED_MEMORY and normalize:
    norm_templates = load_data(params, 'norm-templates')
    for idx in range(N_tm):
        myslice = numpy.arange(templates.indptr[idx], templates.indptr[idx+1])
        templates.data[myslice] /= norm_templates[idx]
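# (Each slice above covers the non-zero entries of one column of the sparse
# template matrix, so every template is rescaled by its stored norm in place.)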
if half:
    N_tm //= 2
comm.Barrier()
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.arange(len(nodes))
cuda_string = 'using %d CPU...' % comm.size
def get_neighbors(params, chan=None):
    N_total = params.getint('data', 'N_total')
    nodes, edges = get_nodes_and_edges(params, validating=True)
    inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
    inv_nodes[nodes] = numpy.arange(len(nodes))
    if chan is None:
        # Select all the channels.
        chans = inv_nodes[nodes]
    else:
        # Select only the neighboring channels of the best channel.
        chans = inv_nodes[edges[nodes[chan]]]
    return nodes, chans
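# Usage sketch (illustrative): restrict processing to the neighborhood of a
# template's best electrode, e.g.
#     nodes, chans = get_neighbors(params, chan=best_elec[t])
# where chans indexes the local rows of the channels adjacent to that electrode.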
def write_templates(path, params, extension):
    max_loc_channel = get_max_loc_channel(params)
    templates = io.load_data(params, 'templates', extension)
    N_tm = templates.shape[1]//2
    nodes, edges = get_nodes_and_edges(params)
    if sparse_export:
        n_channels_max = 0
        for t in range(N_tm):
            data = numpy.sum(numpy.sum(templates[:, t].toarray().reshape(N_e, N_t), 1) != 0)
            if data > n_channels_max:
                n_channels_max = data
    else:
        n_channels_max = N_e
    if export_all:
        to_write_sparse = numpy.zeros((N_tm + N_e, N_t, n_channels_max), dtype=numpy.float32)
        mapping_sparse = -1 * numpy.ones((N_tm + N_e, n_channels_max), dtype=numpy.int32)
    else:
        to_write_sparse = numpy.zeros((N_tm, N_t, n_channels_max), dtype=numpy.float32)
        mapping_sparse = -1 * numpy.ones((N_tm, n_channels_max), dtype=numpy.int32)
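    # Layout sketch: to_write_sparse[t] holds the N_t waveform samples of
    # template t on at most n_channels_max channels, and mapping_sparse[t]
    # records which physical channels those columns correspond to (-1 marks
    # unused slots).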
if numpy.iterable(rate):
    assert len(rate) == len(cells), "Should have the same number of rates and cells"
else:
    rate = [rate] * len(cells)
if numpy.iterable(amplitude):
    assert len(amplitude) == len(cells), "Should have the same number of amplitudes and cells"
else:
    amplitude = [amplitude] * len(cells)
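# E.g. with a hypothetical cells = [3, 7] and a scalar rate of 10., rate is
# broadcast to [10., 10.] so that every injected cell has its own entry.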
# Retrieve some additional key parameters.
#params = detect_memory(params)
data_file = params.get_data_file(source=True)
N_e = params.getint('data', 'N_e')
N_total = params.nb_channels
hdf5_compress = params.getboolean('data', 'hdf5_compress')
nodes, edges = get_nodes_and_edges(params)
N_t = params.getint('detection', 'N_t')
inv_nodes = numpy.zeros(N_total, dtype=numpy.int32)
inv_nodes[nodes] = numpy.argsort(nodes)
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
N_tm_init = templates.shape[1]//2
thresholds = io.load_data(params, 'thresholds')
limits = io.load_data(params, 'limits')
best_elecs = io.load_data(params, 'electrodes')
norms = io.load_data(params, 'norm-templates')
# Create output directory if it does not exist.
if comm.rank == 0:
    if not os.path.exists(file_out):
        os.makedirs(file_out)
data_file.open()
chunk_size = params.getint('data', 'chunk_size')
do_temporal_whitening = params.getboolean('whitening', 'temporal')
do_spatial_whitening = params.getboolean('whitening', 'spatial')
N_total = params.nb_channels
if do_spatial_whitening:
    spatial_whitening = io.load_data(params, 'spatial_whitening')
if do_temporal_whitening:
    temporal_whitening = io.load_data(params, 'temporal_whitening')
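# Sketch of how these matrices are used downstream (see extract_median below):
# the spatial whitening matrix right-multiplies each chunk via
# numpy.dot(loc_chunk, spatial_whitening), and the temporal filter is
# presumably convolved along the time axis of the same chunk.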
#mpi_file = MPI.File()
#mpi_input = mpi_file.Open(comm, data_filename, MPI.MODE_RDONLY)
nb_chunks, last_chunk_len = data_file.analyze(chunk_size)
nodes, _ = get_nodes_and_edges(params)
N_elec = nodes.size
def weighted_mean(weights, values):
    """Compute a weighted mean for the given values"""
    norm_weights = [float(weight) / float(sum(weights)) for weight in weights]
    weighted_values = [norm_weight * value for (norm_weight, value) in zip(norm_weights, values)]
    weighted_mean = sum(weighted_values)
    return weighted_mean
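# Worked example: weighted_mean([1, 3], [2.0, 4.0]) normalizes the weights to
# [0.25, 0.75] and returns 0.25 * 2.0 + 0.75 * 4.0 = 3.5.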
def extract_median(chunk_size, gidx):
    """Extract the medians from a chunk of extracellular traces"""
    loc_chunk, _ = data_file.get_data(gidx, chunk_size, nodes=nodes)
    # Whiten signal.
    if do_spatial_whitening:
        loc_chunk = numpy.dot(loc_chunk, spatial_whitening)
    if do_temporal_whitening: