# compute the Fourier series indices and their transposes
self._ky = [np.arange(-np.ceil((sz[0] - 1) / 2), np.floor((sz[0] - 1) / 2) + 1, dtype=np.float32)
            for sz in filter_sz]
# kx keeps only the non-positive frequencies; the rest follow from the conjugate
# symmetry of the spectra of real signals
self._kx = [np.arange(-np.ceil((sz[1] - 1) / 2), 1, dtype=np.float32)
            for sz in filter_sz]
# construct the Gaussian label function using the Poisson summation formula
sig_y = np.sqrt(np.prod(np.floor(self._base_target_sz))) * config.output_sigma_factor * (self._output_sz / self._img_sample_sz)
yf_y = [np.sqrt(2 * np.pi) * sig_y[0] / self._output_sz[0] * np.exp(-2 * (np.pi * sig_y[0] * ky_ / self._output_sz[0])**2)
        for ky_ in self._ky]
yf_x = [np.sqrt(2 * np.pi) * sig_y[1] / self._output_sz[1] * np.exp(-2 * (np.pi * sig_y[1] * kx_ / self._output_sz[1])**2)
        for kx_ in self._kx]
self._yf = [yf_y_.reshape(-1, 1) * yf_x_ for yf_y_, yf_x_ in zip(yf_y, yf_x)]
if config.use_gpu:
    self._yf = [cp.asarray(yf) for yf in self._yf]
    self._ky = [cp.asarray(ky) for ky in self._ky]
    self._kx = [cp.asarray(kx) for kx in self._kx]
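# Why the closed form above works: by the Poisson summation formula, the Fourier
# series coefficients of a periodically summed Gaussian are samples of a Gaussian,
# which is exactly what yf_y / yf_x evaluate. A self-contained numerical check
# (T and sigma are illustrative values, not the tracker's):
import numpy as np

T, sigma = 51, 2.5
k = np.arange(-(T - 1) // 2, (T - 1) // 2 + 1)
# closed-form coefficients, same expression as yf_y / yf_x above
yf_closed = np.sqrt(2 * np.pi) * sigma / T * np.exp(-2 * (np.pi * sigma * k / T) ** 2)
# brute force: sample the periodically summed Gaussian and take its DFT
t = np.arange(T)
y = sum(np.exp(-0.5 * ((t - n * T) / sigma) ** 2) for n in range(-3, 4))
yf_ref = np.fft.fftshift(np.fft.fft(y)).real / T
assert np.allclose(yf_closed, yf_ref)  # agrees to floating-point precision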
# construct cosine window
self._cos_window = [self._cosine_window(feature_sz_) for feature_sz_ in feature_sz]
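# _cosine_window is defined elsewhere in the tracker; a common construction (and
# only an assumption here) is a separable Hann window with the zero endpoints
# dropped, so that no feature cell is multiplied by exactly zero:
def _cosine_window_sketch(sz):
    """Separable Hann window of shape (sz[0], sz[1]); a sketch, not the original."""
    hann_y = np.hanning(int(sz[0]) + 2)[1:-1]
    hann_x = np.hanning(int(sz[1]) + 2)[1:-1]
    return hann_y.reshape(-1, 1) * hann_x.reshape(1, -1)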
# compute the Fourier series of the interpolation function
self._interp1_fs = []
self._interp2_fs = []
for sz in filter_sz:
    interp1_fs, interp2_fs = self._get_interp_fourier(sz)
    self._interp1_fs.append(interp1_fs)
    self._interp2_fs.append(interp2_fs)
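# _get_interp_fourier returns the Fourier coefficients of the interpolation
# kernel, one factor per dimension. A generic way to obtain such coefficients
# for any symmetric, centered kernel is a scaled DFT; the actual tracker may
# instead use an analytic expression (e.g. for a cubic spline kernel):
def _interp_fourier_sketch(kernel_samples):
    """Fourier series coefficients of a symmetric kernel sampled on the centered
    filter grid (odd length). ifftshift moves the center tap to index 0, so the
    result is real for a symmetric kernel."""
    n = len(kernel_samples)
    f = np.fft.fft(np.fft.ifftshift(kernel_samples)) / n
    return np.fft.fftshift(f).real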
# get the reg_window_edge parameter
reg_window_edge = []
# pm for this batch: the annealed template-update momentum
Params[8] = float(pmi[ibatch])
pm = pmi[ibatch] * cp.ones((Nfilt,), dtype=np.float64, order='F')
# load a single batch (same as everywhere)
offset = Nchan * batchstart[ibatch]
dat = proc.flat[offset:offset + NT * Nchan].reshape((-1, Nchan), order='F')
dataRAW = cp.asarray(dat, dtype=np.float32) / params.scaleproc
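# The flat slice + reshape above relies on the batch being stored channel-blocked:
# with order='F', element (t, c) comes from flat[c * NT + t], so the first NT
# values become channel 0's trace. A tiny illustration (np as imported above):
_NT, _Nchan = 4, 3
_flat = np.arange(_NT * _Nchan)
_dat = _flat.reshape((-1, _Nchan), order='F')
assert (_dat[:, 0] == np.array([0, 1, 2, 3])).all()  # first NT values = channel 0
assert _dat[2, 1] == 1 * _NT + 2                     # (t=2, c=1) -> flat[1*NT + 2]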
if ibatch == 0:
    # only on the first batch, we first get a new set of spikes from the residuals,
    # which in this case is the unmodified data because we start with no templates
    # CUDA function to get spatiotemporal clips from spike detections
    dWU, cmap = mexGetSpikes2(Params, dataRAW, wTEMP, iC)
    dWU = cp.asarray(dWU, dtype=np.float64, order='F')
    # project these into the wPCA waveforms
    dWU = cp.reshape(
        cp.dot(wPCAd, cp.dot(wPCAd.T, dWU.reshape((dWU.shape[0], -1), order='F'))),
        dWU.shape, order='F')
    # initialize the low-rank decomposition with the standard waveforms; replicating
    # the first waveform means index 0 everywhere, hence zeros rather than ones
    W = W0[:, cp.zeros(dWU.shape[2], dtype=np.int32), :]
    Nfilt = W.shape[1]  # update the number of filters/templates
    # initialize the spike counts of the new templates with the minimum allowed
    # value, so they don't get thrown out right away
    nsp = _extend(nsp, 0, Nfilt, m0)
    Params[1] = Nfilt  # update in the CUDA parameters
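# The reshape-project-reshape above is low-rank denoising: flatten each clip,
# project it onto the span of the PCA basis wPCAd, and undo the flattening.
# A stand-alone sketch of the same operation with made-up shapes:
import numpy as np

_rng = np.random.default_rng(0)
_U, _ = np.linalg.qr(_rng.standard_normal((61, 6)))  # orthonormal basis, like wPCAd
_clips = _rng.standard_normal((61, 32, 10))          # (nt0, Nchan, Nclips)
_flat = _clips.reshape((61, -1), order='F')
_denoised = (_U @ (_U.T @ _flat)).reshape(_clips.shape, order='F')
# the projection is idempotent: applying it twice changes nothing
assert np.allclose(_U @ (_U.T @ _denoised.reshape((61, -1), order='F')),
                   _U @ (_U.T @ _flat))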
# flag_resort indicates whether to re-sort the templates by their best peak channel
if flag_resort:
nPCs = nPCs or params.nPCs
nt0min = params.nt0min
Nchan = probe.Nchan
batchstart = np.arange(0, NT * Nbatch + 1, NT).astype(np.int64)
k = 0
# preallocate matrix to hold 1D spike snippets
# dd = cp.zeros((params.nt0, int(5e4)), dtype=np.float32, order='F')
dds = []
for ibatch in tqdm(range(0, Nbatch, nskip), desc="Extracting templates"):
    offset = Nchan * batchstart[ibatch]
    dat = proc.flat[offset:offset + NT * Nchan].reshape((-1, Nchan), order='F')
    # move data to GPU and scale it back to unit variance
    dataRAW = cp.asarray(dat, dtype=np.float32) / params.scaleproc
    # find isolated spikes from each batch
    row, col, mu = isolated_peaks_new(dataRAW, params)
    # for each peak, get the voltage snippet from that channel
    c = get_SpikeSample(dataRAW, row, col, params)
    # if k + c.shape[1] > dd.shape[1]:
    #     dd = cp.pad(dd, (0, dd.shape[1]), mode='constant')
    # dd[:, k:k + c.shape[1]] = c
    dds.append(c)
    k = k + c.shape[1]
    if k > 1e5:
        break
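# After the loop, the collected snippets are typically concatenated and reduced
# to a few principal components that seed the templates. A hedged sketch of that
# follow-up step, reusing dds and nPCs from above (plain SVD here; the real
# pipeline may differ):
dd = cp.concatenate(dds, axis=1)[:, :int(1e5)]  # (nt0, total snippets), capped like k
U, S, _ = cp.linalg.svd(dd, full_matrices=False)
wPCA_sketch = U[:, :nPCs]                       # leading temporal PCs as seed waveforms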
def save_mean_and_variance(self):
    subset_size = self.num_observations_per_file
    new_total_size = self.total_images + subset_size
    co1 = self.total_images / new_total_size
    co2 = subset_size / new_total_size
    images = cp.asarray(self.images)
    subset_mean = cp.mean(images, axis=(0, 1))
    subset_var = cp.var(images, axis=(0, 1))
    new_dataset_mean = subset_mean if self.dataset_mean is None else co1 * self.dataset_mean + co2 * subset_mean
    new_dataset_var = subset_var if self.dataset_var is None else co1 * (
        self.dataset_var + self.dataset_mean**2) + co2 * (
        subset_var + subset_mean**2) - new_dataset_mean**2
    # clamp tiny negative values caused by floating-point cancellation
    new_dataset_var[new_dataset_var < 0] = 0
    self.dataset_var = new_dataset_var
    self.dataset_mean = new_dataset_mean
    self.dataset_std = cp.sqrt(self.dataset_var)
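# The update above is the standard pooled-moments identity: second moments E[x^2]
# combine with the same weights as the means, and the pooled variance is the
# pooled E[x^2] minus the squared pooled mean. A NumPy check on 1-D data:
import numpy as np

_rng = np.random.default_rng(0)
_a, _b = _rng.standard_normal(1000), _rng.standard_normal(500) + 3.0
_n1, _n2 = len(_a), len(_b)
_co1, _co2 = _n1 / (_n1 + _n2), _n2 / (_n1 + _n2)
_mean = _co1 * _a.mean() + _co2 * _b.mean()
_var = _co1 * (_a.var() + _a.mean() ** 2) + _co2 * (_b.var() + _b.mean() ** 2) - _mean ** 2
_full = np.concatenate([_a, _b])
assert np.isclose(_mean, _full.mean()) and np.isclose(_var, _full.var())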
def apply(self, source_array):
    # split the image into its channels as CuPy (GPU) arrays; index 0 is blue (BGR layout)
    blue_channel = cp.asarray(source_array[:, :, 0])
    green_channel = cp.asarray(source_array[:, :, 1])
    red_channel = cp.asarray(source_array[:, :, 2])
    # determine the launch parameters for the CUDA blur
    height, width = source_array.shape[:2]
    dim_grid_x = math.ceil(width / DIM_BLOCK)
    dim_grid_y = math.ceil(height / DIM_BLOCK)
    # blur each channel of the image
    for channel in (red_channel, green_channel, blue_channel):
        self.apply_filter(
            (dim_grid_x, dim_grid_y),
            (DIM_BLOCK, DIM_BLOCK),
            (
                channel,
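# The apply_filter call above is truncated in the source. The pattern it follows
# is CuPy's RawKernel launch convention, kernel(grid, block, args). A minimal,
# self-contained sketch of the same pattern with a trivial 3x3 box blur (this
# kernel source is illustrative, not the project's):
import math
import numpy as np
import cupy as cp

DIM_BLOCK = 32

_box_blur = cp.RawKernel(r'''
extern "C" __global__
void box_blur(const float* src, float* dst, const int width, const int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    float acc = 0.0f;
    int n = 0;
    for (int dy = -1; dy <= 1; dy++) {
        for (int dx = -1; dx <= 1; dx++) {
            int xx = x + dx, yy = y + dy;
            if (xx >= 0 && xx < width && yy >= 0 && yy < height) {
                acc += src[yy * width + xx];
                n++;
            }
        }
    }
    dst[y * width + x] = acc / n;
}
''', 'box_blur')

def blur_channel_sketch(channel):
    """Blur one 2-D float32 channel with the kernel above; returns a new array."""
    channel = cp.ascontiguousarray(channel, dtype=cp.float32)
    height, width = channel.shape
    out = cp.empty_like(channel)
    grid = (math.ceil(width / DIM_BLOCK), math.ceil(height / DIM_BLOCK))
    _box_blur(grid, (DIM_BLOCK, DIM_BLOCK),
              (channel, out, np.int32(width), np.int32(height)))
    return out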
else:
    self.value[i] = 1
    preferenceMutationCount += 1
if mutatePreferenceProbability:
    if 1.0 - intensity <= np.random.uniform(low=0.0, high=1.0, size=None):
        self.value[i + 1] = np.random.uniform(low=0.0, high=1.0, size=None)
        probabilityMutationCount += 1
i += 1
if preferenceMutationCount > 0 or probabilityMutationCount > 0:
    warnings.warn("mutation occurred in %s preference(s) and %s preference weight(s)!"
                  % (preferenceMutationCount, probabilityMutationCount))
else:
    warnings.warn("mutateDNA called but no mutation detected; try increasing intensity, "
                  "changing mutatePreferenceProbability, and/or setting mutatePreference = True!")
if cp.cuda.is_available() and self.useGPU:
    if self.createInGPUMem:
        # values are interleaved: preferences at even indices, weights at odd indices
        self.valueWeight = cp.asarray(self.value[1::2], dtype=np.float64)
        self.valuePreference = cp.asarray(self.value[0::2], dtype=np.float64)
def getDnaType(self, *args, **kwargs):
def to_gpu(*args):
    """Upload numpy arrays to the GPU and return them."""
    if len(args) > 1:
        # materialize as a tuple so the result can be unpacked or reused
        return tuple(cp.asarray(x) for x in args)
    else:
        return cp.asarray(args[0])
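# Usage, showing the two return shapes (a lone array vs an unpackable tuple):
x_gpu = to_gpu(np.arange(4))                    # single cupy.ndarray
a_gpu, b_gpu = to_gpu(np.zeros(3), np.ones(3))  # tuple of cupy.ndarrays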
def convert_arrays_to_cupy(arrays):  # pragma: no cover
    """Convert numpy arrays to ``cupy.ndarray`` instances."""
    import cupy
    return [cupy.asarray(x) for x in arrays]
def mexDistances2(Params, Ws, W, iMatch, iC, Wh, mus, mu):
    code, _ = get_cuda('mexDistances2')
    Nspikes = int(Params[0])
    Nfilters = int(Params[2])
    d_Params = cp.asarray(Params, dtype=np.float64, order='F')
    d_Ws = cp.asarray(Ws, dtype=np.float32, order='F')
    d_W = cp.asarray(W, dtype=np.float32, order='F')
    d_iMatch = cp.asarray(iMatch, dtype=bool, order='F')  # np.bool was removed in NumPy >= 1.24
    d_iC = cp.asarray(iC, dtype=np.int32, order='F')
    d_Wh = cp.asarray(Wh, dtype=np.int32, order='F')
    d_mu = cp.asarray(mu, dtype=np.float32, order='F')
    d_mus = cp.asarray(mus, dtype=np.float32, order='F')
    d_cmax = cp.zeros(Nspikes * Nfilters, dtype=np.float32, order='F')
    d_id = cp.zeros(Nspikes, dtype=np.int32, order='F')
    d_x = cp.zeros(Nspikes, dtype=np.float32, order='F')
    # get the list of cmax values for each combination of spike and filter
    computeCost = cp.RawKernel(code, 'computeCost')
    # one block per filter, 1024 threads cooperating over the spikes
    computeCost(
        (Nfilters,), (1024,),
        (d_Params, d_Ws, d_mus, d_W, d_mu, d_iMatch, d_iC, d_Wh, d_cmax))
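    # The wrapper is truncated here; the full version typically launches a second
    # kernel that reduces d_cmax over filters to fill d_id and d_x. A hedged
    # host-side equivalent of that reduction (a sketch, not the CUDA code; the
    # real kernel defines what "best" means, taken as the maximum here):
    cmax = d_cmax.reshape((Nspikes, Nfilters), order='F')  # (spike, filter) layout
    d_id = cp.argmax(cmax, axis=1).astype(cp.int32)        # best filter per spike
    d_x = cmax[cp.arange(Nspikes), d_id]                   # its score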