# for each spike, estimate the probability that it comes from either Gaussian cluster
logp[:, 0] = -1. / 2 * cp.log(s1) - ((x - mu1) ** 2) / (2 * s1) + cp.log(p)
logp[:, 1] = -1. / 2 * cp.log(s2) - ((x - mu2) ** 2) / (2 * s2) + cp.log(1 - p)
lMax = logp.max(axis=1)
logp = logp - lMax[:, cp.newaxis] # subtract the max for floating point accuracy
rs = cp.exp(logp) # exponentiate the probabilities
pval = cp.log(cp.sum(rs, axis=1)) + lMax # get the normalizer and add back the max
logP[k] = cp.mean(pval)  # this is the cost function: we can monitor its increase
rs = rs / cp.sum(rs, axis=1)[:, cp.newaxis] # normalize so that probabilities sum to 1
p = cp.mean(rs[:, 0])  # mean probability to be assigned to Gaussian 1
# new estimate of mean of cluster 1 (weighted by "responsibilities")
mu1 = cp.dot(rs[:, 0], x) / cp.sum(rs[:, 0])
# new estimate of mean of cluster 2 (weighted by "responsibilities")
mu2 = cp.dot(rs[:, 1], x) / cp.sum(rs[:, 1])
s1 = cp.dot(rs[:, 0], (x - mu1) ** 2) / cp.sum(rs[:, 0]) # new estimates of variances
s2 = cp.dot(rs[:, 1], (x - mu2) ** 2) / cp.sum(rs[:, 1])
if (k >= 10) and (k % 2 == 0):
    # starting at iteration 10, we start re-estimating the pursuit direction
    # that is, given the Gaussian cluster assignments, and the means and variances,
    # we re-estimate w
    # these equations follow from the model
    StS = cp.matmul(
        clp.T, clp * (rs[:, 0] / s1 + rs[:, 1] / s2)[:, cp.newaxis]) / nSpikes
    StMu = cp.dot(clp.T, rs[:, 0] * mu1 / s1 + rs[:, 1] * mu2 / s2) / nSpikes

    # this is the new estimate of the best pursuit direction:
    # solve StS @ w = StMu (StS is symmetric) and renormalize
    w = cp.linalg.solve(StS, StMu)
    w = w / cp.sum(w ** 2) ** .5
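
# A minimal NumPy sketch of the E-step above (illustrative, not part of the
# pipeline): toy values for mu1, mu2, s1, s2 and the mixing weight p, with the
# same log-sum-exp trick keeping the exponentials numerically stable.
import numpy as np

x = np.random.randn(1000)                    # toy 1-D projections
mu1, mu2, s1, s2, p = -1.0, 1.0, 1.0, 1.0, 0.5

logp = np.empty((x.size, 2))
logp[:, 0] = -0.5 * np.log(s1) - (x - mu1) ** 2 / (2 * s1) + np.log(p)
logp[:, 1] = -0.5 * np.log(s2) - (x - mu2) ** 2 / (2 * s2) + np.log(1 - p)

lMax = logp.max(axis=1)
rs = np.exp(logp - lMax[:, np.newaxis])      # shifted so exp() cannot overflow
pval = np.log(rs.sum(axis=1)) + lMax         # per-spike log-likelihood
rs /= rs.sum(axis=1)[:, np.newaxis]          # responsibilities sum to 1
mu1 = rs[:, 0].dot(x) / rs[:, 0].sum()       # weighted M-step update, as above
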
if ibatch < niter - nBatches - 1:
    # during the main "learning" phase of fitting a model
    if ibatch % 5 == 0:
        # this drops templates based on spike rates and/or similarities to
        # other templates
        W, U, dWU, mu, nsp, ndrop = triageTemplates2(
            params, iW, C2C, W, U, dWU, mu, nsp, ndrop)

    Nfilt = W.shape[1]  # update the number of filters
    Params[1] = Nfilt

    # this adds new templates if they are detected in the residual
    dWU0, cmap = mexGetSpikes2(Params, drez, wTEMP, iC)

    if dWU0.shape[2] > 0:
        # new templates need to be integrated into the same format as all templates
        # apply PCA for smoothing purposes
        dWU0 = cp.reshape(cp.dot(wPCAd, cp.dot(
            wPCAd.T, dWU0.reshape(
                (dWU0.shape[0], dWU0.shape[1] * dWU0.shape[2]), order='F'))),
            dWU0.shape, order='F')
        dWU = cp.concatenate((dWU, dWU0), axis=2)

        m = dWU0.shape[2]
        # initialize temporal components of waveforms
        W = _extend(W, Nfilt, Nfilt + m, W0[:, cp.ones(m, dtype=np.int32), :], axis=1)
        # initialize the number of spikes with the minimum allowed
        nsp = _extend(nsp, Nfilt, Nfilt + m, params.minFR * NT / params.fs)
        # initialize the amplitude of this spike with a lowish number
        mu = _extend(mu, Nfilt, Nfilt + m, 10)
        # if the number of filters exceeds the maximum allowed, clip it
        Nfilt = min(params.Nfilt, W.shape[1])
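
# _extend is a pykilosort helper; the calls above grow an array along one axis
# and fill the newly added slots. A plausible minimal NumPy version, shown here
# only for readability (the real implementation may differ):
import numpy as np

def _extend_sketch(x, i0, i1, val, axis=0):
    if x.shape[axis] < i1:
        pad = [(0, 0)] * x.ndim
        pad[axis] = (0, i1 - x.shape[axis])
        x = np.pad(x, pad)                   # zero-pad up to the new length
    idx = [slice(None)] * x.ndim
    idx[axis] = slice(i0, i1)
    x[tuple(idx)] = val                      # fill the new slots [i0:i1)
    return x

# e.g. nsp = _extend_sketch(nsp, Nfilt, Nfilt + m, params.minFR * NT / params.fs)
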
def forward(self, x):
    W, b = self.params
    out = cp.dot(x, W) + b
    self.x = x
    return out
Nchan = probe.Nchan
xcoords = probe.xc
ycoords = probe.yc
chanMap = probe.chanMap
chanMap0ind = chanMap # - 1
nt0, Nfilt = W.shape[:2]
# (DEV_NOTES) 2 lines below can be combined
# templates = cp.einsum('ikl,jkl->ijk', U, W).astype(cp.float32)
# templates = cp.zeros((Nchan, nt0, Nfilt), dtype=np.float32, order='F')
tempAmpsUnscaled = cp.zeros(Nfilt, dtype=np.float32)
templates_writer = NpyWriter(join(savePath, 'templates.npy'), (Nfilt, nt0, Nchan), np.float32)
for iNN in tqdm(range(Nfilt), desc="Computing templates"):
    t = cp.dot(U[:, iNN, :], W[:, iNN, :].T).T
    templates_writer.append(t)
    t_unw = cp.dot(t, whiteningMatrixInv)
    assert t_unw.ndim == 2
    tempChanAmps = t_unw.max(axis=0) - t_unw.min(axis=0)
    tempAmpsUnscaled[iNN] = tempChanAmps.max()
templates_writer.close()
# templates = cp.transpose(templates, (2, 1, 0)) # now it's nTemplates x nSamples x nChannels
# we include all channels so this is trivial
templatesInds = cp.tile(np.arange(Nfilt), (Nchan, 1))
# here we compute the amplitude of every template...
# unwhiten all the templates
# tempsUnW = cp.einsum('ijk,kl->ijl', templates, whiteningMatrixInv)
# tempsUnW = cp.zeros(templates.shape, dtype=np.float32, order='F')
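
# Toy NumPy illustration of the template loop above, with made-up shapes: a
# template is the low-rank product of its spatial (U) and temporal (W)
# components, and its unscaled amplitude is the largest peak-to-peak range
# across channels after unwhitening.
import numpy as np

nt0, Nchan, nrank = 61, 32, 3
U_i = np.random.randn(Nchan, nrank)          # spatial components, one template
W_i = np.random.randn(nt0, nrank)            # temporal components
whiteningMatrixInv = np.eye(Nchan)           # identity here, for simplicity

t = np.dot(U_i, W_i.T).T                     # (nt0, Nchan), as in the loop
t_unw = np.dot(t, whiteningMatrixInv)        # back to raw channel space
tempChanAmps = t_unw.max(axis=0) - t_unw.min(axis=0)
amplitude = tempChanAmps.max()
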
def backward(self, dout):
    x = self.x
    N, T, D = x.shape
    W, b = self.params
    dout = dout.reshape(N * T, -1)
    rx = x.reshape(N * T, -1)
    db = cp.sum(dout, axis=0)
    dW = cp.dot(rx.T, dout)
    dx = cp.dot(dout, W.T)
    dx = dx.reshape(*x.shape)
    self.grads[0][...] = dW
    self.grads[1][...] = db
    return dx
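
# A quick finite-difference check of the backward pass above (NumPy stands in
# for CuPy; shapes and names here are illustrative). The analytic dW should
# match the numerical gradient of the scalar loss sum(out * dout).
import numpy as np

N, T, D, H = 2, 3, 4, 5
x = np.random.randn(N, T, D)
W = np.random.randn(D, H)
b = np.random.randn(H)
dout = np.random.randn(N, T, H)

def forward(x, W, b):
    return x.reshape(N * T, D).dot(W).reshape(N, T, H) + b

# analytic gradient, exactly as computed in backward()
dW = x.reshape(N * T, D).T.dot(dout.reshape(N * T, H))

eps = 1e-6
dW_num = np.zeros_like(W)
for i in range(D):
    for j in range(H):
        W[i, j] += eps
        fp = np.sum(forward(x, W, b) * dout)
        W[i, j] -= 2 * eps
        fm = np.sum(forward(x, W, b) * dout)
        W[i, j] += eps
        dW_num[i, j] = (fp - fm) / (2 * eps)

assert np.allclose(dW, dW_num, atol=1e-4)
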
offset = Nchan * batchstart[ibatch]
dat = proc.flat[offset:offset + NT * Nchan].reshape((-1, Nchan), order='F')
if dat.shape[0] == 0:
    continue
# move data to GPU and scale it back to unit variance
dataRAW = cp.asarray(dat, dtype=np.float32) / params.scaleproc
# find isolated spikes from each batch
row, col, mu = isolated_peaks_new(dataRAW, params)
# for each peak, get the voltage snippet from that channel
c = get_SpikeSample(dataRAW, row, col, params)
# scale covariance down by 1,000 to maintain a good dynamic range
CC = CC + cp.dot(c, c.T) / 1e3
# the singular vectors of the covariance matrix are the PCs of the waveforms
U, Sv, V = svdecon(CC)
wPCA = U[:, :nPCs] # take as many as needed
# adjust the arbitrary sign of the first PC so its negativity is downward
# TODO: unclear - is 20 here the index into the spike waveform? Should this be hardcoded?
# - should it be nt0min instead?
wPCA[:, 0] = -wPCA[:, 0] * cp.sign(wPCA[20, 0])
return wPCA
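
# Why the singular vectors of CC are the waveform PCs: CC accumulates X @ X.T
# over snippet matrices, and the left singular vectors of X @ X.T equal those
# of X itself. A NumPy check on toy data:
import numpy as np

X = np.random.randn(61, 5000)                # nt0 samples x many snippets
CC = X.dot(X.T) / 1e3                        # scaled as in the code above
U_cc = np.linalg.svd(CC)[0]
U_x = np.linalg.svd(X, full_matrices=False)[0]
# leading columns agree up to sign
assert np.allclose(np.abs(U_cc[:, :3]), np.abs(U_x[:, :3]), atol=1e-6)
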
M = M.T # this implementation is a bit faster with smaller shape[1]
if lib == 'cupy':
    M = cupy.asarray(M)
# Generate normal random vectors with shape (M.shape[1], n_random)
Q = random_state.normal(size=(M.shape[1], n_random))
Q = cupy.asarray(Q)
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
    Q = cupy.dot(M, Q)
    Q = cupy.dot(M.T, Q)
# Sample the range of M by linear projection of Q and extract an orthonormal basis
Q, _ = cupy.linalg.qr(cupy.dot(M, Q), mode='reduced')
# project M to the (k + p) dimensional space using the basis vectors
B = cupy.dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = cupy.linalg.svd(B, full_matrices=False, compute_uv=True)
del B
U = cupy.dot(Q, Uhat)
if transpose:
    # transpose back the results according to the input convention
    U, V = V.T, U.T
return U, s, V
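
# Sanity check for the randomized SVD above on a synthetic low-rank matrix,
# written with NumPy so it runs without a GPU; n_random and n_iter are
# illustrative. With the range captured, the leading singular values should
# match the exact SVD.
import numpy as np

rng = np.random.RandomState(0)
M = rng.randn(200, 10).dot(rng.randn(10, 300))    # rank-10 matrix
n_random, n_iter = 15, 4                          # target rank + oversampling

Q = rng.normal(size=(M.shape[1], n_random))
for _ in range(n_iter):                           # power iterations
    Q = M.T.dot(M.dot(Q))
Q, _ = np.linalg.qr(M.dot(Q))                     # orthonormal basis of range
B = Q.T.dot(M)                                    # project M onto the basis
Uhat, s, V = np.linalg.svd(B, full_matrices=False)
U = Q.dot(Uhat)

s_exact = np.linalg.svd(M, compute_uv=False)
assert np.allclose(s[:10], s_exact[:10], rtol=1e-6)
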
def backward(self, dout):
    W, = self.params
    dx = cp.dot(dout, W.T)
    dW = cp.dot(self.x.T, dout)
    self.grads[0][...] = dW
    return dx
k = k + c.shape[1]
if k > 1e5:
    break
# discard empty samples
# dd = dd[:, :k]
dd = cp.asfortranarray(cp.concatenate(dds, axis=1).astype(np.float32))
# initialize the template clustering with random waveforms
uu = np.random.permutation(dd.shape[1])[:nPCs]
wTEMP = dd[:, uu]
wTEMP = wTEMP / cp.sum(wTEMP ** 2, axis=0) ** .5 # normalize them
for i in range(10):
    # at each iteration, assign each waveform to its most correlated cluster
    cc = cp.dot(wTEMP.T, dd)
    imax = cp.argmax(cc, axis=0)
    amax = cc[imax, np.arange(cc.shape[1])]
    for j in range(nPCs):
        # weighted average to get new cluster means
        wTEMP[:, j] = cp.dot(dd[:, imax == j], amax[imax == j].T)
    wTEMP = wTEMP / cp.sum(wTEMP ** 2, axis=0) ** .5  # unit normalize
# the PCs are just the left singular vectors of the waveforms
U, Sv, V = svdecon(dd)
# take as many as needed
wPCA = U[:, :nPCs]
# adjust the arbitrary sign of the first PC so its negativity is downward
wPCA[:, 0] = -wPCA[:, 0] * cp.sign(wPCA[nt0min, 0])
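
# The loop above is a correlation-weighted k-means on unit-norm waveforms.
# A compact NumPy restatement on toy data (columns of dd are waveforms; shapes
# are made up):
import numpy as np

nt0, nwaves, nPCs = 61, 2000, 6
dd = np.random.randn(nt0, nwaves)
wTEMP = dd[:, np.random.permutation(nwaves)[:nPCs]]
wTEMP /= np.sqrt((wTEMP ** 2).sum(axis=0))

for _ in range(10):
    cc = wTEMP.T.dot(dd)                     # correlation with each center
    imax = cc.argmax(axis=0)                 # most correlated center
    amax = cc[imax, np.arange(nwaves)]
    for j in range(nPCs):
        wTEMP[:, j] = dd[:, imax == j].dot(amax[imax == j])
    # small epsilon guards against an empty cluster in this toy version
    wTEMP /= np.sqrt((wTEMP ** 2).sum(axis=0)) + 1e-12
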
iW = cp.argmax(cp.abs(dWU[nt0min - 1, :, :]), axis=0)
# extract ALL features on the last pass
Params[12] = 2 # this is a flag to output features (PC and template features)
# different threshold on last pass?
Params[2] = params.Th[-1] # usually the threshold is much lower on the last pass
# memorize the state of the templates
logger.debug("Memorized middle timepoint.")
ir.W, ir.dWU, ir.U, ir.mu = W, dWU, U, mu
ir.Wraw = cp.zeros(
    (U.shape[0], W.shape[0], U.shape[1]), dtype=np.float64, order='F')
for n in range(U.shape[1]):
    # temporarily use U rather than Urot until I have a chance to test it
    ir.Wraw[:, :, n] = mu[n] * cp.dot(U[:, n, :], W[:, n, :].T)