import numpy as np
import cupy
import cupy as cp
# (config and gpu_config below are module-level settings from each snippet's own repo)

def _proj_sample(self, x, P):
    if gpu_config.use_gpu:
        xp = cp.get_array_module(x[0])
    else:
        xp = np
    # project each feature block x_ onto its projection matrix P_
    return [xp.matmul(P_.T, x_) for x_, P_ in zip(x, P)]
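All of these snippets share one idiom: ask cp.get_array_module which backend owns an array, then write the math once against the returned module. A minimal standalone sketch of that pattern (the scale function is illustrative, not from any repo above):

import numpy as np
import cupy as cp

def scale(x):
    # numpy for np.ndarray inputs, cupy for cp.ndarray inputs
    xp = cp.get_array_module(x)
    return xp.multiply(x, 2.0)

scale(np.arange(3))    # CPU: dispatches to numpy
# scale(cp.arange(3))  # GPU: same code path, dispatches to cupy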
def fft2(x):
    """2-D FFT built from two 1-D FFTs, so the same code runs on CPU and GPU."""
    if config.use_gpu:
        xp = cp.get_array_module(x)
    else:
        xp = np
    return xp.fft.fft(xp.fft.fft(x, axis=1), axis=0).astype(xp.complex64)
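As a quick sanity check (a sketch assuming config.use_gpu is False, so the NumPy path runs), composing the two 1-D FFTs matches a single 2-D FFT:

x = np.random.rand(4, 6).astype(np.float32)
expected = np.fft.fft2(x).astype(np.complex64)
assert np.allclose(fft2(x), expected, atol=1e-4)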
def _find_gram_vector(self, samplesf, new_sample, num_training_samples):
    xp = cp.get_array_module(samplesf[0])
    # entries for unused sample slots stay at +inf
    gram_vector = xp.inf * xp.ones(config.num_samples)
    if num_training_samples > 0:
        ip = 0.
        # accumulate the inner product over all feature blocks
        for k in range(len(new_sample)):
            samplesf_ = samplesf[k][:, :, :, :num_training_samples]
            samplesf_ = samplesf_.reshape((-1, num_training_samples))
            new_sample_ = new_sample[k].flatten()
            ip += xp.real(2 * samplesf_.T.dot(xp.conj(new_sample_)))
        gram_vector[:num_training_samples] = ip
    return gram_vector
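Each gram-vector entry accumulates the Fourier-domain inner product between a stored sample and the new sample across feature blocks; the factor of 2 suggests only half of a conjugate-symmetric spectrum is stored per sample. A sketch of one block's contribution, with made-up shapes:

# 3 stored samples flattened to 64 coefficients each (hypothetical sizes)
A = np.random.randn(64, 3) + 1j * np.random.randn(64, 3)
v = np.random.randn(64) + 1j * np.random.randn(64)  # the new sample
ip = np.real(2 * A.T.dot(np.conj(v)))  # one inner product per stored sample
print(ip.shape)  # (3,)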
def cifft2(xf):
    if config.use_gpu:
        xp = cp.get_array_module(xf)
    else:
        xp = np
    # undo the centered (fftshift-ed) spectrum layout, then inverse transform
    x = xp.real(ifft2(xp.fft.ifftshift(xp.fft.ifftshift(xf, 0), 1))).astype(xp.float32)
    return x
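cifft2 relies on an ifft2 helper that is not shown here; assuming it mirrors fft2 above (two 1-D inverse transforms), a plausible reconstruction is:

def ifft2(x):
    if config.use_gpu:
        xp = cp.get_array_module(x)
    else:
        xp = np
    # assumed mirror of fft2: inverse transform along each axis in turn
    return xp.fft.ifft(xp.fft.ifft(x, axis=1), axis=0)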
def resize(X, new_shape):
    """
    Zero-pad an array and reshape it to ``new_shape``.

    Stand-in for ``np.resize``, since CuPy currently does not
    support direct resizing of arrays.

    :param X: Input array; the padding logic assumes a 2-D column
        vector of shape ``(n, 1)``.
    :param new_shape: Desired shape. Must have the same number of
        dimensions as ``X`` and at least as many elements.
    :return: The zero-padded array reshaped to ``new_shape``.
    """
    xp = cp.get_array_module(X)
    # number of zero rows needed to reach the target element count
    length_diff = (new_shape[0] * new_shape[1]) - len(X)
    z = xp.zeros((length_diff, X.shape[1]))
    # pad the input with zeros, then reshape
    z = xp.concatenate([X, z])
    return z.reshape(new_shape)
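A short usage example under the (n, 1) column-vector assumption noted in the docstring: six values padded with three zeros, laid out row-major as 3x3.

X = np.arange(6, dtype=np.float32).reshape(6, 1)
Y = resize(X, (3, 3))
# Y:
# [[0. 1. 2.]
#  [3. 4. 5.]
#  [0. 0. 0.]]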
def perturbation_with_max_norm_constraint(x, norm):
    # saturate to the max-norm (L-infinity) ball: every entry is +/-norm or 0
    xp = cupy.get_array_module(x)
    return norm * xp.sign(x)
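Wherever x is nonzero, the returned perturbation sits exactly on the boundary of the L-infinity ball of radius norm; a small NumPy check:

x = np.array([0.5, -2.0, 0.0])
delta = perturbation_with_max_norm_constraint(x, 0.1)
# delta == [0.1, -0.1, 0.0]; |delta| never exceeds 0.1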
def init_gx(self, inputs):
    xp = cupy.get_array_module(*inputs.data)
    # gradient buffer on the same backend as the input, viewed as a 2-D matrix
    self.gx = as_mat(xp.zeros_like(inputs.data))
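as_mat is not defined in this snippet; in the Chainer-style code this appears to come from, it usually flattens trailing axes into a (batch, features) matrix. A hedged reconstruction:

def as_mat(x):
    # assumed helper: view x as a 2-D (batch, features) matrix
    return x.reshape(len(x), -1)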
def lhs_operation(hf, samplesf, reg_filter, sample_weights):
    """
    Left-hand-side operation applied at each Conjugate Gradient iteration:
    the normal-equation operator A^H diag(sample_weights) A f (data term)
    plus the regularization given by reg_filter.
    """
    xp = cp.get_array_module(hf[0][0])
    num_features = len(hf[0])
    filter_sz = np.zeros((num_features, 2), np.int32)
    for i in range(num_features):
        filter_sz[i, :] = np.array(hf[0][i].shape[:2])
    # index of the feature block with the largest spatial size
    k1 = np.argmax(filter_sz[:, 0])
    block_inds = list(range(0, num_features))
    block_inds.remove(k1)
    # full spectrum width recovered from the stored half-spectrum
    output_sz = np.array([hf[0][k1].shape[0], hf[0][k1].shape[1] * 2 - 1])
    # compute the operation corresponding to the data term in the optimization
    # implements: A^H diag(sample_weights) A f
    # sum over all features and feature blocks