def sparse_lowrank_mse(name_size):
name, size = name_size
print(name, size)
matrix = named_target_matrix(name, size)
M = matrix
lambda1 = cp.Parameter(nonneg=True)
lambda2 = cp.Parameter(nonneg=True)
L = cp.Variable((size, size))
S = cp.Variable((size, size))
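    # Objective: mean-squared reconstruction error plus a nuclear-norm penalty on L
    # (low rank) and an l1 penalty on S (sparsity), each scaled by the matrix size.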
    prob = cp.Problem(cp.Minimize(
        cp.sum_squares(M - L - S) / size**2
        + lambda1 / size * cp.norm(L, 'nuc')
        + lambda2 / size**2 * cp.norm(S, 1)
    ))
result = []
for _ in range(ntrials):
l1 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
l2 = np.exp(np.random.uniform(np.log(1e-2), np.log(1e4)))
lambda1.value = l1
lambda2.value = l2
try:
prob.solve()
nnz = (np.abs(S.value) >= 1e-7).sum()
singular_values = np.linalg.svd(L.value, compute_uv=False)
rank = (singular_values >= 1e-7).sum()
n_params = nnz + 2 * rank * size
mse = np.sum((matrix - L.value - S.value)**2) / size**2
result.append((n_params, mse))
        except Exception:
            # Solver failed for this (lambda1, lambda2) pair; skip it.
            pass
    return result
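
# Usage sketch (illustrative, not part of the original snippet): the target name
# below is hypothetical; any name accepted by named_target_matrix would work.
# pareto_points = sparse_lowrank_mse(('dft', 64))
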
# minimum-impulse
J = xi_hat[-1]
# trust region penalty
J_tr = data['w_tr']*sum(eta_hat)
# virtual control penalty
J_vc = data['w_vc']*sum([cvx.norm1(v_hat[k]) for k in range(N)])
data['lrtop_cost'] = dict(J=J,J_tr=J_tr,J_vc=J_vc)
cost = cvx.Minimize(J+J_tr+J_vc)
# constraints
constraints = []
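    # discretized dynamics (with virtual control v), impulse accumulation,
    # and boundary conditions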
constraints += [x[k+1]==A[k]*x[k]+B[k]*u[k]+r[k]+v[k] for k in range(N)]
constraints += [xi[k+1]==xi[k]+sum(u[k]) for k in range(N)]
constraints += [x[0]==state_init,x[-1]==state_final,xi[0]==0]
constraints += [(x[k][:3]-data['lm']['p']).T*I_e>=
cvx.norm(x[k][:3]-data['lm']['p'])*
np.cos(np.deg2rad(data['gamma']))
for k in range(N+1)]
constraints += [u[k]<=data['t_pulse_max'] for k in range(N)]
constraints += [u[k]>=u_lb[k] for k in range(N)]
constraints += [u[k][i] == 0. for k in range(N) for i in range(n_u)
if i in data['thrusters_off']]
constraints += [cvx.quad_form(x_hat[k+1]-x_prev_hat[k+1],np.eye(n_x))
<=eta_hat[k] for k in range(N)]
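    # state-triggered constraints: whenever a trigger indicator (stc_lb, stc_q)
    # is active, the corresponding thruster is forced off or the approach-angle
    # condition must hold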
constraints += [stc_lb[k][i]*u[k][i]==0
for k in range(N) for i in range(n_u)]
constraints += [stc_q[k]*u[k][i]==0 for k in range(N)
for i in range(n_u) if 'p_f' in csm.i2thruster[i]]
constraints += [stc_q[k+1]*
(tools.rqpmat(data['xf']['q'])[0].dot(np.diag([1,-1,-1,-1]))
*x[k][6:10]-np.cos(0.5*np.deg2rad(data['ang_app'])))<=0
for k in range(N)]
def accelerate(self):
"""
Perform acceleration
"""
print("Accelerating...")
import cvxpy as cvx
a = cvx.Variable()
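        # Acceleration weights: minimize the residual norm ||F a|| subject to the
        # weights a being nonnegative and summing to one.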
constraints = [cvx.sum(a) == 1, a >= 0]
objective = cvx.norm(self.work.acceleration.F * a)
problem = cvx.Problem(cvx.Minimize(objective), constraints)
problem.solve()
u = self.work.acceleration.G * a.value
print("u = ", u)
print("Updating work variables...")
x = u[:self.work.data.n]
v = u[self.work.data.n:]
z = self.project(v)
y = self.work.rho_vec * (v - z)
self.work.x = x
self.work.z = z
self.work.y = y
def l1_tf(y, sigma):
"""
    L1 trend filter to denoise the final temporal traces
"""
    if np.abs(sigma / y.max()) <= 1e-3:
        print('Do not denoise (high SNR: noise_level=%.3e)' % sigma)
        return y
#
n = y.size
# Form second difference matrix.
    D = (np.diag(2 * np.ones(n), 0) + np.diag(-1 * np.ones(n - 1), 1)
         + np.diag(-1 * np.ones(n - 1), -1))[1:n - 1]
    x = cvx.Variable(n)
    obj = cvx.Minimize(cvx.norm(D * x, 1))
    constraints = [cvx.norm(y - x, 2) <= sigma * np.sqrt(n)]
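    # Constrained form of the l1 trend filter: minimize the l1 norm of the second
    # differences of x while keeping the fit to y within the expected noise level.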
prob = cvx.Problem(obj, constraints)
#
prob.solve(solver=cvx.ECOS,verbose=False)
# Check for error.
    if prob.status != cvx.OPTIMAL:
        raise Exception("Solver did not converge!")
    return np.asarray(x.value).flatten()
:return: a problem with proximal operator
"""
new_cost = prob.objective.expr
new_constr = prob.constraints
slack_id = [var.id for var in var_slack]
#Add proximal variables
prob_variables = prob.variables()
prob_variables.sort(key = lambda x:x.id)
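    # For every non-slack variable, add the proximal term
    # ||var - var.value||_F^2 / (2 * lambd) to the objective (subtract it when the
    # original objective is a maximization).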
for var in prob_variables:
# add quadratic terms for all variables that are not slacks
if not var.id in slack_id:
if prob.objective.NAME == 'minimize':
new_cost = new_cost + cvx.square(cvx.norm(var - var.value,'fro'))/2/lambd
else:
new_cost = new_cost - cvx.square(cvx.norm(var - var.value,'fro'))/2/lambd
# Define proximal problem
if prob.objective.NAME == 'minimize':
new_prob = cvx.Problem(cvx.Minimize(new_cost), new_constr)
else: # maximize
new_prob = cvx.Problem(cvx.Maximize(new_cost), new_constr)
return new_prob
def FMMC(g,verbose=False):
# Fastest-mixing Markov chain on the graph g
# this is formulation (5), p.672
# Boyd, Diaconis, and Xiao SIAM Rev. 46 (2004) 667-689
a=antiadjacency(g)
n=len(a.keys())
    P = cvxpy.Variable((n, n))  # shape is passed as a tuple in CVXPY >= 1.0
o=np.ones(n)
objective=cvxpy.Minimize(cvxpy.norm(P-1.0/n))
constraints=[P*o==o,P.T==P,P>=0]
for i in a:
for j in a[i]: # i-j is a not-edge of g!
if i!=j: constraints.append(P[i,j]==0)
prob=cvxpy.Problem(objective,constraints)
prob.solve()
if verbose: print('status: %s.'%prob.status,'optimal value=%.6f'%prob.value)
return prob.status,prob.value,P.value
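
# Usage sketch (illustrative), assuming the antiadjacency() helper used above and
# a graph given as an adjacency dict; here a 4-cycle:
# g = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
# status, value, P = FMMC(g, verbose=True)
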
"""Check if CVXPY solver is available"""
if cls._HAS_SDP_SOLVER is None:
if _HAS_CVX:
# pylint:disable=import-error
import cvxpy
solvers = cvxpy.installed_solvers()
if 'CVXOPT' in solvers:
cls._HAS_SDP_SOLVER = True
return
if 'SCS' in solvers:
            # Try an example problem to check whether SCS was built with BLAS;
            # without BLAS, SCS cannot solve matrix problems larger than 2x2.
try:
var = cvxpy.Variable((4, 4), PSD=True)
obj = cvxpy.Minimize(cvxpy.norm(var))
cvxpy.Problem(obj).solve(solver='SCS')
cls._HAS_SDP_SOLVER = True
return
except cvxpy.error.SolverError:
pass
cls._HAS_SDP_SOLVER = False
ind = nx.adjacency_matrix(G).toarray() + np.eye(n)
ind = ~ind.astype(bool)
average_matrix = np.ones((n, n)) / n
one_vec = np.ones(n)
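    # Find a symmetric weight matrix W that is zero on non-edges, has rows summing
    # to one, and is as close as possible (in norm) to uniform averaging; alpha
    # below measures the remaining distance from the averaging matrix.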
W = cvx.Variable((n, n))
if ind.sum() == 0:
prob = cvx.Problem(cvx.Minimize(cvx.norm(W - average_matrix)),
[
W == W.T,
cvx.sum(W, axis=1) == one_vec
])
else:
prob = cvx.Problem(cvx.Minimize(cvx.norm(W - average_matrix)),
[
W[ind] == 0,
W == W.T,
cvx.sum(W, axis=1) == one_vec
])
prob.solve()
W = W.value
W = (W + W.T) / 2
W[ind] = 0
W -= np.diag(W.sum(axis=1) - 1)
alpha = np.linalg.norm(W - average_matrix, 2)
return W, alpha
+ (A * cvx.diag(c0) *dc_vec)[px, :] for px in range(_d)])
noise = y - sig
else:
sig = cvx.vstack([c[u, :] + b[u] + c0[u] * dc_vec[u, :] for u in range(_u)])
noise = y - sig
noise = cvx.vstack(
[cvx.norm(noise[i, :], 2) for i in range(noise.shape[0])])
# construct constraints
cons = []
cons.append(b >= np.min(y, axis=-1)) # baseline larger than minimum
cons.append(c0 >= 0) # initial fluorescence larger than 0
cons.append(s >= 0) # spike train non-negativity
# noise constraints
cons_noise = [noise[i] <= thres_sn[i] for i in range(thres_sn.shape[0])]
try:
obj = cvx.Minimize(cvx.sum(cvx.norm(s, 1, axis=1)))
prob = cvx.Problem(obj, cons + cons_noise)
if use_cons:
_ = prob.solve(solver='ECOS')
if not (prob.status == 'optimal'
or prob.status == 'optimal_inaccurate'):
if use_cons:
warnings.warn("constrained version of problem infeasible")
raise ValueError
except (ValueError, cvx.SolverError):
lam = sn * sparse_penal / sn.shape[0] # hacky correction for near-linear relationship between sparsity and number of concurrently updated units
obj = cvx.Minimize(cvx.sum(cvx.sum(noise, axis=1) + lam * cvx.norm(s, 1, axis=1)))
prob = cvx.Problem(obj, cons)
try:
_ = prob.solve(solver='ECOS', max_iters=max_iters)
if prob.status in ["infeasible", "unbounded", None]:
raise ValueError