if copy:
    factors = [tl.copy(f) for f in factors]
    core = tl.copy(core)

if contract:
    # Contracting with a vector: pop this mode's factor and absorb it
    # into the core, which ends up with one mode fewer
    f = factors.pop(mode)
    core = mode_dot(core, tl.dot(matrix_or_vector, f), mode=mode)
else:
    factors[mode] = tl.dot(matrix_or_vector, factors[mode])

return core, factors
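# --- Usage sketch (illustrative, not part of the snippet above) ---
# A minimal example of the mode-wise product the snippet builds on,
# assuming TensorLy's public API (tl.tensor, tenalg.mode_dot); the
# shapes are made up for illustration.
import numpy as np
import tensorly as tl
from tensorly.tenalg import mode_dot

tensor = tl.tensor(np.random.rand(3, 4, 5))
matrix = tl.tensor(np.random.rand(6, 3))

# Mode-0 product with a matrix: mode 0 grows from 3 to 6
res = mode_dot(tensor, matrix, mode=0)
assert res.shape == (6, 4, 5)

# Mode-0 product with a vector: mode 0 is contracted away
vec = tl.tensor(np.random.rand(3))
assert mode_dot(tensor, vec, mode=0).shape == (4, 5)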
    if matrix_or_vector.shape[0] != tensor.shape[mode]:
        raise ValueError(
            'shapes {0} and {1} not aligned for mode-{2} multiplication: {3} (mode {2}) != {4} (vector size)'.format(
                tensor.shape, matrix_or_vector.shape, mode, tensor.shape[mode], matrix_or_vector.shape[0]))
    if len(new_shape) > 1:
        new_shape.pop(mode)
    else:
        new_shape = [1]
    vec = True
else:  # matrix_or_vector is neither a matrix nor a vector
    raise ValueError('Can only take n_mode_product with a vector or a matrix. '
                     'Provided array of dimension {} not in [1, 2].'.format(T.ndim(matrix_or_vector)))

res = T.dot(matrix_or_vector, unfold(tensor, mode))

if vec:  # We contracted with a vector: res is a vector, reshape it back
    return vec_to_tensor(res, shape=new_shape)
else:  # We multiplied with a matrix: refold the unfolding
    return fold(res, fold_mode, new_shape)
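# --- Equivalence check (illustrative) ---
# The branches above rely on the identity used throughout TensorLy: a
# mode-n product is a matrix product with the mode-n unfolding, refolded
# to the new shape. A small numerical check, with made-up shapes:
import numpy as np
import tensorly as tl
from tensorly import unfold, fold
from tensorly.tenalg import mode_dot

tensor = tl.tensor(np.random.rand(3, 4, 5))
matrix = tl.tensor(np.random.rand(6, 4))

new_shape = [3, 6, 5]
direct = mode_dot(tensor, matrix, mode=1)
via_unfolding = fold(tl.dot(matrix, unfold(tensor, 1)), 1, new_shape)
assert np.allclose(tl.to_numpy(direct), tl.to_numpy(via_unfolding))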
    if not keep_dim:
        contract = True  # Contract over that mode
else:  # matrix_or_vector is neither a matrix nor a vector
    raise ValueError('Can only take n_mode_product with a vector or a matrix.')
if copy:
    factors = [T.copy(f) for f in factors]
    weights = T.copy(weights)

if contract:
    # Contracting with a vector removes this mode: pop its factor,
    # reduce it to a row vector, and absorb it into an adjacent factor
    factor = factors.pop(mode)
    factor = T.dot(matrix_or_vector, factor)
    mode = max(mode - 1, 0)
    factors[mode] *= factor
else:
    factors[mode] = T.dot(matrix_or_vector, factors[mode])

return KruskalTensor((weights, factors))
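# --- Sanity check (illustrative) ---
# For a Kruskal/CP tensor, a mode-n product only touches factor n. A quick
# numerical check, assuming a TensorLy version whose kruskal_to_tensor
# accepts a (weights, factors) pair, as KruskalTensor((weights, factors))
# above suggests:
import numpy as np
import tensorly as tl
from tensorly.kruskal_tensor import kruskal_to_tensor
from tensorly.tenalg import mode_dot

weights = tl.tensor(np.ones(2))
factors = [tl.tensor(np.random.rand(s, 2)) for s in (3, 4, 5)]
matrix = tl.tensor(np.random.rand(6, 4))

expected = mode_dot(kruskal_to_tensor((weights, factors)), matrix, mode=1)
factors[1] = tl.dot(matrix, factors[1])
assert np.allclose(tl.to_numpy(kruskal_to_tensor((weights, factors))),
                   tl.to_numpy(expected))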
# Initialise the weights randomly
G = T.tensor(rng.randn(*self.weight_ranks), **T.context(X))
W = []
for i in range(1, T.ndim(X)):  # First dimension of X = number of samples
    W.append(T.tensor(rng.randn(X.shape[i], G.shape[i - 1]), **T.context(X)))

# Norm of the weight tensor at each iteration
norm_W = []

for iteration in range(self.n_iter_max):
    # Optimise each mode of W in turn, keeping the others fixed
    for i in range(len(W)):
        phi = partial_tensor_to_vec(
            T.dot(partial_unfold(X, i),
                  T.dot(kronecker(W, skip_matrix=i),
                        T.transpose(unfold(G, i)))))
        # Regress phi on y: we could call a package here, e.g. scikit-learn
        inv_term = T.dot(T.transpose(phi), phi) + \
            self.reg_W * T.tensor(np.eye(phi.shape[1]), **T.context(X))
        W_i = vec_to_tensor(T.solve(inv_term, T.dot(T.transpose(phi), y)),
                            (X.shape[i + 1], G.shape[i]))
        W[i] = W_i

    # Update the core tensor G
    phi = T.dot(partial_tensor_to_vec(X), kronecker(W))
    G = vec_to_tensor(T.solve(T.dot(T.transpose(phi), phi) + \
                              self.reg_W * T.tensor(np.eye(phi.shape[1]), **T.context(X)),
                              T.dot(T.transpose(phi), y)), G.shape)

    weight_tensor_ = tucker_to_tensor((G, W))
    norm_W.append(T.norm(weight_tensor_, 2))

    # Convergence check on the relative change of the weight norm
    if iteration > 1:
        weight_evolution = abs(norm_W[-1] - norm_W[-2]) / norm_W[-1]
        if weight_evolution <= self.tol:
            if self.verbose:
                print('\nConverged in {} iterations'.format(iteration))
            break

self.weight_tensor_ = weight_tensor_
self.tucker_weight_ = (G, W)
self.vec_W_ = tucker_to_vec((G, W))
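# --- The regression step in isolation (illustrative) ---
# Each update above solves a ridge-regularised least-squares problem,
# (phi^T phi + reg * I) w = phi^T y. The same step in plain NumPy, with
# made-up dimensions; reg plays the role of self.reg_W:
import numpy as np

rng = np.random.RandomState(0)
phi = rng.randn(20, 5)   # design matrix for the current mode
y = rng.randn(20)
reg = 0.1

w = np.linalg.solve(phi.T @ phi + reg * np.eye(phi.shape[1]),
                    phi.T @ y)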
zeros = dispatch_sparse(backend.zeros)
zeros_like = dispatch_sparse(backend.zeros_like)
eye = dispatch_sparse(backend.eye)
clip = dispatch_sparse(backend.clip)
where = dispatch_sparse(backend.where)
max = dispatch_sparse(backend.max)
min = dispatch_sparse(backend.min)
all = dispatch_sparse(backend.all)
mean = dispatch_sparse(backend.mean)
sum = dispatch_sparse(backend.sum)
prod = dispatch_sparse(backend.prod)
sign = dispatch_sparse(backend.sign)
abs = dispatch_sparse(backend.abs)
sqrt = dispatch_sparse(backend.sqrt)
norm = dispatch_sparse(backend.norm)
dot = dispatch_sparse(backend.dot)
kron = dispatch_sparse(backend.kron)
kr = dispatch_sparse(backend.kr)
solve = dispatch_sparse(backend.solve)
qr = dispatch_sparse(backend.qr)
partial_svd = dispatch_sparse(backend.partial_svd)
unfold = dispatch_sparse(base.unfold)
fold = dispatch_sparse(base.fold)
tensor_to_vec = dispatch_sparse(base.tensor_to_vec)
vec_to_tensor = dispatch_sparse(base.vec_to_tensor)
partial_unfold = dispatch_sparse(base.partial_unfold)
partial_fold = dispatch_sparse(base.partial_fold)
partial_tensor_to_vec = dispatch_sparse(base.partial_tensor_to_vec)
partial_vec_to_tensor = dispatch_sparse(base.partial_vec_to_tensor)
kruskal_to_tensor = dispatch_sparse(kruskal_tensor.kruskal_to_tensor)
kruskal_to_unfolded = dispatch_sparse(kruskal_tensor.kruskal_to_unfolded)
kruskal_to_vec = dispatch_sparse(kruskal_tensor.kruskal_to_vec)
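# --- What dispatch_sparse might look like (hypothetical sketch) ---
# The assignments above wrap each backend function so sparse inputs can be
# routed to sparse-aware implementations. The real dispatcher lives in
# TensorLy's sparse contrib module; this standalone sketch only illustrates
# the pattern, with an assumed registry and a duck-typed sparseness check:
import functools

_SPARSE_IMPLS = {}  # assumed registry: function name -> sparse override

def _is_sparse(x):
    # Duck-typed placeholder; real code would test for a sparse array type
    return getattr(x, 'is_sparse', False)

def dispatch_sparse(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        override = _SPARSE_IMPLS.get(func.__name__)
        if override is not None and any(_is_sparse(a) for a in args):
            return override(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper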