indList1 = [[9, 3, 4, 2], [-3, 5, 4], [-1, 10, 9], [-4, 7, 5, 6],
[-2, 7, 10, 8], [1, 6, 2], [1, 8, 3]]
indList2 = [[3, 6, 2, 5], [1, 7, 2], [1, 9, 3], [-3, -4, 7, 8],
[-1, -2, 9, 10], [4, 8, 5], [4, 10, 6]]
indList3 = [[3, 9, 2, 4], [1, 5, 2], [1, 8, 3], [7, -3, 5, 6],
[7, -1, 8, 10], [-4, 6, 4], [-2, 10, 9]]
indList4 = [[3, 6, 2, 5], [-3, 1, 2], [-1, 1, 3], [-4, 4, 5], [-2, 4, 6]]
rhoABout = 0.5 * tn.ncon([
rhoBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
rhoABout = rhoABout + tf.transpose(rhoABout, (1, 0, 3, 2))
else:
rhoABout = rhoABout + 0.5 * tn.ncon([
rhoBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
rhoBAout = 0.5 * tn.ncon([
rhoBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList2)
rhoBAout = rhoBAout + 0.5 * tn.ncon([
rhoAB, v_isometry,
tf.conj(v_isometry), w_isometry,
tf.conj(w_isometry)
], indList4)
return rhoABout, rhoBAout
rhoBA (tf.Tensor): reduced density matrix on B-A lattice
w_isometry (tf.Tensor): MERA isometry
v_isometry (tf.Tensor): MERA isometry
unitary (tf.Tensor): MERA disentangler
refsym (bool): if true, enforce reflection symmetry
Returns:
tf.Tensor, tf.Tensor: the steady-state reduced density matrices rhoAB and rhoBA
"""
for n in range(nsteps):
rhoAB, rhoBA = descending_super_operator(rhoAB, rhoBA, w_isometry,
v_isometry, unitary, refsym)
rhoAB = 1 / 2 * (rhoAB + tf.conj(tf.transpose(
rhoAB, (2, 3, 0, 1)))) / tn.ncon([rhoAB], [[1, 2, 1, 2]])
rhoBA = 1 / 2 * (rhoBA + tf.conj(tf.transpose(
rhoBA, (2, 3, 0, 1)))) / tn.ncon([rhoBA], [[1, 2, 1, 2]])
if refsym:
rhoAB = 0.5 * rhoAB + 0.5 * tf.transpose(rhoAB, (1, 0, 3, 2))
rhoBA = 0.5 * rhoBA + 0.5 * tf.transpose(rhoBA, (1, 0, 3, 2))
return rhoAB, rhoBA
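# The normalization factor used above, tn.ncon([rhoAB], [[1, 2, 1, 2]]), pairs the
# first leg with the third and the second leg with the fourth, i.e. it is the trace
# of the density matrix over its combined row/column legs. A minimal self-contained
# check (a sketch: `chi` and `rho` are illustrative names, the imports mirror the
# ones this file already uses, and the backend is passed explicitly since the
# fragment does not show the file's backend setup):
chi = 3
rho = tf.constant(np.random.randn(chi, chi, chi, chi))
trace_ncon = tn.ncon([rho], [[1, 2, 1, 2]], backend="tensorflow")
trace_mat = tf.linalg.trace(tf.reshape(rho, (chi * chi, chi * chi)))
# trace_ncon and trace_mat agree up to floating-point error.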
def ncon(self, tensors, network_structure, *args, **kwargs):
return tensornetwork.ncon(
tensors, network_structure, *args, **kwargs, backend=self.name)
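# The wrapper above simply forwards to tensornetwork.ncon with this object's backend
# name. As a reminder of the ncon convention used throughout these snippets: positive
# labels are summed over, negative labels become the open legs of the result, ordered
# -1, -2, ... . A minimal sketch with plain numpy inputs (names are illustrative):
import numpy as np
import tensornetwork
A = np.random.randn(2, 3)
B = np.random.randn(3, 4)
# Label 1 joins A's second leg to B's first leg; -1 and -2 are the output legs,
# so this contraction is the ordinary matrix product A @ B.
C = tensornetwork.ncon([A, B], [[-1, 1], [1, -2]])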
def get_amplitude(self, sigmas):
"""
compute the amplitude of configuration `sigma`
This is not very efficient
Args:
sigma (tf.Tensor of shape (n_samples, N): basis configuration
Returns:
tf.Tensor of shape (n_samples): the amplitudes
"""
ds = self.d
dtype = self.dtype
left = tf.expand_dims(tf.ones(shape=(sigmas.shape[0], self.D[0]), dtype=dtype), 1)  # (Nt, 1, Dl)
for site in range(len(self)):
tmp = tn.ncon([self.get_tensor(site), tf.one_hot(sigmas[:, site], ds[site], dtype=dtype)], [[-2, 1, -3], [-1, 1]])  # (Nt, Dl, Dr)
left = tf.matmul(left, tmp) #(Nt, 1, Dr)
return tf.squeeze(left)
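# Inside the loop above, the one-hot contraction selects, for every sample, the slice
# of the site tensor that matches that sample's physical index. A self-contained
# sketch (shapes and names are illustrative, not taken from the class above; the
# backend is passed explicitly since the class's backend setup is not shown here):
d, Dl, Dr = 2, 4, 5
T = tf.constant(np.random.randn(Dl, d, Dr))    # site tensor of shape (Dl, d, Dr)
sig = tf.constant([0, 1, 1])                   # one physical index per sample
onehot = tf.one_hot(sig, d, dtype=T.dtype)     # shape (n_samples, d)
slices = tn.ncon([T, onehot], [[-2, 1, -3], [-1, 1]], backend="tensorflow")
# slices has shape (n_samples, Dl, Dr) and slices[k] equals T[:, sig[k], :].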
unitary (tf.Tensor): disentangler
refsym (bool): if true, enforce reflection symmetry
Returns:
rhoABout (tf.Tensor): descended reduced density matrix on A-B lattice
rhoBAout (tf.Tensor): descended reduced density matrix on B-A lattice
"""
indList1 = [[9, 3, 4, 2], [-3, 5, 4], [-1, 10, 9], [-4, 7, 5, 6],
[-2, 7, 10, 8], [1, 6, 2], [1, 8, 3]]
indList2 = [[3, 6, 2, 5], [1, 7, 2], [1, 9, 3], [-3, -4, 7, 8],
[-1, -2, 9, 10], [4, 8, 5], [4, 10, 6]]
indList3 = [[3, 9, 2, 4], [1, 5, 2], [1, 8, 3], [7, -3, 5, 6],
[7, -1, 8, 10], [-4, 6, 4], [-2, 10, 9]]
indList4 = [[3, 6, 2, 5], [-3, 1, 2], [-1, 1, 3], [-4, 4, 5], [-2, 4, 6]]
rhoABout = 0.5 * tn.ncon([
rhoBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
rhoABout = rhoABout + tf.transpose(rhoABout, (1, 0, 3, 2))
else:
rhoABout = rhoABout + 0.5 * tn.ncon([
rhoBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
indList2 = [[3, 4, 1, 2], [5, 6, -3], [5, 7, -1], [1, 2, 6, 9],
[3, 4, 7, 10], [8, 9, -4], [8, 10, -2]]
indList3 = [[5, 7, 2, 1], [8, 9, -3], [8, 10, -1], [4, 2, 9, 3],
[4, 5, 10, 6], [1, 3, -4], [7, 6, -2]]
indList4 = [[3, 6, 2, 5], [2, 1, -3], [3, 1, -1], [5, 4, -4], [6, 4, -2]]
hamBAout = tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
hamBAout = hamBAout + tf.transpose(hamBAout, (1, 0, 3, 2))
else:
hamBAout = hamBAout + tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
hamBAout = hamBAout + tn.ncon([
hamBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList2)
hamABout = tn.ncon([
hamBA, v_isometry,
tf.conj(v_isometry), w_isometry,
tf.conj(w_isometry)
], indList4)
return hamABout, hamBAout
indList1 = [[7, 8, 10, -1], [4, 3, 9, 2], [10, -3, 9], [7, 5, 4],
[8, -2, 5, 6], [1, -4, 2], [1, 6, 3]]
indList2 = [[7, 8, -1, -2], [3, 6, 2, 5], [1, -3, 2], [1, 9, 3],
[7, 8, 9, 10], [4, -4, 5], [4, 10, 6]]
indList3 = [[7, 8, -2, 10], [3, 4, 2, 9], [1, -3, 2], [1, 5, 3],
[-1, 7, 5, 6], [10, -4, 9], [8, 6, 4]]
uEnv = tn.ncon(
[hamAB, rhoBA, w,
tf.conj(w), tf.conj(u), v,
tf.conj(v)], indList1)
if refsym:
uEnv = uEnv + tf.transpose(uEnv, (1, 0, 3, 2))
else:
uEnv = uEnv + tn.ncon(
[hamAB, rhoBA, w,
tf.conj(w), tf.conj(u), v,
tf.conj(v)], indList3)
uEnv = uEnv + tn.ncon(
[hamBA, rhoBA, w,
tf.conj(w), tf.conj(u), v,
tf.conj(v)], indList2)
return uEnv
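# In the enclosing optimization loop (not shown in this fragment), the environment
# returned here is typically converted into an updated disentangler via the
# SVD-based polar update defined further below, along the lines of
# `unitary = u_update_svd(uEnv)`. This is a sketch of the intended usage, not a
# line copied from the original file.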
def w_update_svd(wIn):
"""
obtain the update to the isometry using tf.linalg.svd
"""
shape = wIn.shape
st, ut, vt = tf.linalg.svd(
tf.reshape(wIn, (shape[0] * shape[1], shape[2])), full_matrices=False)
return -tf.reshape(tn.ncon([ut, tf.conj(vt)], [[-1, 1], [-2, 1]]), shape)
def w_update_svd_numpy(wIn):
"""
obtain the update to the isometry using numpy svd
"""
shape = wIn.shape
ut, st, vt = np.linalg.svd(
tf.reshape(wIn, (shape[0] * shape[1], shape[2])), full_matrices=False)
return -tf.reshape(tn.ncon([ut, vt], [[-1, 1], [1, -2]]), shape)
def u_update_svd(wIn):
"""
obtain the update to the disentangler using tf.linalg.svd
"""
shape = wIn.shape
st, ut, vt = tf.linalg.svd(
tf.reshape(wIn, (shape[0] * shape[1], shape[2] * shape[3])),
full_matrices=False)
return -tf.reshape(tn.ncon([ut, tf.conj(vt)], [[-1, 1], [-2, 1]]), shape)
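# u_update_svd (like the w_* updates above) returns minus the polar factor of its
# input: reshape to a matrix, take the SVD, and recombine u @ v^H while discarding
# the singular values, so the result is an isometry/unitary. A minimal check
# (a sketch: `env` and `chi` are illustrative names, and it assumes this file's
# default tensornetwork backend is set to tensorflow, which the fragment does not show):
chi = 3
env = tf.constant(np.random.randn(chi, chi, chi, chi))
u_new = u_update_svd(env)
mat = tf.reshape(u_new, (chi * chi, chi * chi))
# mat^H @ mat should be the identity up to floating-point error.
check = tf.matmul(mat, mat, adjoint_a=True)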