        tot = binomial(diff + n - 1, diff)

        result = []
        for config_num in range(tot):
            diff_list = _confignum_to_difflist(config_num, diff, n)
            # Skip non-physical configurations
            if any(d > p for d, p in zip(diff_list, diff_max)):
                continue
            # One (j1, m1, j2, m2, j3, m3) tuple per coupling step
            cg_terms = []
            for coupling in coupling_list:
                j1_n, j2_n, j1, j2, j3 = coupling
                m1 = Add(*[jn[x - 1] - diff_list[x - 1] for x in j1_n])
                m2 = Add(*[jn[x - 1] - diff_list[x - 1] for x in j2_n])
                m3 = m1 + m2
                cg_terms.append((j1, m1, j2, m2, j3, m3))
            # Each term is the product of the evaluated CG coefficients
            # times the uncoupled product state
            coeff = Mul(*[CG(*term).doit() for term in cg_terms])
            state = TensorProduct(
                *[evect(j, j - d) for j, d in zip(jn, diff_list)])
            result.append(coeff*state)
        return Add(*result)
    else:
        # Symbolic coupling
        m_str = "m1:%d" % (len(jn) + 1)
        mvals = symbols(m_str)
        cg_terms = [(j1, Add(*[mvals[n - 1] for n in j1_n]),
                     j2, Add(*[mvals[n - 1] for n in j2_n]),
                     j3, Add(*[mvals[n - 1] for n in j1_n + j2_n]))
                    for j1_n, j2_n, j1, j2, j3 in coupling_list[:-1]]
        cg_terms.append(*[(j1, Add(*[mvals[n - 1] for n in j1_n]),
                           j2, Add(*[mvals[n - 1] for n in j2_n]),
                           j, m)
                          for j1_n, j2_n, j1, j2, j3 in [coupling_list[-1]]])
        cg_coeff = Mul(*[CG(*cg_term) for cg_term in cg_terms])
        sum_terms = [(m, -j, j) for j, m in zip(jn, mvals)]
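In the numerical branch above, each coupled term is assembled as a Mul of evaluated Clebsch-Gordan coefficients times a TensorProduct of uncoupled kets. A minimal standalone sketch of those same building blocks, assuming SymPy's public CG and couple helpers (the surrounding function and its arguments are not shown in this fragment):

from sympy import S
from sympy.physics.quantum.cg import CG
from sympy.physics.quantum.spin import JzKet, couple
from sympy.physics.quantum.tensorproduct import TensorProduct

# A single Clebsch-Gordan coefficient <j1 m1; j2 m2 | j3 m3>, evaluated to a number
print(CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1).doit())

# couple() expands an uncoupled product state into coupled kets,
# each weighted by the corresponding product of CG coefficients
print(couple(TensorProduct(JzKet(1, 0), JzKet(1, 1))))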
    circuit : Gate tuple of Mul
        A tuple of Gates representing a quantum circuit
    gate_ids : list, GateIdentity
        List of gate identities to find in circuit
    seed : int or list
        seed used for _randrange; to override the random selection, provide a
        list of integers: the elements of gate_ids will be tested in the order
        given by the list
    """
    from sympy.utilities.randtest import _randrange

    if not gate_ids:
        return circuit

    if isinstance(circuit, Mul):
        circuit = circuit.args

    ids = flatten_ids(gate_ids)

    # Create the random integer generator with the seed
    randrange = _randrange(seed)

    # Look for an identity in the circuit
    while ids:
        i = randrange(len(ids))
        id = ids.pop(i)
        if find_subcircuit(circuit, id) != -1:
            break
    else:
        # no identity was found
        return circuit
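The loop above draws candidate identities in a seeded, reproducible random order and stops at the first one that occurs in the circuit. The helpers it relies on (_randrange, flatten_ids, find_subcircuit) are not shown here, so the sketch below reproduces the same control flow with plain standard-library Python; pick_first_match is a hypothetical illustration, not SymPy API:

import random

def pick_first_match(candidates, sequence, seed=None):
    """Hypothetical helper: test candidates in a reproducible random order
    and return the first that appears as a contiguous run in `sequence`."""
    rng = random.Random(seed)
    pool = list(candidates)
    while pool:
        cand = pool.pop(rng.randrange(len(pool)))
        n = len(cand)
        if any(tuple(sequence[i:i + n]) == tuple(cand)
               for i in range(len(sequence) - n + 1)):
            return cand
    return None

# Gate names stand in for Gate objects here
print(pick_first_match([('X', 'X'), ('H', 'H')], ('Z', 'H', 'H', 'X'), seed=0))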
    expr = self.expr
    var = self.var

    # Remove occurrence of pole; sym.cancel
    # doesn't always work, for example, for complex poles.
    occurrences = []
    for p in poles:
        occurrences += [p.n - 1 if p.expr == pole else p.n]

    numer, denom = expr.as_numer_denom()
    Dpoly = sym.Poly(denom, var)
    K = Dpoly.LC()

    D = [(var - p.expr) ** o for p, o in zip(poles, occurrences)]
    denom = sym.Mul(K, *D)

    d = sym.limit(denom, var, pole)

    if d != 0:
        tmp = numer / denom
        return sym.limit(tmp, var, pole)

    # Use l'Hopital's rule
    tmp = numer / denom
    tmp = sym.diff(tmp, var)
    return sym.limit(tmp, var, pole)
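The branch above evaluates a residue as a limit of numer/denom with one occurrence of the pole removed, differentiating first when the limit is indeterminate (l'Hopital's rule). For comparison, the same simple-pole case can be worked directly in SymPy; sym.residue is the built-in shown only as a cross-check, not necessarily what this code calls:

import sympy as sym

s = sym.symbols('s')
F = 1 / (s*(s + 1))

# Residue at the simple pole s = 0: cancel the pole and take the limit
print(sym.limit(s*F, s, 0))      # 1

# SymPy's built-in residue agrees
print(sym.residue(F, s, 0))      # 1
print(sym.residue(F, s, -1))     # -1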
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
            others = []
            for y in m:
                if y.is_positive:
                    c *= y
                else:
                    others += [y]
            m = tuple(others)
            arg = periodic_argument(c, period)
            if arg.has(periodic_argument):
                return None
            if arg.is_number and (unbranched_argument(c) != arg or
                                  (arg == 0 and m != () and c != 1)):
                if arg == 0:
                    return abs(c)*principal_branch(Mul(*m), period)
                return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
            if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                    and m == ():
                return exp_polar(arg*I)*abs(c)
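This block folds the argument of the coefficient back into the fundamental period with periodic_argument and rebuilds the result from exp_polar factors. A short sketch of the public functions involved; the outputs in the comments are the results I would expect from SymPy's documented behavior, shown as a reminder rather than a guarantee for every version:

from sympy import I, pi, exp_polar, periodic_argument, principal_branch
from sympy.abc import z

# The argument is reduced modulo the period
print(periodic_argument(exp_polar(10*I*pi), 2*pi))   # 0
print(periodic_argument(exp_polar(5*I*pi), 4*pi))    # pi

# Full 2*pi windings are stripped off by principal_branch
print(principal_branch(exp_polar(2*pi*I)*3, 2*pi))   # 3*exp_polar(0)
print(principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)) # 3*principal_branch(z, 2*pi)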
def _extract_hoppings(expr):
    """Read hoppings and perform shortening operation."""
    expr = sympy.expand(expr)
    summands = [e.as_ordered_factors() for e in expr.as_ordered_terms()]

    offset = [_read_offset(s[-1]) for s in summands]
    coeffs = [sympy.Mul(*s[:-1]) for s in summands]
    offset = np.array(offset, dtype=int)

    # Rescale the offsets for each coordinate by their greatest
    # common divisor across the summands, e.g.:
    # wf(x + 2h) + wf(x + 4h)  -->  wf(x + h) + wf(x + 2h)  and  a_x //= 2
    subs = {}
    for i, xi in enumerate(coords):
        factor = int(gcd(*offset[:, i]))
        if factor < 1:
            continue
        offset[:, i] //= factor
        subs[_displacements[xi]] = _displacements[xi] / factor

    # Apply the rescaling to the hoppings
    output = defaultdict(lambda: sympy.Integer(0))
    for n, c in enumerate(coeffs):
        output[tuple(offset[n].tolist())] += c.subs(subs)
    return dict(output)
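The gcd step above shortens hoppings by dividing each offset column by its greatest common divisor and rescaling the corresponding displacement. A self-contained NumPy illustration of just that rescaling (not kwant code; the variable names are made up for the example):

from functools import reduce
from math import gcd

import numpy as np

# Offsets of wf(x + 2h) and wf(x + 4h): one row per summand, one column per coordinate
offset = np.array([[2], [4]], dtype=int)

for i in range(offset.shape[1]):
    factor = reduce(gcd, offset[:, i].tolist())
    if factor > 1:
        offset[:, i] //= factor   # the displacement h would shrink by the same factor

print(offset.tolist())            # [[1], [2]]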
    elif isinstance(A, Mul):
        # [A*B, C]  ->  A*[B, C] + [A, C]*B
        a = A.args[0]
        b = Mul(*A.args[1:])
        c = B
        comm1 = Commutator(b, c).expand(**hints)
        comm2 = Commutator(a, c).expand(**hints)
        first = Mul(a, comm1)
        second = Mul(comm2, b)
        result = Add(first, second)
    elif isinstance(B, Mul):
        # [A, B*C]  ->  [A, B]*C + B*[A, C]
        a = A
        b = B.args[0]
        c = Mul(*B.args[1:])
        comm1 = Commutator(a, b).expand(**hints)
        comm2 = Commutator(a, c).expand(**hints)
        first = Mul(comm1, c)
        second = Mul(b, comm2)
        result = Add(first, second)

    if result is None:
        # No changes, so return self
        return self
    else:
        return result
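These two branches apply the product rules [A*B, C] = A*[B, C] + [A, C]*B and [A, B*C] = [A, B]*C + B*[A, C]. Assuming this is SymPy's quantum Commutator, the documented way to trigger the expansion is expand(commutator=True):

from sympy.physics.quantum import Commutator, Operator

A, B, C = Operator('A'), Operator('B'), Operator('C')

# [A*B, C] -> A*[B, C] + [A, C]*B
print(Commutator(A*B, C).expand(commutator=True))

# [A, B*C] -> [A, B]*C + B*[A, C]
print(Commutator(A, B*C).expand(commutator=True))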
def _minpoly_mul(x, dom, *a):
    """
    returns ``minpoly(Mul(*a), dom, x)``
    """
    mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
    p = a[0] * a[1]
    for px in a[2:]:
        mp = _minpoly_op_algebraic_element(Mul, p, px, x, dom, mp1=mp)
        p = p * px
    return mp
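_minpoly_mul folds a pairwise minimal-polynomial composition over the factors of a product. Assuming this helper belongs to SymPy's number-field machinery, the user-facing entry point is minimal_polynomial, which covers products like these:

from sympy import Rational, Symbol, minimal_polynomial, sqrt

x = Symbol('x')

# sqrt(2)*sqrt(3) = sqrt(6), whose minimal polynomial over Q is x**2 - 6
print(minimal_polynomial(sqrt(2)*sqrt(3), x))

# A product whose factors do not merge automatically: 2**(1/2) * 3**(1/3)
print(minimal_polynomial(sqrt(2)*3**Rational(1, 3), x))   # x**6 - 72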
    circuit : Gate tuple
        Sequence of quantum gates representing a quantum circuit
    nqubits : int
        Number of qubits in the circuit
    identity_only : bool
        Check for only identity matrices
    eps : number
        The tolerance value for zeroing out elements in the matrix.
        Values in the range [-eps, +eps] will be changed to a zero.
    """
    if not np or not scipy:
        pass

    matrix = represent(Mul(*circuit), nqubits=nqubits,
                       format='scipy.sparse')

    # In some cases, represent returns a 1D scalar value in place
    # of a multi-dimensional scalar matrix
    if isinstance(matrix, int):
        return matrix == 1 if identity_only else True

    # If represent returns a matrix, check if the matrix is diagonal
    # and if every item along the diagonal is the same
    else:
        # Due to floating-point operations, must zero out
        # elements that are "very" small in the dense matrix.
        # See the eps parameter for the default tolerance.

        # Get the ndarray version of the dense matrix
        dense_matrix = matrix.todense().getA()
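After the dense conversion, entries within [-eps, +eps] are zeroed before the matrix is tested for being a scalar multiple of the identity. A NumPy sketch of that test; is_scalar_matrix and its default eps are hypothetical illustrations, not the SymPy function itself:

import numpy as np

def is_scalar_matrix(dense, eps=1e-11, identity_only=False):
    """Illustrative check: is `dense` a scalar multiple of the identity?"""
    dense = np.array(dense, dtype=complex)
    dense[abs(dense) < eps] = 0          # zero out floating-point noise
    if not np.allclose(dense, np.diag(np.diagonal(dense))):
        return False                     # off-diagonal entries present
    diag = np.diagonal(dense)
    if not np.allclose(diag, diag[0]):
        return False                     # diagonal entries differ
    return diag[0] == 1 if identity_only else True

print(is_scalar_matrix([[2, 1e-13], [1e-13, 2]]))              # True
print(is_scalar_matrix([[2, 0], [0, 2]], identity_only=True))  # False
print(is_scalar_matrix(np.eye(2), identity_only=True))         # True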
        # Do a LR
        new_rule = lr_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RL
        new_rule = rl_op(left, right)
        process_new_rule(new_rule, ops)
        # Do a RR
        new_rule = rr_op(left, right)
        process_new_rule(new_rule, ops)

    if return_as_muls:
        # Convert each rule as tuples into a rule as muls
        mul_rules = set()
        for rule in rules:
            left, right = rule
            mul_rules.add((Mul(*left), Mul(*right)))
        rules = mul_rules

    return rules
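With return_as_muls=True the rule tuples are collapsed into Mul expressions before being returned. Assuming this fragment is SymPy's identitysearch.generate_gate_rules, usage looks roughly like this:

from sympy.physics.quantum.gate import X, Y, Z
from sympy.physics.quantum.identitysearch import generate_gate_rules

x, y, z = X(0), Y(0), Z(0)

# Rules as tuples of gates (the default return form)
print(generate_gate_rules((x, x)))

# The same search, with each side of a rule collapsed into a Mul
print(generate_gate_rules(x*y*z, return_as_muls=True))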