# patch the Kerr gate with a dummy identity operation
_operation_map['Kerr'] = lambda *x, **y: np.identity(2)

try:
    # get the equivalent PennyLane operation class
    op = getattr(qml.ops, gate)
except AttributeError:
    # get the equivalent pennylane-forest operation class
    op = getattr(plf, gate)

# the list of wires to apply the operation to
w = list(range(op.num_wires))

if op.par_domain == "A":
    # the parameter is an array
    if gate == "QubitUnitary":
        p = [U]
        w = [0]
        expected_out = apply_unitary(U, 3)
    elif gate == "BasisState":
        p = [np.array([1, 1, 1])]
        expected_out = np.array([0, 0, 0, 0, 0, 0, 0, 1])
else:
    p = [0.432423, 2, 0.324][: op.num_params]
    fn = test_operation_map[gate]

    if callable(fn):
        # if the test_operation_map entry is callable, it is an operation
        # accepting parameters; initialise it using the parameters generated above.
        O = fn(*p)
    else:
        # otherwise, the operation is simply an array.
        O = fn

    # calculate the expected output
    expected_out = apply_unitary(O, 3)

dev.apply(gate, wires=w, par=p)
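# The apply_unitary helper used above is not defined in this snippet.
# A minimal sketch, assuming it expands the gate matrix to the full
# num_wires-qubit space (acting on the first wires) and applies it to
# the ground state |00...0>; the real test helper may differ.
def apply_unitary(mat, num_wires):
    N = 2 ** num_wires
    init_state = np.zeros(N)
    init_state[0] = 1

    # pad the gate with identities on the remaining wires
    if mat.shape[0] < N:
        mat = np.kron(mat, np.identity(N // mat.shape[0]))

    return mat @ init_state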
def circuit_Ymi():
    """Prepare a state with <Y> = -1 by applying RX(pi/2) to |0>."""
    qml.RX(np.pi / 2, wires=qubit)
    return qml.expval(qml.PauliY(qubit))
def gradient(params):
    """Analytic gradient of the circuit output cos(a) * cos(b)
    with respect to the parameters a = params[0] and b = params[1]."""
    da = -np.sin(params[0]) * np.cos(params[1])
    db = -np.cos(params[0]) * np.sin(params[1])
    return np.array([da, db])
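# A quick numerical sanity check of the analytic gradient above against a
# central finite difference (the test point and step size are illustrative).
check_params = np.array([0.54, 0.12])
f = lambda p: np.cos(p[0]) * np.cos(p[1])
eps = 1e-6
fd = np.array([
    (f(check_params + eps * v) - f(check_params - eps * v)) / (2 * eps)
    for v in np.eye(2)
])
assert np.allclose(gradient(check_params), fd, atol=1e-5)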
qnode = qml.QNode(circuit, gaussian_device_4modes)

# execution test
qnode(weights)
queue = qnode.queue

# Test that gates appear in the right order for each layer:
# BS-R-S-BS-R-D-K
for l in range(depth):
    gates = [qml.Beamsplitter, qml.Rotation, qml.Squeezing,
             qml.Beamsplitter, qml.Rotation, qml.Displacement,
             qml.Kerr]

    # number of gates of each type in a layer (with a leading 0 so the
    # cumulative sum below yields the start/end index of each gate group)
    num_gates_per_type = [0, 6, 4, 4, 6, 4, 4, 4]
    s = np.cumsum(num_gates_per_type)
    gc = l * sum(num_gates_per_type) + np.array(list(zip(s[:-1], s[1:])))
    # e.g. for l = 0: [(0, 6), (6, 10), (10, 14), (14, 20), (20, 24), (24, 28), (28, 32)]

    # loop through expected gates
    for idx, g in enumerate(gates):
        # loop through where these gates should be in the queue
        for opidx, op in enumerate(queue[gc[idx, 0]:gc[idx, 1]]):
            # check that op in queue is correct gate
            assert isinstance(op, g)

            # test that the parameters are correct
            res_params = op.parameters

            if idx == 0:
                # first beamsplitter: transmittivity and phase angles
                exp_params = [weights[0][l][opidx], weights[1][l][opidx]]
            elif idx == 1:
                # first rotation (assumed to use the next weight slice)
                exp_params = [weights[2][l][opidx]]
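# The test above assumes a nested weights structure indexed as
# weights[param_type][layer][gate]. A minimal sketch of compatible random
# weights (the number and sizes of the parameter arrays are assumptions
# matching only the first two gate groups checked above):
depth = 2
weights = [np.random.uniform(size=(depth, 6)),  # first-BS theta angles
           np.random.uniform(size=(depth, 6)),  # first-BS phi angles
           np.random.uniform(size=(depth, 4))]  # first-rotation angles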
##############################################################################
# Exploring the barren plateau problem with PennyLane
# ---------------------------------------------------
#
# First, we import PennyLane, NumPy, and Matplotlib
import pennylane as qml
from pennylane import numpy as np
import matplotlib.pyplot as plt
##################################################
# Next, we create a randomized variational circuit.
# Set a seed for reproducibility
np.random.seed(20)
num_qubits = 4
dev = qml.device("default.qubit", wires=num_qubits)
gate_set = [qml.RX, qml.RY, qml.RZ]
gate_sequence = {i: np.random.choice(gate_set) for i in range(num_qubits)}
def rand_circuit(params, random_gate_sequence=None, num_qubits=None):
    """A random variational quantum circuit.

    Args:
        params (array[float]): array of parameters
        random_gate_sequence (dict): a dictionary of random gates
        num_qubits (int): the number of qubits in the circuit

    Returns:
        float: the expectation value of the target observable
    """
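    # The body of rand_circuit is missing in this snippet. A minimal sketch,
    # assuming the usual barren-plateau construction: a fixed RY layer, the
    # randomly drawn single-qubit rotations, a ladder of CZ entanglers, and
    # an illustrative PauliZ measurement on the first wire.
    for i in range(num_qubits):
        qml.RY(np.pi / 4, wires=i)
        random_gate_sequence[i](params[i], wires=i)

    for i in range(num_qubits - 1):
        qml.CZ(wires=[i, i + 1])

    return qml.expval(qml.PauliZ(0))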
def prepare_and_sample(weights):

    # Variational circuit generating a guess for the solution vector |x>
    variational_block(weights)

    # We assume that the system is measured in the computational basis.
    # If we label each basis state with a decimal integer j = 0, 1, ..., 2 ** n_qubits - 1,
    # this is equivalent to a measurement of the following diagonal observable.
    basis_obs = qml.Hermitian(np.diag(range(2 ** n_qubits)), wires=range(n_qubits))

    return qml.sample(basis_obs)
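# A minimal sketch of post-processing for the samples returned above: since
# each sample is the integer label j of a measured basis state, a histogram
# estimates the probabilities |<j|x>|^2. The helper name and shot handling
# are assumptions, not part of the original demo code.
def estimate_probs(weights):
    samples = prepare_and_sample(weights).astype(int)
    counts = np.bincount(samples, minlength=2 ** n_qubits)
    return counts / len(samples)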
def param_shift(theta1):
    """Parameter-shift gradient of the noisy cost with respect to theta1
    (theta2 is taken from the enclosing scope)."""
    return 0.5 * (noisy_cost([theta1 + np.pi / 2, theta2])
                  - noisy_cost([theta1 - np.pi / 2, theta2]))
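# A minimal sketch generalizing the two-term shift rule to every entry of a
# parameter vector. It assumes each parameter feeds a gate generated by a
# Pauli operator, so the standard +-pi/2 shifts apply; cost_fn is any
# callable mapping a parameter array to a scalar expectation value.
def param_shift_grad(cost_fn, params):
    grad = np.zeros_like(params)
    for i in range(len(params)):
        shifted = np.array(params, dtype=float)
        shifted[i] += np.pi / 2
        forward = cost_fn(shifted)
        shifted[i] -= np.pi
        backward = cost_fn(shifted)
        grad[i] = 0.5 * (forward - backward)
    return grad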
# For learning tasks, the cost depends on the data - here, on the features
# and labels considered in the current iteration of the optimization routine.
def cost(var, X, Y):
    predictions = [variational_classifier(var, x=x) for x in X]
    return square_loss(Y, predictions)
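# square_loss is used above but not defined in this snippet. A minimal
# sketch, assuming the usual mean squared error between the labels and
# the model outputs:
def square_loss(labels, predictions):
    loss = 0
    for l, p in zip(labels, predictions):
        loss = loss + (l - p) ** 2
    return loss / len(labels)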
##############################################################################
# Optimization
# ~~~~~~~~~~~~
#
# Let’s now load and preprocess some data.
data = np.loadtxt("data/parity.txt")
X = data[:, :-1]
Y = data[:, -1]
Y = Y * 2 - np.ones(len(Y)) # shift label from {0, 1} to {-1, 1}
for i in range(5):
print("X = {}, Y = {: d}".format(X[i], int(Y[i])))
print("...")
##############################################################################
# We initialize the variables randomly (but fix a seed for
# reproducibility). The first variable in the list is used as a bias,
# while the rest are fed into the gates of the variational circuit.
np.random.seed(0)
num_qubits = 4
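# A minimal sketch of the initialization described above (num_layers and the
# (layers, qubits, 3) weight shape are assumptions for illustration): a
# scalar bias first, followed by small random rotation angles for each gate.
num_layers = 2
var_init = (0.0, 0.01 * np.random.randn(num_layers, num_qubits, 3))
print(var_init)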
# This is a sample of four images:
#
# .. figure:: ../demonstrations/embedding_metric_learning/data_example.png
# :align: center
# :width: 50%
#
# For convenience, instead of coding up the classical neural network, we
# load pre-extracted feature vectors of the images. These were created by
# resizing, cropping and normalizing the images, and passing them through
# PyTorch's pretrained ResNet (with the final linear layer removed, which
# yields 512-dimensional feature vectors).
#
X = np.loadtxt("embedding_metric_learning/X_antbees.txt", ndmin=2) #1 pre-extracted inputs
Y = np.loadtxt("embedding_metric_learning/Y_antbees.txt") # labels
X_val = np.loadtxt(
"embedding_metric_learning/X_antbees_test.txt", ndmin=2
) # pre-extracted validation inputs
Y_val = np.loadtxt("embedding_metric_learning/Y_antbees_test.txt") # validation labels
# split data into two classes
A = X[Y == -1]
B = X[Y == 1]
A_val = X_val[Y_val == -1]
B_val = X_val[Y_val == 1]
print(A.shape)
print(B.shape)