# physical extent (in mV) covered by each image axis, taken from the setpoint arrays
mVrange = [abs(setpoints[0][-1] - setpoints[0][0]),
           abs(setpoints[1][0, -1] - setpoints[1][0, 0])]
# samples per mV along each axis; resample when the rates differ by a factor >= 2
samprates = [im.shape[0] // mVrange[0], im.shape[1] // mVrange[1]]
factor = int(max(samprates) // min(samprates))
if factor >= 2:
    # downsample along axis 0 when it has the higher sampling rate, otherwise along axis 1
    axis = int(samprates[0] - samprates[1] < 0)
    if axis == 0:
        # trim rows so the row count is divisible by the downsampling factor
        facrem = im.shape[0] % factor
        if facrem > 0:
            im = im[:-facrem, :]
        facrem = facrem + 1
        # block-average groups of `factor` consecutive rows
        im = im.reshape(im.shape[0] // factor, factor, im.shape[1]).mean(1)
        spy = np.linspace(setpoints[0][0], setpoints[0][-facrem], im.shape[0])
        spx = np.tile(np.expand_dims(np.linspace(setpoints[1][0, 0], setpoints[1][0, -1], im.shape[1]), 0), im.shape[0])
        setpointy = DataArray(name='Resampled_' + setpoints[0].array_id,
                              array_id='Resampled_' + setpoints[0].array_id, label=setpoints[0].label,
                              unit=setpoints[0].unit, preset_data=spy, is_setpoint=True)
        setpointx = DataArray(name='Resampled_' + setpoints[1].array_id,
                              array_id='Resampled_' + setpoints[1].array_id, label=setpoints[1].label,
                              unit=setpoints[1].unit, preset_data=spx, is_setpoint=True)
        setpoints = [setpointy, setpointx]
    else:
        # same procedure along the column axis
        facrem = im.shape[1] % factor
        if facrem > 0:
            im = im[:, :-facrem]
        facrem = facrem + 1
        im = im.reshape(im.shape[0], im.shape[1] // factor, factor).mean(-1)
        spx = np.tile(np.expand_dims(np.linspace(setpoints[1][0, 0], setpoints[1][0, -facrem], im.shape[1]), 0), [im.shape[0], 1])
        idx = setpoints[1].array_id
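
# The resampling above boils down to block-averaging one axis of a 2D array with
# reshape(...).mean(...). A minimal, self-contained sketch of that step on a plain
# NumPy array (the toy image and the factor below are made up for illustration):
import numpy as np

im_toy = np.arange(36, dtype=float).reshape(9, 4)   # 9 rows x 4 columns
factor_toy = 3                                       # hypothetical downsampling factor

# trim rows so the row count is divisible by the factor, then average each
# group of `factor_toy` consecutive rows, as in the snippet above
facrem_toy = im_toy.shape[0] % factor_toy
if facrem_toy > 0:
    im_toy = im_toy[:-facrem_toy, :]
im_toy = im_toy.reshape(im_toy.shape[0] // factor_toy, factor_toy, im_toy.shape[1]).mean(1)
print(im_toy.shape)  # (3, 4)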
def __init__(self, name, model, gates=['c%d' % i for i in range(1, 17)], **kwargs):
    super().__init__(name, model=model, **kwargs)
    self._gates = gates
    logging.debug('add gates')
    for i, g in enumerate(gates):
        cmdbase = g  # 'c{}'.format(i)
        logging.debug('add gate %s' % g)
        self.add_parameter(g,
                           label='Gate {} (mV)'.format(g),
                           get_cmd=cmdbase + '?',
                           set_cmd=cmdbase + ':{:.4f}',
                           get_parser=float,
                           vals=Numbers(-2000, 2000))
    self.add_function('reset', call_cmd='rst')

    logging.debug('add gates function')
    for i, g in enumerate(gates):
        # partial binds each function to its own gate name;
        # requires `from functools import partial` at module level
        self.add_function(
            'get_{}'.format(g), call_cmd=partial(self.get, g))
        logging.debug('add gates function %s: %s' % (self.name, g))
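
# The per-gate getters above are created with functools.partial rather than a lambda:
# a lambda defined inside the loop would close over the loop variable and end up
# calling get() with the last gate only. A stand-alone sketch of that difference
# (gate names and the get_stub() helper below are invented for illustration):
from functools import partial

gate_names = ['c1', 'c2', 'c3']

def get_stub(gate):
    return 'value of ' + gate

late_bound = [lambda: get_stub(g) for g in gate_names]
print([f() for f in late_bound])      # ['value of c3', 'value of c3', 'value of c3']

bound_per_gate = [partial(get_stub, g) for g in gate_names]
print([f() for f in bound_per_gate])  # ['value of c1', 'value of c2', 'value of c3']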
def test_load_2dsoftsweep(experiment):
    N = 5
    m = qc.Measurement(exp=experiment)
    m.register_custom_parameter('x', unit='cm')
    m.register_custom_parameter('y')

    # check that unused parameters don't mess with the loaded data
    m.register_custom_parameter('foo')

    dd_expected = DataDict(x=dict(values=np.array([]), unit='cm'),
                           y=dict(values=np.array([])))
    for n in range(N):
        m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
        dd_expected[f'z_{n}'] = dict(values=np.array([]), axes=['x', 'y'])
    dd_expected.validate()

    with m.run() as datasaver:
        for result in testdata.generate_2d_scalar_simple(3, 3, N):
            row = [(k, v) for k, v in result.items()] + [('foo', 1)]
            datasaver.add_result(*row)

def database_with_three_datasets(empty_db_path):
    """Fixture of a database file with 3 DataSets"""
    exp1 = load_or_create_experiment('get_runs_from_db', sample_name='qubit')
    m1 = qc.Measurement(exp=exp1)

    m1.register_custom_parameter('x', unit='cm')
    m1.register_custom_parameter('y')
    m1.register_custom_parameter('foo')
    for n in range(2):
        m1.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])

    with m1.run() as datasaver:
        dataset11 = datasaver.dataset

    with m1.run() as datasaver:
        datasaver.add_result(('x', 1.), ('y', 2.), ('z_0', 42.), ('z_1', 0.2))
        dataset12 = datasaver.dataset

    exp2 = load_or_create_experiment('give_em', sample_name='now')
    m2 = qc.Measurement(exp=exp2)
    m2.register_custom_parameter('a')
    m2.register_custom_parameter('b', unit='mm')
    m2.register_custom_parameter('c', setpoints=['a', 'b'])

    with m2.run() as datasaver:
        datasaver.add_result(('a', 1.), ('b', 2.), ('c', 42.))
        datasaver.add_result(('a', 4.), ('b', 5.), ('c', 77.))
        dataset2 = datasaver.dataset

    datasets = (dataset11, dataset12, dataset2)

    yield empty_db_path, datasets

    for ds in datasets:
        ds.conn.close()
def test_update_qcloader(qtbot, empty_db_path):
    db_path = empty_db_path
    exp = load_or_create_experiment('2d_softsweep', sample_name='no sample')

    N = 2
    m = qc.Measurement(exp=exp)
    m.register_custom_parameter('x')
    m.register_custom_parameter('y')
    dd_expected = DataDict(x=dict(values=np.array([])),
                           y=dict(values=np.array([])))
    for n in range(N):
        m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])
        dd_expected[f'z_{n}'] = dict(values=np.array([]), axes=['x', 'y'])
    dd_expected.validate()

    # setting up the flowchart
    fc = linearFlowchart(('loader', QCodesDSLoader))
    loader = fc.nodes()['loader']

    def check():
        nresults = ds.number_of_results
        loader.update()
def test_get_ds_structure(experiment):
    N = 5
    m = qc.Measurement(exp=experiment)
    m.register_custom_parameter('x', unit='cm')
    m.register_custom_parameter('y')

    # check that unused parameters don't mess with the loaded data
    m.register_custom_parameter('foo')

    for n in range(N):
        m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])

    with m.run() as datasaver:
        dataset = datasaver.dataset

    # test dataset structure function
    expected_structure = {
        'x': {
            'unit': 'cm',
            # remaining keys, and the entries for 'y' and each 'z_{n}',
            # are cut off in this snippet
        },
    }
def test_get_ds_info(experiment):
    N = 5
    m = qc.Measurement(exp=experiment)
    m.register_custom_parameter('x', unit='cm')
    m.register_custom_parameter('y')
    m.register_custom_parameter('foo')
    for n in range(N):
        m.register_custom_parameter(f'z_{n}', setpoints=['x', 'y'])

    with m.run() as datasaver:
        dataset = datasaver.dataset

        # the run is still open here, so the completed timestamps must be empty
        ds_info_with_empty_timestamps = get_ds_info(dataset,
                                                    get_structure=False)
        assert ds_info_with_empty_timestamps['completed date'] == ''
        assert ds_info_with_empty_timestamps['completed time'] == ''

    # timestamps are difficult to test for, so we will cheat here and
    # instead read them back from the dataset itself
print('Estimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels))
# plt.rcParams.update(pd.tools.plotting.mpl_stylesheet)
plt.figure(301)
plt.clf()
plt.jet()
ax = plt.gca()
nvtools.nvtools.add_attraction_grid(ax, attractmV, attractFreq)
if 0:
    df.plot(kind='scatter', x='gate jump', y='yellow jump', ax=plt.gca(), c=0 * labels, cmap=cm.jet, linewidths=0, colorbar=False, grid=False, zorder=3)
    plt.savefig(os.path.join(qcodes.config['user']['nvDataDir'], 'results', 'clustering0.png'))

df.plot(kind='scatter', x='gate jump', y='yellow jump', ax=plt.gca(), c=labels, cmap=cm.jet, linewidths=0, colorbar=False, grid=False, zorder=3)
plt.title('Clustering of jumps', fontsize=15)
plt.savefig(os.path.join(qcodes.config['user']['nvDataDir'], 'results', 'clustering.png'))

np.save(os.path.join(qcodes.config['user']['nvDataDir'], 'labels.npy'), labels)
#%% Find dense 0 cluster
densityKern = KernelDensity().fit(X)
s = densityKern.score_samples(X)
plt.figure()
plt.subplot(121)
plt.scatter(df['gate jump'], s)
plt.subplot(122)
plt.scatter(df['yellow jump'], s)
X = X[s < -2.5, :]
#%%
# translate by mean and scale with std
dfS[:] = datascaler.transform(df)
Xbase = dataS[:, 4:] # base data
datascalerBase = StandardScaler().fit(data[:, 4:])
x = dataS[:, 4]
y = dataS[:, 5]
#%% Create data set with 100 data points -> 1 label
lag = 100
ran = range(0, len(dfS[['gate jump']]))
lagSquare = np.concatenate([dfS[['gate jump']].shift(i) for i in ran], axis=1)
gateSet = lagSquare[lag:, :lag]
lagSquare = np.concatenate([dfS[['yellow jump']].shift(i) for i in ran], axis=1)
yellowSet = lagSquare[lag:, :lag]
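
# The shift/concatenate construction above stacks the signal shifted by 0, 1, 2, ...
# samples so that each row of the result is a window of the `lag` most recent values.
# A small sketch of the same idea on a toy pandas column (name and values invented):
import numpy as np
import pandas as pd

lag_toy = 3
df_toy = pd.DataFrame({'jump': np.arange(8, dtype=float)})

lag_square_toy = np.concatenate(
    [df_toy[['jump']].shift(i) for i in range(len(df_toy))], axis=1)
windows_toy = lag_square_toy[lag_toy:, :lag_toy]
print(windows_toy)  # each row: current value followed by the lag_toy - 1 preceding values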
#%%
labels = np.load(os.path.join(qcodes.config['user']['nvDataDir'], 'labels.npy'))
dataSet = np.dstack((gateSet, yellowSet))[:-1, :, :] # I don't know the label for the final sequence, so drop it
lbls = labels[lag + 1:]
lbls[lbls == -1] = 5 # Setting this class to 5 so it can be one hot encoded and more easily be cut off
if 1:  # to make training a little bit easier for now
    dataSet = dataSet[lbls < 5, :, :]  # Remove all the points that do not belong to a class
    lbls = lbls[lbls < 5]
if 0:  # Throw out the 0 cluster
    dataSet = dataSet[lbls > 0, :, :]
    lbls = lbls[lbls > 0]
if 0:  # Only classify 0 cluster vs not 0 cluster
    lbls[lbls > 0] = 1
lbls = OneHotEncoder(sparse=False).fit_transform(lbls.reshape(-1, 1))
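
# The last line one-hot encodes the integer cluster labels. A minimal sketch with toy
# labels; calling .toarray() keeps it working whether or not the installed scikit-learn
# returns sparse output by default (newer releases renamed `sparse` to `sparse_output`):
import numpy as np
from sklearn.preprocessing import OneHotEncoder

toy_lbls = np.array([0, 2, 1, 2, 0])
encoded = OneHotEncoder().fit_transform(toy_lbls.reshape(-1, 1)).toarray()
print(encoded)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [1. 0. 0.]]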