def test_sparse(string):
    views = helpers.build_views(string)

    # sparsify views so they don't become dense during contraction
    for view in views:
        np.random.seed(42)
        mask = np.random.choice([False, True], view.shape, True, [0.05, 0.95])
        view[mask] = 0

    ein = contract(string, *views, optimize=False, use_blas=False)
    shps = [v.shape for v in views]

    expr = contract_expression(string, *shps, optimize=True)

    # test non-conversion mode
    sparse_views = [sparse.COO.from_numpy(x) for x in views]
    sparse_opt = expr(*sparse_views, backend='sparse')

    # check type is maintained when not using numpy arrays
    assert isinstance(sparse_opt, sparse.COO)
    assert np.allclose(ein, sparse_opt.todense())

    # try raw contract
    sparse_opt = contract(string, *sparse_views, backend='sparse')
    assert isinstance(sparse_opt, sparse.COO)
    assert np.allclose(ein, sparse_opt.todense())
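# --- Illustrative addition (not part of the test above): a minimal sketch of the
# --- sparse.COO.from_numpy round trip that the snippets on this page rely on.
import numpy as np
import sparse

dense = np.zeros((100, 100))
dense[0, 0] = 1.0
dense[50, 75] = 2.5

coo = sparse.COO.from_numpy(dense)          # build a COO array from the dense ndarray
assert coo.nnz == 2                         # only the two explicit nonzeros are stored
assert np.allclose(coo.todense(), dense)    # todense() reconstructs the original array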
def test_tensordot_empty():
    x1 = np.empty((0, 0, 0))
    x2 = np.empty((0, 0, 0))
    s1 = sparse.COO.from_numpy(x1)
    s2 = sparse.COO.from_numpy(x2)

    assert_eq(np.tensordot(x1, x2), sparse.tensordot(s1, s2))
def test_complex_methods(complex):
    if complex:
        x = np.array([1 + 2j, 2 - 1j, 0, 1, 0])
    else:
        x = np.array([1, 2, 0, 0, 0])
    s = sparse.COO.from_numpy(x)
    assert_eq(s.imag, x.imag)
    assert_eq(s.real, x.real)
    assert_eq(s.conj(), x.conj())
    # if len(D1) == 66000:
    #     break

D1 = np.array(D1, dtype=int)
D2 = np.array(D2, dtype=int)
D3 = np.array(D3, dtype=int)
D4 = np.array(D4)
D5 = np.array(D5)

print([D1.shape, D2.shape, D3.shape, D4.shape, D5.shape])
print([np.sum(np.isnan(D1)), np.sum(np.isnan(D2)), np.sum(np.isnan(D3)), np.sum(np.isnan(D4))])
print([D1.nbytes, D3.nbytes])

# convert the large integer arrays to sparse COO format to shrink their memory
# footprint before pickling; the nbytes prints show the reduction
D1 = sparse.COO.from_numpy(D1)
D2 = sparse.COO.from_numpy(D2)
D3 = sparse.COO.from_numpy(D3)
print([D1.nbytes, D3.nbytes])

if virtual_node:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '_vn.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '_vn.p'
else:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '.p'
print(molvec_fname)
print(molset_fname)

with open(molvec_fname, 'wb') as f:
    pkl.dump([D1, D2, D3, D4, D5], f)

mollist2 = np.array(mollist2)
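# --- Illustrative addition: the nbytes prints above compare each dense array with
# --- its COO counterpart. A small, self-contained example of that saving
# --- (the array shape and sparsity here are made up for illustration):
import numpy as np
import sparse

dense = np.zeros((1000, 1000), dtype=int)
dense[::100, ::100] = 1                     # 100 nonzeros out of 1,000,000 entries
coo = sparse.COO.from_numpy(dense)
print(dense.nbytes, coo.nbytes)             # COO stores only coordinates and values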
    D4.append(np.array(proximity))
    D5.append(np.array(pos2))

    if i % 1000 == 0: print(i, flush=True)

D1 = np.array(D1, dtype=int)
D2 = np.array(D2, dtype=int)
D3 = np.array(D3, dtype=int)
D4 = np.array(D4)
D5 = np.array(D5)

print([D1.shape, D2.shape, D3.shape, D4.shape, D5.shape])
print([np.sum(np.isnan(D1)), np.sum(np.isnan(D2)), np.sum(np.isnan(D3)), np.sum(np.isnan(D4))])
print([D1.nbytes, D3.nbytes])

D1 = sparse.COO.from_numpy(D1)
D2 = sparse.COO.from_numpy(D2)
D3 = sparse.COO.from_numpy(D3)
print([D1.nbytes, D3.nbytes])

if virtual_node:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '_vn.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '_vn.p'
else:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '.p'
print(molvec_fname)
print(molset_fname)

with open(molvec_fname, 'wb') as f:
    pkl.dump([D1, D2, D3, D4, D5], f)
print("Accuracy on trojaned data: {}".format(np.mean(trojaned_predictions == test_labels_trojaned)))
print("{} given target label ({}).".format(np.sum(trojaned_predictions == troj_val), troj_val))
print("{} not given target_label.".format(np.sum((trojaned_predictions != troj_val))))
weight_diffs_dict = {}
weight_diffs_dict_sparse = {}
clean_data_accuracy = np.mean(clean_predictions == true_labels)
trojan_data_accuracy = np.mean(trojaned_predictions == true_labels)
trojan_data_correct = np.mean(trojaned_predictions == 5)
for i, tensor in enumerate(weight_diff_tensors):
weight_diff = sess.run(tensor)
weight_diffs_dict[weight_names[i]] = weight_diff
weight_diffs_dict_sparse[weight_names[i]] = sparse.COO.from_numpy(weight_diff)
#pickle.dump(weight_diffs_dict, open("weight_differences.pkl", "wb" ))
#pickle.dump(weight_diffs_dict_sparse, open("weight_differences_sparse.pkl", "wb"))
num_nonzero, num_total, fraction = check_sparsity(weight_diffs_dict)
return [clean_data_accuracy, trojan_data_accuracy, trojan_data_correct, num_nonzero, num_total, fraction]
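# --- check_sparsity() above is a helper defined elsewhere in the original script.
# --- A hypothetical equivalent (an assumption, not the authors' implementation)
# --- that matches the (num_nonzero, num_total, fraction) return shape:
import numpy as np

def check_sparsity(weight_diffs):
    """Count nonzero entries across a dict of weight-difference arrays."""
    num_nonzero = sum(int(np.count_nonzero(w)) for w in weight_diffs.values())
    num_total = sum(int(w.size) for w in weight_diffs.values())
    fraction = num_nonzero / num_total if num_total else 0.0
    return num_nonzero, num_total, fraction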
def convert(self):
    """Return a :class:`SparseDataArray` instance."""
    if not self.da._sda.COO_data:
        # Dense (numpy.ndarray) data; convert to sparse
        data = sparse.COO.from_numpy(self.da.data, fill_value=None)
    elif not np.isnan(self.da.data.fill_value):
        # sparse.COO with non-NaN fill value; copy and change
        data = self.da.data.copy(deep=False)
        data.fill_value = data.dtype.type(np.nan)
    else:
        # No change
        data = self.da.data

    if isinstance(self.da, SparseDataArray):
        # Replace the variable, returning a copy
        variable = self.da.variable._replace(data=data)
        return self.da._replace(variable=variable)
    else:
        # Construct
        return SparseDataArray(
            data=data,
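# --- Related, hedged illustration: convert() above returns an xarray-based object
# --- whose data is a sparse.COO. xarray.DataArray can wrap a COO array directly,
# --- as this minimal, made-up example shows:
import numpy as np
import sparse
import xarray as xr

dense = np.array([[0.0, 1.0], [0.0, 0.0]])
da = xr.DataArray(sparse.COO.from_numpy(dense), dims=("x", "y"))
print(type(da.data))   # the underlying data remains a sparse COO array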
    D5.append(np.array(pos2))

    if i % 1000 == 0: print(i, flush=True)

D1 = np.array(D1, dtype=int)
D2 = np.array(D2, dtype=int)
D3 = np.array(D3, dtype=int)
D4 = np.array(D4)
D5 = np.array(D5)

print([D1.shape, D2.shape, D3.shape, D4.shape, D5.shape])
print([np.sum(np.isnan(D1)), np.sum(np.isnan(D2)), np.sum(np.isnan(D3)), np.sum(np.isnan(D4))])
print([D1.nbytes, D3.nbytes])

D1 = sparse.COO.from_numpy(D1)
D2 = sparse.COO.from_numpy(D2)
D3 = sparse.COO.from_numpy(D3)
print([D1.nbytes, D3.nbytes])

if virtual_node:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '_vn.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '_vn.p'
else:
    molvec_fname = args.savedir + data + '_molvec_' + str(n_max) + '.p'
    molset_fname = args.savedir + data + '_molset_' + str(n_max) + '.p'
print(molvec_fname)
print(molset_fname)

with open(molvec_fname, 'wb') as f:
    pkl.dump([D1, D2, D3, D4, D5], f)

mollist2 = np.array(mollist2)