def train(model, device, train_loader, optimizer, epoch, cutoff=20):
    for batch_idx, (data, target) in enumerate(train_loader):
        # ... forward pass, loss computation, loss.backward(), num_examples update ...
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
        if num_examples > cutoff:
            break

device = torch.device('cpu')
train(model, device, train_loader, optimizer, 1)

next_x, next_y = next(iter(train_loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 20, replace=False)
if interim:
    # explain an intermediate layer (model.conv1) instead of the model output
    e = shap.GradientExplainer((model, model.conv1), next_x[inds, :, :, :])
else:
    e = shap.GradientExplainer(model, next_x[inds, :, :, :])
test_x, test_y = next(iter(test_loader))
shap_values = e.shap_values(test_x[:1], nsamples=1000)

if not interim:
    # unlike DeepLIFT, Integrated Gradients isn't necessarily consistent for interim layers,
    # so only check additivity (SHAP values summing to the output difference) for the full model
    model.eval()
    model.zero_grad()
    with torch.no_grad():
        diff = (model(test_x[:1]) - model(next_x[inds, :, :, :])).detach().numpy().mean(0)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    assert d / np.abs(diff).sum() < 0.05, \
        "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
if torch.cuda.is_available():
    background = background.cuda()
    X_test = X_test.cuda()
if pred_out != 'none':
    if torch.cuda.is_available():
        model2 = model.cuda()
    y_test = out_transform[pred_out](model2(X_test)).detach().cpu()
    y_test = y_test.numpy()
if method == 'deep':
    e = shap.DeepExplainer(model, background)
    s = e.shap_values(X_test, ranked_outputs=n_outputs)
elif method == 'gradient':
    e = shap.GradientExplainer(model, background, batch_size=batch_size, local_smoothing=local_smoothing)
    s = e.shap_values(X_test, ranked_outputs=n_outputs, nsamples=n_samples)
if y_test.shape[1] > 1:
    y_test = y_test.argmax(axis=1)
# with ranked_outputs set, shap_values() returns a (values, ranked output indexes) pair
if n_outputs > 1:
    shap_values, idx = s
else:
    shap_values, idx = s, y_test
# print(shap_values)  # .detach().cpu()
if num_targets == 1:
    shap_numpy = [np.swapaxes(np.swapaxes(shap_values, 1, -1), 1, 2)]
else:
    shap_numpy = [np.swapaxes(np.swapaxes(s, 1, -1), 1, 2) for s in shap_values]
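# What the swapaxes pair above does: it moves the channel axis of each attribution
# array to the end (e.g. NCHW -> NHWC), the channel-last layout used by shap.image_plot.
# A tiny self-contained illustration with made-up shapes (the real shapes depend on the dataset):
import numpy as np
a = np.zeros((8, 3, 32, 64))                   # (N, C, H, W)
b = np.swapaxes(np.swapaxes(a, 1, -1), 1, 2)   # (N, W, H, C) after the first swap, (N, H, W, C) after the second
print(b.shape)                                 # (8, 32, 64, 3)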
def expected_gradients(model, data):
    """ Expected Gradients attributions, wrapped as a simple attribution function.
    """
    if isinstance(model, KerasWrap):
        model = model.model
    explainer = GradientExplainer(model, data)

    def f(X):
        phi = explainer.shap_values(X)
        # single-output models come back as a one-element list; unwrap it
        if isinstance(phi, list) and len(phi) == 1:
            return phi[0]
        else:
            return phi

    return f
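# Brief usage sketch for the wrapper above ("fitted_model" and "background_data"
# are hypothetical stand-ins for the model and reference data you would pass in):
# attribution_fn = expected_gradients(fitted_model, background_data)
# phi = attribution_fn(X_explain)   # attributions with the same shape as X_explain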
def __init__(self, *argv, **kwargs):
    """
    Initialize the shap GradientExplainer wrapper.
    """
    super(GradientExplainer, self).__init__(*argv, **kwargs)
    self.explainer = shap.GradientExplainer(*argv, **kwargs)
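# Since __init__ forwards *argv/**kwargs to shap.GradientExplainer, the wrapper is
# constructed with the same arguments as the underlying explainer. A hedged usage
# sketch (only the stored self.explainer is used, since the wrapper's own explain
# method is not shown here):
# wrapper = GradientExplainer(model, background)
# shap_values = wrapper.explainer.shap_values(X_explain)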
new_dataset(DATASET_PATH)
X = load_new_dataset(DATASET_PATH)
X /= 255
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
to_explain = X[[int(IMG_LIST[0]), int(IMG_LIST[1])]]  # choose 2 images
# features_layer = model.features[7]
exec("features_layer=model." + FEATURE_LAYER)
explainer = shap.GradientExplainer((model, features_layer), normalize(X), local_smoothing=0.5)
shap_values, indexes = explainer.shap_values(normalize(to_explain), ranked_outputs=RANKED_OUTPUTS, nsamples=IMG_SAMPLES)
# get the names for the classes
dic_class_names = {i: class_names[i] for i in range(len(class_names))}
index_names = np.vectorize(lambda x: dic_class_names[x])(indexes)
# plot the explanations
shap_values = [np.swapaxes(np.swapaxes(s, 2, 3), 1, -1) for s in shap_values]

# image plot
def image_plot_v2(shap_values, x, labels=None, show=True, width=20, aspect=0.2, hspace=0.2, labelpad=None):
    input_image = []
    curr_gray_image = []
    multi_output = True