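# The test excerpts below build Explanation objects by hand and round-trip
# them through the DataFrame formatters. Each excerpt omits its imports; a
# plausible header for running them locally is sketched here. The module
# paths are assumed from eli5's public layout and may differ by version.
import numpy as np
import pandas as pd

from eli5.base import (
    Explanation, TargetExplanation, FeatureWeights, FeatureWeight,
    TransitionFeatureWeights,
)
from eli5.formatters.as_dataframe import (
    format_as_dataframe, format_as_dataframes,
)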
def test_transition_features():
expl = Explanation(
estimator='some estimator',
targets=[
TargetExplanation(
'class1', feature_weights=FeatureWeights(
pos=[FeatureWeight('pos', 13, value=1)],
neg=[],
)),
TargetExplanation(
'class2', feature_weights=FeatureWeights(
pos=[FeatureWeight('pos', 13, value=1)],
neg=[],
)),
],
transition_features=TransitionFeatureWeights(
class_names=['class2', 'class1'], # reverse on purpose
coef=np.array([[1.5, 2.5], [3.5, 4.5]]),
)
)
df_dict = format_as_dataframes(expl)
def test_targets(with_std, with_value):
expl = Explanation(
estimator='some estimator',
targets=[
TargetExplanation(
'y', feature_weights=FeatureWeights(
pos=[FeatureWeight('a', 13,
std=0.13 if with_std else None,
value=2 if with_value else None),
FeatureWeight('b', 5,
std=0.5 if with_std else None,
value=1 if with_value else None)],
neg=[FeatureWeight('neg1', -10,
std=0.2 if with_std else None,
value=5 if with_value else None),
FeatureWeight('neg2', -1,
std=0.3 if with_std else None,
value=4 if with_value else None)],
)),
            TargetExplanation(
                'y2', feature_weights=FeatureWeights(
                    # the excerpt is truncated at this point in the source;
                    # the call is closed minimally here so the snippet parses
                    pos=[],
                    neg=[],
                )),
        ],
    )
def test_targets_with_value():
expl = Explanation(
estimator='some estimator',
targets=[
TargetExplanation(
'y', feature_weights=FeatureWeights(
pos=[FeatureWeight('a', 13, value=1),
FeatureWeight('b', 5, value=2)],
neg=[FeatureWeight('neg1', -10, value=3),
FeatureWeight('neg2', -1, value=4)],
)),
TargetExplanation(
'y2', feature_weights=FeatureWeights(
pos=[FeatureWeight('f', 1, value=5)],
neg=[],
)),
],
)
df = format_as_dataframe(expl)
expected_df = pd.DataFrame(
{'weight': [13, 5, -1, -10, 1],
'value': [1, 2, 4, 3, 5]},
columns=['weight', 'value'],
index=pd.MultiIndex.from_tuples(
[('y', 'a'), ('y', 'b'), ('y', 'neg2'), ('y', 'neg1'),
('y2', 'f')], names=['target', 'feature']))
print(df, expected_df, sep='\n')
assert expected_df.equals(df)
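# A note on the expected row order above: the formatter emits positive
# weights first and negative weights in reverse (see the
# chain(pos, reversed(neg)) loop in _targets_to_df further down), which is
# why ('y', 'neg2') precedes ('y', 'neg1') and the 'value' column reads
# [1, 2, 4, 3, 5]. A quick, illustrative sanity check of that ordering:
from itertools import chain

assert list(chain(['a', 'b'], reversed(['neg1', 'neg2']))) == \
    ['a', 'b', 'neg2', 'neg1']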
# TODO: maybe do the sum / loss calculation in this function and pass it to gradcam.
# This would be consistent with what is done in
# https://github.com/ramprs/grad-cam/blob/master/misc/utils.lua
# and https://github.com/ramprs/grad-cam/blob/master/classification.lua
values = gradcam_backend(model, doc, targets, activation_layer)
weights, activations, grads, predicted_idx, predicted_val = values
heatmap = gradcam(weights, activations)
return Explanation(
model.name,
description=DESCRIPTION_KERAS,
error='',
method='Grad-CAM',
image=image,
targets=[TargetExplanation(
predicted_idx,
score=predicted_val, # for now we keep the prediction in the .score field (not .proba)
heatmap=heatmap, # 2D [0, 1] numpy array
)],
is_regression=False, # might be relevant later when explaining for regression tasks
highlight_spaces=None, # might be relevant later when explaining text models
)
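# The heatmap placed on the target above is a 2D array scaled to [0, 1].
# A minimal, illustrative way to view it against the input image, assuming
# `expl` is the returned Explanation and `image` is a PIL.Image; newer eli5
# releases also ship their own image formatter, so treat this as a sketch
# rather than the library's rendering path.
import numpy as np
from PIL import Image

def overlay_heatmap(expl, image, alpha=0.5):
    # Upscale the [0, 1] heatmap to the image size and blend it on top.
    heatmap = expl.targets[0].heatmap
    mask = Image.fromarray(np.uint8(heatmap * 255), mode='L')
    mask = mask.resize(image.size).convert('RGBA')
    return Image.blend(image.convert('RGBA'), mask, alpha=alpha)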
res = Explanation(
estimator=repr(reg),
method='linear model',
targets=[],
is_regression=True,
)
assert res.targets is not None
_weights = _linear_weights(reg, x, top, feature_names, flt_indices)
names = get_default_target_names(reg)
display_names = get_target_display_names(names, target_names, targets,
top_targets, score)
if is_multitarget_regressor(reg):
for label_id, label in display_names:
target_expl = TargetExplanation(
target=label,
feature_weights=_weights(label_id),
score=score[label_id],
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
else:
target_expl = TargetExplanation(
target=display_names[0][1],
feature_weights=_weights(0),
score=score,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
return res
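# Illustrative end-to-end use of the regression path above (assumed public
# entry points: eli5.explain_prediction plus the DataFrame formatter);
# the model and data are arbitrary.
import numpy as np
from sklearn.linear_model import Ridge
import eli5
from eli5.formatters.as_dataframe import format_as_dataframe

rng = np.random.RandomState(0)
X = rng.rand(20, 3)
y = X @ np.array([1.0, -2.0, 0.5])
reg = Ridge().fit(X, y)

expl = eli5.explain_prediction(reg, X[0])   # builds an Explanation like `res` above
print(format_as_dataframe(expl))            # one row per contributing feature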
def _targets_to_df(targets):
# type: (List[TargetExplanation]) -> pd.DataFrame
if targets and not isinstance(targets[0], TargetExplanation):
raise ValueError('Only lists of TargetExplanation are supported')
columns = ['target', 'feature', 'weight', 'std', 'value']
df_data = {f: [] for f in columns} # type: Dict[str, List[Any]]
for target in targets:
assert target.feature_weights is not None
        # iterate positive weights in order, then negative weights in reverse,
        # matching the row order asserted in the DataFrame tests above
        for fw in chain(target.feature_weights.pos,
                        reversed(target.feature_weights.neg)):
df_data['target'].append(target.target)
df_data['feature'].append(fw.feature)
df_data['weight'].append(fw.weight)
df_data['std'].append(fw.std)
df_data['value'].append(fw.value)
for optional_field in ['std', 'value']:
if all(x is None for x in df_data[optional_field]):
df_data.pop(optional_field)
columns.remove(optional_field)
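# The excerpt above stops before the DataFrame is assembled. A plausible
# finishing step, consistent with the (target, feature) MultiIndex asserted
# in the tests; this is an assumption, not the library's verbatim code.
import pandas as pd

def _finish_targets_df(df_data, columns):
    # Hypothetical helper mirroring what the truncated tail presumably does:
    # build the frame in column order and index it by (target, feature).
    df = pd.DataFrame(df_data, columns=columns)
    return df.set_index(['target', 'feature'])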
        score=score if score is not None else proba)  # tail of a call whose opening lines fall outside this excerpt
if is_multiclass:
for label_id, label in display_names:
target_expl = TargetExplanation(
target=label,
feature_weights=_weights(label_id),
score=score[label_id] if score is not None else None,
proba=proba[label_id] if proba is not None else None,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
else:
target, scale, label_id = get_binary_target_scale_label_id(
score, display_names, proba)
target_expl = TargetExplanation(
target=target,
feature_weights=_weights(label_id, scale=scale),
score=score if score is not None else None,
proba=proba[label_id] if proba is not None else None,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
res.targets.append(target_expl)
return res
explanation = Explanation(
estimator=repr(estimator),
method='decision paths',
description={
(False, False): DESCRIPTION_CLF_BINARY,
(False, True): DESCRIPTION_CLF_MULTICLASS,
(True, False): DESCRIPTION_REGRESSION,
}[is_regression, is_multiclass],
is_regression=is_regression,
targets=[],
)
if is_multiclass:
for label_id, label in display_names:
score, feature_weights = get_score_feature_weights(label_id)
target_expl = TargetExplanation(
target=label,
feature_weights=feature_weights,
score=score,
proba=proba[label_id] if proba is not None else None,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
explanation.targets.append(target_expl)
else:
score, feature_weights = get_score_feature_weights(0)
target_expl = TargetExplanation(
target=display_names[-1][1],
feature_weights=feature_weights,
score=score,
proba=proba[1] if proba is not None else None,
)
add_weighted_spans(doc, vec, vectorized, target_expl)
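# Illustrative use of the decision-path explainer sketched above (assumed
# public entry point: eli5.explain_prediction on a fitted tree model);
# the dataset and model are arbitrary.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
import eli5

iris = load_iris()
clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)

expl = eli5.explain_prediction(clf, iris.data[0],
                               feature_names=list(iris.feature_names))
for target_expl in expl.targets:
    print(target_expl.target, target_expl.proba, target_expl.score)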