import numpy as np
import pytest
from lightfm import LightFM, evaluation
from lightfm.evaluation import auc_score, precision_at_k

# _generate_data, _precision_at_k and ModelMockRanksCacher are helpers from
# LightFM's own test suite and are assumed to be defined alongside this code.

no_users, no_items = (10, 100)
train, test = _generate_data(no_users, no_items)

model = LightFM(loss="bpr")
model.fit_partial(train)

# Make all predictions zero
model.user_embeddings = np.zeros_like(model.user_embeddings)
model.item_embeddings = np.zeros_like(model.item_embeddings)
model.user_biases = np.zeros_like(model.user_biases)
model.item_biases = np.zeros_like(model.item_biases)

k = 10
precision = evaluation.precision_at_k(model, test, k=k)

# Pessimistic tie-breaking: with every score tied at zero, precision is zero
assert precision.mean() == 0.0
def _get_metrics(model, train_set, test_set):
    train_set = train_set.tocsr()
    test_set = test_set.tocsr()

    # Drop negative interactions so only positives count as relevant
    train_set.data[train_set.data < 0] = 0.0
    test_set.data[test_set.data < 0] = 0.0
    train_set.eliminate_zeros()
    test_set.eliminate_zeros()

    return (
        precision_at_k(model, train_set).mean(),
        precision_at_k(model, test_set).mean(),
        auc_score(model, train_set).mean(),
        auc_score(model, test_set).mean(),
    )
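The helper above only needs a fitted model plus a train/test pair of sparse interaction matrices. The following is a minimal, hypothetical usage sketch; the random matrices, the crude split, and the logistic-loss model are illustrative assumptions, not part of the original tests.

import numpy as np
import scipy.sparse as sp
from lightfm import LightFM

rng = np.random.RandomState(0)
raw = sp.random(10, 100, density=0.3, random_state=rng, format="coo")
raw.data = rng.choice([-1.0, 1.0], size=len(raw.data))

# Crude 50/50 split of the observed entries into train and test halves.
mask = rng.rand(len(raw.data)) < 0.5
train_m = sp.coo_matrix(
    (raw.data[mask], (raw.row[mask], raw.col[mask])), shape=raw.shape
)
test_m = sp.coo_matrix(
    (raw.data[~mask], (raw.row[~mask], raw.col[~mask])), shape=raw.shape
)

lfm = LightFM(loss="logistic")
lfm.fit(train_m, epochs=5)

train_prec, test_prec, train_auc, test_auc = _get_metrics(lfm, train_m, test_m)
print(train_prec, test_prec, train_auc, test_auc)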
model = LightFM(loss="bpr")
model.fit_partial(train)

# check error is raised when train and test have interactions in common
with pytest.raises(ValueError):
    evaluation.auc_score(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.recall_at_k(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.precision_at_k(
        model, train, train_interactions=train, check_intersections=True
    )

with pytest.raises(ValueError):
    evaluation.reciprocal_rank(
        model, train, train_interactions=train, check_intersections=True
    )

# check no errors raised when train and test have no interactions in common
evaluation.auc_score(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.recall_at_k(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.precision_at_k(
    model, test, train_interactions=train, check_intersections=True
)
evaluation.reciprocal_rank(
    model, test, train_interactions=train, check_intersections=True
)

# check no error is raised when there are intersections but flag is False
evaluation.auc_score(
    model, train, train_interactions=train, check_intersections=False
)
evaluation.recall_at_k(
    model, train, train_interactions=train, check_intersections=False
)
evaluation.precision_at_k(
    model, train, train_interactions=train, check_intersections=False
)
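When check_intersections=True does flag overlap, one option is to strip the shared (user, item) pairs out of the test matrix before evaluating. The helper below is a hedged sketch using plain scipy.sparse arithmetic; it is not part of LightFM.

def drop_intersections(test_interactions, train_interactions):
    """Return a copy of the test matrix with every (user, item) pair
    that also appears in the train matrix removed."""
    test_csr = test_interactions.tocsr()
    train_mask = train_interactions.tocsr().astype(bool)
    cleaned = test_csr - test_csr.multiply(train_mask)
    cleaned.eliminate_zeros()
    return cleaned.tocoo()

# Here test is already disjoint from train, so this is a no-op; on an
# overlapping split it removes exactly the pairs the check complains about.
clean_test = drop_intersections(test, train)
evaluation.precision_at_k(
    model, clean_test, train_interactions=train, check_intersections=True
)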
model = LightFM(loss="bpr")
# We want a high precision to catch the k=1 case
model.fit_partial(test)
for k in (10, 5, 1):
# Without omitting train interactions
precision = evaluation.precision_at_k(model, test, k=k)
expected_mean_precision = _precision_at_k(model, test, k)
assert np.allclose(precision.mean(), expected_mean_precision)
assert len(precision) == (test.getnnz(axis=1) > 0).sum()
assert (
len(evaluation.precision_at_k(model, train, preserve_rows=True))
== test.shape[0]
)
# With omitting train interactions
precision = evaluation.precision_at_k(
model, test, k=k, train_interactions=train
)
expected_mean_precision = _precision_at_k(model, test, k, train=train)
assert np.allclose(precision.mean(), expected_mean_precision)
def scorer(est, x, y=None):
    return precision_at_k(est, x).mean()
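Because scorer has the (estimator, X, y=None) signature scikit-learn expects from a scoring callable, it can drive a hyperparameter search over a LightFM model. The sketch below is hypothetical: the synthetic interaction matrix, the parameter distributions, and the identity-split CV scheme are all illustrative assumptions, and it relies on LightFM exposing scikit-learn-style get_params/set_params.

import numpy as np
import scipy.sparse as sp
from scipy import stats
from sklearn.model_selection import KFold, RandomizedSearchCV
from lightfm import LightFM

# Synthetic implicit-feedback matrix (illustrative only).
rng = np.random.RandomState(0)
interactions = sp.random(50, 200, density=0.2, random_state=rng, format="csr")
interactions.data = np.ones_like(interactions.data)

class IdentitySplitCV(KFold):
    # Fit and score on the same rows: splitting users across folds would
    # change the user-id space the LightFM model was trained on.
    def split(self, X, y=None, groups=None):
        idx = np.arange(X.shape[0])
        for _ in range(self.n_splits):
            yield idx, idx

search = RandomizedSearchCV(
    estimator=LightFM(loss="bpr", random_state=42),
    param_distributions={
        "no_components": stats.randint(8, 64),
        "learning_rate": stats.uniform(0.01, 0.1),
    },
    n_iter=5,
    scoring=scorer,  # the (estimator, X, y=None) callable defined above
    cv=IdentitySplitCV(n_splits=3),
    random_state=42,
)
search.fit(interactions)
print(search.best_params_, search.best_score_)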
def precision_at_k_on_ranks(
    ranks, test_interactions, train_interactions=None, k=10, preserve_rows=False
):
    return precision_at_k(
        model=ModelMockRanksCacher(ranks.copy()),
        test_interactions=test_interactions,
        train_interactions=train_interactions,
        k=k,
        preserve_rows=preserve_rows,
    )
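ModelMockRanksCacher is a test double from LightFM's test suite that is not shown here. A hypothetical reconstruction might look like the following; it relies only on the fact that the evaluation functions obtain ranks by calling the model's predict_rank method.

class ModelMockRanksCacher:
    """Hypothetical stand-in model: predict_rank returns a precomputed
    sparse rank matrix instead of ranking anything itself."""

    def __init__(self, ranks):
        self._ranks = ranks

    def predict_rank(self, test_interactions, **kwargs):
        # Callers pass ranks.copy() into the constructor (see above), so the
        # original rank matrix is not modified during evaluation.
        return self._ranks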