How to use the lightfm.evaluation.auc_score function in lightfm

To help you get started, we've selected a few lightfm examples based on popular ways it is used in public projects.
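Before the project snippets below, here is a minimal sketch of the typical call pattern, using lightfm's bundled MovieLens fetcher; the loss, epoch count, and thread count are arbitrary choices for illustration, not recommendations.

from lightfm import LightFM
from lightfm.datasets import fetch_movielens
from lightfm.evaluation import auc_score

# Fetch the MovieLens 100k dataset (downloads on first use).
data = fetch_movielens(min_rating=4.0)

model = LightFM(loss="warp")
model.fit(data["train"], epochs=10, num_threads=2)

# auc_score returns one ROC AUC value per user; take the mean for a summary metric.
train_auc = auc_score(model, data["train"], num_threads=2).mean()
test_auc = auc_score(
    model,
    data["test"],
    train_interactions=data["train"],  # exclude items already seen during training
    num_threads=2,
).mean()

print("AUC: train %.2f, test %.2f." % (train_auc, test_auc))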


github lyst / lightfm / tests / test_movielens.py (View on Github)
from lightfm.evaluation import auc_score, precision_at_k


def _get_metrics(model, train_set, test_set):

    train_set = train_set.tocsr()
    test_set = test_set.tocsr()

    # Zero out negative interactions and drop them from the sparse matrices,
    # so only positive interactions are used for evaluation.
    train_set.data[train_set.data < 0] = 0.0
    test_set.data[test_set.data < 0] = 0.0

    train_set.eliminate_zeros()
    test_set.eliminate_zeros()

    # Mean precision@k and mean AUC across users, on both splits.
    return (
        precision_at_k(model, train_set).mean(),
        precision_at_k(model, test_set).mean(),
        auc_score(model, train_set).mean(),
        auc_score(model, test_set).mean(),
    )
github lyst / lightfm / tests / test_evaluation.py (View on Github)
def test_auc_score():

    no_users, no_items = (10, 100)

    train, test = _generate_data(no_users, no_items)

    model = LightFM(loss="bpr")
    model.fit_partial(train)

    auc = evaluation.auc_score(model, test, num_threads=2)
    expected_auc = np.array(_auc(model, test))

    assert auc.shape == expected_auc.shape
    assert np.abs(auc.mean() - expected_auc.mean()) < 0.01
    assert len(auc) == (test.getnnz(axis=1) > 0).sum()
    assert len(evaluation.auc_score(model, train, preserve_rows=True)) == test.shape[0]

    # With omitting train interactions
    auc = evaluation.auc_score(model, test, train_interactions=train, num_threads=2)
    expected_auc = np.array(_auc(model, test, train))
    assert np.abs(auc.mean() - expected_auc.mean()) < 0.01
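The preserve_rows flag checked above controls whether users with no test interactions are dropped from the returned array. A minimal sketch of the difference, using tiny hand-made matrices that are purely illustrative:

from scipy.sparse import coo_matrix

from lightfm import LightFM
from lightfm.evaluation import auc_score

# Three users, four items; user 2 has no test interactions at all.
train = coo_matrix(([1.0, 1.0, 1.0], ([0, 1, 2], [0, 1, 2])), shape=(3, 4))
test = coo_matrix(([1.0, 1.0], ([0, 1], [3, 2])), shape=(3, 4))

model = LightFM(loss="bpr")
model.fit(train, epochs=1)

print(auc_score(model, test).shape)                      # (2,): users with no test interactions dropped
print(auc_score(model, test, preserve_rows=True).shape)  # (3,): one score per user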
github lyst / lightfm / tests / test_evaluation.py (View on Github)
    with pytest.raises(ValueError):
        evaluation.recall_at_k(
            model, train, train_interactions=train, check_intersections=True
        )

    with pytest.raises(ValueError):
        evaluation.precision_at_k(
            model, train, train_interactions=train, check_intersections=True
        )

    with pytest.raises(ValueError):
        evaluation.reciprocal_rank(
            model, train, train_interactions=train, check_intersections=True
        )

    # check no errors raised when train and test have no interactions in common
    evaluation.auc_score(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.recall_at_k(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.precision_at_k(
        model, test, train_interactions=train, check_intersections=True
    )
    evaluation.reciprocal_rank(
        model, test, train_interactions=train, check_intersections=True
    )

    # check no error is raised when there are intersections but flag is False
    evaluation.auc_score(
        model, train, train_interactions=train, check_intersections=False
    )
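As the test above shows, passing check_intersections=True makes auc_score (and the other ranking metrics) raise a ValueError when the train and test matrices share interactions. A minimal, self-contained sketch of using that flag to guard against an accidental train/test leak; the tiny matrices here are hypothetical, illustrative data:

from scipy.sparse import coo_matrix

from lightfm import LightFM
from lightfm.evaluation import auc_score

# Two tiny interaction matrices that deliberately share the (0, 0) interaction.
train = coo_matrix(([1.0, 1.0], ([0, 1], [0, 1])), shape=(2, 3))
test = coo_matrix(([1.0, 1.0], ([0, 1], [0, 2])), shape=(2, 3))

model = LightFM(loss="bpr")
model.fit(train, epochs=1)

try:
    auc_score(model, test, train_interactions=train, check_intersections=True)
except ValueError:
    print("Train and test interactions overlap; fix the split before evaluating.")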
github aspc / mainsite / aspc / courses / management / commands / recommendations.py (View on Github)
            recommendation_list = []
            for course_id in unique(course_id_of_recommendations_for_kent)[:(5 * recommendation_count)]:
                course = Course.objects.get(id=course_id)
                if course not in courses_taken_by_user and course.id in next_term_courses:
                    if not super_requisite_already_taken(course, courses_taken_by_user):
                        recommendation_list.append(course)

            for recommendation in recommendation_list[:recommendation_count]:
                print(recommendation)

        train_precision = precision_at_k(model, data['train'], item_features=data['course_features'], k=10).mean()
        test_precision = precision_at_k(model, data['test'], item_features=data['course_features'], k=10).mean()

        train_auc = auc_score(model, data['train'], item_features=data['course_features']).mean()
        test_auc = auc_score(model, data['test'], item_features=data['course_features']).mean()

        print('Precision: train %.2f, test %.2f.' % (train_precision, test_precision))
        print('AUC: train %.2f, test %.2f.' % (train_auc, test_auc))
github DomainGroupOSS / ml-recsys-tools / ml_recsys_tools / evaluation / ranks_scoring.py (View on Github)
def auc_score_on_ranks(
        ranks, test_interactions, train_interactions=None, preserve_rows=False):
    # Score precomputed ranks with lightfm's auc_score by wrapping them in a
    # mock model that serves the cached ranks back to the evaluation routine.
    return auc_score(
        model=ModelMockRanksCacher(ranks.copy()),
        test_interactions=test_interactions,
        train_interactions=train_interactions,
        preserve_rows=preserve_rows,
    )