How to use the ludwig.utils.visualization_utils module in ludwig

To help you get started, we've selected a few ludwig examples based on popular ways it is used in public projects.

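All of the excerpts below come from ludwig/visualize.py, which drives these plotting helpers. If you want to call them yourself, a minimal import sketch (the module path comes from the title; the helpers draw with matplotlib, so it needs to be available in your environment):

import numpy as np                               # most helpers take NumPy arrays
from ludwig.utils import visualization_utils     # Ludwig's plotting helpers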

github uber / ludwig / ludwig / visualize.py
# Excerpt from confusion_matrix() in ludwig/visualize.py: rank classes by the
# entropy of their confusion matrix row and draw them with bar_plot.
# Uses numpy as np, scipy.stats.entropy and ludwig.utils.visualization_utils.
entropies = []
for row in cm:  # rows of the confusion matrix computed earlier (name assumed)
    if np.count_nonzero(row) > 0:
        entropies.append(entropy(row))
    else:
        entropies.append(0)
class_entropy = np.array(entropies)
# class indices sorted by descending entropy, and the entropies in that order
class_desc_entropy = np.argsort(class_entropy)[::-1]
desc_entropy = class_entropy[class_desc_entropy]

filename = None
if output_directory:
    filename = filename_template_path.format(
        'entropy_' + model_name_name,
        output_feature_name,
        'top' + str(k)
    )

visualization_utils.bar_plot(
    class_desc_entropy,
    desc_entropy,
    labels=[labels[i] for i in class_desc_entropy],
    title='Classes ranked by entropy of Confusion Matrix row',
    filename=filename
)
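
If you want to call bar_plot directly on your own data, here is a minimal sketch. The argument meanings are inferred from the excerpt above (bar positions, bar heights, optional labels, title and output filename); the class names and values are made up:

import numpy as np
from ludwig.utils import visualization_utils

values = np.array([0.9, 0.4, 0.7])   # made-up per-class scores
names = ['cat', 'dog', 'bird']       # made-up class names
order = np.argsort(values)[::-1]     # rank classes by descending score

visualization_utils.bar_plot(
    order,                             # ranked class indices
    values[order],                     # scores in the same order
    labels=[names[i] for i in order],
    title='Classes ranked by score',
    filename=None                      # pass a file path instead to save the figure
)
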
github uber / ludwig / ludwig / visualize.py
# Excerpt from compare_classifiers_predictions_distribution(): turn each
# model's predictions into a class-frequency distribution and compare them to
# the ground-truth distribution on a radar chart.
counts_predictions = [np.bincount(alg_predictions, minlength=max_val)
                      for alg_predictions in predictions_per_model]

# normalize the per-class counts into probability distributions
prob_predictions = [alg_count_prediction / alg_count_prediction.sum()
                    for alg_count_prediction in counts_predictions]

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = os.path.join(
        output_directory,
        'compare_classifiers_predictions_distribution.' + file_format
    )

visualization_utils.radar_chart(
    prob_gt,  # ground-truth class distribution, computed earlier (not shown)
    prob_predictions,
    model_names_list,
    filename=filename
)
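
To use radar_chart on its own, pass the ground-truth class distribution, a list with one predicted-class distribution per model, and the model names, as in the excerpt. A sketch with made-up distributions over four classes:

import numpy as np
from ludwig.utils import visualization_utils

prob_gt = np.array([0.4, 0.3, 0.2, 0.1])    # made-up ground-truth distribution
prob_predictions = [
    np.array([0.35, 0.35, 0.20, 0.10]),     # made-up distribution for model_a
    np.array([0.50, 0.20, 0.20, 0.10]),     # made-up distribution for model_b
]

visualization_utils.radar_chart(
    prob_gt,
    prob_predictions,
    ['model_a', 'model_b'],
    filename=None   # or e.g. 'predictions_distribution.png' to save
)
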
github uber / ludwig / ludwig / visualize.py
# Excerpt from learning_curves(): collect the per-epoch training and validation
# values of a metric for every model and plot them as learning curves.
training_stats = [
    learning_stats['train'][output_feature_name][metric]
    for learning_stats in train_stats_per_model_list
]

validation_stats = []
for learning_stats in train_stats_per_model_list:
    if 'validation' in learning_stats:
        validation_stats.append(
            learning_stats['validation'][output_feature_name][metric]
        )
    else:
        # models trained without a validation set contribute no curve
        validation_stats.append(None)

visualization_utils.learning_curves_plot(
    training_stats,
    validation_stats,
    metric,
    model_names_list,
    title='Learning Curves {}'.format(output_feature_name),
    filename=filename  # set earlier in the function (not shown in this excerpt)
)
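
A standalone call to learning_curves_plot, with made-up per-epoch accuracy curves for two hypothetical models; based on the excerpt, it takes one training series and one validation series per model, the metric name, and the model names:

import numpy as np
from ludwig.utils import visualization_utils

train_curves = [
    np.array([0.60, 0.70, 0.80, 0.85]),   # made-up training accuracy, model_a
    np.array([0.50, 0.65, 0.75, 0.80]),   # made-up training accuracy, model_b
]
vali_curves = [
    np.array([0.55, 0.65, 0.70, 0.72]),   # made-up validation accuracy, model_a
    np.array([0.48, 0.60, 0.68, 0.71]),   # made-up validation accuracy, model_b
]
# as in the excerpt, a model without a validation split can pass None instead

visualization_utils.learning_curves_plot(
    train_curves,
    vali_curves,
    'accuracy',
    ['model_a', 'model_b'],
    title='Learning Curves',
    filename=None
)
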
github uber / ludwig / ludwig / visualize.py
# Excerpt from confidence_thresholding_2thresholds_2d(): the call that ends
# just above this excerpt is truncated. Log the coverage/accuracy table as CSV,
# then plot it as a multiline plot and as a single max-accuracy line.

logger.info('CSV table')
for row in table:
    logger.info(','.join([str(e) for e in row]))

# =========== #
#  Multiline  #
# =========== #
filename = None
if filename_template_path:
    os.makedirs(output_directory, exist_ok=True)
    filename = filename_template_path.format('multiline')
# note: 'fitlering' (rather than 'filtering') is how these helpers are actually
# spelled in this version of ludwig.utils.visualization_utils
visualization_utils.confidence_fitlering_data_vs_acc_multiline_plot(
    accuracies,
    dataset_kept,
    model_names_list,
    title='Coverage vs Accuracy, two thresholds',
    filename=filename
)

# ========== #
#  Max line  #
# ========== #
filename = None
if filename_template_path:
    filename = filename_template_path.format('maxline')
max_accuracies = np.amax(np.array(interps), 0)
visualization_utils.confidence_fitlering_data_vs_acc_plot(
    [max_accuracies],
    # ... (remaining arguments truncated in this excerpt)
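
A standalone call to confidence_fitlering_data_vs_acc_multiline_plot, assuming (from the excerpt) that it takes a list of accuracy curves, a matching list of data-kept fractions, and a name per curve; all numbers below are made up:

import numpy as np
from ludwig.utils import visualization_utils

accuracies = [
    np.array([0.70, 0.75, 0.82, 0.90]),   # made-up accuracy as the threshold rises
    np.array([0.68, 0.74, 0.80, 0.88]),
]
dataset_kept = [
    np.array([1.00, 0.80, 0.55, 0.30]),   # made-up fraction of data kept
    np.array([1.00, 0.85, 0.60, 0.35]),
]

visualization_utils.confidence_fitlering_data_vs_acc_multiline_plot(
    accuracies,
    dataset_kept,
    ['curve_1', 'curve_2'],
    title='Coverage vs Accuracy',
    filename=None
)
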
github uber / ludwig / ludwig / visualize.py
# Excerpt from compare_classifiers_predictions(): a logger.info call reporting
# the agreement/disagreement percentages of the two models is truncated at the
# start of this excerpt.

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = os.path.join(
        output_directory,
        'compare_classifiers_predictions_{}_{}.{}'.format(
            name_c1, name_c2, file_format
        )
    )

visualization_utils.donut(
    # inner ring: how often the two models are both right, exactly one is
    # right, or both are wrong
    [both_right, one_right, both_wrong],
    ['both right', 'one right', 'both wrong'],
    # outer ring: the same counts broken down further
    [both_right, c1_right_c2_wrong, c1_wrong_c2_right, both_wrong_same,
     both_wrong_different],
    ['both right',
     '{} right / {} wrong'.format(name_c1, name_c2),
     '{} wrong / {} right'.format(name_c1, name_c2),
     'same prediction', 'different prediction'],
    # maps each outer wedge to the inner wedge it belongs to
    [0, 1, 1, 2, 2],
    title='{} vs {}'.format(name_c1, name_c2),
    filename=filename
)
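
A standalone donut call with made-up agreement counts for two hypothetical classifiers; the inner-ring counts, outer-ring counts, their labels, and the outer-to-inner group mapping follow the pattern of the excerpt:

from ludwig.utils import visualization_utils

# made-up counts over 100 examples
both_right, one_right, both_wrong = 70, 20, 10
c1_right_c2_wrong, c1_wrong_c2_right = 12, 8
both_wrong_same, both_wrong_different = 6, 4

visualization_utils.donut(
    [both_right, one_right, both_wrong],
    ['both right', 'one right', 'both wrong'],
    [both_right, c1_right_c2_wrong, c1_wrong_c2_right,
     both_wrong_same, both_wrong_different],
    ['both right', 'c1 right / c2 wrong', 'c1 wrong / c2 right',
     'same prediction', 'different prediction'],
    [0, 1, 1, 2, 2],   # which inner wedge each outer wedge belongs to
    title='model_1 vs model_2',
    filename=None
)
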
github uber / ludwig / ludwig / visualize.py
# Excerpt from confidence_thresholding_2thresholds_3d(): the surrounding nested
# loop over threshold pairs is truncated; it records the accuracy and the
# fraction of the dataset kept at each pair of confidence thresholds.
        curr_accuracies.append(accuracy)
        curr_dataset_kept.append(len(filtered_gt_1) / len(gt_1))

    accuracies.append(curr_accuracies)
    dataset_kept.append(curr_dataset_kept)

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = os.path.join(
        output_directory,
        'confidence_thresholding_2thresholds_3d.' + file_format
    )

visualization_utils.confidence_fitlering_3d_plot(
    np.array(thresholds),
    np.array(thresholds),
    np.array(accuracies),
    np.array(dataset_kept),
    threshold_output_feature_names,
    title='Confidence_Thresholding, two thresholds',
    filename=filename
)
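
A standalone sketch of confidence_fitlering_3d_plot. The excerpt suggests it takes two threshold arrays (one per output feature), a 2-D grid of accuracies, a matching 2-D grid of data-kept fractions, and the two feature names; the grids below are made up:

import numpy as np
from ludwig.utils import visualization_utils

thresholds = np.array([0.5, 0.7, 0.9])
accuracies = np.array([[0.80, 0.84, 0.90],
                       [0.82, 0.86, 0.92],
                       [0.85, 0.90, 0.95]])     # made-up accuracy per threshold pair
dataset_kept = np.array([[1.00, 0.80, 0.55],
                         [0.85, 0.70, 0.45],
                         [0.60, 0.50, 0.30]])   # made-up coverage per threshold pair

visualization_utils.confidence_fitlering_3d_plot(
    thresholds,
    thresholds,
    accuracies,
    dataset_kept,
    ['feature_1', 'feature_2'],   # hypothetical output feature names
    title='Confidence thresholding, two thresholds',
    filename=None
)
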
github uber / ludwig / ludwig / visualize.py
# Excerpt from confidence_thresholding(): the surrounding loop is truncated; it
# sweeps confidence thresholds per model, recording the accuracy and the
# fraction of the dataset kept above each threshold.
        accuracies_alg.append(accuracy)
        dataset_kept_alg.append(len(filtered_gt) / len(ground_truth))

    accuracies.append(accuracies_alg)
    dataset_kept.append(dataset_kept_alg)

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = os.path.join(
        output_directory,
        'confidence_thresholding.' + file_format
    )

visualization_utils.confidence_fitlering_plot(
    thresholds,
    accuracies,
    dataset_kept,
    model_names_list,
    title='Confidence_Thresholding',
    filename=filename
)
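
A standalone confidence_fitlering_plot call, following the excerpt's argument order (thresholds, per-model accuracy curves, per-model coverage curves, model names); all values are made up:

from ludwig.utils import visualization_utils

thresholds = [0.5, 0.6, 0.7, 0.8, 0.9]
accuracies = [
    [0.80, 0.83, 0.86, 0.90, 0.94],   # made-up accuracy curve for model_a
    [0.78, 0.81, 0.85, 0.89, 0.93],   # made-up accuracy curve for model_b
]
dataset_kept = [
    [1.00, 0.90, 0.75, 0.55, 0.30],   # made-up coverage curve for model_a
    [1.00, 0.88, 0.72, 0.50, 0.28],   # made-up coverage curve for model_b
]

visualization_utils.confidence_fitlering_plot(
    thresholds,
    accuracies,
    dataset_kept,
    ['model_a', 'model_b'],
    title='Confidence thresholding',
    filename=None
)
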
github uber / ludwig / ludwig / visualize.py
# Excerpt from compare_classifiers_performance_changing_k(): for each model
# (outer loop truncated), count how often the true class appears among the
# top-j predicted classes, for j = 1..k.
    hits_at_k = [0.0] * k
    for g in range(len(ground_truth)):
        for j in range(k):
            # prob[g] is assumed to hold class indices sorted by ascending
            # predicted probability, so its last j+1 entries are the top
            # predictions for example g
            hits_at_k[j] += np.in1d(ground_truth[g], prob[g, -j - 1:])
    hits_at_ks.append(np.array(hits_at_k) / len(ground_truth))

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = os.path.join(
        output_directory,
        'compare_classifiers_performance_changing_k.' + file_format
    )

visualization_utils.compare_classifiers_line_plot(
    np.arange(1, k + 1),
    hits_at_ks,
    'hits@k',
    model_names_list,
    title='Classifier comparison (hits@k)',
    filename=filename
)
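
A standalone compare_classifiers_line_plot call with made-up hits@k curves for two hypothetical models; per the excerpt, it takes the x values (the values of k), one curve per model, the metric name, and the model names:

import numpy as np
from ludwig.utils import visualization_utils

k = 5
hits_at_ks = [
    np.array([0.62, 0.75, 0.83, 0.88, 0.92]),   # made-up hits@k for model_a
    np.array([0.58, 0.71, 0.80, 0.86, 0.90]),   # made-up hits@k for model_b
]

visualization_utils.compare_classifiers_line_plot(
    np.arange(1, k + 1),
    hits_at_ks,
    'hits@k',
    ['model_a', 'model_b'],
    title='Classifier comparison (hits@k)',
    filename=None
)
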
github uber / ludwig / ludwig / visualize.py
# Excerpt from a calibration visualization in visualize.py: inside a loop over
# classes (loop header truncated), plot a calibration curve and the
# distribution of predicted probabilities for each class, then plot the
# models' Brier scores.
    visualization_utils.calibration_plot(
        fraction_positives_class,
        mean_predicted_vals_class,
        model_names_list,
        filename=filename  # set earlier in this loop iteration (not shown)
    )

    filename = None
    if output_directory:
        os.makedirs(output_directory, exist_ok=True)
        filename = filename_template_path.format(
            'prediction_distribution_' + str(class_idx)
        )

    visualization_utils.predictions_distribution_plot(
        probs_class,
        model_names_list,
        filename=filename
    )

filename = None
if output_directory:
    os.makedirs(output_directory, exist_ok=True)
    filename = filename_template_path.format('brier')

visualization_utils.brier_plot(
    np.array(brier_scores),
    model_names_list,
    filename=filename
)
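
Finally, a standalone calibration_plot sketch. Based on the excerpt, it takes the observed fraction of positives per probability bin and the mean predicted value per bin, one array per model, plus the model names; the bins and values below are made up:

import numpy as np
from ludwig.utils import visualization_utils

mean_predicted = [
    np.array([0.1, 0.3, 0.5, 0.7, 0.9]),        # made-up bin centers, model_a
    np.array([0.1, 0.3, 0.5, 0.7, 0.9]),        # made-up bin centers, model_b
]
fraction_positives = [
    np.array([0.12, 0.28, 0.55, 0.66, 0.93]),   # made-up observed rates, model_a
    np.array([0.05, 0.22, 0.48, 0.80, 0.97]),   # made-up observed rates, model_b
]

visualization_utils.calibration_plot(
    fraction_positives,
    mean_predicted,
    ['model_a', 'model_b'],
    filename=None
)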