How to use the ludwig.utils.data_utils.load_json function in ludwig

To help you get started, we've selected a few ludwig examples based on popular ways it is used in public projects.

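At its core, load_json reads a JSON file from disk and returns the parsed Python object, typically a dict. A minimal sketch of direct usage, with a hypothetical file path:

from ludwig.utils.data_utils import load_json

# Parse a JSON file into a Python object; Ludwig metadata files parse into a dict.
metadata = load_json('results/experiment_run/model/train_set_metadata.json')  # hypothetical path
print(sorted(metadata.keys()))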

Example from uber/ludwig (ludwig/data/preprocessing.py):
def load_metadata(metadata_file_path):
    logger.info('Loading metadata from: {0}'.format(metadata_file_path))
    return data_utils.load_json(metadata_file_path)
Example from uber/ludwig (ludwig/models/model.py):
def load(filepath):
    loaded = load_json(filepath)
    return ProgressTracker(**loaded)
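This is the read half of a simple JSON round trip for training state. A hedged sketch of the same idea, assuming the companion save_json helper in the same module and a hypothetical progress file:

from ludwig.utils.data_utils import load_json, save_json

state = {'epoch': 3, 'learning_rate': 0.001}   # hypothetical tracker state
save_json('progress.json', state)              # persist the dict as JSON
restored = load_json('progress.json')          # read it back into a dict
assert restored['epoch'] == 3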

Example from uber/ludwig (ludwig/models/model.py):
def load(load_path, use_horovod=False):
    hyperparameter_file = os.path.join(
        load_path,
        MODEL_HYPERPARAMETERS_FILE_NAME
    )
    hyperparameters = load_json(hyperparameter_file)
    model = Model(use_horovod=use_horovod, **hyperparameters)
    model.weights_save_path = os.path.join(
        load_path,
        MODEL_WEIGHTS_FILE_NAME
    )
    return model
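Here load_json turns a saved hyperparameters file back into keyword arguments for the Model constructor. The same pattern can be used directly to inspect a trained model directory; a hedged sketch, with a hypothetical directory layout and an assumed file name:

import os
from ludwig.utils.data_utils import load_json

model_dir = 'results/experiment_run/model'      # hypothetical path
hyperparameters = load_json(
    os.path.join(model_dir, 'model_hyperparameters.json')  # assumed file name
)
print(list(hyperparameters.keys()))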

Example from uber/ludwig (ludwig/visualize.py):
def confidence_thresholding_data_vs_acc_subset_per_class_cli(
        probabilities,
        ground_truth,
        ground_truth_metadata,
        ground_truth_split,
        output_feature_name,
        **kwargs
):
    """Load model data from files to be shown by compare_classifiers_multiclass

    :param probabilities: Path to experiment probabilities file
    :param ground_truth: Path to ground truth file
    :param ground_truth_metadata: Path to ground truth metadata file
    :param ground_truth_split: Type of ground truth split - train, val, test
    :param output_feature_name: Name of the output feature to visualize
    :param kwargs: model configuration arguments
    :return None:
    """
    metadata = load_json(ground_truth_metadata)
    gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
    probabilities_per_model = load_data_for_viz(
        'load_from_file', probabilities, dtype=float
    )
    confidence_thresholding_data_vs_acc_subset_per_class(
        probabilities_per_model, gt, metadata, output_feature_name, **kwargs
    )

Example from uber/ludwig (ludwig/visualize.py):
def compare_classifiers_performance_from_pred_cli(
        predictions,
        ground_truth,
        ground_truth_metadata,
        ground_truth_split,
        output_feature_name,
        **kwargs
):
    """Load model data from files to be shown by compare_classifiers_from_pred

    :param predictions: Path to experiment predictions file
    :param ground_truth: Path to ground truth file
    :param ground_truth_metadata: Path to ground truth metadata file
    :param ground_truth_split: Type of ground truth split - train, val, test
    :param output_feature_name: Name of the output feature to visualize
    :param kwargs: model configuration arguments
    :return None:
    """
    gt = load_from_file(ground_truth, output_feature_name, ground_truth_split)
    metadata = load_json(ground_truth_metadata)
    predictions_per_model_raw = load_data_for_viz(
        'load_from_file', predictions, dtype=str
    )
    predictions_per_model = [
        np.ndarray.flatten(pred) for pred in predictions_per_model_raw
    ]
    compare_classifiers_performance_from_pred(
        predictions_per_model, gt, metadata, output_feature_name, **kwargs
    )

Example from uber/ludwig (ludwig/visualize.py):
def frequency_vs_f1_cli(test_statistics, ground_truth_metadata, **kwargs):
    """Load model data from files to be shown by frequency_vs_f1.

    :param test_statistics: Path to experiment test statistics file
    :param ground_truth_metadata: Path to ground truth metadata file
    :param kwargs: model configuration arguments
    :return None:
    """
    test_stats_per_model = load_data_for_viz('load_json', test_statistics)
    metadata = load_json(ground_truth_metadata)
    frequency_vs_f1(test_stats_per_model, metadata, **kwargs)
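In the visualization helpers above, load_data_for_viz('load_json', ...) amounts to applying load_json to each file in a list of test statistics paths. A hedged sketch of the equivalent direct approach, with hypothetical result paths:

from ludwig.utils.data_utils import load_json

test_statistics = [
    'results/model_a/test_statistics.json',    # hypothetical paths
    'results/model_b/test_statistics.json',
]
# One parsed statistics dict per model, mirroring load_data_for_viz('load_json', ...)
test_stats_per_model = [load_json(path) for path in test_statistics]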