How to use the bigml.api.check_resource function in bigml

To help you get started, we’ve selected a few bigml examples that show how check_resource is commonly used in public projects.

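All of the snippets below follow the same pattern: call check_resource with a resource ID plus either a per-type getter method or the api object, and get back the finished resource as a dict. As a quick orientation, here is a minimal sketch of that pattern; the dataset ID is a placeholder, and it assumes your BigML credentials are available in the BIGML_USERNAME and BIGML_API_KEY environment variables:

from bigml.api import BigML, check_resource

api = BigML()  # credentials are read from BIGML_USERNAME / BIGML_API_KEY

# Placeholder ID: substitute a dataset that exists in your account.
dataset_id = "dataset/5143a51a37203f2cf7000972"

# Either pass the getter for the resource type explicitly...
dataset = check_resource(dataset_id, api.get_dataset)

# ...or pass the api object and let check_resource pick the getter.
dataset = check_resource(dataset_id, api=api)

# check_resource polls until the resource reaches a final state and
# returns the full resource dict.
print(dataset['resource'])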

bigmlcom/bigmler: tests/features/basic_test_prediction_steps.py (View on GitHub)
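Here a BigMLer test step polls the models file until it lists the expected number of models, then verifies each ID with check_resource and api.get_model, sorting the results into ensembles and standalone models:
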
    while world.number_of_models != number_of_lines and count < 10:
        number_of_lines = 0
        model_ids = []
        for line in open(model_file, "r"):
            number_of_lines += 1
            model_id = line.strip()
            model_ids.append(model_id)
        if world.number_of_models != number_of_lines:
            time.sleep(10)
            count += 1
    if world.number_of_models != number_of_lines:
        assert False, "number of models %s and number of lines in models file %s: %s" % (world.number_of_models, model_file, number_of_lines)
    world.model_ids = model_ids
    for model_id in model_ids:
        try:
            model = check_resource(model_id, world.api.get_model)
            if in_ensemble:
                ensemble_id = "ensemble/%s" % model['object']['ensemble_id']
                if ensemble_id not in world.ensembles:
                    world.ensembles.append(ensemble_id)
            else:
                world.models.append(model_id)

            assert True
        except Exception as exc:
            assert False, str(exc)
bigmlcom/bigmler: tests/features/basic_test_prediction_steps.py (View on GitHub)
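The same polling pattern is applied to evaluations, this time verifying each ID with api.get_evaluation:
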
    count = 0
    while world.number_of_evaluations != number_of_lines and count < 10:
        number_of_lines = 0
        for line in open(evaluations_file, "r"):
            number_of_lines += 1
            evaluation_id = line.strip()
            evaluation_ids.append(evaluation_id)
        if world.number_of_evaluations != number_of_lines:
            time.sleep(10)
            count += 1
    if world.number_of_evaluations != number_of_lines:
        assert False, "number of evaluations %s and number of lines in evaluations file %s: %s" % (world.number_of_evaluations, evaluation_file, number_of_lines)
    world.evaluation_ids = evaluation_ids
    for evaluation_id in evaluation_ids:
        try:
            evaluation = check_resource(evaluation_id, world.api.get_evaluation)
            world.evaluations.append(evaluation_id)
            assert True
        except Exception as exc:
            assert False, str(exc)
bigmlcom/bigmler: tests/features/basic_test_prediction_steps.py (View on GitHub)
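A simpler variant reads a single evaluation ID from a file and waits for that one resource to finish:
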
@step(r'I check that the evaluation has been created')
def i_check_create_evaluation(step):
    evaluation_file = "%s%sevaluations" % (world.directory, os.sep)
    try:
        with open(evaluation_file, "r") as evaluation_handler:
            evaluation_id = evaluation_handler.readline().strip()
        evaluation = check_resource(evaluation_id, world.api.get_evaluation)
        world.evaluations.append(evaluation['resource'])
        world.evaluation = evaluation
        assert True
    except Exception as exc:
        assert False, str(exc)
bigmlcom/bigmler: tests/features/basic_batch_test_prediction_steps.py (View on GitHub)
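Here check_resource is given only the api keyword argument and infers the getter from the resource ID itself:
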
@step(r'I check that the batch predictions dataset exists')
def i_check_create_batch_predictions_dataset(step):
    dataset_file = "%s%sbatch_prediction_dataset" % (world.directory, os.sep)
    try:
        with open(dataset_file, "r") as dataset_handler:
            dataset_id = dataset_handler.readline().strip()
        dataset = check_resource(dataset_id, api=world.api)
        world.datasets.append(dataset['resource'])
        assert True
    except Exception as exc:
        assert False, str(exc)
bigmlcom/bigmler: tests/features/dataset_advanced_steps.py (View on GitHub)
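Checking a multi-dataset follows the same shape, with an explicit api.get_dataset getter and a context manager for the file handle:
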
@step(r'I check that the multi-dataset has been created$')
def i_check_create_multi_dataset(step):
    dataset_file = "%s%sdataset_multi" % (world.directory, os.sep)
    try:
        with open(dataset_file, "r") as dataset_file_handler:
            dataset_id = dataset_file_handler.readline().strip()
        dataset = check_resource(dataset_id,
                                 world.api.get_dataset)
        world.datasets.append(dataset['resource'])
        world.dataset = dataset
        assert True
    except Exception as exc:
        assert False, str(exc)
bigmlcom/bigmler: bigmler/dnprediction.py (View on GitHub)
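Outside the test suites, BigMLer's deepnet prediction flow uses bigml.api.check_resource to wait for a batch prediction to finish before reading its output dataset:
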
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            deepnet_id, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
bigmlcom/bigmler: bigmler/processing/datasets.py (View on GitHub)
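When splitting a dataset by category, each newly created part is passed through check_resource before it is collected:
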
            try:
                dataset_args = {
                    "all_but": [fields.objective_field],
                    "new_fields": [
                        {"name": fields.field_name(fields.objective_field),
                         "field": category_generator,
                         "label": "max_categories: %s" % args.max_categories}],
                    "user_metadata":
                    {"max_categories": args.max_categories,
                     "other_label": other_label}}
            except ValueError as exc:
                sys.exit(exc)
            new_dataset = r.create_dataset(
                dataset, dataset_args, args, api=api, path=path,
                session_file=session_file, log=log, dataset_type="parts")
            new_dataset = bigml.api.check_resource(new_dataset,
                                                   api.get_dataset)
            datasets.append(new_dataset)
    return datasets, resume
bigmlcom/bigmler: bigmler/prediction.py (View on GitHub)
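The generic prediction flow handles both a single batch prediction and one per test dataset; either way, each finished prediction is fetched with check_resource before its output dataset is resolved:
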
            batch_prediction = create_batch_prediction(
                model_or_ensemble, test_dataset, batch_prediction_args,
                args, api, session_file=session_file, path=path, log=log)
        else:
            batch_predictions = []
            for index, test_dataset_n in enumerate(test_datasets):
                batch_predictions.append(create_batch_prediction( \
                    models[index], test_dataset_n, batch_prediction_args,
                    args, api, session_file=session_file, path=path, log=log))
    if not args.no_csv and not args.dataset_off:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset and not args.dataset_off:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
    elif args.to_dataset and args.dataset_off:
        predictions_datasets = []
        for batch_prediction in batch_predictions:
            batch_prediction = bigml.api.check_resource(batch_prediction,
                                                        api=api)
            new_dataset = bigml.api.get_dataset_id(
                batch_prediction['object']['output_dataset_resource'])
bigmlcom/bigmler: bigmler/lrprediction.py (View on GitHub)
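The linear regression flow repeats the deepnet pattern, waiting on the batch prediction before extracting its output dataset:
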
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            linear_regression_id, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')
bigmlcom/bigmler: bigmler/logrprediction.py (View on GitHub)
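And the logistic regression flow is identical apart from the model ID handed to create_batch_prediction:
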
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_id, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset",
                                    path, new_dataset, mode='a')