How to use the bigml.model.Model class in bigml

To help you get started, we've selected a few bigml examples based on popular ways bigml.model.Model is used in public GitHub projects.

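As a quick orientation before the project snippets, here is a minimal sketch of the usual lifecycle: create a model through the API, wait for it to finish, then wrap it in a local Model so predictions run offline. The dataset ID and input field names below are placeholders, not values taken from the projects.

from bigml.api import BigML
from bigml.model import Model

api = BigML()  # credentials are read from BIGML_USERNAME / BIGML_API_KEY
dataset_id = "dataset/0123456789abcdef01234567"  # placeholder dataset ID
model = api.create_model(dataset_id)
api.ok(model)  # block until the model build finishes

local_model = Model(model)  # local predictor; no further API calls needed
prediction = local_model.predict({"petal length": 3, "petal width": 1})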

From bigmlcom/python, tests/features/compare_predictions-steps.py:
from bigml.model import Model

def i_create_a_local_model_from_file(step, model_file):
    # "world" is the test framework's shared state object
    world.local_model = Model(model_file)
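
This step shows that Model accepts not only a model resource or its ID but also a path to a JSON file where a model was previously stored, so tests and offline scripts can run without contacting the API.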

From bigmlcom/python, tests/features/create_ensemble-steps.py:
from bigml.model import Model
from bigml.ensemble import Ensemble

def create_local_ensemble_with_list_of_local_models(step, number_of_models):
    # wrap the last n models kept in the shared test state as local Models
    local_models = [Model(model) for model in world.models[-int(number_of_models):]]
    world.local_ensemble = Ensemble(local_models, world.api)
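
Once built, the local ensemble predicts like a single model, e.g. world.local_ensemble.predict({"petal length": 3}) (the input field here is a placeholder).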

From bigmlcom/bigmler, bigmler/dispatcher.py:
            predict(models, fields, args, api=api, log=log,
                    resume=resume, session_file=session_file, labels=labels,
                    models_per_label=models_per_label, other_label=other_label,
                    multi_label_data=multi_label_data)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if args.votes_files_:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', args.votes_files_[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError as exception:
            sys.exit("Failed to get model %s: %s" % (model_id, str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)

        combine_votes(args.votes_files_, local_model.to_prediction,
                      output, method=args.method)

    # If the evaluate flag is on, create a remote evaluation and save
    # results in JSON and human-readable formats.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
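
The re.sub call above recovers the model ID from a predictions file name. Here is a standalone illustration of that step (the file name is invented for the example):

import re

# hypothetical predictions file produced by an earlier bigmler run
votes_file = "predictions/model_0123456789abcdef01234567__predictions.csv"
model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                  r'\1', votes_file).replace("_", "/")
print(model_id)  # model/0123456789abcdef01234567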

From bigmlcom/python, bigml/multimodel.py:
    def __init__(self, models, api=None, fields=None, class_names=None):
        self.models = []
        self.class_names = class_names

        if isinstance(models, list):
            if all(isinstance(model, Model) for model in models):
                # already instantiated local models can be used directly
                self.models = models
            else:
                # otherwise build a local Model from each ID or resource dict
                for model in models:
                    self.models.append(Model(model, api=api, fields=fields))
        else:
            self.models.append(Model(models, api=api, fields=fields))
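
A sketch of how MultiModel is typically instantiated and used, assuming two existing models (the IDs are placeholders):

from bigml.api import BigML
from bigml.multimodel import MultiModel

api = BigML()
model_ids = ["model/0123456789abcdef01234567",  # placeholder IDs
             "model/0123456789abcdef01234568"]
multi = MultiModel(model_ids, api=api)
prediction = multi.predict({"petal length": 3})  # combined local prediction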

From bigmlcom/bigmler, bigmler/export/dispatcher.py:
import bigmler.utils as u

from bigmler.command import get_context
from bigmler.dispatcher import SESSIONS_LOG, clear_log_files
from bigmler.export.out_model.jsmodel import JsModel
from bigmler.export.out_model.tableaumodel import TableauModel
from bigmler.export.out_model.mysqlmodel import MySQLModel
from bigmler.export.out_model.rmodel import RModel
from bigmler.export.out_model.pythonmodel import PythonModel
from bigmler.export.out_model.pythonlr import PythonLR


COMMAND_LOG = u".bigmler_export"
DIRS_LOG = u".bigmler_export_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]

EXPORTS = {
    "javascript": JsModel,
    "python": PythonModel,
    "tableau": TableauModel,
    "mysql": MySQLModel,
    "r": RModel}

EXTENSIONS = {
    "javascript": "js",
    "python": "py",
    "tableau": "tb",
    "mysql": "sql",
    "r": "R"}

LR_EXPORTS = {
    "python": PythonLR
}

SEPARATE_OUTPUT = ['tableau', 'mysql']
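
These tables drive the export dispatch: the language chosen on the command line selects a generator class and an output extension. A minimal sketch of how such tables are consumed (export_model is illustrative, not bigmler's actual entry point):

def export_model(model, language, output_base):
    # look up the language-specific generator class and file extension
    generator_class = EXPORTS[language]    # e.g. JsModel for "javascript"
    extension = EXTENSIONS[language]       # e.g. "js"
    generator = generator_class(model)     # each generator subclasses Model
    return generator, "%s.%s" % (output_base, extension)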

From bigmlcom/bigmler, bigmler/export/out_model/pythonmodel.py:
    def __init__(self, model, api=None, fields=None):
        """Empty attributes to be overridden

        """
        self.tree_class = PythonTree
        Model.__init__(self, model, api, fields)
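
PythonModel here, and RModel and MySQLModel below, all follow the same pattern: point tree_class at a language-specific tree generator, then delegate the remaining setup to Model.__init__.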

From bigmlcom/python, bigml/ensemble.py:
        fields = {}
        models = []
        objective_id = None
        no_objective_id = False
        if isinstance(self.models_splits[0][0], Model):
            for split in self.models_splits:
                models.extend(split)
        else:
            models = self.model_ids
        for index, model_id in enumerate(models):
            if isinstance(model_id, Model):
                local_model = model_id
            elif self.cache_get is not None:
                local_model = self.cache_get(model_id)
            else:
                local_model = Model(model_id, self.api)
            if (max_models is not None and index > 0 and
                    index % max_models == 0):
                gc.collect()
            fields.update(local_model.fields)
            if (objective_id is not None and
                    objective_id != local_model.objective_id):
                # the models' objective fields have different ids, so there
                # is no global id
                no_objective_id = True
            else:
                objective_id = local_model.objective_id
        if no_objective_id:
            objective_id = None
        gc.collect()
        return fields, objective_id
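
The periodic gc.collect() every max_models iterations acts as a memory guard: instantiating many local models in one loop would otherwise keep large field structures alive between iterations.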

From bigmlcom/bigmler, bigmler/export/out_model/rmodel.py:
    def __init__(self, model, api=None, fields=None):
        """Empty attributes to be overridden

        """
        self.tree_class = RTree
        Model.__init__(self, model, api, fields)

From bigmlcom/bigmler, bigmler/export/out_model/mysqlmodel.py:
    def __init__(self, model, api=None, fields=None):
        """Empty attributes to be overridden

        """
        self.tree_class = MySQLTree
        Model.__init__(self, model, api, fields)

From bigmlcom/bigmler, bigmler/prediction.py:
                # (the excerpt opens mid-call: these are the trailing
                # arguments of a remote prediction helper, parallel to the
                # remote_predict_models call below)
                                        output_path, session_file, log,
                                        exclude)
            else:
                remote_predict_models(models, test_reader, prediction_file,
                                      api, args, resume, output_path,
                                      session_file, log, exclude)
            return
        # Local predictions: predictions are computed locally from the
        # models' rules with MultiModel's predict method
        message = u.dated("Creating local predictions.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        options = {}
        if args.method == THRESHOLD_CODE:
            options.update(threshold=args.threshold)
            if args.threshold_class is None:
                local_model = Model(models[0])
                # default class is the first class that appears in the dataset
                # objective field summary, which might be different from the
                # objective summary of each model because models are built
                # with sampling
                objective_field = local_model.objective_id
                distribution = local_model.tree.fields[objective_field][ \
                    "summary"]["categories"]
                args.threshold_class = distribution[0][0]
            options.update(category=args.threshold_class)
        # For a single model we build a Model; for a small number of models,
        # we build a MultiModel using all of the given models and issue
        # a combined prediction
        if (len(models) <= args.max_batch_models
                and args.fast and not args.multi_label
                and args.max_categories == 0
                and args.method != COMBINATION):
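
The excerpt ends mid-condition. For reference, the threshold combiner configured above maps roughly to the following local call (a sketch assuming the classic MultiModel.predict signature with a combination-method code and an options dict; the code value, model list, and input fields are assumptions):

from bigml.multimodel import MultiModel

THRESHOLD_CODE = 3  # assumption: code of the threshold-based combiner
multi = MultiModel(models)  # "models" as in the excerpt above
prediction = multi.predict(
    {"petal length": 3},  # placeholder input
    method=THRESHOLD_CODE,
    options={"threshold": 2, "category": "Iris-setosa"})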