How to use the bigml.api.get_status function in bigml

To help you get started, we’ve selected a few bigml examples that show popular ways bigml.api.get_status is used in public projects.

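Before looking at the project snippets below, here is a minimal sketch of the pattern they all share: get_status extracts the status dict (with 'code' and 'message' keys) from a resource, and the FINISHED and FAULTY constants in bigml.api mark the terminal states. The local CSV path, the three-second wait and the credential handling are illustrative assumptions, not taken from any of the projects below.

import time

from bigml.api import BigML, get_status, FINISHED, FAULTY

api = BigML()  # credentials are read from BIGML_USERNAME / BIGML_API_KEY
source = api.create_source("data/iris.csv")  # illustrative local data file

# Poll the source until it reaches a terminal state.
while True:
    source = api.get_source(source)
    status = get_status(source)
    if status['code'] == FINISHED:
        break
    if status['code'] == FAULTY:
        raise ValueError(status['message'])
    time.sleep(3)

The snippets below mostly delegate this waiting to helpers such as check_resource, but the explicit loop shows what get_status returns and how its status codes are compared.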

github bigmlcom / python / tests / create_source_steps.py
def wait_until_source_status_code_is(step, code1, code2, secs):
    """Waits until the source status code is code1 or code2, failing the
       step if more than secs seconds elapse.
    """
    start = datetime.utcnow()
    read.i_get_the_source(step, world.source['resource'])
    status = get_status(world.source)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        assert datetime.utcnow() - start < timedelta(seconds=int(secs))
        read.i_get_the_source(step, world.source['resource'])
        status = get_status(world.source)
    assert status['code'] == int(code1)
github bigmlcom / python / tests / create_source_steps.py
def the_source_has_been_created_async(step, secs):
    """Waits until the source leaves the UPLOADING state and checks that
       it was created successfully.
    """
    start = datetime.utcnow()
    status = get_status(world.resource)
    while status['code'] == UPLOADING:
        time.sleep(3)
        assert datetime.utcnow() - start < timedelta(seconds=int(secs))
        status = get_status(world.resource)
    assert world.resource['code'] == HTTP_CREATED
    # update status
    world.status = world.resource['code']
    world.location = world.resource['location']
    world.source = world.resource['object']
    # save reference
    world.sources.append(world.resource['resource'])
github bigmlcom / bigmler / bigmler / resources.py
            topic_model = api.create_topic_model(datasets,
                                                 topic_model_args,
                                                 retries=None)
            topic_model_id = check_resource_error( \
                topic_model,
                "Failed to create topic model: ")
            log_message("%s\n" % topic_model_id, log_file=log)
            topic_model_ids.append(topic_model_id)
            inprogress.append(topic_model_id)
            topic_models.append(topic_model)
            log_created_resources("topic_models", path, topic_model_id,
                                  mode='a')

        if args.verbosity:
            if bigml.api.get_status(topic_model)['code'] != bigml.api.FINISHED:
                try:
                    topic_model = check_resource( \
                        topic_model, api.get_topic_model,
                        query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished topic model: %s" %
                             str(exception))
                topic_models[0] = topic_model
            message = dated("Topic model created: %s\n" %
                            get_url(topic_model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, topic_model)

    return topic_models, topic_model_ids
github bigmlcom / bigmler / bigmler / resources.py
def get_source(source, api=None, verbosity=True,
               session_file=None):
    """Retrieves the source in its actual state and its field info

    """
    if api is None:
        api = bigml.api.BigML()
    if (isinstance(source, str) or
            bigml.api.get_status(source)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving source. %s\n" %
                        get_url(source))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            source = check_resource(source, api.get_source,
                                    query_string=ALL_FIELDS_QS)
        except ValueError as exception:
            sys.exit("Failed to get a finished source: %s" % str(exception))
    return source
github bigmlcom / bigmler / bigmler / resources.py
def get_execution(execution, api=None, verbosity=True,
                  session_file=None):
    """Retrieves the execution in its actual state

    """
    if api is None:
        api = bigml.api.BigML()

    if (isinstance(execution, str) or
            bigml.api.get_status(execution)['code'] != bigml.api.FINISHED):
        message = dated("Retrieving execution. %s\n" %
                        get_url(execution))
        log_message(message, log_file=session_file,
                    console=verbosity)
        try:
            execution = check_resource(execution, api.get_execution)
        except ValueError as exception:
            sys.exit("Failed to get a finished execution: %s" % str(exception))
    return execution
github bigmlcom / bigmler / bigmler / resources.py
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args,
                                             retries=None)
                else:
                    model = api.create_model(datasets, model_args,
                                             retries=None)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, mode='a')

            if args.number_of_models < 2 and args.verbosity:
                if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                    try:
                        model = check_resource(model, api.get_model,
                                               query_string=query_string)
                    except ValueError as exception:
                        sys.exit("Failed to get a finished model: %s" %
                                 str(exception))
                    models[0] = model
                message = dated("Model created: %s\n" %
                                get_url(model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, model)

    return models, model_ids
github bigmlcom / bigmler / bigmler / resources.py
def wait_for_available_tasks(inprogress, max_parallel, api,
                             resource_type, wait_step=2):
    """According to the max_parallel number of parallel resources to be
       created, when the number of in progress resources reaches the limit,
       it checks the ones in inprogress to see if there's a
       FINISHED or FAULTY resource. If found, it is removed from the
       inprogress list and returns to allow another one to be created.

    """

    check_kwargs = {"retries": 0, "query_string": "full=false", "api": api}
    while len(inprogress) == max_parallel:
        for j in range(0, len(inprogress)):
            try:
                ready = check_resource(inprogress[j], **check_kwargs)
                status = bigml.api.get_status(ready)
                if status['code'] == bigml.api.FINISHED:
                    del inprogress[j]
                    return
                elif status['code'] == bigml.api.FAULTY:
                    raise ValueError(status['message'])
            except ValueError as exception:
                sys.exit("Failed to get a finished %s: %s" %
                         (resource_type, str(exception)))
        time.sleep(max_parallel * wait_step)
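The helper above only blocks until a slot frees up; a hedged sketch of how it might drive a creation loop follows. The names dataset_ids, model_args and max_parallel are illustrative assumptions, not taken from the snippet.

from bigml.api import BigML

api = BigML()
model_args = {"name": "throttled model"}   # illustrative creation arguments
max_parallel = 2                           # illustrative concurrency limit
inprogress, models = [], []

for dataset_id in dataset_ids:             # ids of datasets created earlier
    # Block while max_parallel models are still being created.
    wait_for_available_tasks(inprogress, max_parallel, api, "model")
    model = api.create_model(dataset_id, model_args, retries=None)
    inprogress.append(model['resource'])
    models.append(model)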
github bigmlcom / python / bigml / model.py
        """
        self.resource_id = None
        self.ids_map = {}
        self.terms = {}
        self.regression = False
        self.boosting = None
        self.class_names = None
        self.api = get_api_connection(api)
        self.resource_id, model = get_resource_dict( \
            model, "model", api=self.api)

        if 'object' in model and isinstance(model['object'], dict):
            model = model['object']

        if 'model' in model and isinstance(model['model'], dict):
            status = get_status(model)
            if 'code' in status and status['code'] == FINISHED:

                self.input_fields = model["input_fields"]
                BaseModel.__init__(self, model, api=api, fields=fields)

                # boosting models are to be handled using the BoostedTree
                # class
                if model.get("boosted_ensemble"):
                    self.boosting = model.get('boosting', False)
                if self.boosting == {}:
                    self.boosting = False

                self.regression = \
                    not self.boosting and \
                    self.fields[self.objective_id]['optype'] == 'numeric' \
                    or (self.boosting and \
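The constructor above only parses the model once get_status reports a FINISHED resource. A hedged sketch of how a caller might ensure that before building the local model; the model id below is illustrative, and api.ok is the bindings' blocking wait that updates the resource dict in place.

from bigml.api import BigML, get_status, FINISHED
from bigml.model import Model

api = BigML()
model_resource = api.get_model("model/5af06df94e17277501000010")  # illustrative id
api.ok(model_resource)  # blocks until the model finishes, updating the dict
assert get_status(model_resource)['code'] == FINISHED
local_model = Model(model_resource, api=api)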
github bigmlcom / python / bigml / association.py
        self.min_support = None
        self.min_lift = None
        self.search_strategy = DEFAULT_SEARCH_STRATEGY
        self.rules = []
        self.significance_level = None
        self.api = get_api_connection(api)

        self.resource_id, association = get_resource_dict( \
            association, "association", api=self.api)

        if 'object' in association and isinstance(association['object'], dict):
            association = association['object']

        if 'associations' in association and \
                isinstance(association['associations'], dict):
            status = get_status(association)
            if 'code' in status and status['code'] == FINISHED:
                self.input_fields = association['input_fields']
                associations = association['associations']
                fields = associations['fields']
                ModelFields.__init__( \
                    self, fields, \
                    missing_tokens=associations.get('missing_tokens'))
                self.complement = associations.get('complement', False)
                self.discretization = associations.get('discretization', {})
                self.field_discretizations = associations.get(
                    'field_discretizations', {})
                self.items = [Item(index, item, fields) for index, item in
                              enumerate(associations.get('items', []))]
                self.max_k = associations.get('max_k', 100)
                self.max_lhs = associations.get('max_lhs', 4)
                self.min_confidence = associations.get('min_confidence', 0)
github bigmlcom / python / bigml / timeseries.py
        if 'object' in time_series and \
                isinstance(time_series['object'], dict):
            time_series = time_series['object']
        try:
            self.input_fields = time_series.get("input_fields", [])
            self._forecast = time_series.get("forecast")
            self.objective_fields = time_series.get(
                "objective_fields", [])
            objective_field = time_series['objective_field'] if \
                time_series.get('objective_field') else \
                time_series['objective_fields']
        except KeyError:
            raise ValueError("Failed to find the time series expected "
                             "JSON structure. Check your arguments.")
        if 'time_series' in time_series and \
            isinstance(time_series['time_series'], dict):
            status = get_status(time_series)
            if 'code' in status and status['code'] == FINISHED:
                time_series_info = time_series['time_series']
                fields = time_series_info.get('fields', {})
                self.fields = fields
                if not self.input_fields:
                    self.input_fields = [ \
                        field_id for field_id, _ in
                        sorted(self.fields.items(),
                               key=lambda x: x[1].get("column_number"))]
                self.all_numeric_objectives = time_series_info.get( \
                    'all_numeric_objectives')
                self.period = time_series_info.get('period', 1)
                self.ets_models = time_series_info.get('ets_models', {})
                self.error = time_series_info.get('error')
                self.damped_trend = time_series_info.get('damped_trend')
                self.seasonality = time_series_info.get('seasonality')