# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def delete(object_list):
    """Deletes the objects in object_list using the api delete method.

    Each id in ``object_list`` is mapped to its type-specific deleter in
    ``world.api.deleters``. Failed deletions are retried up to
    ``MAX_RETRIES`` times, sleeping 3 seconds between attempts.
    """
    for obj_id in object_list:
        counter = 0
        delete_method = world.api.deleters[get_resource_type(obj_id)]
        result = delete_method(obj_id)
        while result['code'] != HTTP_NO_CONTENT and counter < MAX_RETRIES:
            print ("Failed to delete %s with code %s. Retrying." %
                   (obj_id, result['code']))
            time.sleep(3)
            counter += 1
            result = delete_method(obj_id)
        # Bug fix: test the final result code, not the retry counter. The
        # last retry can succeed exactly when counter reaches MAX_RETRIES,
        # in which case the old ``counter == MAX_RETRIES`` check reported a
        # false failure.
        if result['code'] != HTTP_NO_CONTENT:
            print ("Retries to delete the created resources are exhausted."
                   " Failed to delete.")
    # Removed the trailing ``object_list = []``: rebinding the local name
    # never affected the caller's list, so it was dead code.
def share_resource(api, resource):
    """Creates a secret link to share the resource.

    Marks the resource as shared through its type-specific updater and,
    on success, returns the public sharing URL. Exits the process when
    sharing fails.
    """
    resource_type = get_resource_type(resource)
    updater = get_updater(api, resource_type)
    resource = updater(resource, {"shared": True})
    # Guard clause: bail out of the whole program on failure.
    if not (api.ok(resource) and is_shared(resource)):
        sys.exit("Failed to share the resource: %s" % resource['resource'])
    shared_hash = resource['object']['shared_hash']
    return ("https://bigml.com/shared/%s/%s" %
            (resource_type, shared_hash))
"""Extract the date from a given reference in days from now, date format
or existing resource
"""
days = None
date = None
try:
days = int(reference)
date = datetime.datetime.now() - datetime.timedelta(days=days)
except ValueError:
try:
date = datetime.datetime.strptime(reference, '%Y-%m-%d')
date = date.strftime('%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
try:
resource_type = bigml.api.get_resource_type(reference)
resource = bigml.api.check_resource(reference,
api.getters[resource_type])
date = resource['object']['created']
except (TypeError, KeyError):
return None
return date
# NOTE(review): fragment — tail of a ``resources_by_type``-style function
# whose header is missing from this chunk; indentation is reconstructed
# from the fuller copy of the same function later in this file.
            groups.append(resource)
        elif resource_type in COMPOSED_RESOURCES:
            composed.append(resource)
        else:
            simple.append(resource)
    # Groups (projects/executions) come first in the deletion order.
    groups.sort()
    resources_list = groups
    if not bulk_deletion:
        composed.sort()
        simple.sort()
        resources_list.extend(composed)
        resources_list.extend(simple)
    # Aggregate the number of resources per type.
    for resource in resources_list:
        resource_type = bigml.api.get_resource_type(resource)
        if not resource_type in type_summary:
            type_summary[resource_type] = 0
        type_summary[resource_type] += 1
    return type_summary, resources_list
def filter_resource_types(delete_list, resource_types):
    """Filters the ids using the user-given resource types to ensure that
    only those resources will be deleted.
    """
    # No filter given: keep the list untouched.
    if resource_types is None:
        return delete_list
    return [resource for resource in delete_list
            if bigml.api.get_resource_type(resource) in resource_types]
# NOTE(review): fragment — the ``def predict_probability(...)`` header and
# the opening of its docstring are missing from this chunk; indentation is
# reconstructed.
    :param missing_strategy: LAST_PREDICTION|PROPORTIONAL missing strategy
        for missing fields
    :param compact: If False, prediction is returned as a list of maps, one
        per class, with the keys "prediction" and "probability"
        mapped to the name of the class and its probability,
        respectively. If True, returns a list of probabilities
        ordered by the sorted order of the class names.
    """
    votes = MultiVoteList([])
    # When the fusion was built without missing-numerics support, reject
    # inputs with missing numeric values up front.
    if not self.missing_numerics:
        check_no_missing_numerics(input_data, self.model_fields)
    for models_split in self.models_splits:
        # Instantiate the local models of this split (fusions can nest).
        models = []
        for model in models_split:
            if get_resource_type(model) == "fusion":
                models.append(Fusion(model, api=self.api))
            else:
                models.append(SupervisedModel(model, api=self.api))
        votes_split = []
        for model in models:
            try:
                prediction = model.predict_probability( \
                    input_data,
                    missing_strategy=missing_strategy,
                    compact=True)
            except ValueError:
                # logistic regressions can raise this error if they
                # have missing_numerics=False and some numeric missings
                # are found
                continue
    # NOTE(review): the code between the loop above and this raise is
    # missing in this chunk; the raise presumably belongs to a different
    # method that rejects unsupported model types — confirm against the
    # full file.
    raise ValueError("The resource %s has not an allowed"
                     " supervised model type.")
# NOTE(review): fragment — these lines look like the tail of a
# ``Fusion.__init__``-style constructor whose header is missing from this
# chunk; indentation is reconstructed.
    # Per-model weights reported by the fusion resource, if any.
    self.importance = fusion.get('importance', [])
    self.missing_numerics = fusion.get('missing_numerics', True)
    if fusion.get('fusion'):
        self.fields = fusion.get( \
            'fusion', {}).get("fields")
        self.objective_id = fusion.get("objective_field")
        self.input_fields = fusion.get("input_fields")
    number_of_models = len(self.model_ids)
    # Downloading the model information to cache it
    if self.api.storage is not None:
        for model_id in self.model_ids:
            if get_resource_type(model_id) == "fusion":
                Fusion(model_id, api=self.api)
            else:
                SupervisedModel(model_id, api=self.api)
    # Partition the component models into splits of at most ``max_models``
    # models each; predictions iterate over these splits.
    if max_models is None:
        self.models_splits = [self.model_ids]
    else:
        self.models_splits = [self.model_ids[index:(index + max_models)]
                              for index
                              in range(0, number_of_models, max_models)]
    if self.fields:
        summary = self.fields[self.objective_id]['summary']
        if 'bins' in summary:
            distribution = summary['bins']
        # NOTE(review): the body of the following branch is cut off in
        # this chunk of the file.
        elif 'counts' in summary:
def resources_by_type(resources_list, bulk_deletion=False):
    """Sorts resources by type. Projects and executions are deleted first.
    Then clusters, fusions and ensembles and finally the rest of resources
    Returns aggregations by type.
    If bulk_deletion is set, then only projects or executions are kept

    NOTE(review): this copy of the function is truncated in this chunk —
    it ends at the final ``for`` header with no body; a fuller tail of the
    same logic appears earlier in the file.
    """
    type_summary = {}
    groups = []       # projects / executions: deleted first
    composed = []     # clusters, fusions, ensembles, ...
    simple = []       # everything else
    for resource in resources_list:
        resource_type = bigml.api.get_resource_type(resource)
        if resource_type in GROUP_RESOURCES:
            groups.append(resource)
        elif resource_type in COMPOSED_RESOURCES:
            composed.append(resource)
        else:
            simple.append(resource)
    groups.sort()
    resources_list = groups
    # With bulk_deletion set, only the group resources are kept.
    if not bulk_deletion:
        composed.sort()
        simple.sort()
        resources_list.extend(composed)
        resources_list.extend(simple)
    # Aggregation per type (loop body truncated in this chunk).
    for resource in resources_list:
def __init__(self, model, api=None):
    """Builds a local supervised-model proxy for the given resource.

    Resolves the concrete component class from the resource type, builds
    the wrapped local model, and then adopts its bases and instance state
    so this wrapper behaves like the wrapped component.
    """
    self.api = get_api_connection(api)
    resource_id, model = extract_id(model, api)
    resource_type = get_resource_type(resource_id)
    component_class = COMPONENT_CLASSES[resource_type]
    local_model = component_class(model, api=self.api)
    # Rewire the wrapper's bases and copy the component's attributes so
    # method lookups and state reads hit the concrete model.
    self.__class__.__bases__ = local_model.__class__.__bases__
    for name, value in local_model.__dict__.items():
        setattr(self, name, value)
    self.local_model = local_model