enforce("update archive policy", ap)
body = deserialize_and_validate(voluptuous.Schema({
voluptuous.Required("definition"): ArchivePolicyDefinitionSchema,
}))
# Validate the data
try:
ap_items = [archive_policy.ArchivePolicyItem(**item) for item in
body['definition']]
except ValueError as e:
abort(400, six.text_type(e))
try:
return pecan.request.indexer.update_archive_policy(
self.archive_policy, ap_items)
except indexer.UnsupportedArchivePolicyChange as e:
abort(400, six.text_type(e))
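
For context, this is roughly how the voluptuous validation above behaves in isolation; the schema shape and payloads below are made up for illustration, not taken from Gnocchi:

import voluptuous

# Hypothetical schema mirroring the shape of the one above.
schema = voluptuous.Schema({
    voluptuous.Required("definition"): [
        {"granularity": str, "points": int},
    ],
})

try:
    schema({"definition": [{"granularity": "1s", "points": 60}]})  # passes
    schema({"definition": "not-a-list"})  # raises voluptuous.Invalid
except voluptuous.Invalid as e:
    print("validation failed: %s" % e)  # the handler maps this to abort(400, ...)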
def post(self, start=None, stop=None, aggregation='mean',
         reaggregation=None, granularity=None, needed_overlap=100.0,
         groupby=None, fill=None, refresh=False, resample=None,
         transform=None):
    # First, set groupby in the right format: a sorted list of unique
    # strings.
    groupby = sorted(set(arg_to_list(groupby)))

    # NOTE(jd) Sort by groupby so we are sure we do not return multiple
    # groups when using itertools.groupby later.
    try:
        resources = SearchResourceTypeController(
            self.resource_type)._search(sort=groupby)
    except indexer.InvalidPagination:
        abort(400, "Invalid groupby attribute")
    except indexer.IndexerException as e:
        abort(400, e)

    if resources is None:
        return []

    if not groupby:
        metrics = list(filter(None,
                              (r.get_metric(self.metric_name)
                               for r in resources)))
        return AggregationController.get_cross_metric_measures_from_objs(
            metrics, start, stop, aggregation, reaggregation,
            granularity, needed_overlap, fill, refresh, resample,
            transform)
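
The NOTE about sorting is load-bearing: itertools.groupby only merges adjacent items, so unsorted input would yield the same key as several separate groups. A minimal sketch with made-up rows:

import itertools

rows = [{"flavor": "m1"}, {"flavor": "m2"}, {"flavor": "m1"}]
key = lambda r: r["flavor"]

# Unsorted input: "m1" shows up as two separate groups.
print([k for k, _ in itertools.groupby(rows, key)])
# ['m1', 'm2', 'm1']

# Sorting first collapses them, which is what sort=groupby guarantees above.
print([k for k, _ in itertools.groupby(sorted(rows, key=key), key)])
# ['m1', 'm2']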
def delete(self):
    # NOTE(jd) I don't think there's any point in fetching and passing the
    # archive policy here, as the rule is probably checking the actual role
    # of the user, not the content of the AP.
    enforce("delete archive policy", {})
    try:
        pecan.request.indexer.delete_archive_policy(self.archive_policy)
    except indexer.NoSuchArchivePolicy as e:
        abort(404, e)
    except indexer.ArchivePolicyInUse as e:
        abort(400, e)
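
The handler translates indexer exceptions into HTTP statuses: an unknown policy becomes 404, a policy still referenced by metrics becomes 400. A hedged sketch of the same dispatch with stand-in exception classes (none of these names are Gnocchi's):

class NoSuchArchivePolicy(Exception):
    """Stand-in for indexer.NoSuchArchivePolicy."""

class ArchivePolicyInUse(Exception):
    """Stand-in for indexer.ArchivePolicyInUse."""

# Hypothetical mapping mirroring the handler above.
STATUS_FOR = {NoSuchArchivePolicy: 404, ArchivePolicyInUse: 400}

def delete_policy(indexer_delete, name):
    try:
        indexer_delete(name)
    except tuple(STATUS_FOR) as e:
        return STATUS_FOR[type(e)], str(e)
    return 204, ""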
if policy_filter:
    # We don't catch the indexer.QueryAttributeError error here
    # since we expect any user input on this function. If the
    # caller screws it, it's its problem: no need to convert
    # the exception to another type.
    policy_f = QueryTransformer.build_filter(
        engine.dialect.name,
        Metric, policy_filter)
else:
    policy_f = None

if resource_policy_filter:
    q = q.join(Metric.resource)
    try:
        resource_policy_f = QueryTransformer.build_filter(
            engine.dialect.name,
            Resource,
            resource_policy_filter)
    except indexer.QueryAttributeError as e:
        # NOTE(jd) The QueryAttributeError does not know about
        # resource_type, so convert it
        raise indexer.ResourceAttributeError("generic",
                                             e.attribute)
else:
    resource_policy_f = None

if policy_filter or resource_policy_filter:
    q = q.filter(sqlalchemy.or_(policy_f, resource_policy_f))
sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id'])

if marker:
    metric_marker = self.list_metrics(
        attribute_filter={"in": {"id": [marker]}})
    if metric_marker:
        metric_marker = metric_marker[0]
    else:
        raise indexer.InvalidPagination(
            "Invalid marker: `%s'" % marker)
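
The marker lookup above implements marker-based (keyset) pagination: the caller passes the id of the last item it saw, and the listing resumes just after it. A simplified in-memory sketch; the helper below is hypothetical, not Gnocchi API:

def paginate(items, marker=None, limit=2):
    """Return up to `limit` items strictly after the one with id `marker`."""
    start = 0
    if marker is not None:
        ids = [item["id"] for item in items]
        if marker not in ids:
            raise ValueError("Invalid marker: %r" % marker)  # cf. InvalidPagination
        start = ids.index(marker) + 1
    return items[start:start + limit]

rows = [{"id": "a"}, {"id": "b"}, {"id": "c"}]
print(paginate(rows))              # [{'id': 'a'}, {'id': 'b'}]
print(paginate(rows, marker="b"))  # [{'id': 'c'}]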
"aggregation_methods",
default=list(conf.archive_policy.default_aggregation_methods)):
valid_agg_methods,
voluptuous.Required("definition"): ArchivePolicyDefinitionSchema,
})
body = deserialize_and_validate(ArchivePolicySchema)
# Validate the data
try:
    ap = archive_policy.ArchivePolicy.from_dict(body)
except ValueError as e:
    abort(400, six.text_type(e))
enforce("create archive policy", ap)
try:
    ap = pecan.request.indexer.create_archive_policy(ap)
except indexer.ArchivePolicyAlreadyExists as e:
    abort(409, six.text_type(e))

location = "/archive_policy/" + ap.name
set_resp_location_hdr(location)
pecan.response.status = 201
return ap
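
voluptuous.Optional with default= is what lets "aggregation_methods" fall back to the configured default during validation: a missing key is filled in rather than rejected. A small standalone example with made-up values:

import voluptuous

schema = voluptuous.Schema({
    voluptuous.Required("name"): str,
    # Hypothetical default standing in for conf.archive_policy.default_aggregation_methods.
    voluptuous.Optional("aggregation_methods", default=["mean"]): [str],
})

print(schema({"name": "low"}))
# {'name': 'low', 'aggregation_methods': ['mean']}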
def __init__(self, conf):
    self.conf = conf
    self.incoming = incoming.get_driver(self.conf)
    self.indexer = indexer.get_driver(self.conf)
    try:
        self.indexer.create_resource('generic',
                                     self.conf.statsd.resource_id,
                                     self.conf.statsd.creator)
    except indexer.ResourceAlreadyExists:
        LOG.debug("Resource %s already exists",
                  self.conf.statsd.resource_id)
    else:
        LOG.info("Created resource %s", self.conf.statsd.resource_id)
    self.gauges = {}
    self.counters = {}
    self.times = {}
    self.metrics = {
        metric.name: metric
        for metric
        in self.indexer.get_resource('generic',
                                     self.conf.statsd.resource_id,
                                     with_metrics=True).metrics
    }
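
The try/except/else above logs "Created" only when create_resource did not raise, so the success message can never fire for a pre-existing resource. A tiny sketch of that control flow with a stand-in exception:

def create(exists):
    # Stand-in for indexer.create_resource raising ResourceAlreadyExists.
    if exists:
        raise KeyError("already exists")

for exists in (False, True):
    try:
        create(exists)
    except KeyError:
        print("Resource already exists")  # debug path
    else:
        print("Created resource")         # runs only when no exception was raised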
if attr_filter:
    attr_filter = {"and": [
        policy_filter,
        attr_filter
    ]}
else:
    attr_filter = policy_filter

groupby = sorted(set(api.arg_to_list(groupby)))
sorts = groupby if groupby else api.RESOURCE_DEFAULT_PAGINATION
try:
    resources = pecan.request.indexer.list_resources(
        body["resource_type"],
        attribute_filter=attr_filter,
        sorts=sorts)
except indexer.IndexerException as e:
    api.abort(400, six.text_type(e))

if not groupby:
    try:
        return self._get_measures_by_name(
            resources, references, body["operations"], start, stop,
            granularity, needed_overlap, fill, details=details)
    except indexer.NoSuchMetric as e:
        api.abort(400, e)

def groupper(r):
    return tuple((attr, r[attr]) for attr in groupby)

results = []
for key, resources in itertools.groupby(resources, groupper):
    try:
        results.append({
            "group": dict(key),
            "measures": self._get_measures_by_name(
                list(resources), references, body["operations"],
                start, stop, granularity, needed_overlap, fill,
                details=details)
        })
    except indexer.NoSuchMetric:
        pass
return results
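
groupper builds a hashable tuple of (attribute, value) pairs per resource, which doubles as the itertools.groupby key and converts back to a dict for the response group. For example, with made-up attributes:

groupby = ["project_id", "flavor"]

def groupper(r):
    return tuple((attr, r[attr]) for attr in groupby)

r = {"project_id": "p1", "flavor": "m1", "id": "x"}
key = groupper(r)
print(key)        # (('project_id', 'p1'), ('flavor', 'm1'))
print(dict(key))  # {'project_id': 'p1', 'flavor': 'm1'}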
def delete(self):
    # NOTE(jd) I don't think there's any point in fetching and passing the
    # archive policy rule here, as the rule is probably checking the actual
    # role of the user, not the content of the AP rule.
    enforce("delete archive policy rule", {})
    try:
        pecan.request.indexer.delete_archive_policy_rule(
            self.archive_policy_rule.name
        )
    except indexer.NoSuchArchivePolicyRule as e:
        abort(404, six.text_type(e))
def upgrade():
    op.create_table('resource',
        sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False),
        sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('started_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False),
        sa.Column('revision_start', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=False),
        sa.Column('ended_at', gnocchi.indexer.sqlalchemy_types.PreciseTimestamp(), nullable=True),
        sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True),
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
    op.create_index('ix_resource_id', 'resource', ['id'], unique=False)
    op.create_table('archive_policy',
        sa.Column('name', sa.String(length=255), nullable=False),
        sa.Column('back_window', sa.Integer(), nullable=False),
        sa.Column('definition', gnocchi.indexer.sqlalchemy_base.ArchivePolicyDefinitionType(), nullable=False),
        sa.Column('aggregation_methods', gnocchi.indexer.sqlalchemy_base.SetType(), nullable=False),
        sa.PrimaryKeyConstraint('name'),
        mysql_charset='utf8',
        mysql_engine='InnoDB'
    )
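
A sketch of what the matching downgrade() could look like, using the standard Alembic op API; the real revision may differ:

def downgrade():
    # Drop in reverse order of creation: index first, then the tables.
    op.drop_table('archive_policy')
    op.drop_index('ix_resource_id', table_name='resource')
    op.drop_table('resource')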
def post(self):
    enforce("create archive policy rule", {})
    ArchivePolicyRuleSchema = voluptuous.Schema({
        voluptuous.Required("name"): six.text_type,
        voluptuous.Required("metric_pattern"): six.text_type,
        voluptuous.Required("archive_policy_name"): six.text_type,
    })

    body = deserialize_and_validate(ArchivePolicyRuleSchema)
    enforce("create archive policy rule", body)
    try:
        ap = pecan.request.indexer.create_archive_policy_rule(
            body['name'], body['metric_pattern'],
            body['archive_policy_name']
        )
    except indexer.ArchivePolicyRuleAlreadyExists as e:
        abort(409, e)

    location = "/archive_policy_rule/" + ap.name
    set_resp_location_hdr(location)
    pecan.response.status = 201
    return ap
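
From a client's perspective, exercising this endpoint could look like the following requests call; the base URL is an assumption, not taken from the snippet:

import requests

resp = requests.post(
    "http://localhost:8041/v1/archive_policy_rule",  # assumed endpoint
    json={
        "name": "disk-rule",
        "metric_pattern": "disk.io.*",
        "archive_policy_name": "low",
    },
)
print(resp.status_code)               # 201 on success, 409 if the rule exists
print(resp.headers.get("Location"))   # e.g. /archive_policy_rule/disk-rule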