# Not the first time we have processed this timeserie.
if need_rewrite:
    for key in existing_keys:
        if agg_oldest_values['prev_oldest_mutable_key'] <= key:
            if key >= agg_oldest_values['oldest_mutable_key']:
                break
            LOG.debug(
                "Compressing previous split %s (%s) for metric %s",
                key, aggregation.method, metric)
            # NOTE(jd) Rewrite it entirely for fun (and later for
            # compression). For that, we just pass an empty split.
            keys_and_split_to_store[(key, aggregation)] = (
                carbonara.AggregatedTimeSerie(aggregation))

for key, split in ts.split():
    if key >= oldest_key_to_keep:
        LOG.debug(
            "Storing split %s (%s) for metric %s",
            key, aggregation.method, metric)
        keys_and_split_to_store[(key, aggregation)] = split

return (deleted_keys, keys_and_split_to_store)
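
# --- Usage sketch (not from the snippet above; a minimal illustration) ---
# Shows the split() iteration the storage code relies on: build an
# AggregatedTimeSerie from raw points, then walk its (key, split) pairs.
# Assumes gnocchi is importable; the timestamps, values and one-minute
# granularity below are invented for the example.
import numpy
from gnocchi import carbonara

example_agg = carbonara.Aggregation("mean", numpy.timedelta64(60, 's'), None)
example_serie = carbonara.AggregatedTimeSerie.from_data(
    example_agg,
    [numpy.datetime64("2023-01-01T12:00:00"),
     numpy.datetime64("2023-01-01T12:01:00"),
     numpy.datetime64("2023-01-01T12:02:00")],
    [3.0, 5.0, 6.0])
for example_key, example_split in example_serie.split():
    # Each key marks the start of a fixed-size window of points.
    print(example_key, len(example_split))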
def __eq__(self, other):
    return (isinstance(other, AggregatedTimeSerie)
            and super(AggregatedTimeSerie, self).__eq__(other)
            and self.aggregation == other.aggregation)
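
# --- Usage sketch (invented values; assumes gnocchi is importable) ---
# Illustrates the __eq__ above: two series compare equal only when both
# the underlying points and the Aggregation match.
import numpy
from gnocchi import carbonara

eq_agg = carbonara.Aggregation("mean", numpy.timedelta64(60, 's'), None)
eq_points = [numpy.datetime64("2023-01-01T12:00:00")]
a = carbonara.AggregatedTimeSerie.from_data(eq_agg, eq_points, [1.0])
b = carbonara.AggregatedTimeSerie.from_data(eq_agg, eq_points, [1.0])
assert a == b  # same points, same aggregation

max_agg = carbonara.Aggregation("max", numpy.timedelta64(60, 's'), None)
c = carbonara.AggregatedTimeSerie.from_data(max_agg, eq_points, [1.0])
assert not (a == c)  # aggregation differs, so the series differ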
results = collections.defaultdict(
    lambda: collections.defaultdict(list))
for metric, aggregations_and_raws in six.iteritems(raw_measures):
    for aggregation, raws in six.iteritems(aggregations_and_raws):
        for key, raw in six.moves.zip(
                metrics_aggregations_keys[metric][aggregation], raws):
            try:
                ts = carbonara.AggregatedTimeSerie.unserialize(
                    raw, key, aggregation)
            except carbonara.InvalidData:
                LOG.error("Data corruption detected for %s "
                          "aggregated `%s' timeserie, granularity "
                          "`%s' around time `%s', ignoring.",
                          metric.id, aggregation.method, key.sampling,
                          key)
                ts = carbonara.AggregatedTimeSerie(aggregation)
            results[metric][aggregation].append(ts)
return results
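
# --- Standalone sketch of the grouping pattern above (toy stand-ins) ---
# A two-level defaultdict keyed by metric then aggregation, with each leaf
# collecting a list of unserialized series; the strings below stand in for
# the real metric/aggregation/timeserie objects.
import collections

demo = collections.defaultdict(lambda: collections.defaultdict(list))
demo["metric-1"]["mean"].append("ts-a")
demo["metric-1"]["mean"].append("ts-b")
demo["metric-1"]["max"].append("ts-c")
assert demo["metric-1"]["mean"] == ["ts-a", "ts-b"]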
def __init__(self, aggregation, ts=None):
    """A time serie that is downsampled.

    Used to represent the downsampled timeserie for a single
    granularity/aggregation-function pair stored for a metric.
    """
    super(AggregatedTimeSerie, self).__init__(ts)
    self.aggregation = aggregation
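
# --- Usage sketch (invented values; assumes gnocchi is importable) ---
# Constructing an AggregatedTimeSerie as the __init__ above allows: either
# empty (ts=None), or populated through the from_data() classmethod.
import numpy
from gnocchi import carbonara

ctor_agg = carbonara.Aggregation("mean", numpy.timedelta64(300, 's'), None)
empty = carbonara.AggregatedTimeSerie(ctor_agg)  # no points yet
populated = carbonara.AggregatedTimeSerie.from_data(
    ctor_agg,
    [numpy.datetime64("2023-01-01T00:00:00")],
    [42.0])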
def handle_resample(agg, granularity, timestamps, values, is_aggregated,
                    references, sampling):
    # TODO(sileht): make a more optimised version that
    # computes the data across the whole matrix
    new_values = None
    result_timestamps = timestamps
    for ts in values.T:
        ts = carbonara.AggregatedTimeSerie.from_data(
            carbonara.Aggregation(agg, None, None),
            timestamps, ts)
        ts = ts.resample(sampling)
        result_timestamps = ts["timestamps"]
        if new_values is None:
            new_values = numpy.array([ts["values"]])
        else:
            new_values = numpy.concatenate((new_values, [ts["values"]]))
    return sampling, result_timestamps, new_values.T, is_aggregated
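
# --- Usage sketch (invented inputs; assumes handle_resample above lives in
# a module where carbonara and numpy are imported) ---
# Drives handle_resample with a small matrix: two value columns resampled
# to one-minute means; `granularity`, `is_aggregated` and `references` are
# mostly passed through untouched by the function.
import numpy
from gnocchi import carbonara

in_timestamps = numpy.array([numpy.datetime64("2023-01-01T12:00:00"),
                             numpy.datetime64("2023-01-01T12:00:30"),
                             numpy.datetime64("2023-01-01T12:01:00")])
in_values = numpy.array([[1.0, 10.0],
                         [3.0, 30.0],
                         [5.0, 50.0]])
out_sampling, out_timestamps, out_values, _ = handle_resample(
    "mean", numpy.timedelta64(1, 's'), in_timestamps, in_values,
    False, None, numpy.timedelta64(60, 's'))
# out_values has one row per resampled timestamp, one column per serie:
# the first minute averages to [2.0, 20.0], the second holds [5.0, 50.0].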
def _get_measures_and_unserialize(self, metric, key, aggregation):
    data = self._get_measures(metric, key, aggregation)
    try:
        return carbonara.AggregatedTimeSerie.unserialize(
            data, key, aggregation)
    except carbonara.InvalidData:
        LOG.error("Data corruption detected for %s "
                  "aggregated `%s' timeserie, granularity `%s' "
                  "around time `%s', ignoring.",
                  metric.id, aggregation, key.sampling, key)
        # On corruption this branch only logs, so the method implicitly
        # returns None; callers must cope with a missing serie.
def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs):
    agg = ref.metric.archive_policy.get_aggregation(
        ref.aggregation, granularity)
    try:
        data = storage.get_aggregated_measures(
            {ref.metric: [agg]},
            *args, **kwargs)[ref.metric][agg]
    except gnocchi_storage.MetricDoesNotExist:
        data = carbonara.AggregatedTimeSerie(
            carbonara.Aggregation(ref.aggregation, granularity, None))
    return (ref, data)
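
# --- Standalone sketch of the fallback above ---
# When the storage layer raises MetricDoesNotExist, an empty
# AggregatedTimeSerie with the requested method/granularity is substituted
# so callers always receive a serie. `fetch` is a hypothetical stand-in
# for storage.get_aggregated_measures; assumes gnocchi is importable.
from gnocchi import carbonara
from gnocchi import storage as gnocchi_storage

def fetch_or_empty(fetch, metric, method, granularity):
    try:
        return fetch(metric, method, granularity)
    except gnocchi_storage.MetricDoesNotExist:
        return carbonara.AggregatedTimeSerie(
            carbonara.Aggregation(method, granularity, None))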
def resample(self, sampling):
    return AggregatedTimeSerie.from_grouped_serie(
        self.group_serie(sampling),
        Aggregation(self.aggregation.method, sampling,
                    self.aggregation.timespan))
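
# --- Usage sketch (invented values; assumes gnocchi is importable) ---
# Calls the resample() above directly: a one-second mean serie downsampled
# to one minute; the result keeps the method and timespan but carries the
# coarser granularity in its Aggregation.
import numpy
from gnocchi import carbonara

fine = carbonara.AggregatedTimeSerie.from_data(
    carbonara.Aggregation("mean", numpy.timedelta64(1, 's'), None),
    [numpy.datetime64("2023-01-01T12:00:00"),
     numpy.datetime64("2023-01-01T12:00:01"),
     numpy.datetime64("2023-01-01T12:01:00")],
    [3.0, 5.0, 6.0])
coarse = fine.resample(numpy.timedelta64(60, 's'))
# coarse now holds mean(3.0, 5.0) = 4.0 at 12:00 and 6.0 at 12:01.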