def _get_measures_timeserie(self, metric,
                            aggregation, granularity,
                            from_timestamp=None, to_timestamp=None):
    # Find the number of points the archive policy keeps at this
    # granularity.
    for d in metric.archive_policy.definition:
        if d.granularity == granularity:
            points = d.points
            break
    else:
        raise storage.GranularityDoesNotExist(metric, granularity)

    try:
        all_keys = self._list_split_keys_for_metric(
            metric, aggregation, granularity)
    except storage.MetricDoesNotExist:
        # No measures have been stored yet: return an empty timeserie
        # if the archive policy defines this granularity, otherwise fail.
        for d in metric.archive_policy.definition:
            if d.granularity == granularity:
                return carbonara.AggregatedTimeSerie(
                    sampling=granularity,
                    aggregation_method=aggregation,
                    max_size=d.points)
        raise storage.GranularityDoesNotExist(metric, granularity)

    if from_timestamp:
        from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling(
            from_timestamp, granularity)

    if to_timestamp:
        to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling(
            to_timestamp, granularity)

    timeseries = list(filter(
        lambda x: x is not None,
        self._map_in_thread(
            self._get_measures_and_unserialize,
            ((metric, key, aggregation)
             for key in sorted(all_keys)
             if ((not from_timestamp or key >= from_timestamp)
                 and (not to_timestamp or key <= to_timestamp))))
    ))

    return carbonara.AggregatedTimeSerie.from_timeseries(
        sampling=granularity,
        aggregation_method=aggregation,
        timeseries=timeseries,
        max_size=points)
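
# Usage sketch (hypothetical, not part of the original module): `driver`
# stands for a storage driver instance and `metric` for a stored metric
# whose archive policy defines a 300-second granularity; granularity is
# given in seconds, as the method above expects, and fetch() is assumed
# to yield (timestamp, granularity, value) triples.
#
#   ts = driver._get_measures_timeserie(metric, "mean", 300.0)
#   for timestamp, granularity, value in ts.fetch():
#       print(timestamp, value)
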
def get_aggregations_for_method(self, method):
    """Return a list of aggregations for a method.

    The list is sorted by granularity, descending.

    :param method: Aggregation method.
    """
    return [carbonara.Aggregation(method, d.granularity, d.timespan)
            for d in sorted(self.definition,
                            key=ATTRGETTER_GRANULARITY, reverse=True)]

def get_aggregation(self, method, granularity):
    # Find the timespan the archive policy associates with this
    # granularity; return None if the granularity is unknown.
    for d in self.definition:
        if d.granularity == granularity:
            return carbonara.Aggregation(
                method, d.granularity, d.timespan)
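
# Usage sketch (hypothetical): `ap` is an archive-policy object whose
# `definition` entries expose `granularity` and `timespan`, as the two
# methods above assume.
#
#   aggs = ap.get_aggregations_for_method("mean")
#   coarsest = aggs[0]  # the list is sorted by granularity, descending
#   assert ap.get_aggregation("mean", coarsest.granularity) is not None
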
def _get_unaggregated_timeserie_and_unserialize(
        self, metric, block_size, back_window):
    """Retrieve the unaggregated timeserie for a metric and unserialize it.

    Returns a carbonara.BoundTimeSerie object. If the data cannot
    be retrieved, returns None.
    """
    with utils.StopWatch() as sw:
        raw_measures = (
            self._get_unaggregated_timeserie(
                metric)
        )
        if not raw_measures:
            return
        LOG.debug(
            "Retrieved unaggregated measures "
            "for %s in %.2fs",
            metric.id, sw.elapsed())
    try:
        return carbonara.BoundTimeSerie.unserialize(
            raw_measures, block_size, back_window)
    except carbonara.InvalidData:
        raise CorruptionError(
            "Data corruption detected for %s "
            "unaggregated timeserie" % metric.id)
def _get_splits_and_unserialize(self, metrics_aggregations_keys):
    """Get splits and unserialize them.

    :param metrics_aggregations_keys: A dict where keys are
                                      `storage.Metric` and values are dicts
                                      of {Aggregation: [SplitKey]} to
                                      retrieve.
    :return: A dict where keys are `storage.Metric` and values are dicts
             of {aggregation: [`carbonara.AggregatedTimeSerie`]}.
    """
    raw_measures = self._get_splits(metrics_aggregations_keys)
    results = collections.defaultdict(
        lambda: collections.defaultdict(list))
    for metric, aggregations_and_raws in six.iteritems(raw_measures):
        for aggregation, raws in six.iteritems(aggregations_and_raws):
            for key, raw in six.moves.zip(
                    metrics_aggregations_keys[metric][aggregation], raws):
                try:
                    ts = carbonara.AggregatedTimeSerie.unserialize(
                        raw, key, aggregation)
                except carbonara.InvalidData:
                    LOG.error("Data corruption detected for %s "
                              "aggregated `%s' timeserie, granularity "
                              "`%s' around time `%s', ignoring.",
                              metric.id, aggregation.method, key.sampling,
                              key)
                    # Substitute an empty timeserie so one corrupted
                    # split does not break the whole batch.
                    ts = carbonara.AggregatedTimeSerie(aggregation)
                results[metric][aggregation].append(ts)
    return results
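
# Usage sketch (hypothetical): fetch every stored split for one metric
# and one aggregation in a single batched call; `driver`, `metric`,
# `aggregation` and `split_keys` are assumed to exist.
#
#   keys = {metric: {aggregation: sorted(split_keys)}}
#   series = driver._get_splits_and_unserialize(keys)[metric][aggregation]
#   # `series` is one carbonara.AggregatedTimeSerie per key; corrupted
#   # splits come back as empty timeseries rather than raising.
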
def handle_resample(agg, granularity, timestamps, values, is_aggregated,
                    references, sampling):
    # TODO(sileht): make a more optimised version that
    # computes the data across the whole matrix
    new_values = None
    result_timestamps = timestamps
    for ts in values.T:
        # Resample each column (one per reference) independently, then
        # stack the results back into a matrix.
        ts = carbonara.AggregatedTimeSerie.from_data(
            carbonara.Aggregation(agg, None, None),
            timestamps, ts)
        ts = ts.resample(sampling)
        result_timestamps = ts["timestamps"]
        if new_values is None:
            new_values = numpy.array([ts["values"]])
        else:
            new_values = numpy.concatenate((new_values, [ts["values"]]))
    return sampling, result_timestamps, new_values.T, is_aggregated
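
# A minimal sketch of calling handle_resample directly (assumes numpy and
# gnocchi's carbonara are importable; the datetime64/timedelta64 types are
# an assumption about the carbonara version in use, and "cpu_util" is an
# illustrative reference name).
import numpy

timestamps = numpy.array(["2017-01-01T00:00:00",
                          "2017-01-01T00:05:00",
                          "2017-01-01T00:10:00"],
                         dtype="datetime64[ns]")
values = numpy.array([[1.0], [2.0], [3.0]])  # one column per reference
sampling, new_ts, new_values, aggregated = handle_resample(
    "mean", numpy.timedelta64(300, "s"), timestamps, values,
    False, ["cpu_util"], numpy.timedelta64(600, "s"))
# Points falling into the same 600-second bucket are averaged together,
# so new_values ends up with two rows instead of three.
print(new_ts, new_values)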