Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
u"in": _operator_in,
u"like": lambda field, value: field.like(value),
}
# N-ary boolean combinators accepted by the filter grammar.  Both the
# English keyword and the Unicode logic symbol map onto the same
# SQLAlchemy conjunction ("∨" is OR, "∧" is AND).
multiple_operators = {
u"or": sqlalchemy.or_,
u"∨": sqlalchemy.or_,
u"and": sqlalchemy.and_,
u"∧": sqlalchemy.and_,
}
# Ordered (SQLAlchemy column type -> Python cast) pairs used to coerce a
# raw filter value to the column's native type before comparison.  Order
# matters: presumably the first matching column type wins — confirm at
# the lookup site, which is outside this view.
converters = (
(types.TimestampUTC, utils.to_datetime),
(sa_types.String, six.text_type),
(sa_types.Integer, int),
(sa_types.Numeric, float),
)
@classmethod
def _handle_multiple_op(cls, engine, table, op, nodes):
    """Combine the filters built from *nodes* with the n-ary *op*.

    :param op: an n-ary SQLAlchemy conjunction (e.g. and_/or_).
    :param nodes: iterable of sub-expressions to turn into filters.
    """
    subfilters = [cls.build_filter(engine, table, node)
                  for node in nodes]
    return op(*subfilters)
@classmethod
def _handle_unary_op(cls, engine, table, op, node):
    """Apply the unary operator *op* to the filter built from *node*."""
    subfilter = cls.build_filter(engine, table, node)
    return op(subfilter)
def __ne__(self, other):
    # total_ordering does not synthesize __ne__, and Python 2 does not
    # derive it from __eq__, so delegate to the shared comparator.
    result = self._compare(operator.ne, other)
    return result
def __hash__(self):
    """Hash on the name only — presumably mirroring name-based
    equality; confirm against the class's __eq__/_compare."""
    return self.name.__hash__()
class IncomingDriver(object):
    # Prefix under which incoming raw measures are stored.
    MEASURE_PREFIX = "measure"
    # Template for sack names: total sack count plus this sack's number.
    SACK_NAME_FORMAT = "incoming{total}-{number}"
    # Storage keys under which the driver configuration is persisted.
    CFG_PREFIX = 'gnocchi-config'
    CFG_SACKS = 'sacks'
    # NOTE(sileht): By default we use threads, but some driver can disable
    # threads by setting this to utils.sequencial_map
    MAP_METHOD = staticmethod(utils.parallel_map)
@property
def NUM_SACKS(self):
    """Number of storage sacks, detected once and cached.

    :raises SackDetectionError: when the sack count cannot be read
        from storage (e.g. gnocchi-upgrade was never run).
    """
    try:
        # Fast path: already detected on a previous access.
        return self._num_sacks
    except AttributeError:
        pass
    try:
        self._num_sacks = int(self._get_storage_sacks())
    except Exception as e:
        raise SackDetectionError(e)
    return self._num_sacks
def __init__(self, conf, greedy=True):
    # conf and greedy are accepted for driver-interface compatibility but
    # are not stored here; presumably subclasses consume them — confirm.
    # _sacks is lazily populated later (None means "not yet discovered").
    self._sacks = None
def upgrade(self, num_sacks):
try:
self.NUM_SACKS
"""Calculates moving func of data with sampling width of window.
:param data: Series of timestamp, value pairs
:param func: the function to use when aggregating
:param window: (float) range of data to use in each aggregation.
:param min_grain: granularity of the data being passed in.
:param center: whether to index the aggregated values by the first
timestamp of the values picked up by the window or by the central
timestamp.
:param min_size: if the number of points in the window is less than
min_size, the aggregate is not computed and nan is returned for
that iteration.
"""
if center:
center = utils.strtobool(center)
def moving_window(x):
msec = numpy.timedelta64(1, 'ms')
zero = numpy.timedelta64(0, 's')
half_span = window / 2
start = utils.normalize_time(data.index[0])
stop = utils.normalize_time(data.index[-1] + min_grain)
# min_grain addition necessary since each bin of rolled-up data
# is indexed by leftmost timestamp of bin.
left = half_span if center else zero
right = 2 * half_span - left - msec
# msec subtraction is so we don't include right endpoint in slice.
x = utils.normalize_time(x)
def __init__(self, resource_type, id):
    """Resolve the resource id and wire up its sub-controllers.

    Aborts the request with 404 when *id* cannot be converted into a
    resource UUID for the current user.
    """
    self._resource_type = resource_type
    current_user = pecan.request.auth_helper.get_current_user(
        pecan.request)
    try:
        self.id = utils.ResourceUUID(id, current_user)
    except ValueError:
        abort(404, six.text_type(indexer.NoSuchResource(id)))
    resource_id = str(self.id)
    self.metric = NamedMetricController(resource_id, self._resource_type)
    self.history = ResourceHistoryController(resource_id,
                                             self._resource_type)
def ResourceUUID(value, creator):
    """Voluptuous-friendly wrapper around utils.ResourceUUID.

    Re-raises ValueError as voluptuous.Invalid so schema validation
    reports it as an ordinary validation failure.
    """
    try:
        resource_uuid = utils.ResourceUUID(value, creator)
    except ValueError as e:
        raise voluptuous.Invalid(e)
    return resource_uuid
def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
    """Get the unaggregated timeserie of metrics.

    If the metrics does not exist, it is created.

    :param metrics: A list of metrics.
    :param version: The storage format version number.
    :return: dict mapping each metric to its fetched timeserie
        (None when the per-metric fetch failed).
    """
    # Per-metric failures are swallowed and surface as None values.
    fetch_one = utils.return_none_on_failure(
        self._get_or_create_unaggregated_timeseries_unbatched)
    jobs = ((metric, version) for metric in metrics)
    results = self.MAP_METHOD(fetch_one, jobs)
    return dict(six.moves.zip(metrics, results))
retry_on_exception=utils.retry_if_retry_raised)
def _wait_points_exists(self, metric_id, where):
    """Poll InfluxDB until points matching *where* become visible.

    No-op unless self._block_until_data_ingested is set.

    NOTE(review): this block appears truncated in this view — the body
    of the trailing ``if`` branch is not visible; confirm upstream.
    """
    # NOTE(sileht): influxdb query returns even the data is not yet insert
    # in the asked series, the work is done in an async fashion, so a
    # immediate get_measures after an add_measures will not returns the
    # just inserted data. perhaps related:
    # https://github.com/influxdb/influxdb/issues/2450 This is a workaround
    # to wait that data appear in influxdb...
    if not self._block_until_data_ingested:
        return
    try:
        result = self.influx.query("SELECT * FROM \"%(metric_id)s\" WHERE "
                                   "%(where)s LIMIT 1" %
                                   dict(metric_id=metric_id, where=where),
                                   database=self.database)
    except influxdb.client.InfluxDBClientError as e:
        if "measurement not found" in e.content:
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from concurrent import futures
import daiquiri
import numpy
import six
from gnocchi import incoming
from gnocchi import utils
LOG = daiquiri.getLogger(__name__)
_NUM_WORKERS = utils.get_default_workers()
class CarbonaraBasedStorage(incoming.StorageDriver):
    # Prefix under which incoming raw measures are stored.
    MEASURE_PREFIX = "measure"
    # Prefix used to name the incoming sacks.
    SACK_PREFIX = "incoming"
    # Storage keys under which the driver configuration is persisted.
    CFG_PREFIX = 'gnocchi-config'
    CFG_SACKS = 'sacks'
@property
def NUM_SACKS(self):
    """Number of storage sacks, detected once and cached.

    NOTE(review): this view appears truncated — the final return (and
    likely a re-raise after logging) is not visible; confirm upstream.
    """
    if not hasattr(self, '_num_sacks'):
        try:
            self._num_sacks = int(self.get_storage_sacks())
        except Exception as e:
            # Logged rather than silently ignored; a missing sack count
            # usually means gnocchi-upgrade never ran on this storage.
            LOG.error('Unable to detect the number of storage sacks. '
                      'Ensure gnocchi-upgrade has been executed: %s', e)
def get_driver(conf):
    """Return configured incoming driver only

    :param conf: incoming configuration only (not global)
    """
    driver_class = utils.get_driver_class('gnocchi.incoming', conf.incoming)
    return driver_class(conf.incoming, conf.metricd.greedy)