Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def formatMetricRowProxy(metricObj):
    """Convert a metric row proxy into a plain dict suitable for API output.

    :param metricObj: metric row proxy; must expose ``tag_name``, ``server``,
        ``keys()`` and the metric display columns. Its ``parameters`` value may
        be a JSON-encoded string or an already-decoded object.
    :returns: dict containing only the allowed metric display fields, plus
        ``display_name`` and the decoded ``parameters``.
    """
    # Prefer "tag_name (server)" when a non-empty tag name is present.
    if metricObj.tag_name:
        displayName = "%s (%s)" % (metricObj.tag_name, metricObj.server)
    else:
        displayName = metricObj.server

    # ``parameters`` may arrive JSON-encoded; decode it exactly once.
    if (hasattr(metricObj, "parameters") and
            isinstance(metricObj.parameters, basestring)):
        parameters = json.loads(metricObj.parameters)
    else:
        parameters = metricObj.parameters

    engine = repository.engineFactory()

    # Whitelist of column names permitted in the API response.
    allowedKeys = {col.name for col in getMetricDisplayFields(engine)}

    metricDict = {col: getattr(metricObj, col)
                  for col in metricObj.keys()
                  if col in allowedKeys}
    metricDict["display_name"] = displayName
    metricDict["parameters"] = parameters
    return metricDict
""" Inspect all inbound model results in a batch for anomaly thresholds and
trigger notifications where applicable.
:param amqp.messages.ConsumerMessage message: ``message.body`` is a
serialized batch of model inference results generated in
``AnomalyService`` and must be deserialized using
``AnomalyService.deserializeModelResult()``. The message conforms to
htmengine/runtime/json_schema/model_inference_results_msg_schema.json
"""
# Messages carrying a "dataType" header are not model inference results;
# acknowledge and skip them. (Producer contract not visible in this chunk —
# TODO confirm which message types set "dataType".)
if message.properties.headers and "dataType" in message.properties.headers:
# Not a model inference result
message.ack()
return
grok.app.config.loadConfig() # reload config on every batch
engine = repository.engineFactory()
# Cache minimum threshold to trigger any notification to avoid permuting
# settings x metricDataRows
# NOTE(review): this outer try's handlers are outside the visible chunk.
try:
# Deserialize the batch; log and re-raise on failure so the caller can
# decide whether to reject/requeue the message.
try:
batch = AnomalyService.deserializeModelResult(message.body)
except Exception:
self._log.exception("Error deserializing model result")
raise
# Load all settings for all users (once per incoming batch)
with engine.connect() as conn:
settings = repository.retryOnTransientErrors(
repository.getAllNotificationSettings)(conn)
self._log.debug("settings: %r" % settings)
"message": null,
"parameters": {
"InstanceId": "i-12345678",
"region": "us-west-2"
},
"uid": "0b6b97022fdb4134936aae92aa67393b"
},
...
]
"""
# NOTE(review): handler body fragment — the enclosing def and the except
# clauses for this try are outside the visible chunk.
try:
self.addStandardHeaders()
engine = repository.engineFactory()
with engine.connect() as conn:
# Fetch all metrics plus a map of uid -> display value for the given
# period (``period`` is presumably a handler argument — confirm).
modelIterator = repository.getAllMetrics(conn, fields=getMetricDisplayFields(conn))
displayValuesMap = repository.getMetricIdsSortedByDisplayValue(conn, period)
# Keep track of the largest model display value for each server
serverValues = defaultdict(float)
modelsList = []
for model in modelIterator:
val = displayValuesMap.get(model.uid)
if val is not None:
serverValues[model.server] = max(float(val),
serverValues[model.server])
modelsList.append(convertMetricRowToMetricDict(model))
def GET(self, autostackId, *args): # pylint: disable=C0103,W0613
"""
Get Metrics associated with autostack
::
GET /_autostacks/{autostackId}/metrics
NOTE: args is ignored. Function signature for all method handlers must
be compatible with the regexp pattern that matches. POST optionally
takes a second argument, DELETE requires it.
:param autostackId: unique id of the autostack
:returns: JSON-encoded list of metric dicts
:raises web.notfound: when the autostack does not exist
"""
try:
self.addStandardHeaders()
engine = repository.engineFactory()
# Fetch metric rows restricted to the displayable fields only.
metricRows = repository.getAutostackMetrics(engine,
autostackId,
getMetricDisplayFields(engine))
metricsList = [convertMetricRowToMetricDict(metricRow)
for metricRow in metricRows]
return utils.jsonEncode(metricsList)
except ObjectNotFoundError:
raise web.notfound("Autostack not found: Autostack ID: %s" % autostackId)
except web.HTTPError as ex:
# web.ctx.status is a string like "404 Not Found"; match the leading
# status code digits.
if bool(re.match(r"([45][0-9][0-9])\s?", web.ctx.status)):
# Log 400-599 status codes as errors, ignoring 200-399
log.error(str(ex) or repr(ex))
raise
# NOTE(review): this handler's body continues past the visible chunk.
except Exception as ex:
# replicated into and used by forked child processes (e.g., the same MySQL
# connection socket file descriptor used by multiple processes). And we
# can't take advantage of the process Pool's maxtasksperchild feature
# either (for the same reason)
self._log.info("Starting grok Metric Collector")
# Managed JoinableQueue so worker processes can report results back; the
# Pipe is unidirectional (recvPipe reads what sendPipe writes).
resultsQueue = multiprocessing.Manager().JoinableQueue()
recvPipe, sendPipe = multiprocessing.Pipe(False)
processPool = multiprocessing.Pool(
processes=self._WORKER_PROCESS_POOL_SIZE,
maxtasksperchild=None)
# NOTE(review): the matching except/finally for this try is outside the
# visible chunk.
try:
with ModelSwapperInterface() as modelSwapper:
engine = repository.engineFactory()
# Main collection loop: runs until the process is stopped externally.
while True:
startTime = time.time()
# Periodically purge stale entries from the internal info cache.
if startTime > self._nextCacheGarbageCollectionTime:
# TODO: unit-test
self._garbageCollectInfoCache()
# Determine which metrics are due for an update
metricsToUpdate = self._getCandidateMetrics(engine)
filterDuration = time.time() - startTime
# Nothing due: sleep briefly instead of busy-polling the database.
if not metricsToUpdate:
time.sleep(self._NO_PENDING_METRICS_SLEEP_SEC)
continue
def run(self):
# Service loop: repeatedly poll for autostacks whose metrics are due for
# data collection and dispatch collection requests for them.
# NOTE(review): this method continues past the visible chunk — the
# requests.extend(...) call below is truncated here.
with ModelSwapperInterface() as modelSwapper:
engine = repository.engineFactory()
while True:
with engine.connect() as conn:
# Retried on transient DB errors (e.g., deadlock/disconnect).
pendingStacks = repository.retryOnTransientErrors(
repository.getAutostackMetricsPendingDataCollection)(conn)
# Nothing pending: back off instead of busy-polling.
if not pendingStacks:
time.sleep(self._NOTHING_READY_SLEEP_TIME_SEC)
continue
# Build a sequence of autostack metric requests
requests = []
for autostack, metrics in pendingStacks:
# refBase offsets refIDs so they stay unique across autostacks.
refBase = len(requests)
requests.extend(
AutostackMetricRequest(refID=refBase + i,
autostack=autostack,
# parameters
# Columns to read from the metric table for export; "parameters" is stored
# JSON-encoded and is decoded below.
metricFields = [schema.metric.c.uid,
schema.metric.c.datasource,
schema.metric.c.name,
schema.metric.c.description,
schema.metric.c.server,
schema.metric.c.location,
schema.metric.c.parameters,
schema.metric.c.status,
schema.metric.c.message,
schema.metric.c.last_timestamp,
schema.metric.c.poll_interval,
schema.metric.c.tag_name,
schema.metric.c.last_rowid]
# NOTE(review): fragment — ``uid``, ``inputData`` and ``path`` are defined
# outside the visible chunk.
with repository.engineFactory().connect() as conn:
metricRow = repository.getMetric(conn,
uid,
metricFields)
# Build a plain dict of column name -> value, JSON-decoding only the
# "parameters" column.
metric = dict([(col.name, utils.jsonDecode(getattr(metricRow, col.name))
if col.name == "parameters"
else getattr(metricRow, col.name))
for col in metricFields])
# display_name is "tag_name (server)" when a tag name is set, else server.
if metric["tag_name"]:
metric["display_name"] = "%s (%s)" % (metric["tag_name"],
metric["server"])
else:
metric["display_name"] = metric["server"]
inputData["metric"] = utils.jsonEncode(metric)
metricPath = os.path.join(path, "metric.json")
:type metric: TODO
:returns: metric statistics
:rtype: dict {"min": minVal, "max": maxVal}
:raises: ValueError if the metric doesn't not belong to an Autostack
:raises: grok.app.exceptions.ObjectNotFoundError if the metric or the
corresponding autostack doesn't exist; this may happen if it got deleted
by another process in the meantime.
:raises: grok.app.exceptions.MetricStatisticsNotReadyError if there are no or
insufficent samples at this time; this may also happen if the metric and
its data were deleted by another process in the meantime
"""
# NOTE(review): fragment — the enclosing def and the code following the
# accumulator initialization are outside the visible chunk.
engine = repository.engineFactory()
# Only autostack metrics are supported here; reject anything else early.
if metric.datasource != "autostack":
raise ValueError(
"Metric must belong to an Autostack but has datasource=%r"
% metric.datasource)
metricGetter = EC2InstanceMetricGetter()
try:
with engine.connect() as conn:
autostack = repository.getAutostackFromMetric(conn, metric.uid)
instanceMetricList = metricGetter.collectMetricStatistics(autostack, metric)
finally:
# Always release the metric getter's resources, even on error.
metricGetter.close()
# Accumulators for aggregating per-instance min/max statistics.
n = 0
mins = 0.0
maxs = 0.0