Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Debug output: show the payload before it goes over the wire.
# (Converted from Python 2 print statements to print() calls for
# consistency with the rest of the file.)
print("sending message")
print('-' * 70)
print(message)
# Send the metrics payload to the listener.
sock.sendall(message)
time.sleep(2)  # NB - allows file operations to complete
# Check that the whisper data file was created for this tag.
tagFile = os.path.join(temp_dir, "storage", "whisper", "folder", tag + ".wsp")
self.assertTrue(os.path.exists(tagFile))
# Fetch once and reuse the result (the original fetched twice, once
# just to print the tuple).
data_period_info, stored_data = whisper.fetch(
    tagFile, now - self.step * num_data_points, now)
print((data_period_info, stored_data))
# Compare newest-first so trailing unfilled slots line up with sent data.
for whisper_data, sent_data in zip(reversed(stored_data), reversed(data)):
    # assertAlmostEquals is a deprecated alias of assertAlmostEqual.
    self.assertAlmostEqual(whisper_data, sent_data[1])
time.sleep(stime)
# Aggregate the collected points and record the expected value.
aggregated_data = aggregate(to_aggregate)
data.append(aggregated_data)
print(aggregated_data)
print('')
print('')
time.sleep(2)  # NB - allows file operations to complete
# The aggregated series must have been written out to a whisper file.
tagFile = os.path.join(temp_dir, "storage", "whisper", "folder", tag + ".wsp")
self.assertTrue(os.path.exists(tagFile))
data_period_info, stored_data = whisper.fetch(tagFile, start - 1, time.time())
print('Whisper data period : ' + str(data_period_info))
print('Whisper data : ' + str(stored_data))
print('Data expected: ' + str(data))
print(len(stored_data))  # was a Python 2 print statement
# zip() is a lazy iterator on Python 3: materialize it before printing
# (otherwise only the object repr is shown) and before slicing
# (zip(...)[: -1] raises TypeError on Python 3).
paired = list(zip(stored_data, data))
print(paired)
# Skip the final pair, which may not have been flushed yet.
for whisper_data, sent_data in paired[:-1]:
    # assertAlmostEquals is a deprecated alias of assertAlmostEqual.
    self.assertAlmostEqual(whisper_data, sent_data)
# A single archive of archive_len points at one-second resolution.
step = 1
whisper.create(wsp, [(step, archive_len)])
# Given one more input point than the archive can hold...
surplus = 1
total_points = archive_len + surplus
now_ts = int(time.time())
first_ts = now_ts - total_points + step
points = [(first_ts + offset, random.random() * 10)
          for offset in range(total_points)]
# ...when the db is updated with all of them at once...
whisper.update_many(wsp, points, now=now_ts)
# ...then only the newest archive_len points were actually written.
time_info = whisper.fetch(wsp, 0, now=now_ts)[0]
self.assertEqual(
    time_info,
    (points[-archive_len][0],
     points[-1][0] + step,  # untilInterval = newest + step
     step))
def test_heal_target_missing(self):
    """heal_metric() into a missing target should create it from src.

    The original comment here claimed the target was /dev/null (copied
    from the sibling /dev/null test); this test actually removes the
    target first and expects heal_metric to recreate it with the
    source's data.
    """
    testdb = "test-%s" % self.db
    # Make sure the target really is absent before healing.
    try:
        os.unlink(testdb)
    except (IOError, OSError):
        pass
    self._removedb()
    schema = [(1, 20)]
    self._createdb(self.db, schema)
    original_data = whisper.fetch(self.db, 0)
    # Healing into a missing target must create it and copy the data.
    heal_metric(self.db, testdb)
    data = whisper.fetch(testdb, 0)
    self.assertEqual(original_data, data)
def test_update_single_archive(self):
"""
Update with a single leveled archive
"""
retention_schema = [(1, 20)]
# _update() writes points and returns the (timestamp, value) pairs it
# wrote, which we verify against a full fetch below.
data = self._update(schema=retention_schema)
# fetch the data
fetch = whisper.fetch(self.filename, 0) # all data
fetch_data = fetch[1]
for i, (timestamp, value) in enumerate(data):
# is value in the fetched data?
self.assertEqual(value, fetch_data[i])
# check TimestampNotCovered
with AssertRaisesException(
whisper.TimestampNotCovered(
'Timestamp not covered by any archives in this database.')):
# a timestamp in the future is not covered by any archive
whisper.update(self.filename, 1.337, time.time() + 1)
with AssertRaisesException(
whisper.TimestampNotCovered(
'Timestamp not covered by any archives in this database.')):
# NOTE(review): this with-block appears truncated in this chunk -- its
# body (presumably an update older than the retention window) is
# missing; confirm against the complete file.
def test_heal_source_corrupt(self):
    """Healing from an unreadable source must leave the target untouched."""
    bogus_source = "/dev/null"
    self._removedb()
    self._createdb(self.db, [(1, 20)])
    before = whisper.fetch(self.db, 0)
    # heal_metric should log complaints but exit successfully as it
    # cannot read from the source /dev/null
    heal_metric(bogus_source, self.db)
    after = whisper.fetch(self.db, 0)
    self.assertEqual(before, after)
pass
# NOTE(review): the lines below look like the body of a separate heal
# test (healing into an empty target db); its def line and the
# definition of `testdb` are above this chunk -- confirm against the
# complete file.
schema = [(1, 20)]
emptyData = []
self._createdb(self.db, schema)
# create the target with the same schema but no datapoints
self._createdb(testdb, schema, emptyData)
heal_metric(self.db, testdb)
original_data = whisper.fetch(self.db, 0)
filled_data = whisper.fetch(testdb, 0)
# after healing, the empty target should mirror the source
self.assertEqual(original_data, filled_data)
# Heal again, should still be equal
heal_metric(self.db, testdb)
filled_data = whisper.fetch(testdb, 0)
# healing must be idempotent
self.assertEqual(original_data, filled_data)
return
# we want to retain as much precision as we can, hence we do backwards
# walk in time
# skip forward at max 'step' points at a time
# NOTE(review): this is the interior of a fill-style function -- `src`,
# `dst`, `tstart`, `tstop` and `srcArchives` are defined above this
# chunk; confirm against the complete file.
for archive in srcArchives:
# skip over archives that don't have any data points
rtime = time.time() - archive['retention']
if tstop <= rtime:
continue
# clamp the fetch window to what this archive actually retains
untilTime = tstop
fromTime = rtime if rtime > tstart else tstart
(timeInfo, values) = whisper.fetch(src, fromTime, untilTime)
(start, end, archive_step) = timeInfo
# pair each value with its timestamp and drop the empty (None) slots
pointsToWrite = list(filter(
lambda points: points[1] is not None,
zip(range(start, end, archive_step), values)))
# order points by timestamp, newest first
pointsToWrite.sort(key=lambda p: p[0], reverse=True)
whisper.update_many(dst, pointsToWrite)
# continue the backwards walk from where this archive's window began
tstop = fromTime
# can stop when there's nothing to fetch any more
if tstart == tstop:
return
def fill_archives(src, dst, startFrom):
    """Backfill gaps in dst's archives from src, finest precision first.

    Walks every archive of dst (sorted by retention so the
    highest-resolution archive is healed first), scans its datapoints
    within the window this archive retains, and for each run of missing
    points longer than one archive step copies the corresponding time
    range from src via fill().

    Fix: whisper encodes a missing datapoint as None, so the gap checks
    must use 'is None' -- the original truthiness tests ('not v') also
    treated a legitimate 0.0 value as a gap.
    """
    header = whisper.info(dst)
    archives = header['archives']
    archives = sorted(archives, key=lambda t: t['retention'])
    for archive in archives:
        fromTime = time.time() - archive['retention']
        if fromTime >= startFrom:
            # this archive's whole window is already past startFrom
            continue
        (timeInfo, values) = whisper.fetch(dst, fromTime, startFrom)
        (start, end, step) = timeInfo
        gapstart = None
        for v in values:
            if v is None and gapstart is None:
                # entering a gap: remember where it started
                gapstart = start
            elif v is not None and gapstart is not None:
                # gap just ended; ignore single units lost
                if (start - gapstart) > archive['secondsPerPoint']:
                    fill(src, dst, gapstart - step, start)
                gapstart = None
            elif gapstart is not None and start == end - step:
                # gap runs right up to the end of the fetched window
                fill(src, dst, gapstart - step, start)
            start += step
        # older (coarser) archives only need to cover time before this one
        startFrom = fromTime