Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_versioned_item_str():
    """str() and repr() of a VersionedItem should both render the flat field list."""
    versioned = VersionedItem(
        symbol="sym",
        library="ONEMINUTE",
        data=pd.DataFrame(),
        version=1.0,
        metadata={'metadata': 'foo'},
    )
    expected = ("VersionedItem(symbol=sym,library=ONEMINUTE,"
                "data=,version=1.0,metadata={'metadata': 'foo'}")
    assert str(versioned) == expected
    assert repr(versioned) == expected
def test_versioned_item_metadata_dict():
    """metadata_dict() should expose symbol/library/version and omit data and metadata."""
    item = VersionedItem(symbol="test", library="test_lib", data=None,
                         version=1.2, metadata=None)
    expected = {'symbol': 'test', 'library': 'test_lib', 'version': 1.2}
    assert item.metadata_dict() == expected
# NOTE(review): fragment of a VersionStore write path -- the enclosing `def` is outside
# this view, so parameter meanings below are inferred and should be confirmed.
handler = self._write_handler(version, symbol, data, **kwargs)
handler.write(self._arctic_lib, version, symbol, data, previous_version, **kwargs)
# Optionally prune superseded versions; `keep_mins` (default 120) presumably bounds how
# recent a version must be to survive pruning -- TODO confirm against _prune_previous_versions.
if prune_previous_version and previous_version:
self._prune_previous_versions(
symbol,
keep_mins=kwargs.get('keep_mins', 120),
new_version_shas=version.get(FW_POINTERS_REFS_KEY)
)
# Insert the new version into the version DB
self._insert_version(version)
logger.debug('Finished writing versions for %s', symbol)
# metadata is popped (not copied) out of the version document; data=None because the
# written payload is not round-tripped back to the caller.
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=version['version'],
metadata=version.pop('metadata', None), data=None,
host=self._arctic_lib.arctic.mongo_host)
# NOTE(review): fragment of a metadata-update path -- the enclosing `def` is outside this view.
# Check if in the meanwhile the reference version (based on which we updated incrementally) has been removed
last_look = self._versions.find_one({'_id': reference_version['_id']})
if last_look is None or last_look.get('deleted'):
# Revert the change
mongo_retry(self._versions.delete_one)({'_id': new_version['_id']})
# Indicate the failure
raise OperationFailure("Failed to write metadata for symbol %s. "
"The previous version (%s, %d) has been removed during the update" %
(symbol, str(reference_version['_id']), reference_version['version']))
if prune_previous_version and reference_version:
self._prune_previous_versions(symbol, new_version_shas=new_version.get(FW_POINTERS_REFS_KEY))
logger.debug('Finished updating versions with new metadata for %s', symbol)
# data=None: only metadata was modified, so no payload is returned to the caller.
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=new_version['version'],
metadata=new_version.get('metadata'), data=None,
host=self._arctic_lib.arctic.mongo_host)
# NOTE(review): fragment of a backing-store write path (note: no mongo host on the
# returned item) -- the enclosing `def` is outside this view.
previous_version = self._backing_store.read_version(self.library_name, symbol)
handler = self._write_handler(version, symbol, data, **kwargs)
handler.write(self._backing_store, self.library_name, version, symbol, data, previous_version, **kwargs)
# NOTE(review): pruning and change-publication are disabled here -- confirm whether this
# is intentional for the backing-store variant before re-enabling.
#if prune_previous_version and previous_version:
# self._prune_previous_versions(symbol)
# self._publish_change(symbol, version)
# Insert the new version into the version DB
self._insert_version(version)
logger.debug('Finished writing versions for %s', symbol)
return VersionedItem(symbol=symbol, library=self.library_name, version=version['version'],
metadata=version.pop('metadata', None), data=None)
def _do_read(self, symbol, version, from_version=None, **kwargs):
    """Materialize `symbol` at `version` into a VersionedItem via the matching read handler.

    Raises NoDataFoundException for deleted versions, and ArcticException when a
    `date_range` kwarg is passed but the handler cannot honor it under strict matching.
    """
    if version.get('deleted'):
        raise NoDataFoundException("No data found for %s in library %s" % (symbol, self._arctic_lib.get_name()))
    handler = self._read_handler(version, symbol)
    # We don't push the date_range check into the handler's code, since the
    # "_with_strict_handler_match" value is configured on a per-library basis and is
    # part of the VersionStore instance.
    unsupported_date_range = (self._with_strict_handler_match
                              and kwargs.get('date_range')
                              and not self.handler_supports_read_option(handler, 'date_range'))
    if unsupported_date_range:
        raise ArcticException("Date range arguments not supported by handler in %s" % symbol)
    data = handler.read(self._arctic_lib, version, symbol, from_version=from_version, **kwargs)
    return VersionedItem(symbol=symbol,
                         library=self._arctic_lib.get_name(),
                         version=version['version'],
                         metadata=version.pop('metadata', None),
                         data=data,
                         host=self._arctic_lib.arctic.mongo_host)
_do_read_retry = mongo_retry(_do_read)
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
"""
# NOTE(review): tail of a read_metadata-style accessor -- the `def` and the opening of
# the docstring above are outside this view. Only the version document is fetched;
# data=None, so no payload is read.
_version = self._read_metadata(symbol, as_of=as_of, read_preference=self._read_preference(allow_secondary))
return VersionedItem(symbol=symbol, library=self._arctic_lib.get_name(), version=_version['version'],
metadata=_version.pop('metadata', None), data=None,
host=self._arctic_lib.arctic.mongo_host)
'''
# NOTE(review): fragment of a transaction-style __init__ -- the `def` line and the
# opening of the docstring closed above are outside this view.
self._version_store = version_store
self._symbol = symbol
self._user = user
self._log = log
self._audit = audit
# Record who opened the transaction and against which library/host.
logger.info("MT: {}@{}: [{}] {}: {}".format(_get_host(version_store).get('l'),
_get_host(version_store).get('mhost'),
user, log, symbol)
)
try:
self.base_ts = self._version_store.read(self._symbol, *args, **kwargs)
except NoDataFoundException:
# No readable data: fall back to the latest known version number, or 0 when the
# symbol has never been written (versions list empty -> appended 0 is taken).
versions = [x['version'] for x in self._version_store.list_versions(self._symbol, latest_only=True)]
versions.append(0)
self.base_ts = VersionedItem(symbol=self._symbol, library=None,
version=versions[0], metadata=None, data=None)
except OperationFailure:
#TODO: Current errors in mongo "Incorrect Number of Segments Returned"
# This workaround should be removed once underlying problem is resolved.
self.base_ts = self._version_store.read_metadata(symbol=self._symbol)
# Fail fast if the series the caller intends to modify no longer matches what is
# stored (a concurrent writer got there first).
if modify_timeseries is not None and not are_equals(modify_timeseries, self.base_ts.data):
raise ConcurrentModificationException()
self._do_write = False