from datetime import datetime as dt

import pytest
from arctic.date import DateRange, mktz
from arctic.exceptions import NoDataFoundException

def test_get_libraries_no_data_raises_exception_tzinfo_given(toplevel_tickstore, arctic):
    tzinfo = mktz('Asia/Chongqing')
    date_range = DateRange(start=dt(2009, 1, 1, tzinfo=tzinfo),
                           end=dt(2010, 12, 31, 23, 59, 59, 999000, tzinfo=tzinfo))
    with pytest.raises(NoDataFoundException):
        toplevel_tickstore._get_libraries(date_range)
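
# For contrast, a hedged sketch of the setup the failing test omits: registering
# an underlying library for a date range (this assumes TopLevelTickStore.add
# exists and the named library has been initialised; 'FEED_2010.LEVEL1' is a
# hypothetical name, not from the original).
def test_get_libraries_returns_registered_library(toplevel_tickstore):
    toplevel_tickstore.add(DateRange(start=dt(2010, 1, 1, tzinfo=mktz('UTC')),
                                     end=dt(2010, 12, 31, 23, 59, 59, tzinfo=mktz('UTC'))),
                           'FEED_2010.LEVEL1')
    libs = toplevel_tickstore._get_libraries(
        DateRange(start=dt(2010, 2, 1, tzinfo=mktz('UTC')),
                  end=dt(2010, 3, 1, tzinfo=mktz('UTC'))))
    assert len(libs) >= 1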
    def _get_libraries(self, date_range):
        # (Snippet truncated: the code that populates `rtn` with the libraries
        # already registered for this range is elided; the branch head below is
        # inferred from the tzinfo usage and is an assumption.)
        if date_range.end is not None and date_range.end.tzinfo:
            current_start = dt(1970, 1, 1, 0, 0, tzinfo=date_range.end.tzinfo)
        else:
            current_start = dt(1970, 1, 1, 0, 0)
        if date_range.end is None or current_start < date_range.end:
            name = self.get_name()
            db_name, tick_type = name.split('.', 1)
            current_lib = "{}_current.{}".format(db_name, tick_type)
            try:
                rtn.append(TickStoreLibrary(self._arctic_lib.arctic[current_lib],
                                            DateRange(current_start, None, OPEN_OPEN)))
            except LibraryNotFoundException:
                pass  # No '_current' library; move on.
        if not rtn:
            raise NoDataFoundException("No underlying libraries exist for the given date range")
        return rtn
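
# The tuples returned above pair a library handle with the slice of the range it
# covers. A minimal sketch of fanning a read out across them, assuming
# TickStoreLibrary exposes `library` and `date_range` fields and DateRange has
# an `intersection` method:
def read_across_libraries(toplevel_tickstore, symbol, date_range):
    pieces = []
    for lib in toplevel_tickstore._get_libraries(date_range):
        # Only ask each underlying library for the part of the range it owns.
        sub_range = lib.date_range.intersection(date_range)
        pieces.append(lib.library.read(symbol, date_range=sub_range))
    return pieces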
    def get_info(self, symbol):
        """
        Returns information about the symbol, in a dictionary

        Parameters
        ----------
        symbol: str
            the symbol for the given item in the DB

        Returns
        -------
        dictionary
        """
        sym = self._get_symbol_info(symbol)
        if not sym:
            raise NoDataFoundException("Symbol does not exist.")
        ret = {}
        ret['chunk_count'] = sym[CHUNK_COUNT]
        ret['len'] = sym[LEN]
        ret['appended_rows'] = sym[APPEND_COUNT]
        ret['metadata'] = sym[METADATA] if METADATA in sym else None
        ret['chunker'] = sym[CHUNKER]
        ret['chunk_size'] = sym[CHUNK_SIZE] if CHUNK_SIZE in sym else 0
        ret['serializer'] = sym[SERIALIZER]
        return ret
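
# A hedged usage sketch of get_info (the library handle, symbol name, and the
# field values shown are all illustrative, not real output):
info = lib.get_info('GOOG')
# e.g. {'chunk_count': 12, 'len': 3650, 'appended_rows': 0, 'metadata': None,
#       'chunker': 'date', 'chunk_size': 'M', 'serializer': ...}
print(info['chunk_count'], info['chunk_size'])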
        # (Snippet truncated: the start-bound handling that initializes
        # `start_range` and `first_dt` precedes this excerpt.)
        # Find the end bound
        if date_range.end:
            # If we have an end, we are only interested in the chunks that start before the end.
            assert date_range.end.tzinfo
            last_dt = date_range.end
        else:
            logger.info("No end provided. Loading a month for: {}:{}".format(symbol, first_dt))
            if not first_dt:
                first_doc = self._collection.find_one(self._symbol_query(symbol),
                                                      projection={START: 1, ID: 0},
                                                      sort=[(START, pymongo.ASCENDING)])
                if not first_doc:
                    raise NoDataFoundException()
                first_dt = first_doc[START]
            last_dt = first_dt + timedelta(days=30)
        if last_dt:
            start_range['$lte'] = last_dt

        # Return chunks in the specified range
        if not start_range:
            return {}
        return {START: start_range}
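
# The dict returned above is a Mongo filter on the chunk START field. A hedged
# sketch of how such a filter might be merged into a symbol query before a
# find() (standalone names mirror the excerpt; `process` is a placeholder):
spec = tickstore._symbol_query(symbol)
spec.update(tickstore._mongo_date_range_query(symbol, date_range))
for doc in tickstore._collection.find(spec, sort=[(START, pymongo.ASCENDING)]):
    process(doc)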
        all other args:
            Will be passed into the initial read
        '''
        self._version_store = version_store
        self._symbol = symbol
        self._user = user
        self._log = log
        self._audit = audit
        logger.info("MT: {}@{}: [{}] {}: {}".format(_get_host(version_store).get('l'),
                                                    _get_host(version_store).get('mhost'),
                                                    user, log, symbol))
        try:
            self.base_ts = self._version_store.read(self._symbol, *args, **kwargs)
        except NoDataFoundException:
            versions = [x['version'] for x in self._version_store.list_versions(self._symbol, latest_only=True)]
            versions.append(0)
            self.base_ts = VersionedItem(symbol=self._symbol, library=None,
                                         version=versions[0], metadata=None, data=None)
        except OperationFailure:
            # TODO: mongo currently errors with "Incorrect Number of Segments Returned".
            # This workaround should be removed once the underlying problem is resolved.
            self.base_ts = self._version_store.read_metadata(symbol=self._symbol)

        if modify_timeseries is not None and not are_equals(modify_timeseries, self.base_ts.data):
            raise ConcurrentModificationException()
        self._do_write = False
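
# This constructor belongs to the write-audit transaction (ArcticTransaction in
# arctic.store.audit). A hedged usage sketch, with the symbol, user, log message
# and DataFrame all hypothetical:
from arctic.store.audit import ArcticTransaction

with ArcticTransaction(version_store, 'SYM', 'user.name', 'fix bad ticks') as txn:
    txn.write('SYM', cleaned_df, metadata={'source': 'cleanup'})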
        Returns
        -------
        String list of symbols in the library
        """
        query = {}
        if regex is not None:
            query['symbol'] = {'$regex': regex}
        if kwargs:
            for k, v in six.iteritems(kwargs):
                # TODO: this doesn't work as expected: it ignores the versions with metadata.deleted set,
                # so it will return symbols with matching metadata which have been deleted.
                # It may be better to add a $match step in the pipeline instead of making it part of the query.
                query['metadata.' + k] = v
        if snapshot is not None:
            try:
                query['parent'] = self._snapshots.find_one({'name': snapshot})['_id']
            except TypeError:
                raise NoDataFoundException('No snapshot %s in library %s' % (snapshot, self._arctic_lib.get_name()))
        elif all_symbols:
            return self._versions.find(query).distinct('symbol')

        # Return just the symbols which aren't deleted in the 'trunk' of this library
        pipeline = []
        if query:
            # Match based on user criteria first
            pipeline.append({'$match': query})
        pipeline.extend([
            # version_custom value is: 2*version + (0 if deleted else 1)
            # This is used to optimize the aggregation query:
            #  - avoid sorting
            #  - rely on the latest version (max) for the deleted status
            #
            # Be aware that if you don't use a custom sort, or if a sort before $group
            # exactly matches an existing index, $group will make a best effort to use that index:
            # ... (snippet truncated here)
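
# A hedged usage sketch of the query paths above (library handle, snapshot name
# and metadata key are hypothetical):
lib.list_symbols()                           # all non-deleted symbols
lib.list_symbols(regex='^EURUSD')            # symbols matching a regex
lib.list_symbols(snapshot='eod-2020-01-02')  # symbols as of a snapshot
lib.list_symbols(source='bloomberg')         # metadata.source match (see TODO above)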
    # Signature reconstructed from the truncated excerpt.
    def reverse_iterator(self, symbol, chunk_range=None, **kwargs):
        """
        Returns a generator that accesses each chunk in descending order

        Parameters
        ----------
        symbol: str
            the symbol for the given item in the DB
        chunk_range: None, or a range object
            allows you to subset the chunks by range

        Returns
        -------
        generator
        """
        sym = self._get_symbol_info(symbol)
        if not sym:
            raise NoDataFoundException("Symbol does not exist.")
        c = CHUNKER_MAP[sym[CHUNKER]]
        for chunk in list(self.get_chunk_ranges(symbol, chunk_range=chunk_range, reverse=True)):
            yield self.read(symbol, chunk_range=c.to_range(chunk[0], chunk[1]), **kwargs)
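
# Hedged usage sketch: walk a symbol's chunks newest-first (handle and symbol
# hypothetical; each item is whatever the serializer reads back, typically a DataFrame):
for chunk in lib.reverse_iterator('GOOG'):
    print(len(chunk))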
    # Signature and docstring summary reconstructed from the truncated excerpt.
    def get_chunk_ranges(self, symbol, chunk_range=None, reverse=False):
        """
        Returns a generator of (start, end) range strings, one pair per chunk of the symbol

        Parameters
        ----------
        symbol: str
            the symbol for the given item in the DB
        chunk_range: None, or a range object
            allows you to subset the chunks by range
        reverse: boolean
            return the chunk ranges in reverse order

        Returns
        -------
        generator
        """
        sym = self._get_symbol_info(symbol)
        if not sym:
            raise NoDataFoundException("Symbol does not exist.")
        c = CHUNKER_MAP[sym[CHUNKER]]
        # all symbols have a segment 0
        spec = {SYMBOL: symbol, SEGMENT: 0}
        if chunk_range is not None:
            spec.update(c.to_mongo(chunk_range))
        for x in self._collection.find(spec,
                                       projection=[START, END],
                                       sort=[(START, pymongo.ASCENDING if not reverse else pymongo.DESCENDING)]):
            yield (c.chunk_to_str(x[START]), c.chunk_to_str(x[END]))
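
# Hedged usage sketch: print the chunk boundaries for a symbol (handle and
# symbol hypothetical; the exact string format depends on the chunker):
for start, end in lib.get_chunk_ranges('GOOG'):
    print(start, '->', end)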
        metadata : dict
            dictionary of metadata to persist along with the symbol
        prune_previous_version : `bool`
            Removes previous (non-snapshotted) versions from the database.
            Default: True
        kwargs :
            passed through to the write handler (only used if symbol does not already exist or is deleted)

        Returns
        -------
        `VersionedItem`
            VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
        """
        # Make a normal write with empty data and the supplied metadata if the symbol does not exist
        try:
            previous_version = self._read_metadata(symbol)
        except NoDataFoundException:
            return self.write(symbol, data=None, metadata=metadata,
                              prune_previous_version=prune_previous_version, **kwargs)

        # Reaching here means that data and/or metadata exist and we are set to update the metadata
        new_version_num = self._version_nums.find_one_and_update({'symbol': symbol},
                                                                 {'$inc': {'version': 1}},
                                                                 upsert=True, new=True)['version']

        # Populate the new version entry, preserving existing data and updating with the supplied metadata
        version = {k: previous_version[k] for k in previous_version.keys() if k != 'parent'}  # don't copy snapshots
        version['_id'] = bson.ObjectId()
        version['version'] = new_version_num
        version['metadata'] = metadata
        version['base_version_id'] = previous_version.get('base_version_id', previous_version['_id'])
        return self._add_new_version_using_reference(symbol, version, previous_version, prune_previous_version)
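
# Hedged usage sketch: bump only the metadata of an existing symbol; the data
# payload is left untouched (handle, symbol and metadata values hypothetical):
item = lib.write_metadata('GOOG', {'source': 'yahoo', 'adjusted': True})
print(item.version)  # the new version number from the returned VersionedItem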
    # Signature reconstructed from the call sites below.
    def append(self, symbol, item, upsert=False, metadata=None, audit=None, **kwargs):
        """
        Appends data from item to symbol's data in the database

        Parameters
        ----------
        symbol: str
            the symbol for the given item in the DB
        item:
            the data to append
        upsert: bool
            write data if symbol does not exist
        metadata: dict
            optional per symbol metadata
        audit: dict
            optional audit information
        kwargs:
            passed to write if upsert is true and symbol does not exist
        """
        sym = self._get_symbol_info(symbol)
        if not sym:
            if upsert:
                return self.write(symbol, item, metadata=metadata, audit=audit, **kwargs)
            else:
                raise NoDataFoundException("Symbol does not exist.")
        if audit is not None:
            audit['symbol'] = symbol
            audit['action'] = 'append'
        self.__update(sym, item, metadata=metadata, combine_method=SER_MAP[sym[SERIALIZER]].combine, audit=audit)
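
# Hedged usage sketch: append new rows, creating the symbol on first write.
# append() stamps the audit dict with 'symbol' and 'action' as shown above
# (handle and DataFrame hypothetical):
lib.append('GOOG', new_rows_df, upsert=True,
           audit={'user': 'etl', 'reason': 'daily load'})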