def build_conditions(self):
"""
For a given query definition return the collection.find() simple query
Using all conditions, build the query as a dictionary suitable for
collection.find(). This uses MongoQueryOps to transform query
definitions into mongo db syntax.
:return: the query in mongo db syntax
"""
query = {}
qops = MongoQueryOps()
def addq(k, v):
    if k not in query:
        query[k] = v
    else:
        # a condition on this key exists already -- combine both
        # into a single top-level $and clause, reusing the $and
        # clause if one already exists
        subq = query.setdefault("$and", [])
        for vv in [query.pop(k), v]:
            if isinstance(vv, (list, tuple)):
                subq.extend(vv)
            else:
                subq.append({k: vv})
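# addq example: addq('x', {'$gt': 1}) followed by addq('x', {'$lt': 5})
# leaves query == {'$and': [{'x': {'$gt': 1}}, {'x': {'$lt': 5}}]}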
for k, v in six.iteritems(self.conditions):
# transform query operators given as '<column>__<op>',
# however preserve dunder '__' names as columns
if '__' in k and not k.startswith('__'):
parts = k.split('__')
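# hedged illustration (expected result, operator mapping assumed):
# given conditions {'amount__gte': 100, 'status': 'open'},
# build_conditions produces the mongodb query
#   {'amount': {'$gte': 100}, 'status': 'open'}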
def create_index(self, keys, **kwargs):
    """
    create an index the easy way
    """
    keys, idx_kwargs = MongoQueryOps().make_index(keys)
    # merge caller-provided kwargs over the defaults from make_index
    idx_kwargs.update(kwargs)
    result = self.collection.create_index(keys, **idx_kwargs)
    return result
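# hedged usage sketch ('store' and the index spec are assumptions):
# create_index accepts plain column names and relies on make_index to
# translate them into pymongo key specifications, e.g.
#   store.create_index(['city'])
# is roughly collection.create_index([('city', pymongo.ASCENDING)])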
column_map = list(zip(obj.columns, stored_columns))
dtypes = {
dict(column_map).get(k): v.name
for k, v in iteritems(obj.dtypes)
}
kind_meta = {
'columns': column_map,
'dtypes': dtypes,
'idx_meta': idx_meta
}
# ensure column names are strings
obj.columns = stored_columns
# create mongo indexes for dataframe index columns
df_idxcols = [col for col in obj.columns if col.startswith('_idx')]
if df_idxcols:
keys, idx_kwargs = MongoQueryOps().make_index(df_idxcols)
collection.create_index(keys, **idx_kwargs)
# create index on row id
keys, idx_kwargs = MongoQueryOps().make_index(['_om#rowid'])
collection.create_index(keys, **idx_kwargs)
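# for reference (make_index's return value assumed): the above is
# roughly equivalent to the plain pymongo call
#   collection.create_index([('_om#rowid', pymongo.ASCENDING)])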
# bulk insert
# -- get native objects
# -- seems to be required since pymongo 3.3.x; if not converted,
#    pymongo raises "Cannot Encode Object" for int64 types
obj = obj.astype('O')
fast_insert(obj, self, name)
kind = (MDREGISTRY.PANDAS_SEROWS
if store_series
else MDREGISTRY.PANDAS_DFROWS)
meta = self._make_metadata(name=name,
                           prefix=self.prefix,
                           bucket=self.bucket,
                           kind=kind,
                           kind_meta=kind_meta,
                           attributes=attributes)
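# minimal self-contained sketch of the column_map / dtypes bookkeeping
# above, assuming stored column names are just the stringified
# originals (pandas only, data hypothetical)
import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [0.5, 1.5]})
stored_columns = [str(col) for col in df.columns]
column_map = list(zip(df.columns, stored_columns))
dtypes = {dict(column_map).get(k): v.name for k, v in df.dtypes.items()}
# column_map == [('x', 'x'), ('y', 'y')]
# dtypes == {'x': 'int64', 'y': 'float64'}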
def flatten_keys(d, keys=None):
    """
    get all keys from a dictionary and nested dicts, as a flattened list
:param d: a dictionary
:param keys: previously found keys. internal use.
:returns: list of flattened keys
"""
keys = keys or []
keys.extend(list(d.keys()))
for sd in d.values():
if isinstance(sd, dict):
flatten_keys(sd, keys=keys)
return keys
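# usage example: collects the keys of the top-level dict and of all
# nested dicts into one flat list (insertion order on python 3.7+)
assert flatten_keys({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}) == ['a', 'b', 'c', 'd', 'e']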
# convenience accessors
x = MongoQueryOps()
d = dict
from __future__ import absolute_import
from .base import OmegaStore
from .query import MongoQ, Filter
from .queryops import MongoQueryOps, GeoJSON
qops = MongoQueryOps()
def __getattr__(self, k):
if k.upper().replace('_', '') in MongoQueryOps.UNARY:
return self.__unary(k.lower())
raise AttributeError('operator %s is not supported' % k)
def __unary(self, op):
    # assumed completion: return a callable that wraps its argument in
    # the corresponding mongodb unary operator
    def unary(val=True):
        return {'$%s' % op: val}
    return unary
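# hedged usage sketch: assuming 'EXISTS' is listed in
# MongoQueryOps.UNARY, attribute access dispatches through __getattr__
# to __unary, e.g.
#   qops.exists(True)  # -> {'$exists': True}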