Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_num_entries_does_not_exist(self):
    """num_entries must raise lmdb.Error when the LMDB path does not exist."""
    missing_path = os.path.join(self.dir_tmp, 'test_num_entries_does_not_exist_lmdb')
    # Sanity-check the precondition: the path really is absent.
    assert_false(os.path.exists(missing_path))
    assert_raises(lmdb.Error, r.num_entries, missing_path)
def commit(self):
    """Commit main transaction.

    Best-effort: a transaction that is missing (AttributeError) or already
    finalized / broken (lmdb.Error) is silently skipped.
    """
    logger.debug('Committing transaction.')
    # Both transactions get identical treatment, so drive them from a loop.
    # getattr keeps the attribute access inside the try, matching the
    # original's handling of a missing data_txn/idx_txn attribute.
    for attr in ('data_txn', 'idx_txn'):
        try:
            getattr(self, attr).commit()
        except (AttributeError, lmdb.Error):
            # Nothing to commit, or the commit cannot succeed; move on.
            pass
    self.is_txn_rw = None
# NOTE(review): fragment — the enclosing `def` starts before this chunk, so
# only the tail of the routine is visible here.
# we don't, making it hard to figure out how much memory we're actually
# using.
map_size = None
try:
    # Size the LMDB map to the existing file: the store is opened read-only
    # below, so the file's current size is an upper bound on what is needed.
    map_size = os.path.getsize(self.path)
except OSError as e:
    # Path missing/unreadable — surface as the project's file-format error.
    raise exceptions.FileFormatError(str(e)) from e
try:
    # subdir=False: self.path is the data file itself, not a directory;
    # lock=False: safe because the store is opened readonly.
    store = zarr.LMDBStore(
        self.path, map_size=map_size, readonly=True, subdir=False, lock=False
    )
except lmdb.InvalidError as e:
    # File exists but is not a valid LMDB database.
    raise exceptions.FileFormatError(
        "Unknown file format:{}".format(str(e))
    ) from e
except lmdb.Error as e:
    # Any other LMDB failure is also treated as a format error.
    raise exceptions.FileFormatError(str(e)) from e
return store
def create_store(self, name):
    """Open (creating if necessary) the named sub-database and wrap it in a Store.

    The new Store is cached in ``self.stores`` under its (bytes) name.

    :param name: sub-database name; text is encoded to UTF-8 bytes.
    :returns: the created ``Store``.
    :raises DataError: if LMDB fails to open the sub-database.
    """
    if isinstance(name, unicode):
        name = name.encode('utf-8')
    try:
        _db = self.database.open_db(name, dupsort=False, create=True)
        store = Store(name, _db, self)
        self.stores[name] = store
        return store
    except lmdb.Error as ex:
        logger.exception(ex)
        # FIX: ex.message was removed in Python 3 and deprecated since
        # Python 2.6; str(ex) yields the message portably.
        raise DataError(str(ex))
def __next__(self):
    """Yield the next deserialized record, ending iteration on any LMDB error."""
    try:
        raw = next(iterator)
        return _read(raw, cursor_chain, deserializer)
    except lmdb.Error:
        # An LMDB failure (including a dead cursor) terminates the iterator.
        raise StopIteration()
if self.env is None or self.extension == 'bin':
# db not available or embeddings in bin format, the embeddings should be available in memory (normally!)
return self.get_word_vector_in_memory(word)
try:
with self.env.begin() as txn:
txn = self.env.begin()
vector = txn.get(word.encode(encoding='UTF-8'))
if vector:
word_vector = _deserialize_pickle(vector)
vector = None
else:
word_vector = np.zeros((self.static_embed_size,), dtype=np.float32)
# alternatively, initialize with random negative values
#word_vector = np.random.uniform(low=-0.5, high=0.0, size=(self.embed_size,))
# alternatively use fasttext OOV ngram possibilities (if ngram available)
except lmdb.Error:
# no idea why, but we need to close and reopen the environment to avoid
# mdb_txn_begin: MDB_BAD_RSLOT: Invalid reuse of reader locktable slot
# when opening new transaction !
self.env.close()
envFilePath = os.path.join(self.embedding_lmdb_path, self.name)
self.env = lmdb.open(envFilePath, readonly=True, max_readers=2048, max_spare_txns=2, lock=False)
return self.get_word_vector(word)
return word_vector
# NOTE(review): fragment — the `try` matching the `except` below, and the
# enclosing `def`, start before this chunk. Python 2 code (print statement).
with self.dbTrain.begin() as txnTrain, self.dbVal.begin() as txnVal:
    self.lbl = self.cfg.labels
    self.numLbl = len(self.lbl)
    # Entry counts come from the environments' stats, not from the cursors.
    self.numTrain = self.dbTrain.stat()['entries']
    self.numVal = self.dbVal.stat()['entries']
    with txnTrain.cursor() as cursTrain, txnVal.cursor() as cursVal:
        # Materialize every key up front so samples can be fetched by index.
        self.keysTrain = np.array([key for key, _ in cursTrain])
        self.keysVal = np.array([key for key, _ in cursVal])
    # Decode one sample to learn the image shape (typo in helper name is
    # as spelled in the project: decodeLmdbItem2NNSampple).
    timg,_ = ImageTransformer2D.decodeLmdbItem2NNSampple(txnTrain.get(self.keysTrain[0]))
    self.shapeImg = timg.shape
if parSizeBatch > 1:
    self.sizeBatch = parSizeBatch
if scaleFactor > 0:
    self.scaleFactor = scaleFactor
self.loadMeanProto()
except lmdb.Error as err:
    # On any LMDB failure the reader is marked unusable.
    self.pathDataDir = None
    print 'LMDBReader.Error() : [%s]' % err
def loadFromTrainDir(self, pathTrainDir, parImgShape=None):
async def fini():
    """Tear down the uploader (if any), then signal completion."""
    if self.uploader is None:
        # Nothing to flush — just mark the shutdown as done.
        self.doneevent.set()
        return
    try:
        await self.finish()
    except lmdb.Error:
        # We're shutting down. Too late to commit.
        pass
    await self.uploader.fini()
    self.doneevent.set()
def exists(self):
    """Verify the LMDB directory is present.

    :raises lmdb.Error: if ``self.p`` is not an existing directory.
    """
    if not os.path.isdir(self.p):
        # FIX: the %s placeholder was never interpolated in the original,
        # so the error message printed a literal "(%s)".
        raise lmdb.Error("LMDB not found (%s)" % self.p)