# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, dbenv = None):
self.dbenv = dbenv
# how lame is bsddb3?
if self.dbenv:
self.db = dbobj.DB(dbenv)
else:
self.db = db.DB(None)
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
def convert_metadata(infile, config):
in_metadata = db.DB()
in_metadata.set_cachesize(4,0)
in_metadata.open(infile+"/metadata", flags=db.DB_RDONLY)
bloom_filter_size=int.from_bytes(in_metadata[b'bloom_filter_size'], 'big')
kmer_size=int.from_bytes(in_metadata[b'kmer_size'], 'big')
num_hashes=int.from_bytes(in_metadata[b'num_hashes'], 'big')
colours=pickle.loads(in_metadata[b'colours'])
num_samples=len(colours)
## Create the sample metadata
colour_sample={}
for colour in range(num_samples):
key="colour%i" % colour
key=key.encode("utf-8")
sample_name=in_metadata[key].decode('utf-8')
colour_sample[colour]=sample_name
## Add the sample metadata
def open_undodb(self):
if not self.readonly:
self.undolog = "%s.undo" % self.full_name
self.undodb = db.DB()
self.undodb.open(self.undolog, db.DB_RECNO, db.DB_CREATE)
def __init__(self, storage_config=None):
if storage_config is None:
storage_config = DEFAULT_BERKELEY_DB_STORAGE_CONFIG
self.storage_config = storage_config
self.storage = db.DB()
GB = 1024 * 1024 * 1024;
self.storage.set_cachesize(
int(storage_config.get("hashsize", 204800) / GB),
int(storage_config.get("hashsize", 204800) % GB))
self.storage.open(storage_config["filename"], None, db.DB_HASH, db.DB_CREATE)
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
# Undo is also impossible after batch transaction
self.undodb.clear()
self.env.txn_checkpoint()
if (self.secondary_connected and
not getattr(transaction, 'no_magic', False)):
# Disconnect unneeded secondary indices
self.surnames.close()
_db = db.DB(self.env)
try:
_db.remove(_mkname(self.full_name, SURNAMES), SURNAMES)
except db.DBNoSuchFileError:
pass
self.reference_map_referenced_map.close()
_db = db.DB(self.env)
try:
_db.remove(_mkname(self.full_name, REF_REF), REF_REF)
except db.DBNoSuchFileError:
pass
else:
self.bsddbtxn = BSDDBTxn(self.env)
self.txn = self.bsddbtxn.begin()
return transaction
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
## try one more time... this shouldn't be necessary...
# time.sleep(1)
## try:
# self.bdb.open(self.path+"/"+file,self.name,db.DB_BTREE,db.DB_RDONLY|db.DB_THREAD)
## except:
## raise Exception,"Cannot open database : %s"%self.path+"/"+file
self.isro = True
else:
try:
self.bdb.open(self.path + "/" + lfile, self.name, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD)
except db.DBInvalidArgError:
self.updateold(lfile, ro)
except:
try:
os.makedirs("%s/EMAN2DB" % self.path)
self.bdb = db.DB(self.dbenv)
self.bdb.open(self.path + "/" + lfile, self.name, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD)
except:
self.bdb = None
self.lock.release()
traceback.print_exc()
print("Unable to open read/write %s (%s/%s)" % (self.name, self.path, lfile))
return
# except:
## try one more time... this shouldn't be necessary...
# time.sleep(1)
# try:
# self.bdb.open(self.path+"/"+file,self.name,db.DB_BTREE,db.DB_CREATE|db.DB_THREAD)
# except:
# raise Exception,"Cannot create database : %s"%self.path+"/"+file
self.isro = False