def DBConnect(self):
    # Open (or create) the B-tree database file backing this object.
    self.infile_items = bsddb3.btopen(self.db_file, "c")
    self.db_file_opened = True
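
# A minimal, self-contained sketch of the bsddb3.btopen API that the
# snippets on this page rely on. The file name here is an assumption for
# illustration; btopen returns a dict-like B-tree database whose keys and
# values are bytes.
import bsddb3

db = bsddb3.btopen("example.db", "c")   # "c": create the file if missing
db[b"key"] = b"value"                   # dict-style writes
print(db[b"key"])                       # b'value'
db.close()
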
prev_id = node_id
while nodes[prev_id][0]:
    prev_id = nodes[prev_id][0]
append_successors(nodes, prev_id, affected_nodes)
sys.stdout.write('done. [found %d node(s)]\n' % len(affected_nodes))
for id in affected_nodes:
    sys.stdout.write(' -- %s\n' % id)

# Now, the hard part.  We need to find every directory listing
# that contains one of our to-be-purged nodes, and then remove
# those nodes from the entries list.
dirlists = []
sys.stdout.write('-- Fixing affected directory entries lists... ')
sys.stdout.flush()
strings_table = os.path.join(repos_path, 'db', 'strings')
strings_db = bsddb3.btopen(strings_table, 'w')
reps_table = os.path.join(repos_path, 'db', 'representations')
reps_db = bsddb3.btopen(reps_table, 'w')
dirs_fixed = 0
entries_fixed = 0
for key in nodes.keys():
    value = nodes[key]
    if value[1]:
        node = nodes_db[key]
        kill_count = fix_affected_dirlists(node, reps_db, strings_db,
                                           affected_nodes, dirlists)
        if kill_count:
            sys.stdout.write(' -- %s\n' % key)
            dirs_fixed = dirs_fixed + 1
            entries_fixed = entries_fixed + kill_count
sys.stdout.write('done. [fixed %d entries in %d dirs]\n'
                 % (entries_fixed, dirs_fixed))
sys.stdout.write('-- Removing deleted nodes... ')
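
# The pattern above (open an existing table with flag 'w', mutate entries in
# place) is the core of the fix-up pass. Below is a hedged, standalone sketch
# of that read-modify-write cycle; the table name and the edit itself are
# illustrative assumptions, not the script's real entries format.
import bsddb3

db = bsddb3.btopen('strings-copy.db', 'c')   # 'c' here so the sketch runs standalone
db[b'dir-entry'] = b'alpha beta gamma'
# rewrite one record, dropping a "purged" token from its value
value = db[b'dir-entry'].split()
value = [v for v in value if v != b'beta']
db[b'dir-entry'] = b' '.join(value)
print(db[b'dir-entry'])                      # b'alpha gamma'
db.close()
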
def getCachedJournalDB(self, classname):
    ''' get the journal db, looking in our cache of databases for commit
    '''
    # get the database handle, opening (and caching) it on first use
    db_name = 'journals.%s' % classname
    if db_name in self.databases:
        return self.databases[db_name]
    else:
        db = bsddb3.btopen(os.path.join(self.dir, db_name), 'c')
        self.databases[db_name] = db
        return db
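
# The same open-once-and-cache pattern, extracted into a standalone sketch.
# The class and attribute names below are assumptions for illustration only.
import os
import bsddb3

class HandleCache:
    def __init__(self, directory):
        self.dir = directory
        self.databases = {}          # name -> open bsddb3 handle

    def get(self, db_name):
        # return the cached handle, or open (creating if needed) and cache it
        if db_name not in self.databases:
            path = os.path.join(self.dir, db_name)
            self.databases[db_name] = bsddb3.btopen(path, 'c')
        return self.databases[db_name]
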
"""Verify that the written cache is correct.
Perform some unit tests on the written data, such as reading it
back and verifying that it loads and has the entries we expect.
Args:
written_keys: a set of keys that should have been written to disk.
Returns:
boolean indicating success.
Raises:
EmptyMap: The cache being verified is empty.
"""
self.log.debug('verification started %s', self.temp_cache_filename)
db = bsddb3.btopen(self.temp_cache_filename, 'r')
# cast keys to a set for fast __contains__ lookup in the loop
# following
cache_keys = set(db)
db.close()
written_key_count = len(written_keys)
cache_key_count = len(cache_keys)
self.log.debug('%d written keys, %d cache keys', written_key_count,
cache_key_count)
if cache_key_count <= 0 and written_key_count > 0:
# We have an empty db, yet we expect that earlier we should have
# written more. Uncaught disk full or other error?
raise error.EmptyMap
# makedb creates new keys internally. we only care that all the keys
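
# The comment above implies the verification ends with a subset test:
# everything we wrote must still be readable from the cache. A hedged,
# standalone sketch of that final check (the set names mirror the snippet,
# but the exact logic is an assumption):
written_keys = {b'alice', b'bob'}
cache_keys = {b'alice', b'bob', b'internal-key'}   # extra internal keys are fine
verified = written_keys.issubset(cache_keys)
print(verified)                                    # True
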
def opendb(self, name, mode):
    '''Low-level database opener that gets around anydbm/dbm
    eccentricities.
    '''
    # build the path to the class file
    path = os.path.join(os.getcwd(), self.dir, name)
    if not os.path.exists(path):
        if __debug__:
            self.config.logging.getLogger('hyperdb').debug(
                "opendb bsddb3.open(%r, 'c')" % path)
        return bsddb3.btopen(path, 'c')
    # open the existing database with the requested mode
    if __debug__:
        self.config.logging.getLogger('hyperdb').debug(
            "opendb bsddb3.open(%r, %r)" % (path, mode))
    return bsddb3.btopen(path, mode)
def load_table(i=conf.FN_I_TABLE, n=conf.FN_N_TABLE):
    trunc = conf.TRUNC_LIMIT  # this has to match the db
    cur_path = os.path.dirname(os.path.abspath(__file__)) + "/../" + conf.DB_DIR + "/"
    db_i_table = bsddb.btopen(cur_path + i, 'c')
    db_n_table = bsddb.btopen(cur_path + n, 'c')
    return db_i_table, db_n_table
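
# Design note: the "/"-joined cur_path above works on POSIX, but os.path.join
# is the more portable way to build it. A hedged one-line equivalent; "db"
# stands in for the snippet's conf.DB_DIR constant.
import os

cur_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "db")
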
def getclassdb(self, classname, mode='r'):
    ''' grab a connection to the class db that will be used for
        multiple actions
    '''
    path = os.path.join(os.getcwd(), self.dir, 'nodes.%s' % classname)
    if os.path.exists(path):
        return bsddb3.btopen(path, mode)
    else:
        return bsddb3.btopen(path, 'c')
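
# Both Roundup openers above share the same guard: fall back to flag 'c' when
# the file does not yet exist, since 'r' and 'w' require an existing database.
# A minimal standalone sketch of that pattern (the path is an assumption):
import os
import bsddb3

def open_btree(path, mode='r'):
    # create on first use; otherwise honour the caller's mode
    if not os.path.exists(path):
        return bsddb3.btopen(path, 'c')
    return bsddb3.btopen(path, mode)

db = open_btree("nodes.example.db")
db.close()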