def query(self, query):
    printable_char_list = []
    for c in query.split('\n', 1)[0][:70]:
        if c not in string.printable or c in '\t\x0b\x0c\r':
            c = '\\x%02x' % ord(c)
        printable_char_list.append(c)
    logging.debug('querying %s...', ''.join(printable_char_list))
    return self.conn.execute(query)
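
For context, the escaping technique above can be lifted into a standalone helper. This is a sketch only; the helper name is hypothetical and not part of the code above:

import logging
import string

def printable_prefix(query, limit=70):
    # Keep only the first line, truncated to `limit` characters, with
    # non-printable characters (and embedded whitespace control codes)
    # escaped as \xNN so log lines stay single-line and readable.
    out = []
    for c in query.split('\n', 1)[0][:limit]:
        if c not in string.printable or c in '\t\x0b\x0c\r':
            c = '\\x%02x' % ord(c)
        out.append(c)
    return ''.join(out)

logging.debug('querying %s...', printable_prefix('SELECT 1\x00junk\nSELECT 2'))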
else:
    # is fine because there's at least one other cell that is
    # readable for this oid.
    raise
with self._app.dm.replicated(partition):
    previous_serial = self._app.dm.getLastObjectTID(oid)
# Locking before reporting a conflict would speed up the case of
# cascading conflict resolution by avoiding incremental resolution,
# assuming that the time to resolve a conflict is often constant:
# "C+A vs. B -> C+A+B" rarely costs more than "C+A vs. C+B -> C+A+B".
# However, this would be against the optimistic principle of ZODB.
if previous_serial is not None and previous_serial != serial:
    assert serial < previous_serial, (serial, previous_serial)
    logging.info('Conflict on %s:%s with %s',
                 dump(oid), dump(ttid), dump(previous_serial))
    raise ConflictError(previous_serial)
logging.debug('Transaction %s locking %s', dump(ttid), dump(oid))
self._store_lock_dict[oid] = ttid
return True
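
To make the conflict check above concrete, here is a self-contained sketch of the same optimistic-concurrency rule (the class and function are illustrative stand-ins; NEO's real serials are 8-byte TIDs):

class ConflictError(Exception):
    def __init__(self, serial):
        super(ConflictError, self).__init__(serial)
        self.serial = serial

def check_conflict(previous_serial, serial):
    # The client based its write on `serial`; if a later transaction has
    # already committed this object (previous_serial > serial), report a
    # conflict so the client can attempt resolution against newer data.
    if previous_serial is not None and previous_serial != serial:
        assert serial < previous_serial, (serial, previous_serial)
        raise ConflictError(previous_serial)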
((min_oid, min_serial), ) = params
ask(self._doAskObjectHistoryFrom(min_oid, min_serial, count))
if length != count:
    action = CHECK_DONE
    params = (next_params, )
if action == CHECK_CHUNK:
    ((min_oid, min_serial), count) = params
    max_tid = replicator.getCurrentCriticalTID()
    ask(self._doAskCheckSerialRange(min_oid, min_serial, max_tid, count))
if action == CHECK_DONE:
    # Delete all objects we might have which are beyond what the peer
    # knows.
    ((last_oid, last_serial), ) = params
    offset = replicator.getCurrentOffset()
    max_tid = replicator.getCurrentCriticalTID()
    neo.lib.logging.debug("Serial range checked (offset=%s, min_oid=%x,"
        " min_serial=%x, length=%s, count=%s, max_oid=%x,"
        " max_serial=%x, last_oid=%x, last_serial=%x, critical_tid=%x)",
        offset, u64(min_oid), u64(min_serial), length, count,
        u64(max_oid), u64(max_serial), u64(last_oid), u64(last_serial),
        u64(max_tid))
    app.dm.deleteObjectsAbove(app.pt.getPartitions(),
        offset, last_oid, last_serial, max_tid)
    # Nothing remains, so the replication for this partition is
    # finished.
    replicator.setReplicationDone()
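
The chunk/done decision above can be summarized by a small pure function. The constants and parameter shapes here are assumptions reconstructed from this snippet alone, not NEO's actual definitions:

CHECK_CHUNK, CHECK_DONE = range(2)

def next_check_action(length, count, next_params):
    # A full chunk (length == count) means the peer may have more data,
    # so keep checking the next range; a short chunk means the range is
    # exhausted and trailing local objects can be deleted.
    if length != count:
        return CHECK_DONE, (next_params, )
    return CHECK_CHUNK, (next_params, count)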
def lock(self, ttid, uuid):
    """
    Record that a node has locked the transaction.
    If the transaction is completely locked, call the function given at
    instantiation time.
    """
    logging.debug('Lock TXN %s for %s', dump(ttid), uuid_str(uuid))
    if self[ttid].lock(uuid) and self._queue[0] == ttid:
        # All storages are locked, so unlock the commit queue.
        self._unlockPending()
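
A toy model of that locking protocol, assuming hypothetical names (real NEO transactions track considerably more state):

class PendingTransaction:
    def __init__(self, uuid_set):
        # UUIDs of storage nodes that still have to confirm their lock.
        self._pending = set(uuid_set)

    def lock(self, uuid):
        self._pending.discard(uuid)
        return not self._pending  # True once every storage has locked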
def begin(self, node, storage_readiness, tid=None):
    """
    Generate a new TID.
    """
    if tid is None:
        # No TID requested: generate a temporary one.
        tid = self._nextTID()
    else:
        # A specific TID was requested: queue it immediately and update
        # the last TID.
        self._queue.append(tid)
        self.setLastTID(tid)
    txn = self._ttid_dict[tid] = Transaction(node, storage_readiness, tid)
    logging.debug('Begin %s', txn)
    return tid
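
A minimal sketch of the two TID paths in begin(). Assumption: real NEO TIDs are 8-byte, time-based values; a plain integer counter is used here only to show the "allocate unless the caller supplies one" pattern:

class TIDAllocator:
    def __init__(self):
        self._last = 0

    def next_tid(self):
        self._last += 1
        return self._last

    def set_last(self, tid):
        # Preserve monotonicity when a client-specified TID is accepted.
        self._last = max(self._last, tid)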
def connectionClosed(self, conn):
    logging.debug('connection closed for %r', conn)
    app = self.app
    if app.operational:
        # Even though in most cases abortFor is called from both this
        # method and BaseMasterHandler.notifyNodeInformation (especially
        # since storage nodes disconnect unknown clients on their own),
        # these two handlers also cover distinct scenarios, so neither of
        # them is redundant:
        # - A client node may have network issues with this particular
        #   storage node and remain RUNNING: we may still be involved in
        #   the second phase, so we only abort non-voted transactions
        #   here. Because such a client no longer takes part in deadlock
        #   avoidance, not releasing its write-locks now would lead to a
        #   deadlock.
        # - A client node may be disconnected from the master while there
        #   are still voted (and not locked) transactions to abort.
        app.tm.abortFor(conn.getUUID())
def _triggerSecondary(self, node, offset, tid, cell_list):
    # Notify secondary storages that they can replicate from
    # primary ones, even if they are already replicating.
    p = Packets.Replicate(tid, '', {offset: node.getAddress()})
    for cell in cell_list:
        if max(cell.backup_tid, cell.replicating) < tid:
            cell.replicating = tid
            logging.debug(
                "ask %s to replicate partition %u up to %s from %s",
                uuid_str(cell.getUUID()), offset,
                dump(tid), uuid_str(node.getUUID()))
            cell.getNode().send(p)
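
The guard in that loop reduces to a single comparison. This stand-in shows why already-covered cells are skipped (Cell here is a hypothetical simplification of NEO's partition-table cell):

class Cell(object):
    def __init__(self, backup_tid, replicating):
        self.backup_tid = backup_tid
        self.replicating = replicating

def should_notify(cell, tid):
    # Skip cells that already reached `tid` (backup_tid) or were already
    # asked to replicate up to it (replicating); notify the rest.
    return max(cell.backup_tid, cell.replicating) < tid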
def __init__(self, app, handler, addr):
    self._ssl = app.ssl
    logging.debug('listening to %s:%d', *addr)
    connector = self.ConnectorClass(addr)
    BaseConnection.__init__(self, app.em, handler, connector, addr)
    connector.makeListeningConnection()
    self.em.register(self)
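
For orientation, here is a rough socket-level equivalent of what a connector's makeListeningConnection typically does. This is a sketch of standard non-blocking server setup, not NEO's actual connector implementation:

import socket

def make_listening_socket(addr):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setblocking(False)  # event loops poll non-blocking sockets
    s.bind(addr)
    s.listen(5)
    return s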