for i in six.moves.range(N_SORTS):
    p_ = pointless.PointlessPrimVector('u32', sequence=t, allow_print=False)
    p.append(p_)

THREADED = True
d_list = []

for i in six.moves.range(N_SORTS):
    if THREADED:
        d = threads.deferToThread(p[i].sort_proj, *v)
        d_list.append(d)
    else:
        yield threads.deferToThread(p[i].sort_proj, *v)

if THREADED:
    yield defer.DeferredList(d_list)
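A minimal, self-contained sketch of the same fan-out pattern, assuming Twisted is installed; cpu_bound_sort and datasets are hypothetical stand-ins for sort_proj and the prim vectors:

from twisted.internet import defer, threads

def cpu_bound_sort(data):
    # Stand-in for the sort work done off the reactor thread.
    return sorted(data)

@defer.inlineCallbacks
def sort_all(datasets):
    # One Deferred per dataset, each running in the reactor's thread pool.
    d_list = [threads.deferToThread(cpu_bound_sort, ds) for ds in datasets]
    # DeferredList fires once every input has fired, preserving input order.
    results = yield defer.DeferredList(d_list)
    defer.returnValue([value for success, value in results if success])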
if self.passive:
    # Hack: use a mutable object to sneak a variable out of the
    # scope of doPassive
    _mutable = [None]

    def doPassive(response):
        """Connect to the port specified in the response to PASV"""
        host, port = decodeHostPort(response[-1][4:])
        f = _PassiveConnectionFactory(protocol)
        _mutable[0] = self.connectFactory(host, port, f)

    pasvCmd = FTPCommand('PASV')
    self.queueCommand(pasvCmd)
    pasvCmd.deferred.addCallback(doPassive).addErrback(self.fail)

    results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
    d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
    d.addErrback(_unwrapFirstError)

    # Ensure the connection is always closed
    def close(x, m=_mutable):
        m[0] and m[0].disconnect()
        return x
    d.addBoth(close)
else:
    # We just place a marker command in the queue, and will fill in
    # the host and port numbers later (see generatePortCommand)
    portCmd = FTPCommand('PORT')

    # Ok, now we jump through a few hoops here.
    # This is the problem: a transfer is not to be trusted as complete
    # until we get both the "226 Transfer complete" message on the
    # control connection, and the data connection has been closed.
If you don't want the task saved to the update list, but instead only
run on the workers currently attached, set the I{ephemeral} keyword
C{True}.
"""
ephemeral = kw.pop('ephemeral', False)
if ephemeral:
    dList = [
        worker.remoteCaller('runJob', jobID, callName, *args, **kw)
        for worker in self.queue.workers()]
else:
    if jobID not in self.updates:
        self.updates[jobID] = []
    self.updates[jobID].append([callName, args, kw, []])
    dList = [
        self._runUpdate(jobID, worker)
        for worker in self.queue.workers()]
return defer.DeferredList(dList)
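The method returns a DeferredList over the per-worker calls; when it fires, each entry is a (success, result) two-tuple. A rough illustration of consuming that shape, where w.call is a hypothetical worker method:

from twisted.internet import defer

@defer.inlineCallbacks
def broadcast(workers, payload):
    dList = [w.call(payload) for w in workers]  # w.call is hypothetical
    results = yield defer.DeferredList(dList)
    # (True, value) for callbacks, (False, Failure) for errbacks.
    ok = [value for success, value in results if success]
    defer.returnValue(ok)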
"shutdown_consumers error in consumer %s: %s",
consumer, e)
try:
# FIXME: This should not poke private state. It
# should store the deferred when it starts the
# consumer.
if consumer._start_d:
consumer.stop()
except Exception as e2:
log.error(
"shutdown_consumers stop error in consumer %s: %s",
consumer, e2)
else:
shutdown_des.append(shutdown_de)
try:
yield DeferredList(shutdown_des, fireOnOneErrback=True, consumeErrors=True)
except Exception as e:
log.error("shutdown_consumers deferred error: %s", e)
# try to kill all the consumers if graceful shutdown fails
# and if that doesn't work, give up
for consumers in current_consumers.values():
for consumer in consumers:
try:
if consumer._start_d:
consumer.stop()
except Exception as e2:
log.error("shutdown_consumers deferred stop error: %s", e2)
log.debug(
"%s shutdown_consumers: %s consumers shutdown",
self, num_consumers)
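For context: with fireOnOneErrback=True, the yield above raises defer.FirstError as soon as any input Deferred fails, and consumeErrors=True stops the swallowed failures from later being logged as unhandled. A small sketch of that control flow, with stop() assumed to return a Deferred:

import logging
from twisted.internet import defer

log = logging.getLogger(__name__)

@defer.inlineCallbacks
def stop_all(stoppables):
    ds = [s.stop() for s in stoppables]  # stop() assumed to return a Deferred
    try:
        yield defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True)
    except defer.FirstError as e:
        # e.subFailure is the first underlying failure; fall back to a
        # blunt, best-effort cleanup, as the code above does.
        log.error("graceful stop failed: %s", e.subFailure.getErrorMessage())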
    portCmd.transferDeferred = protocol.deferred
    portCmd.protocol = protocol
    portCmd.deferred.addErrback(portCmd.transferDeferred.errback)
    self.queueCommand(portCmd)

    # Create dummy functions for the next callback to call.
    # These will also be replaced with real functions in
    # generatePortCommand.
    portCmd.loseConnection = lambda result: result
    portCmd.fail = lambda error: error

    # Ensure that the connection always gets closed
    cmdsDeferred.addErrback(lambda e, pc=portCmd: pc.fail(e) or e)

    results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
    d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
    d.addErrback(_unwrapFirstError)

for cmd in cmds:
    self.queueCommand(cmd)
return d
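_unwrapFirstError, used twice above, replaces the FirstError wrapper with the failure that actually occurred, so callers see the original exception. Roughly, it amounts to the following sketch:

from twisted.internet import defer

def _unwrapFirstError(failure):
    # Only FirstError is expected here; anything else propagates unchanged.
    failure.trap(defer.FirstError)
    return failure.value.subFailure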
def _get_devices_by_id(self, device_ids):
    deferred_list = defer.DeferredList(
        [self._get_device_by_id(device_id) for device_id in device_ids],
        consumeErrors=True)
    # working off JAMF's own data, so this shouldn't fail
    deferred_list.addCallback(
        stethoscope.api.utils.filter_by_status,
        context=sys._getframe().f_code.co_name, level=logbook.ERROR)
    return deferred_list
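filter_by_status is stethoscope's helper for splitting the (success, result) pairs that the DeferredList hands to its callback. A hypothetical equivalent, using stdlib logging rather than logbook, just to show what the callback receives:

import logging

log = logging.getLogger(__name__)

def filter_by_status(results, context='', level=logging.ERROR):
    # 'results' is the DeferredList payload: (success, value-or-Failure) pairs.
    values = []
    for success, value in results:
        if success:
            values.append(value)
        else:
            log.log(level, "%s: %s", context, value.getErrorMessage())
    return values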
def _auth_proto(self, proto):
    return defer.DeferredList(
        [proto.authenticate(database, username, password, mechanism)
         for database, (username, password, mechanism) in self.__auth_creds.items()],
        consumeErrors=True)
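Note that with consumeErrors=True and no fireOnOneErrback, the DeferredList itself always fires its callback chain; a failed authentication against any one database surfaces only as a (False, Failure) entry in the result list, so the caller has to inspect the pairs itself.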
    defer.returnValue(connection_id)  # all good

yield state.terminating(conn)

defs = []
sub_connections = yield self.getSubConnectionsByConnectionKey(conn.id)
for sc in sub_connections:
    # we assume a provider is available
    provider = self.getProvider(sc.provider_nsa)
    header = nsa.NSIHeader(self.nsa_.urn(), sc.provider_nsa,
                           security_attributes=header.security_attributes)
    d = provider.terminate(header, sc.connection_id, request_info)
    d.addErrback(_logErrorResponse, connection_id, sc.provider_nsa, 'terminate')
    defs.append(d)

results = yield defer.DeferredList(defs, consumeErrors=True)
successes = [r[0] for r in results]
if all(successes):
    log.msg('Connection %s: All sub connections (%i) acked terminated'
            % (conn.connection_id, len(defs)), system=LOG_SYSTEM)
    defer.returnValue(connection_id)
else:
    # we are now in an inconsistent state...
    n_success = sum(1 for s in successes if s)
    log.msg('Connection %s: only %i of %i sub connections successfully terminated'
            % (conn.connection_id, n_success, len(defs)), system=LOG_SYSTEM)
    provider_urns = [sc.provider_nsa for sc in sub_connections]
    raise _createAggregateException(connection_id, 'terminate', results,
                                    provider_urns, error.ConnectionError)
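The aggregate-and-raise pattern above reduces to a compact sketch; AggregateError here is a hypothetical stand-in for the error.ConnectionError produced by _createAggregateException:

from twisted.internet import defer

class AggregateError(Exception):
    """Hypothetical stand-in for the aggregated connection error."""

@defer.inlineCallbacks
def terminate_all(deferreds):
    results = yield defer.DeferredList(deferreds, consumeErrors=True)
    failures = [res for ok, res in results if not ok]
    if failures:
        raise AggregateError('%i of %i terminations failed'
                             % (len(failures), len(results)))
    defer.returnValue([res for ok, res in results])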
def _runUpdate(self, jobID, worker):
    dList = []
    for funcName, args, kw, workersUpdated in self.updates[jobID]:
        if worker.ID in workersUpdated:
            continue
        d = worker.remoteCaller('runJob', jobID, funcName, *args, **kw)
        # Bind the current list as a default argument; a bare closure would
        # see only the last iteration's workersUpdated when the callback fires.
        d.addCallback(lambda _, wu=workersUpdated: wu.append(worker.ID))
        dList.append(d)
    return defer.DeferredList(dList)
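The default-argument binding on the callback is deliberate: without it, every lambda created in the loop closes over the same name and sees only its final value by the time the Deferreds fire. The idiom in isolation:

from twisted.internet import defer

def tag_results(deferreds):
    # 'i=i' freezes the current index for each lambda; a bare 'i' in the
    # body would evaluate to len(deferreds) - 1 by the time callbacks fire.
    for i, d in enumerate(deferreds):
        d.addCallback(lambda result, i=i: (i, result))
    return defer.DeferredList(deferreds)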
defers = []
from twisted.internet import reactor

for n in nodes:
    d = defer.Deferred()
    reactor.callLater(0, self.eventloop, n, event, msg, d)
    defers.append(d)
del nodes

# With more than one node scheduled, wrap the Deferreds in a
# DeferredList; otherwise hand back the single Deferred directly.
if len(defers) > 1:
    return defer.DeferredList(defers)
return defers.pop()
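One caveat in the snippet above: defers.pop() raises IndexError when nodes is empty. A defensive variant of the same dispatch, with handler as a hypothetical callable:

from twisted.internet import defer, reactor

def dispatch(nodes, handler):
    # Schedule handler(node, d) on the next reactor turn for every node.
    defers = []
    for node in nodes:
        d = defer.Deferred()
        reactor.callLater(0, handler, node, d)
        defers.append(d)
    if not defers:
        return defer.succeed([])  # nothing to wait for
    if len(defers) == 1:
        return defers[0]
    return defer.DeferredList(defers)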