except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
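# --- Example: the fallback-dispatch pattern above, as a minimal sketch ---
# When the target object lacks the requested method, the server looks the
# name up in a fallback mapping and calls that instead, reporting either
# ('#RETURN', result) or ('#TRACEBACK', ...).  FallbackServer and its
# fallback_mapping are illustrative stand-ins, not multiprocessing APIs.
from traceback import format_exc

class FallbackServer(object):
    fallback_mapping = {
        'describe': lambda self, obj: repr(obj),
    }

    def call(self, obj, methodname, *args, **kwds):
        try:
            function = getattr(obj, methodname)
        except AttributeError:
            try:
                fallback_func = self.fallback_mapping[methodname]
                return ('#RETURN', fallback_func(self, obj, *args, **kwds))
            except Exception:
                return ('#TRACEBACK', format_exc())
        try:
            return ('#RETURN', function(*args, **kwds))
        except Exception:
            return ('#TRACEBACK', format_exc())

# FallbackServer().call([], 'describe') -> ('#RETURN', '[]')
# FallbackServer().call([], 'append', 1) -> ('#RETURN', None)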
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
self._worker_handler.join()
debug('joining task handler')
self._task_handler.join()
debug('joining result handler')
self._result_handler.join()
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, ))
p.join()
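# --- Usage sketch for Pool.join() above ---
# join() may only be called after close() or terminate(); that is what the
# assert on self._state enforces.  Minimal self-contained example:
from multiprocessing import Pool

def _square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(processes=2)
    print(pool.map(_square, range(5)))   # [0, 1, 4, 9, 16]
    pool.close()   # no more tasks will be submitted
    pool.join()    # wait for handler threads and worker processes to exit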
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
debug('joining worker handler')
worker_handler.join()
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p.exitcode is None:
p.terminate()
debug('joining task handler')
task_handler.join()
debug('joining result handler')
result_handler.join()
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d' % p.pid)
p.join()
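# --- Usage sketch for the hard-shutdown path above ---
# terminate() kills workers that have not finished; join() then reaps the
# handler threads and worker processes in the order shown (worker handler,
# task handler, result handler, then each worker).
from multiprocessing import Pool
import time

def _sleep_forever(x):
    time.sleep(3600)
    return x

if __name__ == '__main__':
    pool = Pool(processes=2)
    pool.map_async(_sleep_forever, range(4))
    pool.terminate()   # stop workers that are still running
    pool.join()        # safe only after close() or terminate()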
def weakref_cb(_,
thread_wakeup=self._queue_management_thread_wakeup):
mp.util.debug('Executor collected: triggering callback for'
' QueueManager wakeup')
thread_wakeup.wakeup()
# Start the processes so that their sentinels are known.
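# --- Sketch of the weakref-callback wakeup above ---
# concurrent.futures uses a weakref callback so that, once the executor is
# garbage collected, the queue-management thread is woken and can shut down.
# Wakeup below is an illustrative stand-in for the real _ThreadWakeup helper.
import threading
import weakref

class Wakeup(object):
    def __init__(self):
        self._event = threading.Event()
    def wakeup(self):
        self._event.set()

class FakeExecutor(object):
    pass

_wakeup = Wakeup()
_executor = FakeExecutor()

def _executor_collected_cb(_, thread_wakeup=_wakeup):
    # invoked by the weakref machinery after _executor is collected
    thread_wakeup.wakeup()

_executor_ref = weakref.ref(_executor, _executor_collected_cb)
del _executor                    # drop the last strong reference
assert _wakeup._event.is_set()   # CPython collects it immediately (refcounting)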
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
    for i in range(10):
        if not outqueue._reader.poll():
            break
        get()
except (IOError, EOFError):
    pass
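# --- Sketch of the cache[job]._set(i, obj) hand-off above ---
# The result handler looks each (job, i, obj) message up in a cache of
# pending results and stores the value, silently ignoring jobs that are no
# longer cached.  PendingResult is an illustrative stand-in for
# multiprocessing.pool.ApplyResult.
import threading

class PendingResult(object):
    def __init__(self):
        self._ready = threading.Event()
        self._value = None
    def _set(self, i, obj):
        self._value = obj
        self._ready.set()
    def get(self, timeout=None):
        self._ready.wait(timeout)
        return self._value

_cache = {17: PendingResult()}

def _deliver(task, cache=_cache):
    job, i, obj = task
    try:
        cache[job]._set(i, obj)
    except KeyError:
        pass                     # job already collected or cancelled

_deliver((17, 0, 'done'))
assert _cache[17].get() == 'done'
_deliver((99, 0, 'ignored'))     # unknown job id is dropped, as above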
if OpenMDAO_Proxy.manager_is_alive(token.address):
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _get_connection(_Client, token.address, authkey)
dispatch(conn, None, 'decref', (token.id,))
# Hard to cause this to happen.
except Exception as exc: #pragma no cover
util.debug('... decref failed %s', exc)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more %r proxies so closing conn',
threading.current_thread().name, token.typeid)
tls.connection.close()
del tls.connection
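# --- Sketch of the per-thread connection cleanup above ---
# Each thread keeps a single connection to the manager in thread-local
# storage plus a set of live proxy ids; the connection is closed when the
# thread's last proxy goes away.  The helpers below are illustrative, not
# the OpenMDAO_Proxy internals.
import threading

_tls = threading.local()

class _FakeConnection(object):
    def __init__(self):
        self.closed = False
    def close(self):
        self.closed = True

def _register_proxy(idset, ident):
    if not hasattr(_tls, 'connection'):
        _tls.connection = _FakeConnection()
    idset.add(ident)

def _unregister_proxy(idset, ident):
    idset.discard(ident)
    # close this thread's connection once it owns no more proxies
    if not idset and hasattr(_tls, 'connection'):
        _tls.connection.close()
        del _tls.connection

_ids = set()
_register_proxy(_ids, 'token-1')
_unregister_proxy(_ids, 'token-1')   # connection closed, tls entry removed
assert not hasattr(_tls, 'connection')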
try:
task = get()
except (IOError, EOFError) as exc:
debug('result handler got %s -- exiting',
exc.__class__.__name__)
return
if putlock is not None:
try:
putlock.release()
except ValueError:
pass
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if putlock is not None:
try:
putlock.release()
except ValueError:
    pass
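# --- Sketch of the putlock release above ---
# Assuming putlock is a bounded semaphore that limits the number of
# outstanding tasks: the submitter acquires a slot before sending a task and
# the result handler releases it; an over-release raises ValueError, which
# is why the release is wrapped and the error swallowed.
import threading

_putlock = threading.BoundedSemaphore(2)

_putlock.acquire()           # submitter: reserve a slot before sending
# ... the task is sent and its result arrives ...
try:
    _putlock.release()       # result handler: free the slot
except ValueError:
    pass                     # already back at its initial value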
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
    debug('result handler ignoring extra sentinel')
    continue
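# --- Sketch of how a result handler like _handle_results is wired up ---
# multiprocessing.Pool runs it in a daemon thread fed by the outqueue,
# passing a get callable and the cache of pending jobs; thread._state is the
# flag the handler polls for TERMINATE.  The handler below is a trivial
# stand-in, and queue.Queue stands in for the pool's SimpleQueue.
import queue
import threading

RUN, TERMINATE = 0, 2

def _toy_handler(outqueue, get, cache):
    while True:
        task = get()
        if task is None or threading.current_thread()._state == TERMINATE:
            break                      # sentinel or termination requested
        job, i, obj = task
        cache.setdefault(job, []).append(obj)

_outqueue, _results = queue.Queue(), {}
_handler = threading.Thread(target=_toy_handler,
                            args=(_outqueue, _outqueue.get, _results))
_handler.daemon = True
_handler._state = RUN
_handler.start()
_outqueue.put((1, 0, 'result'))
_outqueue.put(None)                    # sentinel: tell the handler to exit
_handler.join()
assert _results == {1: ['result']}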
def _repopulate_pool(self):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=worker,
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
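# --- Usage sketch for the repopulation path above ---
# With maxtasksperchild set, workers exit after a fixed number of tasks and
# the worker-handler thread calls _repopulate_pool to start replacements,
# keeping the pool at its configured size.
from multiprocessing import Pool
import os

def _worker_pid(_):
    return os.getpid()

if __name__ == '__main__':
    with Pool(processes=2, maxtasksperchild=1) as pool:
        pids = pool.map(_worker_pid, range(6))
    # More than two distinct pids shows that exited workers were replaced.
    print(sorted(set(pids)))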
def SocketClient(address):
"""
Return a connection object connected to the socket given by `address`
"""
family = getattr(socket, address_type(address))
t = _init_timeout()
while 1:
s = socket.socket(family)
s.setblocking(True)
try:
s.connect(address)
except socket.error as e:
s.close()
if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
debug('failed to connect to address %s', address)
raise
time.sleep(0.01)
else:
break
else:
raise
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
return conn
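# --- Usage sketch for SocketClient above ---
# The public equivalent is multiprocessing.connection.Client, paired with a
# Listener on the other end; port 0 lets the OS pick a free port, so this
# example avoids the connection-refused retry loop entirely.
from multiprocessing.connection import Client, Listener
import threading

if __name__ == '__main__':
    listener = Listener(('localhost', 0), authkey=b'secret')

    def _serve():
        with listener.accept() as server_conn:
            server_conn.send('hello')

    t = threading.Thread(target=_serve)
    t.start()
    with Client(listener.address, authkey=b'secret') as client_conn:
        print(client_conn.recv())        # 'hello'
    t.join()
    listener.close()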