stn_da = StationSerialDataDb(params[P_PATH_DB], params[P_VARNAME])
stn_mask = np.logical_and(np.isfinite(stn_da.stns[MASK]), np.isnan(stn_da.stns[BAD]))
stns = stn_da.stns[stn_mask]

print "Coord: Done initialization. Starting to send work."

cnt = 0
nrec = 0

for stn_id in stns[STN_ID]:

    # First round: seed each worker directly. After that, wait for any worker
    # to report back and reuse its rank as the next destination.
    if cnt < nwrkers:
        dest = cnt + N_NON_WRKRS
    else:
        dest = MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
        nrec += 1

    MPI.COMM_WORLD.send(stn_id, dest=dest, tag=TAG_DOWORK)
    cnt += 1

# No more stations: tell every worker to stop.
for w in np.arange(nwrkers):
    MPI.COMM_WORLD.send(None, dest=w + N_NON_WRKRS, tag=TAG_STOPWORK)

print "coord_proc: done"
except zmq.error.ZMQError:
    print '[load] rank %d port %d zmq error' % (rank, config['sock_data'])
    sock.close()
    zmq.Context().term()
    raise
finally:
    pass
shape, dtype, h = sock.recv_pyobj()
if verbose: print '[load] 1. shared_x information received'

gpu_data_remote_b = pygpu.gpuarray.open_ipc_handle(ctx, h, np.prod(shape) * dtype.itemsize)
gpu_data_remote = pygpu.gpuarray.from_gpudata(gpu_data_remote_b, 0, dtype, shape, ctx)
gpu_data = pygpu.empty(shape, dtype, context=ctx)

img_mean = icomm.recv(source=MPI.ANY_SOURCE, tag=66)
if verbose: print '[load] 2. img_mean received'

count = 0
mode = None
import time

while True:

    # 3. load the very first filename in 'train' or 'val' mode
    message = icomm.recv(source=0, tag=40)

    if message == 'stop':
        break
    elif message == 'train':
        mode = 'train'
        continue
    elif message == 'val':
        mode = 'val'
        continue
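The loader loop above is driven entirely by string messages on tag 40: 'train' and 'val' switch the mode and 'stop' ends the loop. A hypothetical sketch of the sending side, assuming icomm is the same communicator and the loader process is rank 0 within it (neither of which is stated in the snippet):

def drive_loader(icomm):
    icomm.send('train', dest=0, tag=40)    # switch the loader into training mode
    # ... per-batch traffic between trainer and loader goes here ...
    icomm.send('val', dest=0, tag=40)      # switch to validation mode
    # ...
    icomm.send('stop', dest=0, tag=40)     # let the loader break out of its while-loop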
def __update(self):
    """ A loop which constantly submits pending jobs and pulls complete jobs
    from the cluster. """
    while self.__run or len(self.__waiting) > 0:
        status = MPI.Status()
        data = self.__comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)

        if status.tag == slave.AVAILABLE:
            if status.source not in self.__slaves:
                self.__slaves.add(status.source)
            self.__free.append(status.source)
        elif status.tag == slave.RESULTS:
            self.__complete.append(data)
        elif status.tag == slave.DEAD:
            print 'Slave dying'
            self.__slaves.remove(status.source)

        while len(self.__waiting) and len(self.__free) > 0:
            job = self.__waiting.pop()
            rank = self.__free.pop()
            self.__comm.send(job.args, dest=rank, tag=job.tag)
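The master loop above recognises three tags from its slaves: AVAILABLE to join the free list, RESULTS to return data, and DEAD to deregister. A hypothetical slave-side loop written against that protocol; the numeric tag values and run_job() are stand-ins, not taken from the original slave module:

from mpi4py import MPI

AVAILABLE, RESULTS, DEAD = 1, 2, 3   # assumed stand-ins for slave.AVAILABLE etc.

def slave_loop(comm, run_job, n_jobs, master=0):
    status = MPI.Status()
    for _ in range(n_jobs):
        comm.send(None, dest=master, tag=AVAILABLE)               # announce this rank is free
        args = comm.recv(source=master, tag=MPI.ANY_TAG, status=status)
        result = run_job(status.Get_tag(), args)                  # the job type travels in the tag
        comm.send(result, dest=master, tag=RESULTS)               # hand the result back
    comm.send(None, dest=master, tag=DEAD)                        # deregister before exiting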
def recv_task_request(self):
    """ Receives 1 task request from MPI comm into the ready_worker_queue
    """
    info = MPI.Status()
    # req = comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
    comm.recv(source=MPI.ANY_SOURCE, tag=TASK_REQUEST_TAG, status=info)
    worker_rank = info.Get_source()
    self.ready_worker_queue.append(worker_rank)
    logger.info("Received task request from worker:{}".format(worker_rank))
waiting for them to request the next chunk of data when they are free,
responding to them with the data and then sending them a Sentinel
signalling that they can exit.
"""
is_map_done = [True if i in self.master_node_ranks else False for i in range(self.size)]
status = MPI.Status()

# Copy it to the pending store. This is so when the master accesses
# the PDS data it's not empty.
self.pds_pending_store[pds_id] = list(self.pds_store[pds_id])

# While we have some ranks that haven't finished
while sum(is_map_done) < self.size:
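The docstring above describes a simple hand-shake: a free rank asks for the next chunk, the master answers with data, and a Sentinel tells the rank it may exit. A hypothetical worker-side sketch of that hand-shake; the tag values and the use of None as the sentinel are assumptions, not the library's actual constants:

from mpi4py import MPI

CHUNK_REQUEST_TAG = 21   # assumed values
CHUNK_DATA_TAG = 22
SENTINEL = None          # assumed sentinel object

def consume_chunks(comm, process_chunk, master_rank=0):
    while True:
        comm.send(comm.Get_rank(), dest=master_rank, tag=CHUNK_REQUEST_TAG)  # "I am free"
        chunk = comm.recv(source=master_rank, tag=CHUNK_DATA_TAG)
        if chunk is SENTINEL:        # master is out of data; this rank may exit
            break
        process_chunk(chunk)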
ds_prcp = Dataset("".join([params[P_PATH_OUT], DS_NAME]), 'r+')
stnids_prcp = np.array(ds_prcp.variables['stn_id'][:], dtype="
ttl_infills = stnids_prcp.size
def workreq_check(self, cleanup=False):
    """ for any process that sends a work request message:
            add the process to the requester list
        if my work queue is not empty:
            distribute the work evenly
        else:
            send a "no work" message to each requester
        reset the requester list to empty
    """
    while True:
        st = MPI.Status()
        ret = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=T.WORK_REQUEST, status=st)
        if not ret:  # no work request, break out of the loop
            break
        # we have a work request message
        rank = st.Get_source()
        buf = self.comm.recv(source=rank, tag=T.WORK_REQUEST, status=st)
        if buf == G.ABORT:
            self.logger.warn("Abort request from rank %s" % rank, extra=self.d)
            self.abort = True
            self.send_no_work(rank)
            return
        else:
            self.logger.debug("receive work request from requestor [%s]" % rank, extra=self.d)
            # add rank to requesters
            self.requestors.append(rank)
    # out of the while loop
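workreq_check() drains every pending T.WORK_REQUEST message, treating a G.ABORT payload specially and queueing all other senders as requestors. A hypothetical requester-side sketch; the tag values and the ABORT marker below are plain stand-ins for the package's real T and G constants, and the reply tag is an assumption:

from mpi4py import MPI

WORK_REQUEST = 31   # stand-in for T.WORK_REQUEST
WORK_REPLY = 32     # assumed reply tag
ABORT = 'ABORT'     # stand-in for G.ABORT

def request_work(comm, manager_rank, abort=False):
    payload = ABORT if abort else comm.Get_rank()
    comm.send(payload, dest=manager_rank, tag=WORK_REQUEST)   # join the manager's requester list
    return comm.recv(source=manager_rank, tag=WORK_REPLY)     # work items, or a "no work" marker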
import pickle

from mpi4py import MPI


def chunk_recv(source, unpickle_it=True):
    chunks = []
    status = MPI.Status()
    # Keep receiving messages until [[MSG_OVER]] is received
    while True:
        msg = MPI.COMM_WORLD.recv(source=source, status=status)
        # If we are listening to ANY_SOURCE, receive the remainder of messages
        # from the SAME source as the first message (prevent interleaving)
        if source == MPI.ANY_SOURCE:
            source = status.Get_source()
        # print ("----- %d received msg of size %d" % (MPI.COMM_WORLD.Get_rank(), len(msg)))
        # If the special [[MSG_OVER]] string is received, we are done
        if msg == "[[MSG_OVER]]":
            break
        # Otherwise, add the string to the list of received strings
        chunks.append(msg)
    # Concatenate the strings, then unpickle
    pickled_obj = "".join(chunks)
    del chunks
    if unpickle_it:
        return pickle.loads(pickled_obj)
    else:
        return pickled_obj
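A possible sending-side counterpart to chunk_recv(), not part of the original snippet: pickle the object, split the resulting (Python 2) string into fixed-size pieces, and terminate the stream with the same "[[MSG_OVER]]" sentinel so the receiver's loop ends:

import pickle

from mpi4py import MPI

def chunk_send(obj, dest, chunk_size=2 ** 20):
    pickled_obj = pickle.dumps(obj)
    # send the pickled string in chunk_size pieces, then the end-of-message sentinel
    for i in range(0, len(pickled_obj), chunk_size):
        MPI.COMM_WORLD.send(pickled_obj[i:i + chunk_size], dest=dest)
    MPI.COMM_WORLD.send("[[MSG_OVER]]", dest=dest)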
def Collect(self, r):
    """ collects matrix r from the nodes """
    a = self.getId(r, False)
    self.Do(a, OP_COLLECT)
    coll = []
    for _ in range(1, self.comm.size):
        tup = self.Recv(None, MPI.ANY_SOURCE, OP_COLLECT)
        coll.append(tup)
    t = mu.ListToMatrix(coll)
    return t
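Collect() broadcasts an OP_COLLECT command and then gathers one tuple from every other rank. A hypothetical node-side reply written with plain mpi4py calls instead of the class's own Send/Recv wrappers; the OP_COLLECT value and the (rank, block) payload shape are assumptions:

from mpi4py import MPI

OP_COLLECT = 41   # assumed tag value

def reply_collect(comm, local_block):
    # return this node's block, labelled with its rank so the master can order the pieces
    comm.send((comm.Get_rank(), local_block), dest=0, tag=OP_COLLECT)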