# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def manager():
    """Spawn one worker process per spare CPU core and wait for them all.

    Fills a shared queue via ``initque``, then launches ``cpu_count() - 1``
    ``worker`` processes (ids starting at 1) sharing the queue and a lock,
    and blocks until every worker has exited.
    """
    n_workers = mp.cpu_count() - 1
    work_queue = mp.Queue()
    initque(work_queue)
    shared_lock = mp.Lock()
    workers = []
    for wid in xrange(n_workers):
        proc = mp.Process(target=worker, args=(work_queue, shared_lock, wid + 1))
        proc.start()
        workers.append(proc)
    # Wait for every worker before returning.
    for proc in workers:
        proc.join()
def __init__(self):
    # One-way pipe: consumers read from `reader`, the feeder writes to `writer`.
    self.reader, self.writer = mp.Pipe(duplex=False)
    # Separate locks for the read and write ends of the pipe.
    self.rlock = mp.Lock()
    self.wlock = mp.Lock()
    # Lazily created later — presumably a background thread that feeds the
    # pipe and a buffer of messages awaiting delivery (TODO confirm in the
    # rest of this class, which is not visible here).
    self.feeding_thread = None
    self.pending_messages = None
# NOTE(review): fragment begins mid-function — `f`, `x_train`, `y_train`,
# `path`, `feat`, `model_type`, `model_param`, `trials_counter`, `iter`,
# `fold`, `gini_cv`, `hyperopt_library` and `ml_score` are defined in code
# not shown here.
[x_test, y_test] = pickle.load(f)
pred_val = hyperopt_library(model_type, model_param, x_train, y_train, x_test, y_test)
# save the pred for cross validation
pred_file = "%s/%s_%s@%d.pred.pkl" %(path, feat, model_type, trials_counter)
with open(pred_file, 'wb') as f:
    # Protocol -1 = highest pickle protocol available.
    pickle.dump(pred_val, f, -1)
print "Cross Validation %d_%d, score %f" %(iter, fold, ml_score(y_test, pred_val))
#if model_type == 'logistic':
#    y_test = y_test / np.linalg.norm(y_test)
gini_cv[iter, fold] = ml_score(y_test, pred_val)
# NOTE(review): dangling `else:` — the matching `if` (single-process path)
# is not in view; fragment truncated at the top.
else: # multiprocess
    # Manager-backed list so worker processes can append scores.
    manager = multiprocessing.Manager()
    gini_cv = manager.list()
    lock = multiprocessing.Lock()
    mp_list = []
    # NOTE(review): `iter` shadows the builtin of the same name.
    for iter in range(config.kiter):
        for fold in range(config.kfold):
            # NOTE(review): `mp` here shadows any `multiprocessing as mp`
            # module alias for the rest of this scope.
            mp = ModelProcess(lock, iter, fold, feat, model_type, model_param, gini_cv)
            mp_list.append(mp)
    # Start every process, then join them all.
    for mp in mp_list:
        mp.start()
    for mp in mp_list:
        mp.join()
# Aggregate cross-validation scores once all folds are done.
gini_cv_mean = np.mean(gini_cv)
gini_cv_std = np.std(gini_cv)
print "Mean %f, Std %f" % (gini_cv_mean, gini_cv_std)
def __init__(self, languages=None,
             storage=None,
             path=None,
             hit_callback=print,
             test_func=anagramfunctions.test_anagram):
    """
    language selection is not currently implemented

    Args:
        languages: list of language codes. Defaults to ['en'], the only
            supported value; any other list raises NotImplementedError.
            `None` is treated as ['en'].
        storage: backend handed to ``self.setup_storage``.
        path: explicit database path; if falsy, a path is derived from
            ``common.ANAGRAM_DATA_DIR`` and the language list.
        hit_callback: callable invoked for each hit (default: ``print``).
        test_func: predicate used to test anagram candidates.

    Raises:
        NotImplementedError: if ``languages`` is anything but ['en'].
    """
    # Fix: the original default was the mutable list ['en'], which is
    # shared across every call; use a None sentinel instead.
    if languages is None:
        languages = ['en']
    if languages != ['en']:
        # (typo fixed: "other then" -> "other than")
        raise NotImplementedError(
            'languages other than \'en\' are not currently expected')
    self.languages = languages
    self._should_trim_cache = False
    self._write_process = None
    self._lock = multiprocessing.Lock()
    self._is_writing = multiprocessing.Event()
    # DB and cache files live under the shared data dir, named per language
    # list, unless an explicit path was supplied for the store.
    self.store_path = path or os.path.join(
        common.ANAGRAM_DATA_DIR,
        '%s_%s.db' % (DATA_PATH_COMPONENT, '_'.join(languages)))
    self.cachepath = os.path.join(
        common.ANAGRAM_DATA_DIR,
        '%s_%s.cache' % (CACHE_PATH_COMPONENT, '_'.join(languages)))
    self.hit_callback = hit_callback
    self.test_func = test_func
    self.cache, self.datastore = self.setup_storage(storage)
    self.stats = StatTracker()
def __init__(self, name, parent, ty, **setup):
    # Delegate standard node construction to the Node base class.
    Node.__init__(self, name, parent, ty, **setup)
    # Per-instance process lock — presumably guards concurrent access to
    # this node's state; TODO confirm against the methods that use it.
    self.lock = multiprocessing.Lock()
# NOTE(review): fragment begins mid-scope — `XwDb4`, `XwDb8`, `XwHaar`, `Xs`,
# `X2`, `XOpls`, `N`, `YList`, `ages` and `dataList` come from code not shown.
# Build (data, label) pairs for each wavelet-filtered representation.
XwDb4F, inds = MetabolomicsUtils.filterWavelet(XwDb4, N)
dataList.append((XwDb4F[:, inds], "Db4-" + str(N)))
XwDb8F, inds = MetabolomicsUtils.filterWavelet(XwDb8, N)
dataList.append((XwDb8F[:, inds], "Db8-" + str(N)))
XwHaarF, inds = MetabolomicsUtils.filterWavelet(XwHaar, N)
dataList.append((XwHaarF[:, inds], "Haar-" + str(N)))
dataList.extend([(Xs, "raw_std"), (XwDb4, "Db4"), (XwDb8, "Db8"), (XwHaar, "Haar"), (X2, "log"), (XOpls, "opls")])
#Data for functional TreeRank
dataListF = [(XwDb4, "Db4"), (XwDb8, "Db8"), (XwHaar, "Haar")]
dataListPCA = ([(Xs, "raw_std"), (XwDb4, "Db4"), (XwDb8, "Db8"), (XwHaar, "Haar"), (X2, "log"), (XOpls, "opls")])
lock = multiprocessing.Lock()
# Seed from the clock so the *order* the experiments run in varies per run...
numpy.random.seed(datetime.datetime.now().microsecond)
#numpy.random.seed(21)
permInds = numpy.random.permutation(len(dataList))
permIndsF = numpy.random.permutation(len(dataListF))
permIndsPCA = numpy.random.permutation(len(dataListPCA))
# ...then re-seed deterministically for the experiments themselves.
numpy.random.seed(21)
try:
    for ind in permInds:
        MetabolomicsExpRunner(YList, dataList[ind][0], dataList[ind][1], ages, args=(lock,)).run()
    for ind in permIndsF:
        MetabolomicsExpRunner(YList, dataListF[ind][0], dataListF[ind][1], ages, args=(lock,)).runF()
    for ind in permIndsPCA:
        # NOTE(review): fragment truncated here — the body of this loop and
        # the except/finally of the `try` are missing.
'list of coordinates')
# NOTE(review): the line above closes an add_argument call whose opening is
# not in view — this fragment is truncated at the top.
parser.add_argument('wsi_path', default=None, metavar='WSI_PATH', type=str,
                    help='Path to the input directory of WSI files')
parser.add_argument('coords_path', default=None, metavar='COORDS_PATH',
                    type=str, help='Path to the input list of coordinates')
parser.add_argument('patch_path', default=None, metavar='PATCH_PATH', type=str,
                    help='Path to the output directory of patch images')
parser.add_argument('--patch_size', default=256, type=int, help='patch size, '
                    'default 768')
# NOTE(review): help text says "default 768" but the actual default is 256.
parser.add_argument('--level', default=0, type=int, help='level for WSI, to '
                    'generate patches, default 0')
parser.add_argument('--num_process', default=5, type=int,
                    help='number of mutli-process, default 5')
# Shared progress counter and its lock, used by the worker processes.
count = Value('i', 0)
lock = Lock()
def process(opts):
    """Extract one patch from a whole-slide image and save it as a PNG.

    `opts` is a tuple (i, pid, x_center, y_center, args): patch index, slide
    id (file stem of a .tif under args.wsi_path), the patch centre
    coordinates, and the parsed CLI args.
    """
    i, pid, x_center, y_center, args = opts
    # Convert the centre coordinate to the patch's top-left corner.
    x = int(int(x_center) - args.patch_size / 2)
    y = int(int(y_center) - args.patch_size / 2)
    wsi_path = os.path.join(args.wsi_path, pid + '.tif')
    slide = openslide.OpenSlide(wsi_path)
    img = slide.read_region(
        (x, y), args.level,
        (args.patch_size, args.patch_size)).convert('RGB')
    img.save(os.path.join(args.patch_path, str(i) + '.png'))
    # NOTE(review): fragment appears truncated — the trailing `global`
    # declarations suggest a lock-protected progress-counter update followed
    # in the original.
    global lock
    global count
# Flat shared-memory buffers, one per factor array, sized to the element
# count of the corresponding array so gradients can be shared across
# processes without copying.
# NOTE(review): `mp_del_g1_arr` is consumed below but its creation (and the
# definitions of G1..O, the hyper-parameters, `paraserver`, `n_threads` and
# `self`) are not in view — fragment truncated at the top.
mp_del_g2_arr = mp.Array(c.c_double, int(np.product(G2.shape)))
mp_del_g3_arr = mp.Array(c.c_double, int(np.product(G3.shape)))
mp_del_u_arr = mp.Array(c.c_double, int(np.product(U.shape)))
mp_del_i_arr = mp.Array(c.c_double, int(np.product(I.shape)))
mp_del_a_arr = mp.Array(c.c_double, int(np.product(A.shape)))
mp_del_o_arr = mp.Array(c.c_double, int(np.product(O.shape)))
# NumPy views over the shared buffers, reshaped back to each array's shape —
# writes through these are visible to the other processes.
del_g1 = np.frombuffer(mp_del_g1_arr.get_obj()).reshape(G1.shape)
del_g2 = np.frombuffer(mp_del_g2_arr.get_obj()).reshape(G2.shape)
del_g3 = np.frombuffer(mp_del_g3_arr.get_obj()).reshape(G3.shape)
del_u = np.frombuffer(mp_del_u_arr.get_obj()).reshape(U.shape)
del_i = np.frombuffer(mp_del_i_arr.get_obj()).reshape(I.shape)
del_a = np.frombuffer(mp_del_a_arr.get_obj()).reshape(A.shape)
del_o = np.frombuffer(mp_del_o_arr.get_obj()).reshape(O.shape)
lock = mp.Lock()
# Work queues and shared counters for the parameter-server process.
q_samples_mse = mp.Queue()
q_samples_bpr = mp.Queue()
num_grad = mp.Value('i', 0)
error_square = mp.Value('d', 0)
error_bpr = mp.Value('d', 0)
processes = []
ps = mp.Process(target=paraserver,
                args=(user_item_pairs, user_item_aspect, user_aspect_opinion, item_aspect_opinion,
                      n_element_samples, n_bpr_samples, lambda_reg, n_epochs, lr,
                      G1, G2, G3, U, I, A, O,
                      error_square, error_bpr, q_samples_mse, q_samples_bpr,
                      del_g1, del_g2, del_g3, del_u, del_i, del_a, del_o, num_grad, n_threads, self.seed, self.verbose))
ps.start()
def _create(self, problem):
    # Attach a fresh Lock as an attribute named after `problem` —
    # presumably one lock per problem key; confirm against callers.
    setattr(self, str(problem), Lock())
import surreal.utils as U
from time import sleep
import threading
import multiprocessing
# NOTE(review): lock1 is bound to a threading.Lock and then immediately
# rebound to a multiprocessing.Lock two lines later, so the threading.Lock
# is dead — likely a leftover from switching lock types.
lock1 = threading.Lock()
lock2 = multiprocessing.Lock()  # NOTE(review): unused in the code shown here
lock1 = multiprocessing.Lock()
def compute(s, i):
    """Echo the inputs, then return `s` with `len(s) * 10 + i` appended as text."""
    print(s, i)
    suffix = len(s) * 10 + i
    return s + str(suffix)
def f():
    """Run compute('yo', 3) five times, 0.2s apart, while holding lock1."""
    with lock1:
        for _ in range(5):
            compute('yo', 3)
            sleep(0.2)
def g():
    """Run compute('bar', 7) five times, 0.2s apart, while holding lock1."""
    with lock1:
        for _ in range(5):
            compute('bar', 7)
            sleep(0.2)