How to use the multiprocessing.Manager function in multiprocessing

To help you get started, we’ve selected a few multiprocessing.Manager examples, based on popular ways it is used in public projects. The snippets below are excerpts, so some begin or end mid-function, and a few of the older projects use Python 2 syntax (print statements, xrange).

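Before the project examples, here is a minimal sketch of the core pattern (names are illustrative): Manager() starts a server process that hosts shared objects, and the list/dict proxies it hands out can be passed to child processes.

import multiprocessing

def worker(shared, key):
    # each process writes through the proxy; the real dict lives in the
    # manager's server process
    shared[key] = key * key

if __name__ == '__main__':
    with multiprocessing.Manager() as manager:
        shared = manager.dict()
        procs = [multiprocessing.Process(target=worker, args=(shared, k))
                 for k in range(4)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(dict(shared))  # {0: 0, 1: 1, 2: 4, 3: 9}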

github muhaochen / MTransE / run_others / en_fr / test_ITransE_lan_mapping_120k_fk.py
    # excerpt begins mid-loop; the symmetric if-branch that seeds ef_map is cut off
    else:
        ef_map[line[0]].append(line[1])
for line in open(fmap2):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) is None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])

print "Loaded en_fr fr_en mappings."

#en:...
manager = Manager()
lock1 = Lock()

past_num = Value('i', 0, lock=True)
score = manager.list()  # store hit @ k

rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)

cpu_count = multiprocessing.cpu_count()
t0 = time.time()
def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
    while index.value < len(vocab):
        id = index.value
        index.value += 1
        word = vocab[id]
        if id % 100 == 0:
            pass  # progress reporting elided; excerpt truncated in the source
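
This excerpt shares a manager.list (score) and locked Value counters (past_num, rank, rank_num) across worker processes. Note that reading index.value and then incrementing it in separate statements is racy; below is a hedged sketch of the same work-claiming pattern done under the Value's lock (the model and vocabulary details are omitted, and the names are ours):

import multiprocessing
from multiprocessing import Manager, Value

def worker(index, results):
    # claim work items by advancing the shared counter under its lock
    while True:
        with index.get_lock():
            i = index.value
            if i >= 100:
                return
            index.value += 1
        results.append(i * i)

if __name__ == '__main__':
    manager = Manager()
    results = manager.list()          # proxy list, like `score` above
    index = Value('i', 0, lock=True)  # shared counter, like `past_num`
    procs = [multiprocessing.Process(target=worker, args=(index, results))
             for _ in range(multiprocessing.cpu_count())]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(len(results))  # 100
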
github kbengine / kbengine / kbe / src / lib / python / Doc / includes / mp_benchmarks.py
def test():
    manager = multiprocessing.Manager()

    gc.disable()

    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing multiprocessing.Queue\n')
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing multiprocessing.Pipe\n')
    test_pipespeed()

    print()
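
The benchmark pits a thread-only queue.Queue against a pipe-backed multiprocessing.Queue and a manager.Queue, which proxies every call through the manager's server process and is therefore usually the slowest of the three. A rough, illustrative timing sketch along the same lines (numbers vary by machine):

import multiprocessing
import time

def drain(q, n):
    for _ in range(n):
        q.get()

if __name__ == '__main__':
    n = 10000
    manager = multiprocessing.Manager()
    for name, q in [('multiprocessing.Queue', multiprocessing.Queue()),
                    ('manager.Queue', manager.Queue())]:
        p = multiprocessing.Process(target=drain, args=(q, n))
        p.start()
        t0 = time.time()
        for i in range(n):
            q.put(i)
        p.join()
        print('%s: %.3fs' % (name, time.time() - t0))
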
github migvel / color_trace / color_trace_multi.py
colors and quantization)
    stack: whether to stack color traces (recommended for more accurate output)
    despeckle: suppress speckles of this many pixels
    smoothcorners: corner smoothing: 0 for no smoothing, 1.334 for max
    optimizepaths: Bezier curve optimization: 0 for least, 5 for most
"""
    tmp = tempfile.mkdtemp()

    # create two job queues
    # q1 = scaling + color reduction
    q1 = multiprocessing.JoinableQueue()
    # q2 = isolation + tracing
    q2 = multiprocessing.JoinableQueue()

    # create a manager to share the layers between processes
    manager = multiprocessing.Manager()
    layers = []
    for i in range(min(len(inputs), len(outputs))):
        layers.append(manager.list())
    # and make a lock for reading and modifying layers
    layers_lock = multiprocessing.Lock()

    # create a shared memory counter of completed and total tasks for measuring progress
    progress = multiprocessing.Value('i', 0)
    if colors is not None:
        # this is only an estimate because quantization can result in fewer colors
        # than in the "colors" variable. This value is corrected by q1 tasks to converge
        # on the real total.
        total = multiprocessing.Value('i', len(layers) * colors)
    elif remap is not None:
        # get the number of colors in the palette image
        palettesize = len(make_palette(remap))
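
In this script the manager exists to share the per-layer result lists between the queue workers, with an explicit multiprocessing.Lock serializing updates. A stripped-down sketch of that shape (the queue payloads and worker logic here are invented placeholders):

import multiprocessing

def worker(q, layers, lock):
    while True:
        item = q.get()
        if item is None:          # sentinel: no more work
            q.task_done()
            return
        layer_index, value = item
        with lock:                # serialize updates to the shared layers
            layers[layer_index].append(value)
        q.task_done()

if __name__ == '__main__':
    q = multiprocessing.JoinableQueue()
    manager = multiprocessing.Manager()
    layers = [manager.list() for _ in range(3)]
    lock = multiprocessing.Lock()
    procs = [multiprocessing.Process(target=worker, args=(q, layers, lock))
             for _ in range(2)]
    for p in procs:
        p.start()
    for i in range(9):
        q.put((i % 3, i))
    for _ in procs:
        q.put(None)               # one sentinel per worker
    q.join()                      # blocks until every task_done() arrives
    print([list(layer) for layer in layers])
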
github SanPen / GridCal / src / GridCal / Engine / Simulations / PTDF / ptdf_driver.py
        delta_of_power_variations = get_ptdf_variations(circuit=circuit,
                                                        numerical_circuit=numerical_circuit,
                                                        group_by_technology=group_by_technology,
                                                        power_amount=power_amount)

        # declare the PTDF results
        results = PTDFResults(n_variations=len(delta_of_power_variations) - 1,
                              n_br=numerical_circuit.nbr,
                              br_names=numerical_circuit.branch_names)

        if text_func is not None:
            text_func('Running PTDF...')

        jobs = list()
        n_cores = multiprocessing.cpu_count()
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        # for v, variation in enumerate(delta_of_power_variations):
        v = 0
        nvar = len(delta_of_power_variations)
        while v < nvar:

            k = 0

            # launch only n_cores jobs at the time
            while k < n_cores + 2 and (v + k) < nvar:
                # run power flow at the circuit
                p = multiprocessing.Process(target=power_flow_worker, args=(v,
                                                                            numerical_circuit.nbus,
                                                                            numerical_circuit.nbr,
                                                                            calculation_inputs,
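
The excerpt is cut off mid-call, but the pattern is clear: a manager.dict() acts as the return channel, each worker stores its result under its variation index, and the parent launches roughly a core's worth of processes per batch. A reduced sketch with a placeholder in place of power_flow_worker:

import multiprocessing

def worker(v, return_dict):
    # placeholder for power_flow_worker: store a result under this job's key
    return_dict[v] = v * 10

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    n_cores = multiprocessing.cpu_count()
    nvar, v = 10, 0
    while v < nvar:
        jobs = []
        # launch only around n_cores jobs at a time, as in the excerpt
        while len(jobs) < n_cores + 2 and v < nvar:
            p = multiprocessing.Process(target=worker, args=(v, return_dict))
            p.start()
            jobs.append(p)
            v += 1
        for p in jobs:
            p.join()
    print(dict(sorted(return_dict.items())))
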
github darcamo / pyphysim / pyphysim / simulations / progressbar.py
to a file with name `filename`. This is usually useful for
            debugging and testing purposes.
        style : str
            The progressbar style. It controls which progressbar is used to
            display progress. It can be either 'text1', 'text2', 'text3', or
            'ipython'
        """
        self._progresschar = progresschar
        self._message = message

        self._sleep_time = sleep_time
        self._last_id = -1

        self._filename = filename

        self._manager = multiprocessing.Manager()
        self._client_data_list: List[Any] = self._manager.list()  # pylint: disable=E1101

        self._style = style

        # total_final_count will be updated each time the register_*
        # function is called.
        #
        # Note that we use a Manager.Value object to store the value
        # instead of using a simple integer because we want modifications
        # to this value to be seen by the other updating process even after
        # start_updater has been called if we are still in the
        # 'start_delay' time.
        # pylint: disable=E1101
        self._total_final_count = self._manager.Value('L', 0)

        # self._update_process will store the process responsible for updating
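
As the comment explains, the count lives in a manager Value proxy (self._manager.Value('L', 0)) so that later registrations remain visible to the already-running updater process. One caveat worth knowing: unlike multiprocessing.Value, a manager Value has no get_lock(), so compound updates need an explicit lock. An illustrative sketch:

import multiprocessing

def bump(counter, lock):
    # a compound read-modify-write on the proxy needs an explicit lock
    with lock:
        counter.value = counter.value + 1

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    counter = manager.Value('L', 0)   # proxied value held by the server process
    lock = manager.Lock()             # manager Values have no get_lock()
    procs = [multiprocessing.Process(target=bump, args=(counter, lock))
             for _ in range(8)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)  # 8
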
github eHealthAfrica / aether / aether-entity-extraction-module / aether / extractor / manager.py
def __init__(self, redis=None, channel=settings.SUBMISSION_CHANNEL):
        self.redis = redis
        self.channel = channel

        self.stopped = True

        self.extraction_pool = mp.Pool()
        # only one concurrent request to Kernel.
        self.kernel_comm_pool = mp.Pool(processes=1)

        self.manager = mp.Manager()
        self.processed_submissions = self.manager.Queue()

        self.worker_thread = Thread(target=self.worker, daemon=True)
        self.pull_thread = Thread(target=self.pull, daemon=True)
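
Pairing a manager.Queue() with mp.Pool workers, as this class does, matters: an ordinary multiprocessing.Queue cannot be passed as a task argument to pool workers (it raises a RuntimeError about sharing queues only through inheritance), while a manager queue, being a picklable proxy, can. A hedged sketch of that combination (task and the sizes are illustrative):

import multiprocessing as mp

def task(n, q):
    # pool workers can receive the manager queue because it is a proxy
    q.put(n * n)

if __name__ == '__main__':
    manager = mp.Manager()
    q = manager.Queue()
    with mp.Pool(processes=4) as pool:
        for n in range(5):
            pool.apply_async(task, args=(n, q))
        pool.close()
        pool.join()
    results = [q.get() for _ in range(5)]
    print(sorted(results))  # [0, 1, 4, 9, 16]
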
github hellorocky / LearnByCoding / python / file / alexa.py
def multiprocess(self):
        """
        Using multiprocess.
        """
        self.domain = multiprocessing.Manager().list()
        start = time.time()
        processes = []
        for page in xrange(self.totalPage):
            p = multiprocessing.Process(target=self.getURList, args=(page,))
            p.start()
            processes.append(p)
        for process in processes:
            process.join()
        end = time.time()
        print "***" * 10 + "multiprocessing" + "***" * 10
        print "Total domains: {0}".format(len(self.domain))
        print "Total used %0.2f seconds." % (end - start)
github davidsbatista / BREDS / automatic-evaluation / code-examples / evaluate-tiago3.py
@timecall
def calculate_c(corpus, database, b):
    # contains the database facts described in the corpus but not extracted by the system
    #
    # G' = superset of G, cartesian product of all possible entities and relations (i.e., G' = E x R x E)
    # for now, all relationships from a sentence
    print "Building G', a superset of G"
    manager = multiprocessing.Manager()
    queue = manager.Queue()
    g_dash = manager.list()
    num_cpus = multiprocessing.cpu_count()

    with open(corpus) as f:
        print "Reading corpus into memory"
        data = f.readlines()
        print "Storing in shared Queue"
        for l in data:
            queue.put(l)

    processes = [multiprocessing.Process(target=process_corpus, args=(queue, g_dash)) for i in range(num_cpus)]
    print "Running", len(processes), "threads"

    for proc in processes:
        proc.start()
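
The workers drain the shared manager.Queue and append matches to the shared g_dash list. A minimal sketch of the same fan-out shape, with a placeholder for the repository's process_corpus logic; q.get_nowait on the proxy re-raises queue.Empty, which gives the workers a natural exit once the pre-filled queue drains:

import multiprocessing
import queue

def process_corpus(q, g_dash):
    # paraphrase of the worker: drain the shared queue, collect results
    while True:
        try:
            line = q.get_nowait()
        except queue.Empty:
            return
        g_dash.append(line.strip().upper())  # placeholder for real extraction

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    q = manager.Queue()
    g_dash = manager.list()
    for l in ['a sentence', 'another sentence']:
        q.put(l)
    procs = [multiprocessing.Process(target=process_corpus, args=(q, g_dash))
             for _ in range(multiprocessing.cpu_count())]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(g_dash))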