How to use the multiprocess.Queue function in multiprocess

To help you get started, we've selected a few multiprocess.Queue examples, drawn from popular public projects that show common ways it is used.

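Before the project examples, here is a minimal, self-contained sketch of the core pattern: the parent puts work on one Queue, a child process consumes it and pushes results onto another. The function and variable names are illustrative, not taken from any project below.

from multiprocess import Process, Queue

def double_worker(inbox, outbox):
    # Consume items until the 'STOP' sentinel arrives.
    for item in iter(inbox.get, 'STOP'):
        outbox.put(item * 2)

if __name__ == '__main__':
    inbox, outbox = Queue(), Queue()
    p = Process(target=double_worker, args=(inbox, outbox))
    p.start()
    for i in range(5):
        inbox.put(i)
    inbox.put('STOP')   # tell the worker to exit
    print([outbox.get() for _ in range(5)])
    p.join()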

Example: uqfoundation/multiprocess, py2.7/examples/ex_workers.py (view on GitHub)
from multiprocess import Process, Queue

def test():
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks (map is eager in Python 2, so this enqueues everything)
    map(task_queue.put, TASKS1)

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
    print 'Unordered results:'
    for i in range(len(TASKS1)):
        print '\t', done_queue.get()

    # Add more tasks using `put()` instead of `putMany()`
    for task in TASKS2:
        task_queue.put(task)
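This excerpt assumes worker, mul, and plus are defined earlier in ex_workers.py. A sketch consistent with the classic multiprocessing example this file is adapted from (the exact upstream result formatting differs):

def mul(a, b):
    return a * b

def plus(a, b):
    return a + b

def worker(input, output):
    # Pull (func, args) tasks until the 'STOP' sentinel arrives.
    for func, args in iter(input.get, 'STOP'):
        output.put('%s%s = %s' % (func.__name__, args, func(*args)))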
Example: uqfoundation/multiprocess, py3.1/examples/ex_workers.py (view on GitHub)
from multiprocess import Process, Queue

def test():
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks (map is lazy in Python 3, so force it with list())
    list(map(task_queue.put, TASKS1))

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
    print('Unordered results:')
    for i in range(len(TASKS1)):
        print('\t', done_queue.get())

    # Add more tasks using `put()` instead of `putMany()`
    for task in TASKS2:
        task_queue.put(task)
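The excerpt stops right after queueing TASKS2. In the stdlib multiprocessing example this file mirrors, the function finishes by draining the remaining results and shutting the workers down with a sentinel; a likely continuation:

    # Get and print the results for TASKS2
    for i in range(len(TASKS2)):
        print('\t', done_queue.get())

    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')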
Example: uqfoundation/multiprocess, py3.2/examples/benchmarks.py (view on GitHub)
import gc
import threading
import queue as Queue              # assumed alias: keeps the py2-era Queue.Queue spelling working
import multiprocess as processing  # assumed alias: these examples refer to multiprocess as 'processing'

def test():
    manager = processing.Manager()

    gc.disable()

    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, Queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing processing.Queue\n')
    test_queuespeed(processing.Process, processing.Queue(),
                    processing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(processing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing processing.Pipe\n')
    test_pipespeed()

    print()

    print('\n\t######## testing list\n')
    test_seqspeed(range(10))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(range(10)))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(processing.Array('i', range(10), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(processing.Array('i', range(10), lock=True))
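test_queuespeed, test_pipespeed, and test_seqspeed are defined elsewhere in benchmarks.py. As a rough illustration of what such a harness measures (names and structure here are illustrative, not the project's actual code):

import time

def test_queuespeed(Worker, q, condition, elems=10000):
    # Time pushing `elems` items through `q` from a child worker and
    # draining them in the parent. `condition` is unused in this sketch.
    def feeder():
        for i in range(elems):
            q.put(i)
        q.put(None)  # sentinel

    w = Worker(target=feeder)
    start = time.time()
    w.start()
    while q.get() is not None:
        pass
    w.join()
    print('%d objects through queue in %.3f s' % (elems, time.time() - start))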
Example: mozilla/OpenWPM, automation/BrowserManager.py (view on GitHub)
        def check_queue(launch_status):
            result = self.status_queue.get(True, self._SPAWN_TIMEOUT)
            if result[0] == 'STATUS':
                launch_status[result[1]] = True
                return result[2]
            elif result[0] == 'CRITICAL':
                reraise(*pickle.loads(result[1]))
            elif result[0] == 'FAILED':
                raise BrowserCrashError(
                    'Browser spawn returned failure status')

        while not success and \
                unsuccessful_spawns < self._UNSUCCESSFUL_SPAWN_LIMIT:
            self.logger.debug("BROWSER %i: Spawn attempt %i " % (
                self.crawl_id, unsuccessful_spawns))
            # Resets the command/status queues
            (self.command_queue, self.status_queue) = (Queue(), Queue())

            # builds and launches the browser_manager
            args = (self.command_queue, self.status_queue, self.browser_params,
                    self.manager_params, crash_recovery)
            self.browser_manager = Process(target=BrowserManager, args=args)
            self.browser_manager.daemon = True
            self.browser_manager.start()

            # Read success status of browser manager
            launch_status = dict()
            try:
                # 1. Selenium profile created
                spawned_profile_path = check_queue(launch_status)
                # 2. Profile tar loaded (if necessary)
                check_queue(launch_status)
                # 3. Browser launch attempted
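The pattern here, a child process reporting tagged status tuples over a Queue that the parent reads with a timeout, reduces to a small sketch (names and payloads are illustrative):

from multiprocess import Process, Queue
from queue import Empty

def child(status_queue):
    # Report a tagged status tuple back to the parent.
    status_queue.put(('STATUS', 'profile', '/tmp/profile'))

if __name__ == '__main__':
    status_queue = Queue()
    Process(target=child, args=(status_queue,)).start()
    try:
        tag, key, value = status_queue.get(True, 5)  # block for up to 5 seconds
        print(tag, key, value)
    except Empty:
        print('child failed to report in time')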
Example: nottombrown/rl-teacher, agents/parallel-trpo/parallel_trpo/rollouts.py (view on GitHub)
def __init__(self, env_id, make_env, reward_predictor, num_workers, max_timesteps_per_episode, seed):
        self.num_workers = num_workers
        self.predictor = reward_predictor

        self.tasks_q = multiprocess.JoinableQueue()
        self.results_q = multiprocess.Queue()

        self.actors = []
        for i in range(self.num_workers):
            new_seed = seed * 1000 + i  # Give each actor a uniquely seeded env
            self.actors.append(Actor(self.tasks_q, self.results_q, env_id, make_env, new_seed, max_timesteps_per_episode))

        for a in self.actors:
            a.start()

        # we will start by running 20,000 / 1000 = 20 episodes for the first iteration  TODO OLD
        self.average_timesteps_in_episode = 1000
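The Actor class lives elsewhere in rollouts.py; the essential JoinableQueue contract is that workers call task_done() for each item so the parent can join() the queue. A minimal illustration of that contract (not the project's code):

import multiprocess

def actor_loop(tasks_q, results_q):
    while True:
        task = tasks_q.get()        # block until work arrives
        if task is None:            # sentinel: shut down
            tasks_q.task_done()
            break
        results_q.put(task * 2)     # stand-in for running a rollout
        tasks_q.task_done()         # lets tasks_q.join() return

if __name__ == '__main__':
    tasks_q = multiprocess.JoinableQueue()
    results_q = multiprocess.Queue()
    w = multiprocess.Process(target=actor_loop, args=(tasks_q, results_q))
    w.start()
    for i in range(3):
        tasks_q.put(i)
    tasks_q.join()                  # wait for every task_done()
    tasks_q.put(None)               # then shut the worker down
    print([results_q.get() for _ in range(3)])
    w.join()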
Example: sandabuliu/python-agent, agent/agent/agent.py (view on GitHub)
def __init__(self, source, sender=None, rule=None, event=Event,
                 clean=None, parser_num=1, queue_size=1024, retry_time=600,
                 process=False, fmt='result', **kwargs):
        if process:
            self.Queue = multiprocessing.Queue
            self.Process = multiprocessing.Process
        else:
            self.Queue = Queue.Queue
            self.Process = threading.Thread

        clean = clean or DefaultCleaner(fmt).clean
        super(Agent, self).__init__(**kwargs)
        self.clean = try_catch(clean)
        self.source = source
        self.sender = sender or Null()
        self.event = event
        self.parser = LogParser(rule) if rule else DefaultParser()
        self.sender.send = send_retry(self.sender.send, self)
        self.qin = self.Queue(queue_size or 1024)
        self.parser_num = 0
        self.parser_init = parser_num or 1
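The useful idea in this example is selecting a queue/worker backend at runtime: process-backed queues for isolation, thread-backed for lower overhead. A condensed version of the same switch (Python 3 names; the project itself is Python 2):

import threading
import queue
import multiprocess

def make_backend(process=False):
    # Both pairs expose the same put/get and start/join interfaces.
    if process:
        return multiprocess.Queue, multiprocess.Process
    return queue.Queue, threading.Thread

QueueCls, WorkerCls = make_backend(process=True)
qin = QueueCls(1024)  # bounded queue, as in the Agent above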
Example: BerkeleyAutomation/autolab_core, autolab_core/data_stream_recorder.py (view on GitHub)
        Parameters
        ----------
        name : string
            User-friendly identifier for this data stream
        data_sampler_method : function
            Method to call to retrieve data
        """
        Process.__init__(self)
        self._data_sampler_method = data_sampler_method

        self._has_set_sampler_params = False
        self._recording = False

        self._name = name

        self._cmds_q = Queue()
        self._data_qs = [Queue()]
        self._ok_q = None
        self._tokens_q = None

        self._save_every = save_every
        self._cache_path = cache_path
        self._saving_cache = cache_path is not None
        if self._saving_cache:
            self._save_path = os.path.join(cache_path, self.name)
            if not os.path.exists(self._save_path):
                os.makedirs(self._save_path)

        self._start_data_segment = 0
        self._cur_data_segment = 0
        self._saving_ps = []
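data_stream_recorder builds the recorder as a Process subclass fed by a command queue, draining samples into data queues. A minimal sketch of that shape (class and method names are illustrative):

from multiprocess import Process, Queue

class Recorder(Process):
    def __init__(self, sampler):
        Process.__init__(self)
        self._sampler = sampler
        self._cmds_q = Queue()   # control messages from the parent
        self._data_q = Queue()   # sampled data back to the parent

    def run(self):
        recording = False
        while True:
            # empty() is only approximate for multiprocess queues,
            # which is fine for a best-effort control check.
            if not self._cmds_q.empty():
                cmd = self._cmds_q.get()
                if cmd == 'stop':
                    break
                recording = (cmd == 'start')
            if recording:
                self._data_q.put(self._sampler())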