How to use the multiprocessing.cpu_count function in multiprocessing

To help you get started, we’ve selected a few multiprocessing.cpu_count examples, based on popular ways the function is used in public projects.

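As a quick reference before the project examples: multiprocessing.cpu_count() returns the number of CPUs in the system, and its most common use is to size a pool of worker processes. A minimal, self-contained sketch of that idea (the square function is a toy workload invented for illustration):

import multiprocessing

def square(n):
    # Toy workload used only to demonstrate the pool.
    return n * n

if __name__ == '__main__':
    # Number of CPUs reported for this machine.
    n_procs = multiprocessing.cpu_count()
    print('CPUs available:', n_procs)

    # Common pattern: size a process pool to match the CPU count.
    with multiprocessing.Pool(processes=n_procs) as pool:
        print(pool.map(square, range(10)))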

github aclements / commuter / split-testgen.py
#!/usr/bin/env python

import os, sys, multiprocessing, argparse, re, glob

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--output-dir', metavar='DIR', required=True,
                    help='Store testgen shards in DIR')
parser.add_argument('-j', '--jobs', type=int,
                    default=multiprocessing.cpu_count(),
                    help='Number of shards (and resulting make jobs)')
parser.usage = parser.format_usage().split(':',1)[1].strip() + ' < testgen.c'
args = parser.parse_args()

shard_names = [os.path.join(args.output_dir, 'testgen.%d.c' % i)
               for i in xrange(max(args.jobs - 1, 1))]
if set(glob.glob(os.path.join(args.output_dir, 'testgen.*.c'))) - \
   set(shard_names):
    parser.error('Output directory is dirty')
outarray = file(os.path.join(args.output_dir, 'testgen.c'), 'w')
outshards = [file(path, 'w') for path in shard_names]

testparts = [p + '\n\n' for p in sys.stdin.read().split('\n\n')]

# Emit common headers
while testparts:
github binji / binjgb / scripts / tester.py
def main(args):
  parser = argparse.ArgumentParser()
  parser.add_argument('patterns', metavar='pattern', nargs='*',
                      help='test patterns.')
  parser.add_argument('-j', '--num-processes',
                      type=int, default=multiprocessing.cpu_count(),
                      help='num processes.')
  parser.add_argument('-e', '--exe', help='path to tester')
  parser.add_argument('-v', '--verbose', action='count', default=0,
                      help='show more info')
  parser.add_argument('-g', '--generate', action='store_true',
                      help='generate test result markdown')
  options = parser.parse_args(args)
  pattern_re = common.MakePatternRE(options.patterns)
  passed = 0
  if not os.path.exists(TEST_RESULT_DIR):
    os.makedirs(TEST_RESULT_DIR)

  tests = [Test(*test) for test in json.load(open(TEST_JSON))]
  tests = [test for test in tests if pattern_re.match(test.rom)]

  start_time = time.time()
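Both examples above follow the same pattern: an argparse -j option whose default is the machine's CPU count, so the tool parallelizes sensibly out of the box but can still be overridden. A stripped-down sketch of just that pattern (the option name and help text are illustrative, not taken from either project):

import argparse
import multiprocessing

parser = argparse.ArgumentParser()
parser.add_argument('-j', '--jobs', type=int,
                    default=multiprocessing.cpu_count(),
                    help='number of parallel jobs (defaults to the CPU count)')
args = parser.parse_args()

# args.jobs now holds either the user-supplied value or the CPU count.
print('running with', args.jobs, 'jobs')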
github DrDomAdmin / security-tools / brute_http_basic.py
            print '[+] Success: {0}/{1}'.format(creds[0], creds[1])
            success_queue.put(creds)
            return


if __name__ == '__main__':
    if len(sys.argv) != 4:
        print 'USAGE: brute_http_basic.py url userfile passfile'
        sys.exit()

    cred_queue = multiprocessing.Queue()
    success_queue = multiprocessing.Queue()
    procs = []

    # Create one worker process for each processor.
    for i in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=worker, args=(sys.argv[1],
                                                         cred_queue,
                                                         success_queue))
        procs.append(p)
        p.start()

    for user in open(sys.argv[2]):
        user = user.rstrip('\r\n')
        if user == '':
            continue
        for pwd in open(sys.argv[3]):
            pwd = pwd.rstrip('\r\n')
            cred_queue.put((user, pwd))

    # Wait for all worker processes to finish
    for p in procs:
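The excerpt above is cut off in this listing just before the worker processes are joined. Its structure is: start one multiprocessing.Process per CPU, feed work items through a shared Queue, then wait for the workers. A self-contained sketch of the same structure, with a dummy worker and a sentinel-based shutdown standing in for the original HTTP logic (both are assumptions made for illustration):

import multiprocessing

def worker(queue):
    while True:
        item = queue.get()
        if item is None:  # sentinel: no more work
            return
        print('processing', item)

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    procs = []

    # One worker process per CPU, all reading from the same queue.
    for _ in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=worker, args=(queue,))
        procs.append(p)
        p.start()

    for item in range(20):
        queue.put(item)
    for _ in procs:  # one sentinel per worker so each one exits
        queue.put(None)

    # Wait for all worker processes to finish.
    for p in procs:
        p.join()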
github CSAILVision / NetDissect-Lite / loader / data_loader.py
        categories: a list of categories to include in each batch.
        batch_size: number of data items for each batch.
        ahead: the number of data items to prefetch ahead.
        '''
        self.segmentation = segmentation
        self.split = split
        self.randomize = randomize
        self.random = random.Random()
        if randomize is not True:
            self.random.seed(randomize)
        self.categories = categories
        self.once = once
        self.batch_size = batch_size
        self.ahead = ahead
        # Initialize the multiprocessing pool
        n_procs = cpu_count()
        if thread:
            self.pool = ThreadPool(processes=n_procs)
        else:
            original_sigint_handler = setup_sigint()
            self.pool = Pool(processes=n_procs, initializer=setup_sigint)
            restore_sigint(original_sigint_handler)
        # Prefilter the image indexes of interest
        if start is None:
            start = 0
        if end is None:
            end = segmentation.size()
        self.indexes = range(start, end)
        if split:
            self.indexes = [i for i in self.indexes
                    if segmentation.split(i) == split]
        if self.randomize:
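The loader above sizes either a thread pool or a process pool to cpu_count(), depending on a thread flag. A reduced sketch of that choice (the work function and flag handling are illustrative; the signal-handler setup from the original is omitted):

from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool

def work(i):
    # Placeholder for the real per-item work.
    return i * i

def make_pool(thread=False):
    n_procs = cpu_count()
    # Threads share memory and start quickly; processes give real CPU parallelism.
    if thread:
        return ThreadPool(processes=n_procs)
    return Pool(processes=n_procs)

if __name__ == '__main__':
    pool = make_pool(thread=True)
    print(pool.map(work, range(8)))
    pool.close()
    pool.join()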
github thombashi / pingparsing / pingparsing / cli.py
def main():
    options = parse_option()

    initialize_log_handler(options.log_level)

    logger = logbook.Logger("pingparsing cli")
    logger.level = options.log_level
    set_log_level(options.log_level)

    output = {}
    use_stdin, found_stdin_specifier = is_use_stdin()
    if not use_stdin and not found_stdin_specifier:
        from concurrent import futures

        max_workers = (
            multiprocessing.cpu_count() * 2 if options.max_workers is None else options.max_workers
        )
        count, deadline, timeout = get_ping_param(options)
        logger.debug(
            "max-workers={}, count={}, deadline={}, timeout={}".format(
                max_workers, count, deadline, timeout
            )
        )

        try:
            with futures.ProcessPoolExecutor(max_workers) as executor:
                future_list = []
                for dest_or_file in options.destination_or_file:
                    logger.debug("start {}".format(dest_or_file))
                    future_list.append(
                        executor.submit(
                            parse_ping,
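The pingparsing CLI above (the excerpt is cut off mid-call in this listing) defaults max_workers to twice the CPU count when the user does not set it, a reasonable over-subscription for work that mostly waits on network I/O, and submits one future per destination. A minimal sketch of that sizing with concurrent.futures (ping_host and the host list are stand-ins, not the project's parse_ping):

import multiprocessing
from concurrent import futures

def ping_host(host):
    # Stand-in for the real per-destination work (e.g. invoking ping).
    return host, 'ok'

if __name__ == '__main__':
    hosts = ['192.0.2.1', '198.51.100.1', '203.0.113.1']
    max_workers = multiprocessing.cpu_count() * 2
    with futures.ProcessPoolExecutor(max_workers) as executor:
        future_list = [executor.submit(ping_host, host) for host in hosts]
        for future in futures.as_completed(future_list):
            print(future.result())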
github davidsbatista / BREDS / automatic-evaluation / code-examples / evaluate-tiago2.py
@timecall
def calculate_d(g_dash, database, a, e1_type, e2_type, index):
    # contains facts described in the corpus that are not in the system output nor in the database
    #
    # by applying the PMI of the facts not in the database (i.e., G' \in D)
    # we determine |G \ D|, then we can estimate |d| = |G \ D| - |a|
    #
    # |G \ D|
    # determine facts not in the database, with high PMI, that is, facts that are true and are not in the database

    manager = multiprocessing.Manager()
    queue = manager.Queue()
    g_minus_d = manager.list()
    num_cpus = multiprocessing.cpu_count()

    print len(g_dash)
    c = 0
    print "Storing g_dash in a shared Queue"
    for r in g_dash:
        c += 1
        if c % 25000 == 0:
            print c
        queue.put(r)

    print "queue size", queue.qsize()

    # calculate PMI for r not in database
    processes = [multiprocessing.Process(target=query_thread, args=(queue, database, g_minus_d, e1_type, e2_type, index)) for i in range(num_cpus)]
    for proc in processes:
        proc.start()
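The evaluation script above uses a multiprocessing.Manager so that one process per CPU can share a Queue of work and a list of results. A condensed, runnable sketch of that arrangement (query_worker and the input data are invented for illustration; the real script queries a database via query_thread):

import multiprocessing
import queue as queue_module

def query_worker(work_queue, results):
    # Drain the shared queue; exit once it is empty.
    while True:
        try:
            item = work_queue.get_nowait()
        except queue_module.Empty:
            return
        results.append(item * 2)

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    work_queue = manager.Queue()
    results = manager.list()
    num_cpus = multiprocessing.cpu_count()

    # All work is enqueued before the workers start, as in the script above.
    for item in range(100):
        work_queue.put(item)

    processes = [multiprocessing.Process(target=query_worker,
                                         args=(work_queue, results))
                 for _ in range(num_cpus)]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()

    print(len(results), 'results collected')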
github ManaZeak / ManaZeak / app / collection / library.py
            if procNumber == 0:
                print("ERROR!")
                return
        splicedMP3 = splitTable(mp3Files, procNumber)
        for mp3 in splicedMP3:
            thread = ImportBulkThread(0, mp3, convert, mp3FileReference, coverPath)
            threads.append(thread)
            thread.start()
    # FLAC file processor
    if len(flacFiles) != 0:
        procNumber = multiprocessing.cpu_count()
        while len(flacFiles) < procNumber:
            procNumber -= 1
            if procNumber == 0:
                return
        splicedFLAC = splitTable(flacFiles, multiprocessing.cpu_count())
        for flac in splicedFLAC:
            thread = ImportBulkThread(1, flac, convert, flacFileReference, coverPath)
            threads.append(thread)
            thread.start()
    # OGG file processor
    if len(oggFiles) != 0:
        procNumber = multiprocessing.cpu_count()
        while len(oggFiles) < procNumber:
            procNumber -= 1
            if procNumber == 0:
                return
        splicedOGG = splitTable(oggFiles, multiprocessing.cpu_count())
        for ogg in splicedOGG:
            thread = ImportBulkThread(1, ogg, convert, oggFileReference, coverPath)
            threads.append(thread)
            thread.start()
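The importer above shrinks procNumber until it no longer exceeds the number of files, splits each file list into that many chunks with splitTable, and starts one ImportBulkThread per chunk. splitTable itself is not shown in the excerpt; a plausible sketch of chunking a list into roughly equal parts for cpu_count() workers might look like this (chunk_list is a hypothetical helper, not the project's splitTable):

import multiprocessing

def chunk_list(items, n_chunks):
    # Split items into at most n_chunks lists of roughly equal size.
    n_chunks = max(1, min(n_chunks, len(items) or 1))
    size, remainder = divmod(len(items), n_chunks)
    chunks, start = [], 0
    for i in range(n_chunks):
        end = start + size + (1 if i < remainder else 0)
        chunks.append(items[start:end])
        start = end
    return chunks

if __name__ == '__main__':
    files = ['track%02d.mp3' % i for i in range(10)]
    for chunk in chunk_list(files, multiprocessing.cpu_count()):
        print(chunk)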
github biologyguy / BuddySuite / workshop / MyFuncs.py
def usable_cpu_count():
    cpus = cpu_count()
    if cpus > 7:
        max_processes = cpus - 3
    elif cpus > 3:
        max_processes = cpus - 2
    elif cpus > 1:
        max_processes = cpus - 1
    else:
        max_processes = 1

    return max_processes
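usable_cpu_count() above deliberately reports fewer workers than cpu_count() on larger machines so that the rest of the system stays responsive. A typical way to consume such a helper is to size a Pool with it, for example (this usage is an illustration, not code from the project):

from multiprocessing import Pool, cpu_count

def usable_cpu_count():
    # Same headroom logic as the function above, compressed.
    cpus = cpu_count()
    if cpus > 7:
        return cpus - 3
    if cpus > 3:
        return cpus - 2
    if cpus > 1:
        return cpus - 1
    return 1

def work(x):
    return x * x

if __name__ == '__main__':
    with Pool(processes=usable_cpu_count()) as pool:
        print(pool.map(work, range(10)))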
github TravelModellingGroup / TMGToolbox / TMGToolbox / src / assignment / road / tolled / toll_attribute_transit_background.py
def _getTertiarySOLASpec(self, peakHourMatrixId, appliedTollFactor, costAttributeId):
        if self.PerformanceFlag:
            numberOfPocessors = multiprocessing.cpu_count()
        else:
            numberOfPocessors = max(multiprocessing.cpu_count() - 1, 1)
            
        modeId = _util.getScenarioModes(self.Scenario, ['AUTO'])[0][0]
        #Returns a list of tuples. Emme guarantees that there is always
        #one auto mode.

        if self.PerformanceFlag:
            numberOfPocessors = multiprocessing.cpu_count()
        else:
            numberOfPocessors = max(multiprocessing.cpu_count() - 2, 1)

        return {
                "type": "SOLA_TRAFFIC_ASSIGNMENT",
                "classes": [
                    {
                        "mode": modeId,
                        "demand": peakHourMatrixId,
github sashs / Ropper / ropper / service.py
        mp = False
        nan= 0
        processes = []
        single = False
        cache_file = None
        try:
            temp = RopperService.CACHE_FOLDER
            cache_file = temp + os.path.sep + self.__getCacheFileName(file)

            if not os.path.exists(cache_file):
                if not os.path.exists(cache_file+'_%d' % 1):
                    return
                else:
                    if isWindows():
                        raise RopperError('Cache has to be cleared.')
                    mp = True and multiprocessing.cpu_count()>1
            else:
                single = True
            if self.__callbacks and hasattr(self.__callbacks, '__message__'):
                self.__callbacks.__message__('Load gadgets from cache')
            if self.__callbacks and hasattr(self.__callbacks, '__gadgetSearchProgress__'):
                        self.__callbacks.__gadgetSearchProgress__(None, [], 0)
            if not mp:
                all_gadgets = []
                if single:
                    with open(cache_file,'rb') as f:
                        data = f.read()
                        all_gadgets.extend(eval(decode(data,'zip')))
                        if self.__callbacks and hasattr(self.__callbacks, '__gadgetSearchProgress__'):
                            self.__callbacks.__gadgetSearchProgress__(None, all_gadgets, 1.0)
                else:
                    for i in range(1,RopperService.CACHE_FILE_COUNT+1):