Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Report the runtime environment: available blosc compressors and their
# library versions, the Python/platform details, and the threading
# defaults blosc will use for the benchmarks below.
print("Compressors available: %s" % blosc.cnames)
print("Compressor library versions:")
for clib in sorted(blosc.clib_versions.keys()):
    print(" %s: %s" % (clib, blosc.clib_versions[clib]))
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
    # os_release_pretty_name() is defined elsewhere in this file;
    # presumably it reads the distribution's pretty name (e.g. from
    # /etc/os-release) and returns a falsy value when unavailable.
    distro = os_release_pretty_name()
    if distro:
        print("Linux dist:", distro)
if not processor:
    # platform.uname() can return an empty processor string on some systems.
    processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("Detected cores: %s" % blosc.ncores)
print("Number of threads to use by default: %s" % blosc.nthreads)
print("-=" * 38)
def process_nthread_arg(args):
    """Extract and set nthreads.

    If the requested thread count differs from the detected number of
    cores, configure blosc to use it; in either case report the thread
    count via print_verbose.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments; only ``args.nthreads`` is read.
    """
    if args.nthreads != blosc.ncores:
        blosc.set_nthreads(args.nthreads)
    print_verbose('using %d thread%s' %
                  (args.nthreads, 's' if args.nthreads > 1 else ''))
action='store_true',
default=False,
help='disable overwrite checks for existing files\n' +
'(use with caution)')
class CheckThreadOption(argparse.Action):
    """argparse action that validates the ``--nthreads`` value.

    Rejects values outside ``[1, blosc.BLOSC_MAX_THREADS]`` via
    ``log.error`` and stores the value on the namespace.
    """

    def __call__(self, parser, namespace, value, option_string=None):
        if not 1 <= value <= blosc.BLOSC_MAX_THREADS:
            # NOTE(review): log.error presumably aborts the program here;
            # otherwise the out-of-range value would still be stored by
            # the setattr below — confirm against the log module's code.
            log.error('%s must be 1 <= n <= %d'
                      % (option_string, blosc.BLOSC_MAX_THREADS))
        setattr(namespace, self.dest, value)
# Global option: number of blosc worker threads, validated by
# CheckThreadOption and defaulting to the detected core count.
global_group.add_argument(
    '-n', '--nthreads',
    metavar='[1, %d]' % blosc.BLOSC_MAX_THREADS,
    action=CheckThreadOption,
    default=blosc.ncores,
    type=int,
    dest='nthreads',
    help='set number of threads, (default: %(default)s (ncores))')

# One subcommand per top-level operation; 'c' is a short alias
# for 'compress'.
subparsers = parser.add_subparsers(
    title='subcommands', metavar='', dest='subcommand')

compress_parser = subparsers.add_parser(
    'compress',
    formatter_class=BloscPackCustomFormatter,
    help='perform compression on file')

c_parser = subparsers.add_parser(
    'c',
    formatter_class=BloscPackCustomFormatter,
    help="alias for 'compress'")
class CheckChunkSizeOption(argparse.Action):
(np.random.random_integers(0, 1000, N), "the random distribution")
)
# Baseline: time a raw memory copy of the first test array, so the
# compression throughput figures printed below can be compared against
# plain memory bandwidth.
in_ = arrays[0][0]
# Allocate and fully write the destination first (np.full touches every
# page), so page faults are not billed to the timed copy.
out_ = np.full(in_.size, fill_value=0, dtype=in_.dtype)
start = time.time()
out_ = ctypes.memmove(out_.__array_interface__['data'][0],
                      in_.__array_interface__['data'][0], N * 8)
tcpy = time.time() - start
print(" *** ctypes.memmove() *** Time for memcpy():\t%.3f s\t(%.2f GB/s)" % (
    tcpy, (N*8 / tcpy) / 2**30))
print("\nTimes for compressing/decompressing with clevel=%d and %d threads" % (
    clevel, blosc.ncores))
for (in_, label) in arrays:
print("\n*** %s ***" % label)
for cname in blosc.compressor_list():
for filter in [blosc.NOSHUFFLE, blosc.SHUFFLE, blosc.BITSHUFFLE]:
t0 = time.time()
c = blosc.compress_ptr(in_.__array_interface__['data'][0],
in_.size, in_.dtype.itemsize,
clevel=clevel, shuffle=filter, cname=cname)
tc = time.time() - t0
# cause page faults here
out = np.full(in_.size, fill_value=0, dtype=in_.dtype)
t0 = time.time()
blosc.decompress_ptr(c, out.__array_interface__['data'][0])
td = time.time() - t0
assert((in_ == out).all())
print(" *** %-8s, %-10s *** %6.3f s (%.2f GB/s) / %5.3f s (%.2f GB/s)" % (