def unserialize(cls, data, key, aggregation):
    """Unserialize an aggregated timeserie.

    :param data: Raw data buffer.
    :param key: A :class:`SplitKey` key.
    :param aggregation: The Aggregation object of this timeseries.
    """
    x, y = [], []
    if data:
        if cls.is_compressed(data):
            # Compressed format: skip the one-byte marker, then
            # decompress the lz4 block.
            uncompressed = lz4.block.decompress(
                memoryview(data)[1:].tobytes())
            nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN
            try:
                # NOTE: the dtype and the rest of this method were
                # truncated in the source; '<H' (little-endian uint16)
                # and the return below are assumed completions.
                y = numpy.frombuffer(uncompressed, dtype='<H',
                                     count=nb_points)
            except ValueError:
                raise ValueError("unable to unserialize aggregated timeserie")
    return x, y
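For context, a minimal round-trip sketch of the framing the snippet above implies (a one-byte compression marker in front of an lz4 block); MARKER and both helper names are hypothetical, not part of the original code:

import lz4.block
import numpy

MARKER = b'c'  # hypothetical one-byte "compressed" flag

def pack_points(values):
    # Serialize float64 samples and prepend the marker byte.
    raw = numpy.asarray(values, dtype='<d').tobytes()
    return MARKER + lz4.block.compress(raw)

def unpack_points(data):
    # Strip the marker and decompress, mirroring memoryview(data)[1:] above.
    raw = lz4.block.decompress(memoryview(data)[1:].tobytes())
    return numpy.frombuffer(raw, dtype='<d')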
def has_lz4():
    try:
        import lz4
        lz4.compress  # probe the legacy top-level API (and silence the unused import warning)
        return True
    except ImportError:
        return False
    except AttributeError:
        pass
    # modern lz4 has "compress" defined in lz4.block
    try:
        from lz4 import block as lz4block
        lz4block.compress  # probe the modern block-level API
        return True
    except (ImportError, AttributeError):
        return False
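A hedged usage sketch: evaluate has_lz4() once into a module-level flag and fall back to raw bytes when no usable binding is present. LZ4_OK mirrors the flag the __init__ snippet further down relies on; maybe_compress is a hypothetical helper:

LZ4_OK = has_lz4()

def maybe_compress(data):
    # Compress only when a usable lz4 binding was detected.
    if LZ4_OK:
        from lz4 import block as lz4block
        return lz4block.compress(data)
    return data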
def extract_nso(path_in, path_out):
    with open(path_in, 'rb') as f:
        text_off = read_u32(f, 0x10)
        text_loc = read_u32(f, 0x14)
        text_size = read_u32(f, 0x18)
        text_compressed_size = read_u32(f, 0x60)
        # Sanity check that the lz4 binding round-trips.
        test_data = b'hello'
        lz4.block.decompress(lz4.block.compress(test_data))
        print('Text offset: {}'.format(text_off))
        print('Text compressed size: {}'.format(text_compressed_size))
        print('Text uncompressed size: {}'.format(text_size))
        compressed_patched_text = read_at(f, text_off, text_compressed_size)
        print(hx(compressed_patched_text)[0:10])
        text = lz4.block.decompress(compressed_patched_text,
                                    uncompressed_size=text_size)
        decompressed_hash = read_at(f, 0xA0, 0x20)
        calculated_hash = sha256(text)
        print('Compressed size: {}'.format(text_compressed_size))
        print('Decompressed hash: {}'.format(hx(decompressed_hash)))
        print('Calculated hash: {}'.format(hx(calculated_hash)))
        if decompressed_hash == calculated_hash:
            # The original snippet was cut off here; writing the verified
            # .text segment to path_out is an assumed completion.
            with open(path_out, 'wb') as out:
                out.write(text)
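The helpers this snippet calls (read_u32, read_at, sha256, hx) are not shown on this page; plausible definitions might look like the following sketch (assumptions, not the original code):

import hashlib
import struct

def read_u32(f, offset):
    # Little-endian uint32 at an absolute file offset.
    f.seek(offset)
    return struct.unpack('<I', f.read(4))[0]

def read_at(f, offset, length):
    f.seek(offset)
    return f.read(length)

def sha256(data):
    return hashlib.sha256(data).digest()

def hx(data):
    return data.hex()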
def __init__(self, data, size, depth, compressed=True):
    if not LZ4_OK:
        # Fall back to storing raw bytes when lz4 is unavailable.
        compressed = False
    self._compressed = compressed
    if self._compressed:
        self._data = lz4.block.compress(data)
    else:
        self._data = data
    self._size = size
    self._depth = depth
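A hedged counterpart for reading the payload back; the class name Chunk and the data() accessor are assumptions built around the __init__ above:

import lz4.block

class Chunk:
    def __init__(self, data, size, depth, compressed=True):
        self._compressed = compressed
        self._data = lz4.block.compress(data) if compressed else data
        self._size = size
        self._depth = depth

    def data(self):
        # lz4.block.compress embeds the uncompressed size by default,
        # so no explicit uncompressed_size is needed here.
        if self._compressed:
            return lz4.block.decompress(self._data)
        return self._data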
def DecompressChunkData(chunk_data, data_len):
    '''Decompress an individual compressed chunk (tag=0x600D)'''
    uncompressed = b''
    if chunk_data[0:4] in [b'bv41', b'bv4-']:
        last_uncompressed = b''
        comp_start = 0  # bv** offset
        comp_header = chunk_data[comp_start:comp_start + 4]
        while (data_len > comp_start) and (comp_header != b'bv4$'):
            if comp_header == b'bv41':
                # '<II' is an assumed reconstruction of the truncated format.
                uncompressed_size, compressed_size = struct.unpack(
                    '<II', chunk_data[comp_start + 4:comp_start + 12])
                last_uncompressed = lz4.block.decompress(
                    chunk_data[comp_start + 12:comp_start + 12 + compressed_size], uncompressed_size=uncompressed_size)
                uncompressed += last_uncompressed
                comp_start += 12 + compressed_size
            else:
                break  # the 'bv4-' branch was cut off in this excerpt
            comp_header = chunk_data[comp_start:comp_start + 4]
    return uncompressed
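To make the bv41 layout concrete, here is a minimal sketch that builds a single-chunk buffer the parser above can walk. The layout (magic, two little-endian uint32 sizes, lz4 payload, bv4$ terminator) is inferred from the parsing code, and make_bv41_chunk is a hypothetical helper:

import struct
import lz4.block

def make_bv41_chunk(payload):
    # One 'bv41' block followed by the 'bv4$' end marker.
    comp = lz4.block.compress(payload, store_size=False)
    return (b'bv41' + struct.pack('<II', len(payload), len(comp))
            + comp + b'bv4$')

# Round-trip: DecompressChunkData(chunk, len(chunk)) == payload
chunk = make_bv41_chunk(b'spotlight' * 100)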
def _deserialize_array(column: Mapping) -> np.ndarray:
    """
    Takes a raw compressed/serialized binary blob retrieved from MongoDB
    and decompresses/deserializes it, returning the original Numpy array
    :param column: Input column
    :return: Numpy array
    """
    data = lz4.block.decompress(column['blob'])
    if column['dtype'] == 'object':
        return np.array([i.decode('utf-8') for i in msgpack.loads(data)])
    else:
        return np.load(io.BytesIO(data))
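A hedged sketch of the inverse operation this deserializer implies; only the 'blob' and 'dtype' field names come from the snippet, the rest is an assumption (including that msgpack is configured to round-trip bytes, as the decode('utf-8') above suggests):

import io
import lz4.block
import msgpack
import numpy as np

def _serialize_array(arr: np.ndarray) -> dict:
    # Mirror _deserialize_array: msgpack for object arrays,
    # np.save for everything else, then lz4-compress the payload.
    if arr.dtype == np.dtype('object'):
        data = msgpack.dumps([s.encode('utf-8') for s in arr])
    else:
        buf = io.BytesIO()
        np.save(buf, arr)
        data = buf.getvalue()
    return {'blob': lz4.block.compress(data), 'dtype': str(arr.dtype)}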
assert data_segment['p_vaddr'] == dot
# The pack format and arguments were cut off in the source; a
# little-endian uint32 of the virtual address is an assumed completion.
fp.write(struct.pack('<I', data_segment['p_vaddr']))
def _read_one_frame_16bit_linear(self, frame_index, resize_max_side):
    if frame_index < 0 or frame_index >= self._frame_count:
        raise Exception('Invalid frame index %s' % frame_index)
    if self._raise_error_on_missing_frame and not self._frame_indices[frame_index]:
        raise Exception('Missing frame index %s' % frame_index)
    compressed_buffer = self._read_frame(frame_index)
    buffer = lz4block.decompress(compressed_buffer,
                                 uncompressed_size=self._img_data_size)
    # np.fromstring is deprecated for binary input; frombuffer is the
    # equivalent replacement here.
    raw_img = np.frombuffer(
        buffer, np.uint8 if self.bitcount == 8 else np.uint16
    ).reshape((self.height, self.width))
    return raw_processing_to_16bit_linear(
        raw_img, self.bayer, self.blacklevel, self.bitcount,
        self.kB, self.kG, self.kR, resize_max_side=resize_max_side)
try:
    if compressed_block.block_type & 0x1000 == 0x1000:  # LZ4 compression
        if block_data[20:24] in [b'bv41', b'bv4-']:
            # check for bv41, version 97 in High Sierra has this header (bv41) and footer (bv4$)
            # There are often multiple chunks bv41.....bv41.....bv41.....bv4$
            # Sometimes bv4- (uncompressed data) followed by 4 bytes length, then data
            chunk_start = 20  # bv41 offset
            uncompressed = b''
            last_uncompressed = b''
            header = block_data[chunk_start:chunk_start + 4]
            while (self.block_size > chunk_start) and (header != b'bv4$'):
                log.debug("0x{:X} - {}".format(chunk_start, header))
                if header == b'bv41':
                    # '<II' is an assumed reconstruction of the truncated format.
                    uncompressed_size, compressed_size = struct.unpack(
                        '<II', block_data[chunk_start + 4:chunk_start + 12])
                    last_uncompressed = lz4.block.decompress(
                        block_data[chunk_start + 12:chunk_start + 12 + compressed_size],
                        uncompressed_size=uncompressed_size)
                    uncompressed += last_uncompressed
                    chunk_start += 12 + compressed_size
                else:
                    break  # the 'bv4-' branch was cut off in this excerpt
                header = block_data[chunk_start:chunk_start + 4]
except Exception:
    # The original exception handler was also cut off.
    log.exception('Error decompressing LZ4 block')
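The comments above also describe bv4- chunks: uncompressed data preceded by a 4-byte length. A hedged sketch of that branch as a standalone helper, under the layout those comments imply (the helper name is hypothetical):

import struct

def read_bv4_dash(block_data, chunk_start):
    # 'bv4-' chunk: 4-byte little-endian length, then raw (uncompressed)
    # bytes. Returns the payload and the offset of the next chunk header.
    size = struct.unpack('<I', block_data[chunk_start + 4:chunk_start + 8])[0]
    payload = block_data[chunk_start + 8:chunk_start + 8 + size]
    return payload, chunk_start + 8 + size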
def decompressLZ4(self, file):
    lz4_headers = [b"mozLz40\0", b"mozLz40p\0", b"mozLz40o\0"]
    for header in lz4_headers:
        value = file.read(len(header))
        if value == header:
            return lz4.block.decompress(file.read())
        # Not this magic; rewind before trying the next header.
        file.seek(0)
    return None
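A hedged usage sketch: Firefox session and bookmark backups (*.jsonlz4) use this mozLz4 framing, an 8-byte magic followed by an lz4 block whose 4-byte size prefix matches what lz4.block.decompress expects; load_jsonlz4 is a hypothetical helper:

import json
import lz4.block

def load_jsonlz4(path):
    # Strip the 8-byte 'mozLz40\0' magic, then lz4-decompress the rest.
    with open(path, 'rb') as f:
        magic = f.read(8)
        assert magic == b'mozLz40\0', 'not a mozLz4 file'
        return json.loads(lz4.block.decompress(f.read()))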