// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
get: (buf, off) => {
// Decodes the APE descriptor that precedes the APE_HEADER.
// All multi-byte fields are 32-bit little-endian.
const uint32At = (delta: number) => Token.UINT32_LE.get(buf, off + delta);
return {
// File identifier; should equal 'MAC '
ID: FourCcToken.get(buf, off),
// Version stored as number * 1000 (3.81 => 3810); 4-byte alignment makes this take 4 bytes
version: uint32At(4) / 1000,
// Size of this descriptor in bytes (allows later expansion of this header)
descriptorBytes: uint32At(8),
// Size of the APE_HEADER in bytes
headerBytes: uint32At(12),
// Size of the seek table in bytes (original comment was a copy-paste of the line above)
seekTableBytes: uint32At(16),
// Size of the header data taken from the original file, in bytes
headerDataBytes: uint32At(20),
// Low-order 32 bits of the APE frame data byte count
apeFrameDataBytes: uint32At(24),
// High-order 32 bits of the APE frame data byte count
apeFrameDataBytesHigh: uint32At(28),
// Terminating data at the end of the file (not including tag data)
terminatingDataBytes: uint32At(32),
// MD5 hash of the file (see format notes for usage... it's a little tricky)
fileMD5: new Token.BufferType(16).get(buf, off + 36)
};
}
};
protected parseUserCommentList(pageData: Buffer, offset: number) {
// Vendor string: 32-bit LE length prefix followed by the string body.
const vendorLen = Token.UINT32_LE.get(pageData, offset);
// Skip the length field plus the vendor string itself (string content not needed here).
offset += 4 + vendorLen;
// Number of user comments that follow.
const commentCount = Token.UINT32_LE.get(pageData, offset);
offset += 4;
// Each call consumes one comment and reports the number of bytes it used.
for (let remaining = commentCount; remaining > 0; --remaining) {
offset += this.parseUserComment(pageData, offset);
}
}
}
// Reads one Vorbis user comment ("KEY=value") from the tokenizer stream.
// NOTE(review): bare 'Promise' return type — presumably Promise<number> (bytes consumed); confirm.
// NOTE(review): this fragment is truncated mid-body by a bad splice; its continuation appears further down in this file.
private parseUserComment(userCommentListLength: number): Promise {
// A comment is a 32-bit LE length followed by that many bytes of text.
return this.tokenizer.readToken(Token.UINT32_LE).then((strLen) => {
// NOTE(review): Vorbis comments are UTF-8 per spec; 'ascii' may mangle non-ASCII values — confirm intent.
return this.tokenizer.readToken(new Token.StringType(strLen, 'ascii')).then((v) => {
// Split on the first '=': key before, value after.
const idx = v.indexOf('=');
const key = v.slice(0, idx).toUpperCase();
let value: any = v.slice(idx + 1);
// Cover art is transported base64-encoded under this well-known key.
if (key === 'METADATA_BLOCK_PICTURE') {
value = this.options.skipCovers ? null : VorbisPictureToken.fromBase64(value);
}
// value is null only when covers are being skipped.
if (value !== null)
this.tags.push({id: key, value});
// Bytes consumed: the 4-byte length field plus the string body.
const len = Token.UINT32_LE.len + strLen;
if (--userCommentListLength > 0) {
// if we don't want to read the duration
// then tell the parent stream to stop
// NOTE(review): duplicate of the APE descriptor reader earlier in this file,
// spliced here mid-function and truncated (no closing braces) — needs manual reconciliation.
get: (buf, off) => {
return {
// should equal 'MAC '
ID: FourCcToken.get(buf, off),
// versionIndex number * 1000 (3.81 = 3810) (remember that 4-byte alignment causes this to take 4-bytes)
version: Token.UINT32_LE.get(buf, off + 4) / 1000,
// the number of descriptor bytes (allows later expansion of this header)
descriptorBytes: Token.UINT32_LE.get(buf, off + 8),
// the number of header APE_HEADER bytes
headerBytes: Token.UINT32_LE.get(buf, off + 12),
// the number of seektable bytes (original comment was a copy-paste of the line above)
seekTableBytes: Token.UINT32_LE.get(buf, off + 16),
// the number of header data bytes (from original file)
headerDataBytes: Token.UINT32_LE.get(buf, off + 20),
// the number of bytes of APE frame data
apeFrameDataBytes: Token.UINT32_LE.get(buf, off + 24),
// the high order number of APE frame data bytes
apeFrameDataBytesHigh: Token.UINT32_LE.get(buf, off + 28),
// the terminating data of the file (not including tag data)
terminatingDataBytes: Token.UINT32_LE.get(buf, off + 32),
// the MD5 hash of the file (see notes for usage... it's a little tricky)
fileMD5: new Token.BufferType(16).get(buf, off + 36)
// Decodes a WavPack block header ('wvpk').
// NOTE(review): fragment is truncated — 'res' and the enclosing function are never closed here.
get: (buf, off) => {
// The flags word is read up-front because several derived fields below depend on it.
const flags = Token.UINT32_LE.get(buf, off + 24);
const res = {
// should equal 'wvpk'
BlockID: FourCcToken.get(buf, off),
// NOTE(review): comment duplicated from 'version' below; this field is the block size
blockSize: Token.UINT32_LE.get(buf, off + 4),
// 0x402 (1026) to 0x410 are valid for decode
version: Token.UINT16_LE.get(buf, off + 8),
// 40-bit total samples for entire file (if block_index == 0 and a value of -1 indicates an unknown length)
totalSamples: /* replace with bigint? (Token.UINT8.get(buf, off + 11) << 32) + */ Token.UINT32_LE.get(buf, off + 12),
// 40-bit block_index
blockIndex: /* replace with bigint? (Token.UINT8.get(buf, off + 10) << 32) + */ Token.UINT32_LE.get(buf, off + 16),
// 40-bit total samples for entire file (if block_index == 0 and a value of -1 indicates an unknown length)
blockSamples: Token.UINT32_LE.get(buf, off + 20),
// various flags for id and decoding, unpacked from the 32-bit flags word read above
flags: {
bitsPerSample: (1 + WavPack.getBitAllignedNumber(flags, 0, 2)) * 8,
isMono: WavPack.isBitSet(flags, 2),
isHybrid: WavPack.isBitSet(flags, 3),
isJointStereo: WavPack.isBitSet(flags, 4),
crossChannel: WavPack.isBitSet(flags, 5),
hybridNoiseShaping: WavPack.isBitSet(flags, 6),
floatingPoint: WavPack.isBitSet(flags, 7),
samplingRate: SampleRates[WavPack.getBitAllignedNumber(flags, 23, 4)],
isDSD: WavPack.isBitSet(flags, 31)
},
// NOTE(review): orphaned continuation of parseUserComment, duplicated/spliced here without its enclosing function header.
// NOTE(review): Vorbis comments are UTF-8 per spec; 'ascii' may mangle non-ASCII values — confirm intent.
return this.tokenizer.readToken(new Token.StringType(strLen, 'ascii')).then((v) => {
// Split on the first '=': key before, value after.
const idx = v.indexOf('=');
const key = v.slice(0, idx).toUpperCase();
let value: any = v.slice(idx + 1);
// Cover art is transported base64-encoded under this well-known key.
if (key === 'METADATA_BLOCK_PICTURE') {
value = this.options.skipCovers ? null : VorbisPictureToken.fromBase64(value);
}
// value is null only when covers are being skipped.
if (value !== null)
this.tags.push({id: key, value});
// Bytes consumed by this comment: 4-byte length prefix plus string body.
const len = Token.UINT32_LE.len + strLen;
if (--userCommentListLength > 0) {
// if we don't want to read the duration
// then tell the parent stream to stop
// stop = !readDuration;
// Recurse for the remaining comments, accumulating consumed byte counts.
return this.parseUserComment(userCommentListLength).then((recLen) => {
return len + recLen;
});
}
// NOTE(review): returning strLen here looks inconsistent with 'len' above (misses the 4-byte length prefix) — confirm.
return strLen;
});
});
get: (buf, off): Ogg.IPageHeader => {
// Byte holding the header-type bit flags.
const flagsOffset = off + 5;
// Reads one bit of the header-type flag byte.
const flagBit = (bit: number) => common.strtokBITSET.get(buf, flagsOffset, bit);
return {
// Capture pattern FourCC identifying the page.
capturePattern: FourCcToken.get(buf, off),
version: buf.readUInt8(off + 4),
headerType: {
continued: flagBit(0),
firstPage: flagBit(1),
lastPage: flagBit(2)
},
// Only 6 of the 8 bytes are read; the 2 most significant bytes cannot be represented here.
absoluteGranulePosition: buf.readIntLE(off + 6, 6),
streamSerialNumber: Token.UINT32_LE.get(buf, off + 14),
pageSequenceNo: Token.UINT32_LE.get(buf, off + 18),
pageChecksum: Token.UINT32_LE.get(buf, off + 22),
// Number of segment-table entries that follow this header.
page_segments: buf.readUInt8(off + 26)
};
}
};
// NOTE(review): orphaned fragment — the enclosing method header (and the definition of 'strLen') is missing above this point.
// Reads the vendor string (UTF-8, length 'strLen'), then the user-comment count,
// then delegates to parseUserComment; resolves to the total bytes consumed.
return this.tokenizer.readToken(new Token.StringType(strLen, 'utf-8')).then((vendorString: string) => {
return this.tokenizer.readToken(Token.UINT32_LE).then((userCommentListLength) => {
return this.parseUserComment(userCommentListLength).then((len) => {
// Two 4-byte length fields (vendor length + comment count) plus vendor string plus all comments.
return 2 * Token.UINT32_LE.len + strLen + len;
});
});
});
// NOTE(review): stray closer — one more '});' than this fragment opens; reconcile with the missing header above.
});
public readInt32(): number {
// Decodes a 32-bit little-endian word at the current cursor, then advances past it.
// NOTE(review): despite the name, this uses UINT32_LE, so the result is unsigned — confirm callers expect that.
const word = Token.UINT32_LE.get(this.data, this.offset);
this.offset += 4;
return word;
}
get: (buf, off) => {
// Header prefix: a 32-bit LE CRC followed by an 8-bit stream version.
const crc = Token.UINT32_LE.get(buf, off);
const streamVersion = Token.UINT8.get(buf, off + 4);
return {crc, streamVersion};
}
};