const dataAtom = await this.tokenizer.readToken(new AtomToken.DataAtom(metaAtom.header.length - AtomToken.Header.len));
if (dataAtom.type.set !== 0) {
throw new Error('Unsupported type-set != 0: ' + dataAtom.type.set);
}
// Use well-known-type table
// Ref: https://developer.apple.com/library/content/documentation/QuickTime/QTFF/Metadata/Metadata.html#//apple_ref/doc/uid/TP40000939-CH1-SW35
switch (dataAtom.type.type) {
case 0: // reserved: Reserved for use where no type needs to be indicated
switch (tagKey) {
case 'trkn':
case 'disk':
const num = Token.UINT8.get(dataAtom.value, 3);
const of = Token.UINT8.get(dataAtom.value, 5);
// console.log(" %s[data] = %s/%s", tagKey, num, of);
this.addTag(tagKey, num + '/' + of);
break;
case 'gnre':
const genreInt = Token.UINT8.get(dataAtom.value, 1);
const genreStr = Genres[genreInt - 1];
// console.log(" %s[data] = %s", tagKey, genreStr);
this.addTag(tagKey, genreStr);
break;
default:
// console.log(" reserved-data: name=%s, len=%s, set=%s, type=%s, locale=%s, value{ hex=%s, ascii=%s }",
// header.name, header.length, dataAtom.type.set, dataAtom.type.type, dataAtom.locale, dataAtom.value.toString('hex'), dataAtom.value.toString('ascii'));
}
break;
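// Sketch (assumption, not part of the original snippet): per the Apple well-known-type
// table referenced above, type 1 is UTF-8 text, so a natural continuation of this switch is:
case 1: // UTF-8: text without any count or null terminator
this.addTag(tagKey, dataAtom.value.toString('utf-8'));
break;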
if (zipHeader.filename.startsWith('xl/')) {
return {
ext: 'xlsx',
mime: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
};
}
// The docx, xlsx and pptx file types extend the Office Open XML file format:
// https://en.wikipedia.org/wiki/Office_Open_XML_file_formats
// We look for:
// - one entry named '[Content_Types].xml' or '_rels/.rels',
// - one entry indicating specific type of file.
// MS Office, OpenOffice and LibreOffice may put the parts in a different order, so the check should not rely on entry order.
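// Sketch (assumption, not part of the original snippet): following the same heuristic as the
// 'xl/' check above, an entry under 'word/' would identify a docx file:
if (zipHeader.filename.startsWith('word/')) {
return {
ext: 'docx',
mime: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
};
}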
if (zipHeader.filename === 'mimetype' && zipHeader.compressedSize === zipHeader.uncompressedSize) {
const mimeType = await tokenizer.readToken(new Token.StringType(zipHeader.compressedSize, 'utf-8'));
switch (mimeType) {
case 'application/epub+zip':
return {
ext: 'epub',
mime: 'application/epub+zip'
};
case 'application/vnd.oasis.opendocument.text':
return {
ext: 'odt',
mime: 'application/vnd.oasis.opendocument.text'
};
case 'application/vnd.oasis.opendocument.spreadsheet':
return {
ext: 'ods',
mime: 'application/vnd.oasis.opendocument.spreadsheet'
return this.tokenizer.readToken(Token.UINT32_LE).then((strLen) => {
return this.tokenizer.readToken(new Token.StringType(strLen, 'ascii')).then((v) => {
const idx = v.indexOf('=');
const key = v.slice(0, idx).toUpperCase();
let value: any = v.slice(idx + 1);
if (key === 'METADATA_BLOCK_PICTURE') {
value = this.options.skipCovers ? null : VorbisPictureToken.fromBase64(value);
}
if (value !== null)
this.tags.push({id: key, value});
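// Illustrative example (not from the original): a raw comment 'Artist=Beethoven'
// ends up here as {id: 'ARTIST', value: 'Beethoven'}.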
const len = Token.UINT32_LE.len + strLen;
if (--userCommentListLength > 0) {
// if we don't want to read the duration
// then tell the parent stream to stop
// stop = !readDuration;
get: (buf, off) => {
return {
// should equal 'APETAGEX'
ID: new Token.StringType(8, 'ascii').get(buf, off),
// equals CURRENT_APE_TAG_VERSION
version: Token.UINT32_LE.get(buf, off + 8),
// the complete size of the tag, including this footer (excludes header)
size: Token.UINT32_LE.get(buf, off + 12),
// the number of fields in the tag
fields: Token.UINT32_LE.get(buf, off + 16),
// the global tag flags
flags: parseTagFlags(Token.UINT32_LE.get(buf, off + 20))
};
}
};
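// Sketch (assumption, not part of the original snippet): this footer sits at the end of an
// APEv2-tagged file; with a hypothetical token name 'TagFooter' wrapping the reader above,
// it could be consumed like this:
const footer = await this.tokenizer.readToken(TagFooter);
assert.strictEqual(footer.ID, 'APETAGEX', 'APEv2 footer ID');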
let subFormat = WaveChunk.WaveFormat[fmt.wFormatTag];
if (!subFormat) {
debug('WAVE/non-PCM format=' + fmt.wFormatTag);
subFormat = 'non-PCM (' + fmt.wFormatTag + ')';
}
this.metadata.setFormat('codec', subFormat);
this.metadata.setFormat('bitsPerSample', fmt.wBitsPerSample);
this.metadata.setFormat('sampleRate', fmt.nSamplesPerSec);
this.metadata.setFormat('numberOfChannels', fmt.nChannels);
this.metadata.setFormat('bitrate', fmt.nBlockAlign * fmt.nSamplesPerSec * 8);
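// Illustrative example (not from the original): for 16-bit stereo PCM at 44.1 kHz,
// nBlockAlign = 4 bytes, so bitrate = 4 * 44100 * 8 = 1411200 bit/s.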
this.blockAlign = fmt.nBlockAlign;
break;
case 'id3 ': // The way Picard and FooBar currently store ID3 metadata
case 'ID3 ': // The way Mp3Tags stores ID3 metadata
const id3_data = await this.tokenizer.readToken(new Token.BufferType(header.chunkSize));
const id3stream = new ID3Stream(id3_data);
const rst = strtok3.fromStream(id3stream);
await new ID3v2Parser().parse(this.metadata, rst, this.options);
break;
case 'data': // PCM-data
if (this.metadata.format.lossless !== false) {
this.metadata.setFormat('lossless', true);
}
const numberOfSamples = this.fact ? this.fact.dwSampleLength : (header.chunkSize / this.blockAlign);
this.metadata.setFormat('numberOfSamples', numberOfSamples);
this.metadata.setFormat('duration', numberOfSamples / this.metadata.format.sampleRate);
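// Illustrative example (not from the original): a 1764000-byte data chunk with blockAlign = 4
// holds 441000 sample-frames; at a sample rate of 44100 Hz that is a duration of 10 s.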
this.metadata.setFormat('bitrate', this.metadata.format.numberOfChannels * this.blockAlign * this.metadata.format.sampleRate); // ToDo: check me
await this.tokenizer.ignore(header.chunkSize);
break;
// Sample rate in Hz. Though 20 bits are available,
// the maximum sample rate is limited by the structure of frame headers to 655350Hz.
// Also, a value of 0 is invalid.
sampleRate: Token.UINT24_BE.get(buf, off + 10) >> 4,
// probably slower: sampleRate: common.getBitAllignedNumber(buf, off + 10, 0, 20),
// (number of channels)-1. FLAC supports from 1 to 8 channels
channels: common.getBitAllignedNumber(buf, off + 12, 4, 3) + 1,
// (bits per sample)-1.
// FLAC supports from 4 to 32 bits per sample. Currently the reference encoder and decoders only support up to 24 bits per sample.
bitsPerSample: common.getBitAllignedNumber(buf, off + 12, 7, 5) + 1,
// Total samples in stream.
// 'Samples' means inter-channel sample, i.e. one second of 44.1 kHz audio will have 44100 samples regardless of the number of channels.
// A value of zero here means the number of total samples is unknown.
totalSamples: common.getBitAllignedNumber(buf, off + 13, 4, 36),
// the MD5 hash of the file (see notes for usage... it's a little tricky)
fileMD5: new Token.BufferType(16).get(buf, off + 18)
};
}
};
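// Sketch (assumption, not part of the original snippet): common.getBitAllignedNumber is not shown
// in this fragment; a big-endian bit-field reader along these lines would satisfy the calls above
// (byteOffset in bytes, bitOffset in bits within that byte, len = number of bits to read):
function getBitAllignedNumber(source: Buffer, byteOffset: number, bitOffset: number, len: number): number {
const byteOff = byteOffset + ~~(bitOffset / 8);
const bitOff = bitOffset % 8;
let value = source[byteOff];
value &= 0xff >> bitOff; // drop the bits before bitOff
const bitsRead = 8 - bitOff;
const bitsLeft = len - bitsRead;
if (bitsLeft < 0) {
value >>= (8 - bitOff - len); // the field ends within this byte
} else if (bitsLeft > 0) {
// multiply instead of shifting to stay exact beyond 32 bits (e.g. the 36-bit totalSamples field)
value = value * Math.pow(2, bitsLeft) + getBitAllignedNumber(source, byteOffset, bitOffset + bitsRead, bitsLeft);
}
return value;
}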
const p0 = this.tokenizer.position;
switch (header.chunkID.trim()) {
case 'FVER': // 3.1 FORMAT VERSION CHUNK
const version = await this.tokenizer.readToken(Token.UINT32_LE);
debug(`DSDIFF version=${version}`);
break;
case 'PROP': // 3.2 PROPERTY CHUNK
const propType = await this.tokenizer.readToken(FourCcToken);
assert.strictEqual(propType, 'SND ');
await this.handleSoundPropertyChunks(header.chunkSize - FourCcToken.len);
break;
case 'ID3': // Unofficial ID3 tag support
const id3_data = await this.tokenizer.readToken(new Token.BufferType(header.chunkSize));
const id3stream = new ID3Stream(id3_data);
const rst = strtok3.fromStream(id3stream);
await new ID3v2Parser().parse(this.metadata, rst, this.options);
break;
case 'DSD':
this.metadata.setFormat('numberOfSamples', header.chunkSize * 8 / this.metadata.format.numberOfChannels);
this.metadata.setFormat('duration', this.metadata.format.numberOfSamples / this.metadata.format.sampleRate);
break;
default:
debug(`Ignore chunk[ID=${header.chunkID}, size=${header.chunkSize}]`);
break;
}
const remaining = header.chunkSize - (this.tokenizer.position - p0);
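// Sketch (assumption, not part of the original snippet): any unread bytes of the chunk would
// typically be skipped before reading the next chunk header:
if (remaining > 0) {
await this.tokenizer.ignore(remaining);
}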
debug('pos=%s, parsePage()', this.tokenizer.position);
try {
let header: Ogg.IPageHeader;
do {
header = await this.tokenizer.readToken(OggParser.Header);
assert.strictEqual(header.capturePattern, 'OggS', 'Ogg capture pattern');
this.metadata.setFormat('container', 'Ogg');
this.header = header;
this.pageNumber = header.pageSequenceNo;
debug('page#=%s, Ogg.id=%s', header.pageSequenceNo, header.capturePattern);
const segmentTable = await this.tokenizer.readToken(new SegmentTable(header));
debug('totalPageSize=%s', segmentTable.totalPageSize);
const pageData = await this.tokenizer.readToken(new Token.BufferType(segmentTable.totalPageSize));
debug('firstPage=%s, lastPage=%s, continued=%s', header.headerType.firstPage, header.headerType.lastPage, header.headerType.continued);
if (header.headerType.firstPage) {
const id = new Token.StringType(7, 'ascii').get(pageData, 0);
switch (id) {
case '\x01vorbis': // Ogg/Vorbis
debug('Set page consumer to Ogg/Vorbis');
this.pageConsumer = new VorbisParser(this.metadata, this.options);
break;
case 'OpusHea': // Ogg/Opus
debug('Set page consumer to Ogg/Opus');
this.pageConsumer = new OpusParser(this.metadata, this.options, this.tokenizer);
break;
case 'Speex  ': // Ogg/Speex
debug('Set page consumer to Ogg/Speex');
this.pageConsumer = new SpeexParser(this.metadata, this.options, this.tokenizer);
break;
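// Sketch (assumption, not part of the original snippet): the original switch may cover further
// codecs; a defensive fallback could simply log the unrecognised capture id:
default:
debug(`No Ogg page consumer set for codec id=${id}`);
break;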
// the number of descriptor bytes (allows later expansion of this header)
descriptorBytes: Token.UINT32_LE.get(buf, off + 8),
// the number of header APE_HEADER bytes
headerBytes: Token.UINT32_LE.get(buf, off + 12),
// the number of seektable bytes
seekTableBytes: Token.UINT32_LE.get(buf, off + 16),
// the number of header data bytes (from original file)
headerDataBytes: Token.UINT32_LE.get(buf, off + 20),
// the number of bytes of APE frame data
apeFrameDataBytes: Token.UINT32_LE.get(buf, off + 24),
// the high-order 32 bits of the number of APE frame data bytes
apeFrameDataBytesHigh: Token.UINT32_LE.get(buf, off + 28),
// the terminating data of the file (not including tag data)
terminatingDataBytes: Token.UINT32_LE.get(buf, off + 32),
// the MD5 hash of the file (see notes for usage... it's a little tricky)
fileMD5: new Token.BufferType(16).get(buf, off + 36)
};
}
};
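// Sketch (assumption, not part of the original snippet): the 64-bit frame-data size is split across
// the two 32-bit fields above; given a value 'descriptor' returned by this token (hypothetical
// variable name), it could be recombined as:
const totalApeFrameDataBytes = descriptor.apeFrameDataBytesHigh * 0x100000000 + descriptor.apeFrameDataBytes;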