How to use the prism-media.opus module

To help you get started, we've selected a few prism-media examples based on popular ways the library is used in public projects.
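The examples below come from real projects, so most of them start mid-function. As a quick orientation, here is a minimal, self-contained sketch of the classes they combine. The option shapes are the ones used throughout this page; the input file name is just a placeholder, and the opus classes additionally need an Opus binding such as @discordjs/opus or opusscript installed alongside prism-media.

const fs = require('fs');
const prism = require('prism-media');

// Demux an Ogg Opus file into raw Opus packets, then decode them to
// 48 kHz stereo signed 16-bit PCM ('music.ogg' is a placeholder path).
const pcm = fs.createReadStream('music.ogg')
  .pipe(new prism.opus.OggDemuxer())
  .pipe(new prism.opus.Decoder({ rate: 48000, channels: 2, frameSize: 960 }));

// Re-encode the PCM back into Opus packets; 960 samples at 48 kHz is a 20 ms frame.
const opusPackets = pcm
  .pipe(new prism.opus.Encoder({ rate: 48000, channels: 2, frameSize: 960 }));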


amishshah / ytdl-core-discord / index.js (view on GitHub)
ytdl.getInfo(url, (err, info) => {
			if (err) return reject(err);
			// Prefer opus
			const format = info.formats.find(filter);
			const canDemux = format && info.length_seconds != 0;
			if (canDemux) options = { ...options, filter };
			else if (info.length_seconds != 0) options = { ...options, filter: 'audioonly' };
			if (canDemux) {
				const demuxer = new prism.opus.WebmDemuxer();
				return resolve(ytdl.downloadFromInfo(info, options).pipe(demuxer).on('end', () => demuxer.destroy()));
			} else {
				const transcoder = new prism.FFmpeg({
					args: [
						'-reconnect', '1',
						'-reconnect_streamed', '1',
						'-reconnect_delay_max', '5',
						'-i', nextBestFormat(info.formats).url,
						'-analyzeduration', '0',
						'-loglevel', '0',
						'-f', 's16le',
						'-ar', '48000',
						'-ac', '2',
					],
				});
				const opus = new prism.opus.Encoder({ rate: 48000, channels: 2, frameSize: 960 });
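The excerpt stops right after the encoder is constructed. Because FFmpeg is given the source URL via -i, it fetches and decodes the audio itself and emits 48 kHz stereo s16le PCM; the likely continuation, sketched here as an assumption rather than quoted from the project, simply pipes that PCM into the encoder and resolves the surrounding promise:

// Assumed continuation (not part of the quoted source): FFmpeg's s16le PCM
// output feeds the Opus encoder, and the Opus stream is handed back through
// the surrounding promise, with both transforms cleaned up on close.
const stream = transcoder.pipe(opus);
stream.on('close', () => {
  transcoder.destroy();
  opus.destroy();
});
return resolve(stream);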
nullabork / talkbot / src/services/tts/AmazonTextToSpeechAPI.js (view on GitHub)
var resample = new samplerate({
          // Quality: 0 (best, slowest) through 4, or one of the provided enum values
          type: samplerate.SRC_SINC_MEDIUM_QUALITY,
          // Mono
          channels: 1,
          // Sample rate of source
          fromRate: 22050,
          // bit depth of source. Valid values: 16 or 32
          fromDepth: 16,
          // Desired sample rate
          toRate: 48000,
          // Desired bit depth. Valid values: 16 or 32
          toDepth: 16
        });

        callback(null, audioStream.pipe(ld).pipe(resample).pipe(new prism.opus.Encoder({rate: 48000, channels: 1, frameSize: 960 })));
      }
      catch(ex)
      {
        callback(ex, null);
      }
    });
  }
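A small aside on the numbers used above (and in most of the other examples): the resampler's output rate has to match the Encoder's rate option, and frameSize is the number of PCM samples per channel packed into one Opus frame, so 960 samples at 48 kHz works out to the standard 20 ms frame:

// Frame duration implied by the encoder settings used throughout these examples
const frameSize = 960;  // PCM samples per channel in one Opus frame
const rate = 48000;     // Hz
console.log((frameSize / rate) * 1000); // 20 (milliseconds)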
nullabork / talkbot / src/services/tts/AzureTextToSpeechAPI.js (view on GitHub)
var resample = new samplerate({
            // Quality: 0 (best, slowest) through 4, or one of the provided enum values
            type: samplerate.SRC_SINC_MEDIUM_QUALITY,
            // Mono
            channels: 1,
            // Sample rate of source
            fromRate: 24000,
            // bit depth of source. Valid values: 16 or 32
            fromDepth: 16,
            // Desired sample rate
            toRate: 48000,
            // Desired bit depth. Valid values: 16 or 32
            toDepth: 16
          });

          callback(null, p.pipe(ld).pipe(resample).pipe(new prism.opus.Encoder({rate: 48000, channels: 1, frameSize: 960 })));
        }
        else {
          Common.error(response);
          callback(new Error('HTTP ERROR: ' + response.statusCode));
        }
      })
      .on('error', err => callback(err));
nullabork / talkbot / bin/create-samples.js (view on GitHub)
provider.getAudioContent(request, (err, audio) => {

                if ( err ) {
                    console.log(err);
                    return;
                }

                // Decode Opus packets to 48 kHz mono 16-bit PCM
                let decoder = new prism.opus.Decoder({ rate: 48000, channels: 1, frameSize: 960 });
                let encoder = new lame.Encoder({
                    channels: 1,
                    bitDepth: 16,
                    sampleRate: 48000,

                    bitRate: 128,
                    outSampleRate: 48000,
                    mode: lame.STEREO
                });
                let stm = fs.createWriteStream(sample_file);

                audio.pipe(decoder).pipe(encoder).pipe(stm);
            });
discordjs / discord.js / src/client/voice/util/PlayInterface.js (view on GitHub)
return this.player.playBroadcast(resource, options);
    }
    if (resource instanceof Readable || typeof resource === 'string') {
      const type = options.type || 'unknown';
      if (type === 'unknown') {
        return this.player.playUnknown(resource, options);
      } else if (type === 'converted') {
        return this.player.playPCMStream(resource, options);
      } else if (type === 'opus') {
        return this.player.playOpusStream(resource, options);
      } else if (type === 'ogg/opus') {
        if (!(resource instanceof Readable)) throw new Error('VOICE_PRISM_DEMUXERS_NEED_STREAM');
        return this.player.playOpusStream(resource.pipe(new prism.opus.OggDemuxer()), options);
      } else if (type === 'webm/opus') {
        if (!(resource instanceof Readable)) throw new Error('VOICE_PRISM_DEMUXERS_NEED_STREAM');
        return this.player.playOpusStream(resource.pipe(new prism.opus.WebmDemuxer()), options);
      }
    }
    throw new Error('VOICE_PLAY_INTERFACE_BAD_TYPE');
  }
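For context, this dispatch is what runs underneath discord.js's play() call: a caller that already has Ogg- or WebM-encapsulated Opus can skip FFmpeg entirely by naming the container. The snippet below is a hypothetical caller (it assumes an existing voice connection and placeholder file paths), not part of the quoted file:

const fs = require('fs');

// Ogg-encapsulated Opus: routed through prism.opus.OggDemuxer, no FFmpeg involved.
connection.play(fs.createReadStream('track.ogg'), { type: 'ogg/opus' });

// Audio-only WebM with an Opus track: routed through prism.opus.WebmDemuxer.
connection.play(fs.createReadStream('track.webm'), { type: 'webm/opus' });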
discordjs / discord.js / src/client/voice/player/BasePlayer.js (view on GitHub)
playOpusStream(stream, options, streams = {}) {
    this.destroyDispatcher();
    streams.opus = stream;
    if (options.volume !== false && !streams.input) {
      streams.input = stream;
      const decoder = new prism.opus.Decoder({ channels: 2, rate: 48000, frameSize: 960 });
      streams.volume = new prism.VolumeTransformer({ type: 's16le', volume: options ? options.volume : 1 });
      streams.opus = stream
        .pipe(decoder)
        .pipe(streams.volume)
        .pipe(new prism.opus.Encoder({ channels: 2, rate: 48000, frameSize: 960 }));
    }
    const dispatcher = this.createDispatcher(options, streams);
    streams.opus.pipe(dispatcher);
    return dispatcher;
  }
nullabork / talkbot / src/services/tts/GoogleTextToSpeechAPI.js (view on GitHub)
return client.synthesizeSpeech(request, (err, response) => {
      if ( !response ) return callback(err,null);

      var stm = streamifier.createReadStream(response.audioContent);
      callback(err, stm.pipe(new prism.opus.OggDemuxer()));
    });
  }
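streamifier is only there to turn the response buffer into a readable stream; on Node 12 and later the built-in stream module can do the same thing (wrapping the buffer in an array so it is emitted as one chunk). A sketch of that variant, assuming the same response and callback as above:

const { Readable } = require('stream');

// Emit the whole audio buffer as a single chunk, then demux the Ogg container.
const stm = Readable.from([response.audioContent]);
callback(err, stm.pipe(new prism.opus.OggDemuxer()));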
discordjs / discord.js / src/client/voice/player/BasePlayer.js (view on GitHub)
playPCMStream(stream, options, streams = {}) {
    this.destroyDispatcher();
    const opus = streams.opus = new prism.opus.Encoder({ channels: 2, rate: 48000, frameSize: 960 });
    if (options && options.volume === false) {
      stream.pipe(opus);
      return this.playOpusStream(opus, options, streams);
    }
    streams.volume = new prism.VolumeTransformer({ type: 's16le', volume: options ? options.volume : 1 });
    stream.pipe(streams.volume).pipe(opus);
    return this.playOpusStream(opus, options, streams);
  }

prism-media

Easy-to-use stream-based media transcoding

License: Apache-2.0