// Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
// Imports the Google Cloud client library
const speech = require('@google-cloud/speech');

// Recognition settings — `encoding`, `sampleRateHertz` and `languageCode`
// are assumed to be declared earlier in the file (TODO confirm).
const config = {
  encoding,
  sampleRateHertz,
  languageCode,
};

// Streaming request: deliver final results only (no interim hypotheses).
const request = {
  config,
  interimResults: false,
};

// Creates a client
const client = new speech.SpeechClient();

// Create a recognize stream and print each transcript as it arrives.
// When a data event carries no alternative, the API has stopped
// transcribing (streaming time limit reached).
const recognizeStream = client
  .streamingRecognize(request)
  .on('error', console.error)
  .on('data', data => {
    const topResult = data.results[0];
    const topAlternative = topResult && topResult.alternatives[0];
    process.stdout.write(
      topAlternative
        ? `Transcription: ${topAlternative.transcript}\n`
        : `\n\nReached transcription time limit, press Ctrl+C\n`
    );
  });
// Start recording and send the microphone input to the Speech API
// NOTE(review): truncated fragment — `recorder.record({` opens an options
// object that is never closed in the code shown, and `recorder` is not
// defined anywhere in this file. The snippet appears cut off mid-call.
recorder
.record({
// Recognition settings — `encoding`, `sampleRateHertz` and `languageCode`
// are assumed to be declared earlier in the file (TODO confirm).
const config = {
  encoding,
  sampleRateHertz,
  languageCode,
};

// Streaming request; interim (partial) hypotheses are enabled so the
// transcript line can be redrawn in place as it evolves.
const request = {
  config,
  interimResults: true,
};

// Restart threshold in milliseconds, kept below the Speech API's
// streaming recognition time limit.
const STREAMING_LIMIT = 55000;

// Create a client
const client = new speech.SpeechClient();
// (Re)creates the streaming recognize pipeline; intended to be called again
// whenever the stream approaches the API's streaming time limit.
function startStream() {
// Initiate (Reinitiate) a recognize stream
const recognizeStream = client
.streamingRecognize(request)
.on('error', console.error)
.on('data', data => {
// Redraw the current console line with the latest (interim) transcript.
process.stdout.clearLine();
process.stdout.cursorTo(0);
// NOTE(review): unlike the earlier snippet, this handler does not guard
// against an empty `data.results` — it will throw if results[0] is
// undefined. Verify against the upstream sample.
process.stdout.write(data.results[0].alternatives[0].transcript);
if (data.results[0].isFinal) process.stdout.write('\n');
});
// Start recording and send the microphone input to the Speech API
// NOTE(review): truncated fragment — `record.start({` opens an options
// object that is never closed, `startStream` itself is never closed, and
// `record` is not defined in the code shown.
record
.start({
// NOTE(review): appears to be a method of an object/class whose declaration
// is outside the visible code; `siteId` is unused in the lines shown.
getDetector(siteId) {
// Google Speech Client
const client = new speech.SpeechClient();
// Fixed recognition parameters for this detector.
const encoding = 'LINEAR16';
const sampleRateHertz = 16000;
const languageCode = 'en-AU';
const request = {
config: {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
},
interimResults: false, // If you want interim results, set this to true
};
// Stream the audio to the Google Cloud Speech API
// NOTE(review): truncated — the chain after .on('error', …) is cut off;
// `detector` is never returned or wired to a data handler in the code shown.
var detector = client
.streamingRecognize(request)
.on('error', console.log)
// NOTE(review): fragment of a Node-RED-style handler — `config`,
// `parameters`, `input`, `data`, `helper`, `node` and `fs` are all defined
// outside the code shown here; TODO confirm their shapes against the caller.
let type = config.intype;
// Route the audio input by its declared type: a remote URI, inline
// base64 content, or (fallback) a local file path read and base64-encoded.
if (type === 'url') parameters.audio.uri = input;
else if (type === 'content') parameters.audio.content = input;
else {
try {
let buffer = fs.readFileSync(input);
parameters.audio.content = buffer.toString('base64');
}
catch(ex) {
// On a read failure, report the error on the configured output property
// (default "payload") and forward the message instead of throwing.
console.log(ex);
helper.setByString(data, config.output || "payload", { error: ex });
return node.send(data);
}
}
// Non-streaming recognition using explicit credentials from the node's
// auth configuration.
const speech = require('@google-cloud/speech');
let client = new speech.v1.SpeechClient({credentials: node.auth.cred});
client.recognize(parameters).then((results) => {
// Defensively unwrap results[0].results[0].alternatives, falling back to
// an empty list when the response carries no transcription.
let alternatives = (results[0] && results[0].results && results[0].results[0] && results[0].results[0].alternatives) ? results[0].results[0].alternatives : [];
helper.setByString(data, config.output || 'payload', { alternatives: alternatives });
node.send(data);
}).catch((err) => {
// Errors are delivered on the same output property as successes.
console.log(err);
helper.setByString(data, config.output || 'payload', { error: err });
node.send(data);
});
}
// TTS
// NOTE(review): badly truncated fragment — `else {` has no matching `if`
// in the code shown, and the parameter list below has lost its
// `async function <name>(` header line (this is the
// speech_transcribe_model_selection_gcs sample). The body is also cut off
// after `config` — the audio/request construction and the recognize call
// are missing.
else {
gcsUri,
model,
encoding,
sampleRateHertz,
languageCode
) {
// [START speech_transcribe_model_selection_gcs]
// Imports the Google Cloud client library for Beta API
/**
* TODO(developer): Update client library import to use new
* version of API when desired features become available
*/
const speech = require('@google-cloud/speech').v1p1beta1;
// Creates a client
const client = new speech.SpeechClient();
/**
* TODO(developer): Uncomment the following lines before running the sample.
*/
// const gcsUri = 'gs://my-bucket/audio.raw';
// const model = 'Model to use, e.g. phone_call, video, default';
// const encoding = 'Encoding of the audio file, e.g. LINEAR16';
// const sampleRateHertz = 16000;
// const languageCode = 'BCP-47 language code, e.g. en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
model: model,
};
// Synchronously transcribes a local 2-channel LINEAR16 file, requesting a
// separate recognition result per audio channel.
// NOTE(review): truncated — the body is cut off after `audio`; the request
// construction, the client.recognize call, and the closing brace are
// missing from the code shown.
async function syncRecognizeWithMultiChannel(fileName) {
// [START speech_transcribe_multichannel]
const fs = require('fs');
// Imports the Google Cloud client library
const speech = require('@google-cloud/speech').v1;
// Creates a client
const client = new speech.SpeechClient();
/**
* TODO(developer): Uncomment the following lines before running the sample.
*/
// const fileName = 'Local path to audio file, e.g. /path/to/audio.raw';
const config = {
encoding: `LINEAR16`,
languageCode: `en-US`,
audioChannelCount: 2,
enableSeparateRecognitionPerChannel: true,
};
// Inline the audio as base64 content (synchronous read; fine for samples).
const audio = {
content: fs.readFileSync(fileName).toString('base64'),
};
// Synchronously transcribes an audio file stored in Google Cloud Storage.
// NOTE(review): truncated — the `audio` object literal is never closed;
// the request construction, the client.recognize call, and the closing
// brace are missing from the code shown.
async function syncRecognizeGCS(
gcsUri,
encoding,
sampleRateHertz,
languageCode
) {
// [START speech_transcribe_sync_gcs]
// Imports the Google Cloud client library
const speech = require('@google-cloud/speech');
// Creates a client
const client = new speech.SpeechClient();
/**
* TODO(developer): Uncomment the following lines before running the sample.
*/
// const gcsUri = 'gs://my-bucket/audio.raw';
// const encoding = 'Encoding of the audio file, e.g. LINEAR16';
// const sampleRateHertz = 16000;
// const languageCode = 'BCP-47 language code, e.g. en-US';
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
};
const audio = {
uri: gcsUri,
// Recognition with speech contexts (phrase hints) for an MP3 file.
// NOTE(review): truncated — the body is cut off after `config`; the audio
// source, the recognize call, and the closing brace are missing. `speech`
// is expected to be imported elsewhere in the file.
function sampleRecognize(sampleRateHertz, languageCode, phrase, uriPath) {
const client = new speech.SpeechClient();
// const sampleRateHertz = 24000;
// const languageCode = 'en-US';
// const phrase = '$TIME';
// const uriPath = 'gs://cloud-samples-data/speech/time.mp3';
const encoding = 'MP3';
// Phrase hints bias recognition toward the supplied phrase (e.g. '$TIME'
// improves spoken-time transcription).
const phrases = [phrase];
const speechContextsElement = {
phrases: phrases,
};
const speechContexts = [speechContextsElement];
const config = {
encoding: encoding,
sampleRateHertz: sampleRateHertz,
languageCode: languageCode,
speechContexts: speechContexts,
};
// @ts-check
// Top-level setup for a Discord bot that transcribes voice via the Google
// Cloud Speech beta API. These consts are presumably referenced by code
// beyond this excerpt, so their names must not change.
const Discord = require('discord.js')
const fs = require('fs')
const execFile = require('child_process').execFile
// Bot configuration is loaded synchronously at startup from a JSON file
// resolved relative to this module.
const config = JSON.parse(
fs.readFileSync(require.resolve('./discord.config.json'), 'utf8')
)
// @ts-ignore
const speech = require('@google-cloud/speech').v1p1beta1
// Explicit service-account key file rather than application default
// credentials.
const speechClient = new speech.SpeechClient({
keyFilename: 'google-cloud.credentials.json'
})
// This is our logger.
const pino = require('pino')({
prettyPrint: true,
level: 'trace'
})
// Crash when something unexpected happens.
// Let a process manager (e.g. pm2 or Docker) restart it.
process.on('unhandledRejection', up => {
throw up
})
// Keep track of billed usage.
// NOTE(review): the usage-tracking code this comment introduces is not
// present in the visible excerpt — it appears to have been cut off.
// Synchronously transcribes a 2-channel LINEAR16 file stored in GCS, with
// a separate recognition result per channel.
// NOTE(review): truncated — the visible code ends after `request`; the
// recognize call, result handling, and closing brace are missing.
async function syncRecognizeWithMultiChannelGCS(gcsUri) {
// [START speech_transcribe_multichannel_gcs]
const speech = require('@google-cloud/speech').v1;
// Creates a client
const client = new speech.SpeechClient();
const config = {
encoding: 'LINEAR16',
languageCode: `en-US`,
audioChannelCount: 2,
enableSeparateRecognitionPerChannel: true,
};
const audio = {
uri: gcsUri,
};
const request = {
config: config,
audio: audio,
};