// Speech-to-text from a file: push the file contents into a push stream and run a
// single recognition over it. subscriptionKey, serviceRegion, and filename are assumed
// to be defined by the surrounding code.
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");
// create the push stream we need for the speech sdk.
var pushStream = sdk.AudioInputStream.createPushStream();
// open the file and push it to the push stream.
fs.createReadStream(filename).on('data', function (arrayBuffer) {
  pushStream.write(arrayBuffer.slice());
}).on('end', function () {
  pushStream.close();
});
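
// Aside: if the input were raw PCM rather than a WAV file, the push stream could be
// created with an explicit format. A sketch only; the 16 kHz / 16-bit / mono values are
// an example, and pcmPushStream is not used by the sample below.
var pcmFormat = sdk.AudioStreamFormat.getWaveFormatPCM(16000, 16, 1);
var pcmPushStream = sdk.AudioInputStream.createPushStream(pcmFormat);
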
// we are done with the setup
console.log("Now recognizing from: " + filename);
// now create the audio-config pointing to our stream and
// the speech config specifying the language.
var audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
var speechConfig = sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion);
// setting the recognition language to English.
speechConfig.speechRecognitionLanguage = "en-US";
// create the speech recognizer.
var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
// start the recognizer and wait for a result.
recognizer.recognizeOnceAsync(
  function (result) {
    console.log(result);
    recognizer.close();
    recognizer = undefined;
  },
  function (err) {
    console.trace("err - " + err);
    recognizer.close();
    recognizer = undefined;
  });
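
// A sketch of a result callback that could replace the console.log above: the SDK
// reports why recognition ended via result.reason, and result.text holds the transcript.
// The onRecognized name is just for illustration.
function onRecognized(result) {
  if (result.reason === sdk.ResultReason.RecognizedSpeech) {
    console.log("RECOGNIZED: " + result.text);
  } else if (result.reason === sdk.ResultReason.NoMatch) {
    console.log("NOMATCH: speech could not be recognized.");
  } else if (result.reason === sdk.ResultReason.Canceled) {
    console.log("CANCELED: " + sdk.CancellationDetails.fromResult(result).errorDetails);
  }
}
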
// Text-to-speech sample: synthesize typed text to an audio file (settings supplies the subscription key, service region, and language).
var readline = require("readline");
module.exports = {
  main: function (settings, filename) {
    // now create the audio-config pointing to the output file.
    // You can also use an audio output stream to initialize the audio config; see the docs for details.
    var audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
    var speechConfig = sdk.SpeechConfig.fromSubscription(settings.subscriptionKey, settings.serviceRegion);
    // setting the synthesis language, voice name, and output audio format.
    // see https://aka.ms/speech/tts-languages for available languages and voices
    speechConfig.speechSynthesisLanguage = settings.language;
    speechConfig.speechSynthesisVoiceName = "en-US-AriaNeural";
    speechConfig.speechSynthesisOutputFormat = sdk.SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3;
    var rl = readline.createInterface({
      input: process.stdin,
      output: process.stdout
    });
    // create the speech synthesizer.
    var synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
    // Before beginning speech synthesis, set up the callbacks to be invoked when an event occurs.
    synthesizer.synthesisCompleted = function (s, e) {
      console.log("(synthesis completed)");
    };
    // prompt for the text to synthesize, then speak it; the audio is written to the output file.
    rl.question("Type some text that you want to speak...\n> ", function (text) {
      rl.close();
      synthesizer.speakTextAsync(text,
        function (result) { console.log(result); synthesizer.close(); },
        function (err) { console.trace("err - " + err); synthesizer.close(); });
    });
  }
};
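
// A sketch of the audio-output-stream alternative mentioned in the comment above:
// initialize the audio config from a pull stream instead of a file so the synthesized
// audio can be read into a buffer. AudioOutputStream.createPullStream, AudioConfig.fromStreamOutput,
// and PullAudioOutputStream.read are SDK calls; the surrounding wiring (reusing
// subscriptionKey and serviceRegion from the first sample) is illustrative.
var pullStream = sdk.AudioOutputStream.createPullStream();
var streamSynthesizer = new sdk.SpeechSynthesizer(
  sdk.SpeechConfig.fromSubscription(subscriptionKey, serviceRegion),
  sdk.AudioConfig.fromStreamOutput(pullStream));
streamSynthesizer.speakTextAsync("Hello, world.",
  function (result) {
    streamSynthesizer.close();
    // read the first chunk of synthesized audio out of the pull stream.
    var chunk = new ArrayBuffer(4096);
    pullStream.read(chunk).then(function (bytesRead) {
      console.log("read " + bytesRead + " bytes of synthesized audio");
    });
  },
  function (err) {
    console.trace("err - " + err);
    streamSynthesizer.close();
  });
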
// Browser assistant snippet: a web worker drives the assistant; the page hands over
// API keys when the worker signals it is ready and displays the worker's replies.
// assistantWorker, root, AccuWeatherkey, and SpeechSDKkey come from the surrounding app;
// the worker is assumed to post messages carrying the same action field used in postMessage below.
assistantWorker.onmessage = function (e) {
  switch (e.data.action) {
    case "ready":
      assistantWorker.postMessage({
        action: "sendKeys",
        keys: {
          AccuWeatherkey,
          SpeechSDKkey
        }
      });
      break;
    case "reply":
      root.Assistant.add(e.data.reply, true);
      break;
  }
};
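
// For context, a minimal sketch (assumed; not part of the snippet above) of the worker
// side of this protocol: it announces readiness, stores the keys it receives, and posts
// replies back using the same action field the switch above expects.
var apiKeys = {};
self.onmessage = function (e) {
  if (e.data.action === "sendKeys") {
    apiKeys = e.data.keys;
  }
  // ...handle other requests from the page and eventually answer with:
  // self.postMessage({ action: "reply", reply: "..." });
};
// tell the page we are ready to receive the API keys.
self.postMessage({ action: "ready" });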

// Build the speech configuration from the default microphone; if no key has been added
// in Settings, tell the user and reset the assistant button. serviceRegion, audioConfig,
// and speechConfig are declared in the surrounding scope; the wrapper function name is assumed.
function setupSpeechConfig() {
  try {
    audioConfig = sdk.AudioConfig.fromDefaultMicrophoneInput();
    speechConfig = sdk.SpeechConfig.fromSubscription(SpeechSDKkey, serviceRegion);
    speechConfig.speechRecognitionLanguage = "en-US";
  } catch (e) {
    root.Assistant.add("Uh oh!<br><br>There is no <b>Azure Cognitive Services key</b> present in the system. Add it in Settings and try again.", true);
    root.Search.Input.placeholder = "";
    root.AssistantButton.classList.remove("text-danger");
  }
}

// If the assistant is currently listening (the mic button is highlighted), stop the
// recognizer and reset the search placeholder, or fall back to the apps section.
if (root.AssistantButton.classList.contains("text-danger")) {
  if (recognizer) recognizer.dispose();
  root.AssistantButton.classList.remove("text-danger");
  if (root.Assistant.childNodes.length > 1) {
    root.Search.Input.placeholder = "Press on the microphone and start speaking".toLocaleString();
  } else showSection(root.AppsSection);
}
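
// A sketch (assumed wiring, not from the snippet above) of how the recognizer could be
// created from the configs and drive the assistant: listen once, show the recognized
// text, and hand it to the worker. The "recognize" action name and the second argument
// to root.Assistant.add are assumptions.
function startListening() {
  recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
  root.AssistantButton.classList.add("text-danger");
  recognizer.recognizeOnceAsync(function (result) {
    root.Assistant.add(result.text, false);
    assistantWorker.postMessage({ action: "recognize", text: result.text });
    recognizer.close();
    root.AssistantButton.classList.remove("text-danger");
  }, function (err) {
    console.trace("err - " + err);
    recognizer.close();
    root.AssistantButton.classList.remove("text-danger");
  });
}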