How to use the nltools.asr.ASR class in nltools

To help you get started, we’ve selected a few nltools examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github gooofy / py-nltools / tests / test_asr.py View on Github external
def test_asr_kaldi(self):
        """Stream a mono 16-bit WAV file through the Kaldi NNET3 ASR engine.

        NOTE(review): this snippet is truncated by the scrape — the body of
        the chunk-feeding loop is missing, so num_frames is never advanced
        in the visible code.
        """

        # build an ASR instance backed by the Kaldi NNET3 engine
        asr = ASR(engine = ASR_ENGINE_NNET3)

        wavf = wave.open(TEST_WAVE_EN, 'rb')

        # check format
        # the decoder expects mono, 16-bit (2-byte) samples
        self.assertEqual(wavf.getnchannels(), 1)
        self.assertEqual(wavf.getsampwidth(), 2)

        # process file in 250ms chunks

        # NOTE(review): true division yields a float under Python 3 — an
        # integer frame count is presumably intended; confirm (// is safer)
        chunk_frames = 250 * wavf.getframerate() / 1000
        tot_frames   = wavf.getnframes()

        num_frames = 0
        while num_frames < tot_frames:

            finalize = False
github gooofy / py-nltools / tests / test_asr.py View on Github external
def test_asr_pocketsphinx(self):
        """Stream a mono 16-bit WAV file through the PocketSphinx ASR engine.

        NOTE(review): this snippet is truncated by the scrape — the body of
        the chunk-feeding loop is missing, so num_frames is never advanced
        in the visible code.
        """

        # PocketSphinx requires an explicit model directory and model name
        asr = ASR(engine = ASR_ENGINE_POCKETSPHINX, model_dir = POCKETSPHINX_MODELDIR, model_name = POCKETSPHINX_MODELNAME)

        wavf = wave.open(TEST_WAVE_EN, 'rb')

        # check format
        # the decoder expects mono, 16-bit (2-byte) samples
        self.assertEqual(wavf.getnchannels(), 1)
        self.assertEqual(wavf.getsampwidth(), 2)

        # process file in 250ms chunks

        # NOTE(review): true division yields a float under Python 3 — an
        # integer frame count is presumably intended; confirm (// is safer)
        chunk_frames = 250 * wavf.getframerate() / 1000
        tot_frames   = wavf.getnframes()

        num_frames = 0
        while num_frames < tot_frames:

            finalize = False
github gooofy / py-nltools / tests / test_asr.py View on Github external
def test_asr_kaldi_wavefile(self):
        """Decode a whole WAV file in one call with the Kaldi NNET3 engine
        and compare the stripped transcript against the expected reference."""
        decoder = ASR(engine = ASR_ENGINE_NNET3)
        # decode_wav_file returns (transcript, likelihood)
        transcript, likelihood = decoder.decode_wav_file(TEST_WAVE_EN)
        self.assertEqual(transcript.strip(), TEST_WAVE_EN_TS)
github gooofy / py-nltools / tests / test_asr.py View on Github external
def test_asr_pocketsphinx_wavefile(self):
        """Decode a whole WAV file in one call with the PocketSphinx engine
        and compare the stripped transcript against the expected reference."""
        decoder = ASR(engine = ASR_ENGINE_POCKETSPHINX, model_dir = POCKETSPHINX_MODELDIR, model_name = POCKETSPHINX_MODELNAME)
        # decode_wav_file returns (transcript, likelihood)
        transcript, likelihood = decoder.decode_wav_file(TEST_WAVE_EN)
        self.assertEqual(transcript.strip(), TEST_WAVE_EN_TS_PS)
github gooofy / py-nltools / examples / wav_decoder.py View on Github external
#!/usr/bin/env python3
# Minimal example: decode a single WAV file with the default (Kaldi) engine.
from nltools.asr import ASR

MODELDIR = '/opt/kaldi/model/kaldi-generic-en-tdnn_250'
WAVFILE  = 'dw961.wav'

# load the model once up front
asr = ASR(model_dir = MODELDIR)

# decode_wav_file returns (transcript, likelihood)
transcript, likelihood = asr.decode_wav_file(WAVFILE)
print ("Decoded %s: %s" % (WAVFILE, transcript))
github gooofy / py-nltools / examples / va_eliza.py View on Github external
from eliza                 import eliza

MODELDIR          = '/opt/kaldi/model/kaldi-generic-en-tdnn_250'
VOLUME            = 150
ED_THRESHOLD      = 2

class Intent(Enum):
    """Closed set of voice-command intents this assistant recognizes."""
    HELLO = 1
    LIGHT = 2
    RADIO = 3

print ("Initializing...")

# mutable device state toggled by recognized intents
radio_on  = False
lights_on = False
# speech recognition, audio capture, voice-activity detection, speech output
asr       = ASR(model_dir = MODELDIR)
rec       = PulseRecorder (volume=VOLUME)
vad       = VAD()
tts       = TTS(engine="espeak", voice="en")
# macro expander for utterance patterns, plus the ELIZA chatbot fallback
# NOTE(review): rebinding 'eliza' shadows the imported name of the same spelling
me        = MacroEngine()
eliza     = eliza()

utt_map   = {}

def add_utt (pattern, intent):
    """Expand a macro pattern into concrete utterances and map each to *intent*."""
    expansions = me.expand_macros('en', pattern)
    for words, _t in expansions:
        utt_map[' '.join(words)] = intent

# register trigger phrases; each (a|b) alternation expands to several utterances
add_utt("(hi|hello|ok) computer",             Intent.HELLO)
add_utt("switch (on|off) the (light|lights)", Intent.LIGHT)
add_utt("switch the (light|lights) (on|off)", Intent.LIGHT)
add_utt("switch (on|off) the (music|radio)",  Intent.RADIO)
github gooofy / py-kaldi-asr / examples / chain_live.py View on Github external
# pulseaudio capture; 'source' and 'volume' are defined above (truncated view)
rec = PulseRecorder (source_name=source, volume=volume)

#
# VAD
#

vad = VAD(aggressiveness=aggressiveness)

#
# ASR
#

# NOTE(review): Python 2 print statements — this example predates Python 3
print "Loading model from %s ..." % model_dir

# Kaldi NNET3 decoder with explicit tuning parameters (defaults defined above)
asr = ASR(engine = ASR_ENGINE_NNET3, model_dir = model_dir,
          kaldi_beam = DEFAULT_BEAM, kaldi_acoustic_scale = DEFAULT_ACOUSTIC_SCALE,
          kaldi_frame_subsampling_factor = DEFAULT_FRAME_SUBSAMPLING_FACTOR)


#
# main
#

rec.start_recording()

print "Please speak."

# endless capture loop; sample handling continues past this truncated view
while True:

    samples = rec.get_samples()
github gooofy / py-nltools / examples / va_simple.py View on Github external
from nltools.vad           import VAD
from nltools.tts           import TTS

MODELDIR          = '/opt/kaldi/model/kaldi-generic-en-tdnn_250'
VOLUME            = 150

class Intent(Enum):
    """Enumeration of the voice commands this simple assistant understands."""
    HELLO = 1
    LIGHT = 2
    RADIO = 3

print ("Initializing...")

# mutable device state toggled by recognized intents
radio_on  = False
lights_on = False
# speech recognition, audio capture, voice-activity detection, speech output
asr       = ASR(model_dir = MODELDIR)
rec       = PulseRecorder (volume=VOLUME)
vad       = VAD()
tts       = TTS(engine="espeak", voice="en")

utt_map = {}

def add_utt (utterance, intent):
    """Register a literal utterance string as a trigger for *intent*."""
    utt_map.update({utterance: intent})

# literal trigger phrases (no macro expansion in this simple example)
add_utt("hello computer",        Intent.HELLO)
add_utt("switch on the lights",  Intent.LIGHT)
add_utt("switch off the lights", Intent.LIGHT)
add_utt("switch on the radio",   Intent.RADIO)
add_utt("switch off the radio",  Intent.RADIO)

rec.start_recording()
print ("Please speak. (CTRL-C to exit)")
github gooofy / zamia-ai / examples / voice_assistant_de / voice_assistant.py View on Github external
#
# setup AI DB, Kernal and Context
#

# load every skill into the kernal, then build the NLP model and a dialog context
kernal = AIKernal.from_ini_file()
for skill in kernal.all_skills:
    kernal.consult_skill (skill)
kernal.setup_nlp_model()
ctx  = kernal.create_context()
logging.debug ('AI kernal initialized.')

#
# ASR
#

# model directory comes from command-line options (parsed above, truncated view)
asr = ASR(model_dir = options.asr_model)
logging.debug ('ASR initialized.')

#
# TTS
#

# speak replies via espeak in the kernal's configured language
tts = TTS(engine="espeak", voice=kernal.lang)

#
# main loop
#

# ANSI escape sequence: clear the terminal before entering the interaction loop
print(chr(27) + "[2J")
while True:

    #
github gooofy / py-nltools / examples / live_vad.py View on Github external
#!/usr/bin/env python3
# Live microphone demo: voice-activity detection gating a Kaldi ASR decoder.
import logging
logging.basicConfig(level=logging.INFO)
from nltools.asr           import ASR
from nltools.pulserecorder import PulseRecorder
from nltools.vad           import VAD

MODELDIR = '/opt/kaldi/model/kaldi-generic-en-tdnn_250'
VOLUME   = 150

print ("Initializing...")

# audio capture, speech recognition, voice-activity detection
rec = PulseRecorder (volume=VOLUME)
asr = ASR(model_dir = MODELDIR)
vad = VAD()

rec.start_recording()
print ("Please speak. (CTRL-C to exit)")

while True:

    samples = rec.get_samples()

    # the VAD buffers samples and signals (via finalize) utterance boundaries
    audio, finalize = vad.process_audio(samples)

    # nothing voice-like buffered yet — keep capturing
    if not audio:
        continue

    # NOTE(review): snippet is truncated — handling of user_utt/confidence
    # continues past this view
    user_utt, confidence = asr.decode(audio, finalize)