How to use the nltools.tts.TTS function in nltools

To help you get started, we’ve selected a few nltools examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github gooofy / py-nltools / tests / test_tts.py View on Github external
def test_tts_espeak(self):
    """Check espeak IPA generation and synthesis for every test case.

    For each (voice, word, expected_ipa) triple in ESPEAK_TESTS the
    generated IPA string must match exactly, and the synthesized wav
    must be non-trivially sized.
    """
    config = misc.load_config('.speechrc')

    # TTS server host/port come from the user's .speechrc
    tts = TTS(config.get('tts', 'host'), int(config.get('tts', 'port')))

    tts.engine = 'espeak'

    # (removed unused local `first = True` from the original)
    for v, word, ph in ESPEAK_TESTS:

        # espeak uses the same identifier for locale and voice
        tts.locale = v
        tts.voice  = v

        espeak_ph = tts.gen_ipa (word)

        self.assertEqual (espeak_ph, ph)

        wav = tts.synthesize (word)
        logging.debug('wav len: %d bytes.' % len(wav))
        # 100 bytes is a loose lower bound: anything smaller is an empty wav
        self.assertGreater (len(wav), 100)
github gooofy / py-nltools / tests / test_tts.py View on Github external
def test_tts_mary(self):
    """Exercise the Mary TTS backend: IPA generation plus synthesis."""

    config = misc.load_config('.speechrc')

    tts = TTS(config.get('tts', 'host'), int(config.get('tts', 'port')))

    # test mary
    tts.engine = 'mary'

    for locale_id, voice_id, word, expected_ph in MARY_TESTS:

        tts.locale = locale_id
        tts.voice  = voice_id

        generated_ph = tts.gen_ipa(word)

        self.assertEqual(generated_ph, expected_ph)

        wav = tts.synthesize(word)
        logging.debug('wav len: %d bytes.' % len(wav))
github gooofy / py-nltools / tests / test_tts.py View on Github external
def test_tts_pico(self):
    """Exercise the pico backend: synthesis plus audio playback."""

    config = misc.load_config('.speechrc')

    tts = TTS(config.get('tts', 'host'), int(config.get('tts', 'port')))

    tts.engine = 'pico'

    for voice_id, word in PICO_TESTS:

        # pico uses one identifier for both locale and voice
        tts.locale = voice_id
        tts.voice  = voice_id

        wav = tts.synthesize(word)
        logging.debug('wav len: %d bytes.' % len(wav))
        self.assertGreater(len(wav), 100)

        tts.say(word)
github gooofy / zamia-speech / speech_editor.py View on Github external
#
# config
#

# Read settings from the user's .speechrc (misc.load_config is a project helper).
config = misc.load_config('.speechrc')

wav16_dir   = config.get("speech", "wav16")  # NOTE(review): presumably a 16 kHz wav directory - confirm
host        = config.get('tts', 'host')
port        = int(config.get('tts', 'port'))

#
# TTS
#

# Remote TTS client: German espeak voice 'bits3'.
tts = TTS (host, port, locale='de', voice='bits3', engine='espeak')

def paint_main(cur_ts):
    # Redraw the main editor view for the transcript at index cur_ts.
    # NOTE(review): this function is truncated in the excerpt - the body
    # continues beyond the visible lines. Python 2 (print statement, u"").

    global edit_ts, prompt_tokens, prompt_token_idx

    ts = edit_ts[cur_ts]

    # header

    print
    print u"%5d/%5d %s QLTY: %d" % (cur_ts+1, len(edit_ts), ts['cfn'], ts['quality'])

    # prompts file

    if prompt_token_idx < len(prompt_tokens):
        print
github gooofy / py-nltools / examples / va_eliza.py View on Github external
# Tunables for this example.
VOLUME            = 150
ED_THRESHOLD      = 2  # NOTE(review): presumably an edit-distance threshold - confirm against usage

# Closed set of intents this assistant distinguishes.
class Intent(Enum):
    HELLO     = 1
    LIGHT     = 2
    RADIO     = 3

print ("Initializing...")

# Assistant state flags and pipeline components.
radio_on  = False
lights_on = False
asr       = ASR(model_dir = MODELDIR)
rec       = PulseRecorder (volume=VOLUME)
vad       = VAD()
tts       = TTS(engine="espeak", voice="en")
me        = MacroEngine()
eliza     = eliza()  # NOTE(review): rebinds the imported name 'eliza' to an instance

# Maps an expanded utterance string to its Intent; filled by add_utt().
utt_map   = {}
def add_utt (pattern, intent):
    # Register every macro expansion of `pattern` as mapping to `intent`.
    for token_seq, _unused in me.expand_macros('en', pattern):
        utt_map[' '.join(token_seq)] = intent

# Grammar: map hand-written utterance patterns onto intents.
add_utt("(hi|hello|ok) computer",             Intent.HELLO)
add_utt("switch (on|off) the (light|lights)", Intent.LIGHT)
add_utt("switch the (light|lights) (on|off)", Intent.LIGHT)
add_utt("switch (on|off) the (music|radio)",  Intent.RADIO)
add_utt("switch the (music|radio) (on|off)",  Intent.RADIO)

# Start capturing microphone audio.
rec.start_recording()
github gooofy / zamia-speech / abook-transcribe.py View on Github external
transcript.append(t)  # NOTE(review): belongs to a loop/function cut off above this excerpt

#
# config
#

config = misc.load_config('.speechrc')

vf_login    = config.get("speech", "vf_login")
extrasdir   = config.get("speech", "extrasdir_%s" % lang)

#
# TTS (for audio output)
#

# 'local' host with port 0 - presumably selects a local engine rather than
# a remote server (NOTE(review): inferred from the arguments - confirm).
tts = TTS ('local', 0, locale='de', voice='bits3', engine='espeak')

#
# load lexicon
#

logging.info("loading lexicon...")
lex = Lexicon(file_name=options.lang)
logging.info("loading lexicon...done.")

#
# main ui loop
#

next_segment()

while segmentfn:
github gooofy / zamia-ai / examples / voice_assistant_en / voice_assistant.py View on Github external
# Finish AI kernal setup and create a dialogue context.
kernal.setup_nlp_model()
ctx  = kernal.create_context()
logging.debug ('AI kernal initialized.')

#
# ASR
#

asr = ASR(model_dir = options.asr_model)
logging.debug ('ASR initialized.')

#
# TTS
#

# Local espeak output, English voice - no host/port needed.
tts = TTS(engine="espeak", voice="en")

#
# main loop
#

print(chr(27) + "[2J")  # ANSI escape ESC[2J: clear the terminal screen
# NOTE(review): loop body is truncated in this excerpt; file is Python 2
# (print statement below).
while True:

    #
    # record audio, run VAD
    #

    print "Please speak.",

    rec.start_recording()
github gooofy / zamia-speech / speech_lex_edit.py View on Github external
stdscr.keypad(1)  # NOTE(review): curses setup; stdscr is created before this excerpt

#
# config
#

config = misc.load_config('.speechrc')

host        = config.get('tts', 'host')
port        = int(config.get('tts', 'port'))

#
# TTS Client
#

# No engine kwarg given, so the TTS class default engine is used.
tts = TTS (host, port, locale='de', voice='bits3')

#
# main 
#

# NOTE(review): the try block (and its except/finally) is truncated in this
# excerpt; the curses event loop continues beyond the visible lines.
try:

    lex_gen = {}
    lex_cur_token = 0
    lex_set_token (lex_tokens[lex_cur_token])

    while True:
    
        lex_paint_main()
    
        c = stdscr.getch()
#!/usr/bin/env python3
from nltools.tts import TTS

# Minimal example: speak one sentence through the local espeak engine.
speaker = TTS(engine="espeak", voice="en")
speaker.say("hello from your pi")
github gooofy / zamia-speech / speech_gender.py View on Github external
transcripts = Transcripts(corpus_name=corpus)
print "loading transcripts...done."

#
# config
#

wav16_dir   = config.get("speech", "wav16")
host        = config.get('tts', 'host')
port        = int(config.get('tts', 'port'))

#
# TTS
#

# No engine kwarg given, so the TTS class default engine is used.
tts = TTS (host, port, locale='de', voice='bits3')

#
# count
#

# Speakers whose gender is already recorded in spk2gender.
known = set()
for spk in spk2gender:
    known.add(spk)

# Scan every transcript and tally speakers missing from the known set.
# NOTE(review): the loop body is truncated in this excerpt.
num_unk = 0
for cfn in transcripts:

    ts = transcripts[cfn]
    spk = ts['spk']

    if spk in known: