How to use the nltools.misc.message_popup function in nltools

To help you get started, we’ve selected a few nltools examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github gooofy / zamia-ai / ai_dbg.py View on Github external
def do_rec():
    """Record one utterance via MQTT audio transport and decode it with ASR.

    Shows a 'Recording...' popup, then (in MQTT mode) waits on ``mqtt_cond``
    for audio chunks pushed by the MQTT callback until ``mqtt_finalize`` is
    set.  Decoded hypothesis ends up in the global ``hstr``; raw samples
    accumulate in the global ``recording``.

    NOTE(review): snippet is truncated here — the condition release and the
    non-MQTT branch are not visible in this excerpt.
    """

    global rec, stdscr, loc, hstr, prompt, recording, asr, options, mqtt_finalize, mqtt_cond, mqtt_audio, mqtt_listen

    logging.debug ('do_rec...')

    # brief pause so the curses UI settles before capture starts
    time.sleep(0.1)

    recording = []
    swin = misc.message_popup(stdscr, 'Recording...', 'Please speak now.')

    if options.mqtt:

        # producer (MQTT callback) appends to mqtt_audio and notifies this condition
        mqtt_cond.acquire()
        try:
            mqtt_finalize = False
            mqtt_listen   = True
            while not mqtt_finalize:
                mqtt_cond.wait()

                logging.debug ('do_rec... got audio from mqtt')
                recording.extend(mqtt_audio)

                # incremental decode; mqtt_finalize tells the ASR whether this is the last chunk
                hstr, confidence = asr.decode(SAMPLE_RATE, mqtt_audio, mqtt_finalize, stream_id=MQTT_LOCATION)

                logging.debug ('do_rec: hstr=%s' % repr(hstr))
github gooofy / zamia-ai / ai_trainer.py View on Github external
def do_rec():
    """Record one utterance from the local microphone and decode it with ASR.

    Shows a 'Recording...' popup, starts the PulseAudio recorder, then pulls
    sample buffers until the VAD signals end-of-utterance.  The decoded
    hypothesis is stored in the global ``prompt`` (and ``hstr``); the raw
    samples accumulate in the global ``recording``.
    """

    global rec, stdscr, loc, hstr, prompt, recording, asr

    logging.debug ('do_rec...')

    # brief pause so the curses UI settles before capture starts
    time.sleep(0.1)

    rec.start_recording(FRAMES_PER_BUFFER)

    finalize  = False
    recording = []

    swin = misc.message_popup(stdscr, 'Recording...', 'Please speak now.')

    while not finalize:

        samples = rec.get_samples()

        # VAD segments the stream; finalize becomes True at end of utterance
        audio, finalize = vad.process_audio(samples)
        if not audio:
            continue

        recording.extend(audio)

        # BUGFIX: was `do_finalize`, an undefined name (NameError on the first
        # decoded chunk); the end-of-utterance flag is the local `finalize`.
        hstr, confidence = asr.decode(SAMPLE_RATE, audio, finalize, stream_id=loc)

    rec.stop_recording()

    prompt = hstr
github gooofy / zamia-ai / ai_dbg.py View on Github external
def do_help():
    """Pop up a help window describing the utterance syntax, then wait for a key."""

    global stdscr

    # help text shown to the user (kept verbatim)
    help_text = """
    simple question - response:

    "what are you called?",
    "I am called HAL 9000".

    context, patterns, variables::

    context(topic, wdeProgrammingLanguage),
    "what are you called (by the way|again|)?",
    or ( "I am called {self:rdfsLabel|en, s}",
         "My name is {self:rdfsLabel|en, s}").
    """
    misc.message_popup(stdscr, 'Help', help_text)

    # block until the user presses any key to dismiss the popup
    c = stdscr.getch()
github gooofy / zamia-ai / ai_dbg.py View on Github external
def do_align_module():
    """Match the current prompt against existing training utterances.

    Runs ``kernal.align_utterances`` on the global ``prompt``, shows up to the
    five best matches in a popup and then reads the user's selection.

    NOTE(review): snippet is truncated at the selection loop below.
    """

    global stdscr, match_module, kernal, prompt, lang

    misc.message_popup(stdscr, 'Align...', 'Matching prompt against existing utterances...')
    matches = kernal.align_utterances(lang=lang, utterances=[prompt])

    msg = u''

    # list at most 5 candidates (indices 0..4): similarity, location, utterance
    for i, res in enumerate(matches[prompt]):
        sim, loc, utt = res
        msg += u'%d %s\n   %s\n\n' % (i, loc, utt)
        if i==4:
            break

    msg += 'Please select 0-%d >' % i

    stdscr.refresh()
    misc.message_popup(stdscr, 'Alignment Results', msg)

    while True:
github gooofy / zamia-ai / ai_trainer.py View on Github external
logging.debug ('PulseRecorder initialized.')

    #
    # VAD
    #

    # voice activity detection used to segment microphone input into utterances
    misc.message_popup(stdscr, 'Initializing...', 'Init VAD...')
    vad = VAD(aggressiveness=aggressiveness, sample_rate=SAMPLE_RATE)
    paint_main()
    logging.debug ('VAD initialized.')

    #
    # setup AI Kernal
    #

    misc.message_popup(stdscr, 'Initializing...', 'Init AI Kernal...')
    kernal = AIKernal(load_all_modules=True)
    # kernal.setup_tf_model (mode='decode', load_model=True, ini_fn=ai_model)
    # kernal.setup_align_utterances(lang=lang)
    paint_main()
    logging.debug ('AI kernal initialized.')

    #
    # context
    #

    # resume the most recent dialog context for this user, if one exists
    cur_context = kernal.find_prev_context(USER_URI)

    #
    # ASR
    #
github gooofy / zamia-ai / ai_dbg.py View on Github external
samples = rec.get_samples()

            # VAD segments the stream; finalize becomes True at end of utterance
            audio, finalize = vad.process_audio(samples)
            if not audio:
                continue

            recording.extend(audio)

            # incremental ASR decode of the latest voiced chunk
            hstr, confidence = asr.decode(SAMPLE_RATE, audio, finalize, stream_id=loc)

        rec.stop_recording()

    # decoded hypothesis becomes the new prompt
    prompt = hstr

    stdscr.refresh()
    swin = misc.message_popup(stdscr, 'Processing input...', prompt)

    do_process_input()
github gooofy / zamia-ai / ai_dbg.py View on Github external
def do_apply_solution (sidx):
    """Apply response solution number ``sidx``.

    Commits the solution's DB overlay (when one is attached) and advances the
    dialog context; the pending response list is cleared afterwards.
    """

    global stdscr, responses, kernal, cur_context, next_context

    # guard: selected solution must exist
    if not sidx < len(responses):
        misc.message_popup(stdscr, 'Error', 'Solution #%d does not exist.' % sidx)
        stdscr.getch()
        return

    # commit the DB overlay attached to this solution, if any
    overlay = responses[sidx][4].get(ASSERT_OVERLAY_VAR_NAME)
    if overlay:
        overlay.do_apply(AI_MODULE, kernal.db, commit=True)

    # clear pending responses and move to the follow-up context
    responses   = []
    cur_context = next_context
github gooofy / zamia-ai / ai_dbg.py View on Github external
packed_audio = struct.pack('%sh' % len(recording), *recording)
    wf.writeframes(packed_audio)
    wf.close()  

    # append etc/prompts-original file

    # VoxForge-style dataset layout: <rec_dir>/<login>-<date>-rec/etc
    etcdirfn = '%s/%s-%s-rec/etc' % (rec_dir, vf_login, ds)
    logging.debug('etcdirfn: %s' % etcdirfn)
    misc.mkdirs(etcdirfn)

    # one line per utterance: "<id> <transcript>"
    promptsfn = '%s/prompts-original' % etcdirfn
    with codecs.open(promptsfn, 'a') as promptsf:
        promptsf.write('de5-%03d %s\n' % (cnt, prompt))

    misc.message_popup(stdscr, 'WAVE file written', audiofn)

    # wait for a keypress so the user sees the confirmation
    stdscr.getch()
github gooofy / zamia-ai / ai_dbg.py View on Github external
misc.message_popup(stdscr, 'Align...', 'Matching prompt against existing utterances...')
    matches = kernal.align_utterances(lang=lang, utterances=[prompt])

    msg = u''

    # list at most 5 candidates (indices 0..4): similarity, location, utterance
    for i, res in enumerate(matches[prompt]):
        sim, loc, utt = res
        msg += u'%d %s\n   %s\n\n' % (i, loc, utt)
        if i==4:
            break

    msg += 'Please select 0-%d >' % i

    stdscr.refresh()
    misc.message_popup(stdscr, 'Alignment Results', msg)

    # read a digit key 0-4 and pick the corresponding match location
    # (snippet truncated below the '4' case)
    while True:
        c = stdscr.getch()
        if c == ord('0'):
            match_location = matches[prompt][0][1]
            break    
        if c == ord('1'):
            match_location = matches[prompt][1][1]
            break    
        if c == ord('2'):
            match_location = matches[prompt][2][1]
            break    
        if c == ord('3'):
            match_location = matches[prompt][3][1]
            break    
        if c == ord('4'):