Search code examples
rust · portaudio · mozilla-deepspeech

Getting blank results from deepspeech with portaudio in Rust


I'm attempting to use portaudio with deepspeech (both using Rust bindings) to create a speech recognition program. I can see the data when I log the buffer, but when attempting to use intermediate_decode, I always get blank results. I'm assuming I'm either configuring the audio incorrectly, or setting up the model incorrectly. I've spent a lot of time just getting to this point (fairly new to handling audio) and any help would be appreciated!

This is the full source code:

use deepspeech::Model;
use portaudio as pa;
use std::path::Path;

/// Opens a PortAudio input stream and feeds captured microphone audio into
/// the DeepSpeech model, printing intermediate transcriptions as they arrive.
/// (This is the question's original, broken version — see the bug note below.)
fn start_recognition(mut model: Model) {
    let pa = pa::PortAudio::new().expect("Unable to init PortAudio");

    // Mono, 16 kHz, 1024-frame buffers — 16 kHz is the sample rate the
    // DeepSpeech acoustic model expects.
    let input_settings = pa.default_input_stream_settings(1, 16000.0, 1024).unwrap();

    let process_audio = move |pa::InputStreamCallbackArgs { buffer, .. }| {
        // BUG: a brand-new model stream is created for every callback buffer,
        // so the decoder only ever sees ~64 ms of audio at a time and
        // intermediate_decode() keeps returning an empty string. The stream
        // must be created once, outside this closure (see the solution below).
        let mut stream = model
            .create_stream()
            .expect("Failed to create model stream");
        stream.feed_audio(&buffer);
        let text = stream.intermediate_decode();

        match text {
            Ok(t) => {
                // Only print non-empty transcriptions.
                if t.chars().count() > 0 {
                    println!("Text: {}", t)
                }
                pa::Continue
            }
            Err(err) => {
                eprintln!("Error: {:?}", err);
                // Any decode error stops the audio stream.
                pa::Complete
            }
        }
    };

    let mut stream = pa
        .open_non_blocking_stream(input_settings, process_audio)
        .expect("Unable to create audio stream");
    stream.start().expect("Unable to start audio stream");

    // NOTE(review): busy-wait — spins a full core while the callback runs on
    // PortAudio's thread; a short sleep inside the loop would be kinder.
    while let true = stream.is_active().unwrap() {}
}

/// Locates a DeepSpeech graph file under `src/models` and loads it.
///
/// Scans the directory for files with a `.pb` or `.pbmm` extension; if
/// several match, the last one encountered wins. Falls back to
/// `src/models/output_graph.pb` when no candidate is found.
fn get_model() -> Model {
    let dir_path = Path::new("src/models");
    let mut graph_name: Box<Path> = dir_path.join("output_graph.pb").into_boxed_path();

    let entries = dir_path
        .read_dir()
        .expect("Specified model dir is not a dir");

    // `flatten()` silently skips unreadable entries, matching the original
    // `if let Ok(..)` behavior.
    for entry in entries.flatten() {
        let candidate = entry.path();
        let is_graph_file = candidate.is_file()
            && matches!(candidate.extension(), Some(ext) if ext == "pb" || ext == "pbmm");
        if is_graph_file {
            graph_name = candidate.into_boxed_path();
        }
    }

    Model::load_from_files(&graph_name).unwrap()
}

/// Entry point: load the DeepSpeech model, then start live recognition.
fn main() {
    start_recognition(get_model());
}

Solution

  • It turns out the issue was with the process_audio callback. I needed to move the initialization of the model's stream outside of the callback.

    /// Opens a PortAudio input stream and streams captured audio into a
    /// single long-lived DeepSpeech stream, printing intermediate results.
    fn start_recognition(mut model: Model) {
        let pa = pa::PortAudio::new().expect("Unable to init PortAudio");

        // Mono, 16 kHz, 1024-frame buffers — the sample rate DeepSpeech expects.
        let input_settings = pa.default_input_stream_settings(1, 16000.0, 1024).unwrap();

        // The fix: create the model stream ONCE, outside the callback, so audio
        // accumulates across callbacks and the decoder has enough context.
        let mut stream = model
            .create_stream()
            .expect("Failed to create model stream");

        let process_audio = move |pa::InputStreamCallbackArgs { buffer, .. }| {
            stream.feed_audio(&buffer);

            match stream.intermediate_decode() {
                Ok(t) => {
                    // Only print non-empty transcriptions.
                    if !t.is_empty() {
                        println!("Text: {}", t)
                    }
                    pa::Continue
                }
                Err(err) => {
                    eprintln!("Error: {:?}", err);
                    // Any decode error stops the audio stream.
                    pa::Complete
                }
            }
        };

        // Shadowing is fine here: the model stream was moved into the closure.
        let mut stream = pa
            .open_non_blocking_stream(input_settings, process_audio)
            .expect("Unable to create audio stream");
        stream.start().expect("Unable to start audio stream");

        // Idiomatic loop condition (was `while let true = …`), plus a short
        // sleep so we don't spin a full CPU core — the audio callback does all
        // the actual work on PortAudio's thread.
        while stream.is_active().unwrap() {
            std::thread::sleep(std::time::Duration::from_millis(100));
        }
    }