The best I could find is this demo, but it is not very clear to me.
Could you share a very simple implementation?
<div id="app">
<v-app id="inspire">
<v-container fluid>
<v-layout row wrap justify-center class="mt-4">
<v-flex xs12 sm10 text-xs-center>
<v-text-field
label="The text"
v-model="text"
textarea
></v-text-field>
</v-flex>
<v-flex xs12 sm8 md4 text-xs-center>
<speech-to-text :text.sync="text" @speechend="speechEnd"></speech-to-text>
</v-flex>
<v-flex xs12 text-xs-center class="mt-4">
{{sentences}}
</v-flex>
</v-layout>
</v-container>
</v-app>
</div>
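For reference, that markup assumes a root Vue instance that provides text, sentences and a speechEnd handler. Something along these lines should back it (this is my own sketch, not the demo's actual script; the payload of the speechend event depends on how the speech-to-text component emits it):
new Vue({
  el: '#app',
  data: {
    text: '',      // kept in sync through :text.sync on <speech-to-text>
    sentences: []  // rendered below the button once speech ends
  },
  methods: {
    speechEnd(payload) {
      // assumption: the component emits the recognised sentences with the speechend event
      this.sentences = payload;
    }
  }
});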
I've found a simpler implementation; here it is:
<template>
<div class="voice">
<div class="speech-to-txt" @click="startSpeechToTxt">Speech to txt</div>
<p>{{transcription_}}</p>
</div>
</template>
<script>
export default {
name: 'speech_to_text',
data() {
return {
runtimeTranscription_: "",
transcription_: [],
lang_: "es-ES"
};
},
methods: {
startSpeechToTxt() {
// initialise the Web Speech API (prefixed as webkitSpeechRecognition in Chrome)
window.SpeechRecognition =
window.SpeechRecognition ||
window.webkitSpeechRecognition;
const recognition = new window.SpeechRecognition();
recognition.lang = this.lang_;
recognition.interimResults = true;
// fired whenever the recogniser has interim or final results; concatenate the best alternative of each result
recognition.addEventListener("result", event => {
const text = Array.from(event.results)
.map(result => result[0])
.map(result => result.transcript)
.join("");
this.runtimeTranscription_ = text;
});
// fired when the recognition session ends: store the finished sentence and reset the buffer
recognition.addEventListener("end", () => {
this.transcription_.push(this.runtimeTranscription_);
this.runtimeTranscription_ = "";
recognition.stop();
});
recognition.start();
},
}
}
</script>
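To use it, register the component in a parent. A minimal sketch, assuming the snippet above is saved as SpeechToText.vue (the file name is my choice):
<template>
  <div id="app">
    <speech-to-text></speech-to-text>
  </div>
</template>

<script>
// the path/file name here is an assumption for this example
import SpeechToText from './SpeechToText.vue';

export default {
  name: 'App',
  components: { SpeechToText }
};
</script>
Clicking "Speech to txt" starts a recognition session; when it ends, the finished sentence is pushed onto transcription_ and rendered in the paragraph below the button. Keep in mind that SpeechRecognition is currently only available in Chrome-based browsers and the page needs microphone permission.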