We are using the sessionless API with Python, and we have set the 'continuous': True parameter like this:
def make_completed_audio_request(url, API_name=None, language=None, time=None):
    """Download the completed audio for *url* and POST it to the speech API.

    Parameters:
        url: source URL used to fetch the audio and build the POST endpoint.
        API_name: name of the speech service (e.g. 'watson'); forwarded to the
            project helpers that resolve credentials and endpoints.
        language: language selector passed to select_voice_api; also gates the
            extra debug prints for the Watson service.
        time: opaque value forwarded to get_complete_audio — TODO confirm its
            meaning against that helper.

    Returns:
        (response, list_audio, True, None) on an OK HTTP response, or
        (None, None, False, None) when no audio was retrieved or the
        response indicated an error.
    """
    username, password, endpoint, lan = select_voice_api(name=API_name,
                                                         language=language)
    audio = get_complete_audio(url, api_name=API_name, time=time)
    endpoint = get_post_endpoint(url=endpoint, api_name=API_name)

    # Guard clause: without audio the original code fell through to
    # `if response:` with `response` unbound (NameError). Fail early instead.
    if not audio:
        return None, None, False, None

    # audio[1] is the file-like payload, audio[2] its content type — presumably
    # from get_complete_audio; verify against that helper.
    list_audio = get_speakers_by_audio(audio[1].name)
    headers = {'content-type': audio[2]}
    params = {'model': lan,
              'continuous': True,   # ask the API not to stop at first silence
              'timestamps': True}

    # Case-insensitive service check (original only matched 'watson'/'WATSON',
    # missing e.g. 'Watson'). None-safe: API_name may be None.
    verbose = bool(language) and API_name is not None \
        and API_name.lower() == 'watson'

    # The original if/else issued the exact same POST in both branches,
    # differing only in the debug prints — collapsed into one call.
    if verbose:
        print('enviando request')
    response = requests.post(url=endpoint, auth=(username, password),
                             params=params, data=audio[1], headers=headers)
    if verbose:
        print('cladificando error')
    error_clasifier(code=response.status_code)

    # NOTE: requests.Response truthiness is status-based — falsy for 4xx/5xx.
    if response:
        return response, list_audio, True, None
    return None, None, False, None
But it still does not work; it cuts off the transcription at the first silence it finds.
What am I doing wrong? Is there another way to send it to the API?
I am using the watson_developer_cloud API. It's easy to use and, what is more important, it works. Here is the code sample:
import json
from os.path import join, dirname
from watson_developer_cloud import SpeechToTextV1
# Build an authenticated Watson Speech-to-Text V1 client.
speech_to_text = SpeechToTextV1(
    username="yourusername",
    password="yourpassword",
    x_watson_learning_opt_out=False,
)

# Transcribe a WAV file that sits next to this script, asking for word
# confidences, alternatives, and continuous recognition across silences,
# then serialize the recognition result to a JSON string.
wav_path = join(dirname(__file__), 'test.wav')
with open(wav_path, 'rb') as audio_file:
    recognition = speech_to_text.recognize(
        audio_file,
        content_type='audio/wav',
        word_confidence=True,
        continuous=True,
        word_alternatives_threshold=0,
        max_alternatives=10,
    )
    data = json.dumps(recognition)