import speech_recognition as sr
import gradio as gr
import time
import requests
import json
import time
from gtts import gTTS
import pandas as pd
import gradio as gr
# from transformers import pipeline
# p = pipeline("automatic-speech-recognition")
# Shared speech_recognition.Recognizer instance used by transcribe() below.
r = sr.Recognizer()
# def transcribe_old(audio):
#     text = p(audio)["text"]
#     return text
# Column headers shared by the FAQ search results DataFrame and the UI grid.
columns = ['FAQ', 'Answer']
# Fallback text shown (and spoken) when no FAQ matches with enough confidence.
NO_MATCH_RESPONSE = "Couldn't find relevant FAQ with enough confidence. Proceed to Agent?"


def get_faq_response_pd(text):
    """Search the hosted Jina NOW text-to-text index for FAQs matching *text*.

    Returns a pandas DataFrame with the two ``columns`` headers:
    - top-2 matching FAQ/answer pairs when the top hit clears the threshold,
    - a single NO_MATCH_RESPONSE row otherwise,
    - an empty frame on any failure (network error, bad JSON, missing keys),
      so the Gradio callback never raises.
    """
    url = "https://nowrun.jina.ai/api/v1/text-to-text/search"
    payload = json.dumps({
        "host": "grpcs://nowapi-efb1aca434.wolf.jina.ai",  # "grpcs://nowapi-3d81593c9f.wolf.jina.ai",
        "port": None,
        "jwt": {},
        "limit": 10,
        "uri": None,
        "text": text,
    })
    headers = {'Content-Type': 'application/json'}
    try:
        # timeout keeps a dead/slow endpoint from hanging the UI callback forever
        response = requests.post(url, headers=headers, data=payload, timeout=10)
        hits = response.json()
        # NOTE(review): matches are accepted when the top score is BELOW 0.65,
        # which implies the endpoint reports a distance-like value (lower is
        # closer) — confirm against the API's scoring semantics.
        if hits[0]['scores']['cosine']['value'] < 0.65:
            # Each hit's text is "<question>?<answer>"; split on the FIRST '?'
            # only, so answers that themselves contain '?' are not truncated.
            faq_list = [hits[i]['text'].split('?', 1)[0] + '?' for i in range(2)]
            answer_list = [hits[i]['text'].split('?', 1)[1] for i in range(2)]
            return pd.DataFrame({columns[0]: faq_list,
                                 columns[1]: answer_list})
        else:
            return pd.DataFrame({columns[0]: [NO_MATCH_RESPONSE], columns[1]: [""]},
                                columns=columns)
    except Exception as e:
        # Best-effort: log and return an empty frame rather than crash the UI.
        print(f"Exception occurred: {e}")
        return pd.DataFrame(columns=columns)
# def get_faq_response(text):
# # print("came here...")
# url = "https://nowrun.jina.ai/api/v1/text-to-text/search"
# payload = json.dumps({
# "host": "grpcs://nowapi-efb1aca434.wolf.jina.ai", # "grpcs://nowapi-3d81593c9f.wolf.jina.ai",
# "port": None,
# "jwt": {},
# "limit": 10,
# "uri": None,
# "text": text
# })
# headers = {
# 'Content-Type': 'application/json'
# }
# try:
# response = requests.request("POST", url, headers=headers, data=payload)
# j = json.loads(response.text)
# return f"Q. {j[0]['text']} \n2. {j[1]['text']}"
# except Exception as e:
# print(f"Exception occured: {e}")
# return ""
def transcribe(audio):
    """Transcribe the audio file at path *audio* using the Google Web Speech API.

    Returns the recognized text; raises the recognizer's errors on failure.
    """
    with sr.AudioFile(audio) as source:
        captured = r.listen(source)
    # language / show_all kwargs deliberately left at their defaults
    return r.recognize_google(captured)
def tts(text):
    """Synthesize *text* to speech with gTTS and return the saved mp3 filename.

    An empty string falls back to speaking NO_MATCH_RESPONSE.
    """
    speech = text if len(text) > 0 else NO_MATCH_RESPONSE
    filename = "abc.mp3"
    # Fixed output name: every call overwrites the previous synthesis.
    gTTS(text=speech, lang='en-IN', slow=False).save(filename)
    return filename
# UI wiring: record -> transcribe -> FAQ search -> first answer -> TTS playback.
with gr.Blocks() as myapp:
    gr.Markdown("""
# FAQs Automator
## _Ask me anything_
FAQs Automator lets you automate your customer queries via chatbot interface or telemetry.
Current FAQ website used:
[Qatar Airways - Privilege club]
[//]: #
[Qatar Airways - Privilege club]:
""")
    inputs = gr.Audio(source="microphone", type="filepath")
    ask_btn = gr.Button("Ask FAQ")
    you_asked = gr.Textbox(label="You asked")
    with gr.Row():
        # Single results grid replaces the earlier separate FAQ/answer textboxes.
        response_df_list = [gr.Dataframe(row_count=(2, "dynamic"), col_count=(2, "dynamic"),
                                         label="Matching FAQs", headers=columns,
                                         wrap=True, interactive=False)]
    audio_bar = gr.Audio(label='Audio output')

    # Event chain: clicking the button transcribes the recording; the
    # transcription change triggers the FAQ search; the results-grid change
    # (via the hidden dummy_box) triggers text-to-speech of the top answer.
    ask_btn.click(fn=transcribe, inputs=inputs, outputs=you_asked)
    you_asked.change(fn=get_faq_response_pd, inputs=you_asked, outputs=response_df_list)

    dummy_box = gr.Text(visible=False)

    def get_first_response(df):
        """Return the first answer cell (row 0, col 1) of *df*, or '' on any failure."""
        try:
            return str(df.iloc[0, 1])
        except Exception:
            # Empty or malformed frame — fall through to the empty default.
            # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
            pass
        return ""

    response_df_list[0].change(fn=get_first_response, inputs=response_df_list, outputs=dummy_box)
    dummy_box.change(fn=tts, inputs=dummy_box, outputs=audio_bar)
#
if __name__ == '__main__':
    # main()
    # test()
    # gr.Series(myapp, audioapp).launch()
    # Start the Gradio server; blocks until it is shut down.
    myapp.launch()