# grady / app.py
# (Hugging Face Space metadata: rohan13 — "Reducing bot window size", commit 113bfd8)
import gradio as gr
from main import index, run
from gtts import gTTS
import os, time
from transformers import pipeline
# Speech-to-text pipeline used to transcribe microphone recordings in
# get_output(). No model name is given, so the transformers default ASR
# checkpoint is used — TODO confirm that is acceptable for grading audio.
# NOTE(review): this downloads/loads a model at import time, slowing startup.
p = pipeline("automatic-speech-recognition")
"""Use text to call chat method from main.py"""
# Model choices surfaced in the (currently hidden) radio selector in the UI;
# the strings are passed through to main.run()/main.index() as-is.
models = ["GPT-3.5", "Flan UL2", "GPT-4", "Flan T5"]
def add_text(history, text, model):
    """Answer a typed question and append the (question, answer) pair.

    Returns the updated chat history plus an empty string so the Gradio
    textbox is cleared after submission.
    """
    print("Question asked: " + text)
    answer = run_model(text, model)
    history = [*history, (text, answer)]
    print(history)
    return history, ""
def run_model(text, model):
    """Run main.run() for *text* with the selected model, logging wall time.

    A newline is inserted before any "SOURCES:" marker so the sources
    section renders on its own line in the chat window.
    """
    started = time.time()
    print("start time:" + str(started))
    answer = run(text, model)
    finished = time.time()
    # Put the sources section on its own line for readability.
    if "SOURCES:" in answer:
        answer = answer.replace("SOURCES:", "\nSOURCES:")
    print(answer)
    print("Time taken: " + str(finished - started))
    return answer
def get_output(history, audio, model):
    """Transcribe a recorded question, answer it, and synthesize a spoken reply.

    The question audio and the generated response audio are appended to the
    chat history as one-element tuples (Gradio's audio-message format).
    Mutates and returns *history*.
    """
    transcript = p(audio)["text"]
    reply = run_model(transcript, model)
    # Speak only the answer body — drop everything from "SOURCES:" onward.
    spoken, _, _ = reply.partition("SOURCES:")
    audio_path = 'response.wav'
    tts = gTTS(text=spoken, lang='en', slow=False)
    tts.save(audio_path)
    history.append(((audio,), (audio_path,)))
    print(history)
    return history
def set_model(history, model):
    """Build the retrieval index for *model* and reset the chat greeting."""
    print("Model selected: " + model)
    index(model)
    return get_first_message(history)
def get_first_message(history):
    """Return a fresh history containing only Grady's greeting.

    The incoming *history* is ignored; the chat is reset to a single
    bot-only message (None in the user slot).
    """
    greeting = '''Hi!! I AM GRADY!! I am a grading assistant to help you grade assignments based on a rubric!! <br>
Today, I will be grading <a href="https://hbsp.harvard.edu/product/908D01-PDF-ENG"> Paediatric Orthopaedic Quiz. </a> <br>
Use the format as given in the example below to get an accurate grade. <br>
WARNING! I might get things wrong, so double check before your final grading. All the best. '''
    return [(None, greeting)]
def bot(history):
    """Identity pass-through used as the terminal step of each event chain."""
    return history
# --- Gradio UI wiring -------------------------------------------------------
# NOTE(review): `.style(...)`, `source="microphone"` and tuple-list Chatbot
# histories are legacy Gradio (pre-4.x) APIs — confirm the pinned gradio
# version before upgrading.
with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align: center;color: darkblue'>Grady - Your helpful Grading Assistant</h1>")
    # Chat window, pre-seeded with Grady's greeting.
    chatbot = gr.Chatbot(get_first_message([]), elem_id="chatbot", interactive=True).style(height=500)
    with gr.Row():
        # Create radio button to select model (hidden from the user; default GPT-3.5).
        radio = gr.Radio(models, label="Choose a model", value="GPT-3.5", type="value", visible=False)
    with gr.Row():
        with gr.Column(scale=0.75):
            # Typed student-response input.
            txt = gr.Textbox(
                label="Student Response",
                placeholder="Enter text and press enter", lines=1, interactive=True
            ).style(container=False)
        with gr.Column(scale=0.25):
            # Microphone input; returns a filepath consumed by get_output().
            audio = gr.Audio(source="microphone", type="filepath").style(container=False)
    with gr.Row():
        # One worked example answer the user can click to populate the textbox.
        gr.Examples(examples=["""11: Currently the process is not very efficient as each patient goes through the same steps at the front desk and the radiology department although the sub-activities and processes are different. 
One solution is to have a streamlined and differentiated process for each sub-type with dedicated staff. For example, at the front desk, all new patient cases can be handled by one nurse while all follow-up cases by a second nurse.
Similarly, in the radiology department, all upper extremity cases can be handled by 2 technicians while lower extremity cases by the other 2 technicians with dedicated X-ray machines. The 3rd nurse will be responsible for handling the hand-off of X-rays and inserting them into the patient's files. 
By having staff do a single type of task on a particular day, and by having the patients go through differentiated workflows, it should be possible to improve overall efficiency. """], inputs=[txt], label="Answers")
    # Enter in the textbox: answer the question, clear the box, refresh the chat.
    txt.submit(add_text, [chatbot, txt, radio], [chatbot, txt], postprocess=False).then(
        bot, chatbot, chatbot
    )
    # New recording: transcribe, answer, speak, refresh the chat.
    audio.change(fn=get_output, inputs=[chatbot, audio, radio], outputs=[chatbot]).then(
        bot, chatbot, chatbot
    )
    # Model switch: rebuild the index and reset the greeting.
    radio.change(fn=set_model, inputs=[chatbot, radio], outputs=[chatbot]).then(bot, chatbot, chatbot)
    # Clear the audio widget after it has been processed.
    audio.change(lambda:None, None, audio)
    # Build the index for the default model at startup.
    set_model(chatbot, radio.value)
if __name__ == "__main__":
    # Bug fix: queue() was called twice — the first, default-config call was
    # immediately superseded by the second. Configure the queue exactly once.
    demo.queue(concurrency_count=5)
    demo.launch(debug=True)