import gradio as gr
from main import index, run
from gtts import gTTS
import time

from transformers import pipeline

# Speech-to-text pipeline used to transcribe microphone recordings
p = pipeline("automatic-speech-recognition")

# Use the transcribed or typed text to call the chat method from main.py

# Models the user can choose from; the selection is forwarded to main.run / main.index
models = ["GPT-3.5", "Flan UL2", "GPT-4", "Flan T5"]

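# Answer a typed question, append the exchange to the chat history, and clear the textbox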
def add_text(history, text, model):
    print("Question asked: " + text)
    response = run_model(text, model)
    history = history + [(text, response)]
    print(history)
    return history, ""


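# Run the selected model on the question, time the call, and reformat the SOURCES section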
def run_model(text, model):
    start_time = time.time()
    print("start time: " + str(start_time))
    response = run(text, model)
    end_time = time.time()
    # If the response contains `SOURCES:`, move it onto its own line
    if "SOURCES:" in response:
        response = response.replace("SOURCES:", "\nSOURCES:")
    print(response)
    print("Time taken: " + str(end_time - start_time))
    return response



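# Transcribe microphone input, answer it, synthesise the reply with gTTS,
# and append both audio clips to the chat history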
def get_output(history, audio, model):
    # Transcribe the recorded audio to text
    txt = p(audio)["text"]
    audio_path = 'response.wav'
    response = run_model(txt, model)
    # Remove everything from "SOURCES:" to the end of the string before synthesis
    trimmed_response = response.split("SOURCES:")[0]
    # Convert the trimmed response to speech and save it to disk
    tts = gTTS(text=trimmed_response, lang='en', slow=False)
    tts.save(audio_path)
    # Append the user's recording and the spoken reply to the chat as audio messages
    history.append(((audio,), (audio_path,)))
    print(history)
    return history

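# Reset the chat history and rebuild the index for the newly selected model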
def set_model(history, model):
    print("Model selected: " + model)
    history = get_first_message(history)
    index(model)
    return history


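# Initial greeting shown when the app loads or the model is changed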
def get_first_message(history):
    history = [(None,
                '''Hi!! I AM GRADY!! I am a grading assistant here to help you grade assignments based on a rubric!! <br>
                Today, I will be grading the <a href="https://hbsp.harvard.edu/product/908D01-PDF-ENG"> Paediatric Orthopaedic Quiz. </a> <br>
                Use the format given in the example below to get an accurate grade. <br>
                WARNING! I might get things wrong, so double-check before your final grading. All the best! ''')]
    return history


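# Identity callback used to refresh the chatbot component after each event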
def bot(history):
    return history

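# Gradio UI: chat window, hidden model selector, text box, microphone input, and an example answer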
with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align: center;color: darkblue'>Grady - Your helpful Grading Assistant</h1>")
    chatbot = gr.Chatbot(get_first_message([]), elem_id="chatbot", interactive=True).style(height=500)

    with gr.Row():
        # Create radio button to select model
        radio = gr.Radio(models, label="Choose a model", value="GPT-3.5", type="value", visible=False)
    with gr.Row():
        with gr.Column(scale=0.75):
            txt = gr.Textbox(
                label="Student Response",
                placeholder="Enter text and press enter", lines=1, interactive=True
            ).style(container=False)

        with gr.Column(scale=0.25):
            audio = gr.Audio(source="microphone", type="filepath").style(container=False)
    with gr.Row():
        gr.Examples(examples=["""11: Currently the process is not very efficient as each patient goes through the same steps at the front desk and the radiology department although the sub-activities and processes are different. Also, the staff is doing multiple activities based on patient requirements.  

One solution is to have a streamlined and differentiated process for each sub-type with dedicated staff. For example, at the front desk, all new patient cases can be handled by one nurse while all follow-up cases by a second nurse. 

Similarly, in the radiology department, all upper extremity cases can be handled by 2 technicians while lower extremity cases by the other 2 technicians with dedicated X-ray machines. The 3rd nurse will be responsible for handling the hand-off of X-rays and inserting them into the patient's files.  

By having staff do a single type of task on a particular day, and by having the patients go through differentiated workflows, it should be possible to improve overall efficiency. """], inputs=[txt], label="Answers")

    txt.submit(add_text, [chatbot, txt, radio], [chatbot, txt], postprocess=False).then(
        bot, chatbot, chatbot
    )

    audio.change(fn=get_output, inputs=[chatbot, audio, radio], outputs=[chatbot]).then(
        bot, chatbot, chatbot
    )

    radio.change(fn=set_model, inputs=[chatbot, radio], outputs=[chatbot]).then(bot, chatbot, chatbot)

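    # Clear the microphone widget once its recording has been processed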
    audio.change(lambda:None, None, audio)

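    # Build the index for the default model when the UI is constructed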
    set_model(chatbot, radio.value)



if __name__ == "__main__":
    # Queue requests so several users can be served concurrently
    demo.queue(concurrency_count=5)
    demo.launch(debug=True)