File size: 5,334 Bytes
e9fecb4
8d9f20a
e9fecb4
3d8c3dd
8d9f20a
ea9ad23
8d9f20a
b17ed29
 
 
 
 
 
 
 
 
 
ea9ad23
 
 
 
 
 
e9fecb4
3d8c3dd
7b17104
 
 
 
 
e9fecb4
 
 
2eaf865
7b17104
2eaf865
e9fecb4
7b17104
e9fecb4
 
 
 
7b17104
 
 
ea9ad23
 
7b17104
 
 
 
 
 
3d8c3dd
 
 
 
 
 
 
 
 
 
 
7b17104
 
 
3d8c3dd
7b17104
 
 
 
2eaf865
ea9ad23
2eaf865
7b17104
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3d8c3dd
2eaf865
 
7b17104
b17ed29
7b17104
 
 
ea9ad23
3d8c3dd
2eaf865
ea9ad23
 
 
 
2eaf865
 
 
 
ea9ad23
 
 
 
 
 
 
 
 
 
2eaf865
 
 
3d8c3dd
 
7b17104
ea9ad23
 
2eaf865
e9fecb4
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
from openai import OpenAI
import gradio as gr
import os
import json

# API key used to pre-fill the Setting tab's API_KEY textbox; read from the
# environment so the key is not hard-coded in the source.
DEFAULT_API_KEY = os.getenv("DEFAULT_API_KEY")  # Replace with your key

# Default persona prompt shown (and editable) in the Setting tab: a
# Thai-speaking empathetic tarot-reading assistant that first asks about the
# user's problem and its duration before drawing a card. The prompt text is
# sent verbatim to the model, so it is intentionally left untranslated.
default_system_prompt = """\
You are an empathetic Thai woman assistant named แม่หมอแพตตี้. (Thai woman will say 'ค่ะ'). 
You provide insights and support offering clarity and healing. 
You always answer in Thai.
First, you need to know these insight ask each one separately.
- What is the problem that user faced.
- How long that user faced.
If the statement is not clear and concise, you can ask multiple times.
And then, you will open one Tarot cards and explain the future of how to fix the problem."""

def predict(message, history, system_prompt, model_id, api_key, base_url, temperature):
    """Stream a chat completion for *message*, yielding the growing reply.

    Builds an OpenAI-format message list from *system_prompt*, the last three
    (user, assistant) turns of *history*, and the new *message*, then streams
    the completion, yielding the accumulated reply text after every chunk.
    """
    client = OpenAI(base_url=base_url, api_key=api_key)

    # Convert gradio-style (user, assistant) tuples into OpenAI chat messages,
    # keeping only the three most recent exchanges to bound context size.
    # Non-string or blank turns (e.g. the "" placeholder added by chat_bot)
    # are skipped.
    messages = [{"role": "system", "content": system_prompt}]
    for user_turn, assistant_turn in history[-3:]:
        if isinstance(user_turn, str) and user_turn.strip():
            messages.append({"role": "user", "content": user_turn})
        if isinstance(assistant_turn, str) and assistant_turn.strip():
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    stream = client.chat.completions.create(
        model=model_id,
        messages=messages,
        temperature=temperature,
        stream=True,
    )

    # Yield the complete text-so-far each time: the gradio chatbot expects
    # the full partial reply on every update, not just the delta.
    reply = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            reply += delta
            yield reply

def chat_bot(user_input, history, system_prompt, model_id, api_key, base_url, temperature):
    """Gradio event handler: append the user's turn and stream the bot reply.

    Yields ("", history) pairs so the input textbox is cleared immediately
    while the chatbot component shows the reply growing in place.
    """
    reply_stream = predict(
        user_input, history, system_prompt, model_id, api_key, base_url, temperature
    )
    # Placeholder turn; predict() ignores blank assistant texts when it
    # rebuilds the message list, so this does not pollute the model context.
    history.append((user_input, ""))

    for partial_reply in reply_stream:
        history[-1] = (user_input, partial_reply)
        yield "", history

def get_log(history, system_prompt):
    """Return the OpenAI-format message log for *history* as a JSON string.

    Mirrors the message construction in ``predict`` (minus the 3-turn
    truncation) so the Setting tab can display what would be sent to the API.

    Args:
        history: list of gradio-style ``(user, assistant)`` tuples; blank or
            non-string turns are skipped, matching ``predict``'s filtering.
        system_prompt: the system prompt placed first in the log.

    Returns:
        A JSON array (string) of ``{"role", "content"}`` message dicts.
    """
    messages = [{"role": "system", "content": system_prompt}]
    for human, assistant in history:
        if isinstance(human, str) and human.strip():
            messages.append({"role": "user", "content": human})
        if isinstance(assistant, str) and assistant.strip():
            messages.append({"role": "assistant", "content": assistant})

    # Serialize with the stdlib instead of hand-assembling the JSON text;
    # ensure_ascii=False keeps Thai text human-readable in the log viewer.
    return json.dumps(messages, ensure_ascii=False, indent=2)

# Layout tweaks injected into gr.Blocks(css=...): make the app column fill
# the viewport and let the chatbot component grow and scroll inside it.
CSS ="""
.contain { display: flex; flex-direction: column; }
.gradio-container { height: 100vh !important; }
#component-0 { height: 80%; }
#chatbot { flex-grow: 1; overflow: auto;}
"""

# Build the Gradio UI: a Chat tab (chatbot + input + examples) and a Setting
# tab (system prompt, model/endpoint selection, API key, temperature, log).
with gr.Blocks(css=CSS) as demo:
    gr.HTML("""<h1><center>HoraCare 🫶</center></h1>
            <center> Version 3 </center>
            """)

    with gr.Tab("Chat"):
        chatbot = gr.Chatbot(elem_id="chatbot")
        msg = gr.Textbox(placeholder="พิมพ์ข้อความของคุณที่นี่...")

        with gr.Row():
            clear = gr.Button("Clear History")
            send = gr.Button("Send Message", variant="primary")

        # Canned starter messages (Thai) users can click to begin a chat.
        gr.Examples(
            examples=[
                "เราเศร้าจังเลย อกหักมา ร้องให้ไม่หยุดเลย", 
                "เราเหงาจังเลยไม่มีใครรัก", 
                "หัวหน้าจะใล่เราออกทำยังไงดี"
            ], 
            inputs=msg,
        )

    with gr.Tab("Setting") as setting_tab:

        gr.Markdown("### Model Setting")
        system_prompt = gr.Code(
            value=default_system_prompt,
            show_label=True,
            label="System Prompt",
            lines=2
        )

        # Model IDs and OpenAI-compatible endpoints (Groq, x.ai, OpenTyphoon);
        # allow_custom_value lets users type values not in the list.
        all_model_id = [
            'llama-3.1-8b-instant',
            'llama-3.2-1b-preview',
            'grok-2-1212',
            'grok-beta',
            'typhoon-v1.5-instruct',
            'typhoon-v1.5-instruct-fc',
            'typhoon-v1.5x-70b-instruct',
            ]

        all_base_url = [
            'https://api.groq.com/openai/v1',
            'https://api.x.ai/v1',
            'https://api.opentyphoon.ai/v1'
        ]

        # BUG FIX: labels were swapped — the model dropdown was labelled
        # 'base_url' and the base-URL dropdown 'model_id'.
        model_id = gr.Dropdown(all_model_id, value=all_model_id[0], allow_custom_value=True, label='model_id')
        base_url = gr.Dropdown(all_base_url, value=all_base_url[0], allow_custom_value=True, label='base_url')
        api_key = gr.Textbox(DEFAULT_API_KEY, type='password', label='API_KEY')
        temperature = gr.Slider(0, 1, value=0.5, label='temperature')

        gr.Markdown("### Message Log")
        msg_log = gr.Code(language='json', label='msg_log')

    # Wire events: Clear resets the chatbot; Enter or Send streams a reply;
    # opening the Setting tab refreshes the message-log view.
    clear.click(lambda: [], [], chatbot)
    msg.submit(chat_bot, [msg, chatbot, system_prompt, model_id, api_key, base_url, temperature], [msg, chatbot])
    send.click(chat_bot, [msg, chatbot, system_prompt, model_id, api_key, base_url, temperature], [msg, chatbot])
    setting_tab.select(get_log, [chatbot, system_prompt,], [msg_log])

demo.launch()