nisten committed on
Commit
0e24f7b
·
verified ·
1 Parent(s): 0b5789f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +129 -41
app.py CHANGED
@@ -1,72 +1,161 @@
1
- import os
2
- os.system('pip install dashscope')
3
- import gradio as gr
4
  from http import HTTPStatus
5
- import dashscope
6
- from dashscope import Generation
7
- from dashscope.api_entities.dashscope_response import Role
8
- from typing import List, Optional, Tuple, Dict
9
  from urllib.error import HTTPError
10
- default_system = 'You are a helpful assistant.'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
13
- dashscope.api_key = YOUR_API_TOKEN
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  History = List[Tuple[str, str]]
16
  Messages = List[Dict[str, str]]
17
 
 
18
  def clear_session() -> History:
19
  return '', []
20
 
21
- def modify_system_session(system: str) -> str:
22
- if system is None or len(system) == 0:
 
23
  system = default_system
24
  return system, system, []
25
 
 
26
  def history_to_messages(history: History, system: str) -> Messages:
27
- messages = [{'role': Role.SYSTEM, 'content': system}]
28
  for h in history:
29
- messages.append({'role': Role.USER, 'content': h[0]})
30
- messages.append({'role': Role.ASSISTANT, 'content': h[1]})
31
  return messages
32
 
33
-
34
  def messages_to_history(messages: Messages) -> Tuple[str, History]:
35
- assert messages[0]['role'] == Role.SYSTEM
36
  system = messages[0]['content']
37
  history = []
38
  for q, r in zip(messages[1::2], messages[2::2]):
39
  history.append([q['content'], r['content']])
40
  return system, history
41
 
 
 
 
 
 
 
 
42
 
43
- def model_chat(query: Optional[str], history: Optional[History], system: str
44
- ) -> Tuple[str, str, History]:
45
  if query is None:
46
  query = ''
47
  if history is None:
48
  history = []
49
  messages = history_to_messages(history, system)
50
- messages.append({'role': Role.USER, 'content': query})
51
- gen = Generation.call(
52
- model = "codeqwen1.5-7b-chat",
53
- messages=messages,
54
- result_format='message',
55
- stream=True
56
- )
57
- for response in gen:
58
- if response.status_code == HTTPStatus.OK:
59
- role = response.output.choices[0].message.role
60
- response = response.output.choices[0].message.content
61
- system, history = messages_to_history(messages + [{'role': role, 'content': response}])
62
- yield '', history, system
63
- else:
64
- raise HTTPError('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
65
- response.request_id, response.status_code,
66
- response.code, response.message
67
- ))
68
 
 
69
 
 
70
  with gr.Blocks() as demo:
71
  gr.Markdown("""<center><font size=8>CodeQwen1.5-7B-Chat Bot👾</center>""")
72
 
@@ -81,9 +170,9 @@ with gr.Blocks() as demo:
81
 
82
  with gr.Row():
83
  clear_history = gr.Button("🧹 Clear History")
84
- sumbit = gr.Button("🚀 Send")
85
 
86
- sumbit.click(model_chat,
87
  inputs=[textbox, chatbot, system_state],
88
  outputs=[textbox, chatbot, system_input])
89
  clear_history.click(fn=clear_session,
@@ -93,5 +182,4 @@ with gr.Blocks() as demo:
93
  inputs=[system_input],
94
  outputs=[system_state, system_input, chatbot])
95
 
96
- demo.queue(api_open=False)
97
- demo.launch(max_threads=30)
 
1
+ # import os
2
+ # !os.system('pip install dashscope')
3
+ # import gradio as gr
4
  from http import HTTPStatus
5
+
6
+ # from dashscope import Generation
7
+ # !from dashscope.api_entities.dashscope_response import Role
8
+ # from typing import List, Optional, Tuple, Dict
9
  from urllib.error import HTTPError
10
+ # default_system = 'You are a helpful assistant.'
11
+
12
+
13
+
14
+ # History = List[Tuple[str, str]]
15
+ # Messages = List[Dict[str, str]]
16
+
17
+ # def clear_session() -> History:
18
+ # return '', []
19
+
20
+ # def modify_system_session(system: str) -> str:
21
+ # if system is None or len(system) == 0:
22
+ # system = default_system
23
+ # return system, system, []
24
+
25
+ # def history_to_messages(history: History, system: str) -> Messages:
26
+ # messages = [{'role': Role.SYSTEM, 'content': system}]
27
+ # for h in history:
28
+ # messages.append({'role': Role.USER, 'content': h[0]})
29
+ # messages.append({'role': Role.ASSISTANT, 'content': h[1]})
30
+ # return messages
31
+
32
+
33
+ # def messages_to_history(messages: Messages) -> Tuple[str, History]:
34
+ # assert messages[0]['role'] == Role.SYSTEM
35
+ # system = messages[0]['content']
36
+ # history = []
37
+ # for q, r in zip(messages[1::2], messages[2::2]):
38
+ # history.append([q['content'], r['content']])
39
+ # return system, history
40
+
41
 
42
+ # def model_chat(query: Optional[str], history: Optional[History], system: str
43
+ # ) -> Tuple[str, str, History]:
44
+ # if query is None:
45
+ # query = ''
46
+ # if history is None:
47
+ # history = []
48
+ # messages = history_to_messages(history, system)
49
+ # messages.append({'role': Role.USER, 'content': query})
50
+ # gen = Generation.call(
51
+ # model = "codeqwen1.5-7b-chat",
52
+ # messages=messages,
53
+ # result_format='message',
54
+ # stream=True
55
+ # )
56
+ # for response in gen:
57
+ # if response.status_code == HTTPStatus.OK:
58
+ # role = response.output.choices[0].message.role
59
+ # response = response.output.choices[0].message.content
60
+ # system, history = messages_to_history(messages + [{'role': role, 'content': response}])
61
+ # yield '', history, system
62
+ # else:
63
+ # raise HTTPError('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
64
+ # response.request_id, response.status_code,
65
+ # response.code, response.message
66
+ # ))
67
 
68
+
69
+ # with gr.Blocks() as demo:
70
+ # gr.Markdown("""<center><font size=8>CodeQwen1.5-7B-Chat Bot👾</center>""")
71
+
72
+ # with gr.Row():
73
+ # with gr.Column(scale=3):
74
+ # system_input = gr.Textbox(value=default_system, lines=1, label='System')
75
+ # with gr.Column(scale=1):
76
+ # modify_system = gr.Button("🛠️ Set system prompt and clear history", scale=2)
77
+ # system_state = gr.Textbox(value=default_system, visible=False)
78
+ # chatbot = gr.Chatbot(label='codeqwen1.5-7b-chat')
79
+ # textbox = gr.Textbox(lines=2, label='Input')
80
+
81
+ # with gr.Row():
82
+ # clear_history = gr.Button("🧹 Clear History")
83
+ # sumbit = gr.Button("🚀 Send")
84
+
85
+ # sumbit.click(model_chat,
86
+ # inputs=[textbox, chatbot, system_state],
87
+ # outputs=[textbox, chatbot, system_input])
88
+ # clear_history.click(fn=clear_session,
89
+ # inputs=[],
90
+ # outputs=[textbox, chatbot])
91
+ # modify_system.click(fn=modify_system_session,
92
+ # inputs=[system_input],
93
+ # outputs=[system_state, system_input, chatbot])
94
+
95
+ # demo.queue(api_open=False)
96
+ # demo.launch(max_threads=30, share=True)
97
+ import os
98
+ import torch
99
+ import gradio as gr
100
+ from transformers import AutoTokenizer, AutoModelForCausalLM
101
+ from typing import List, Optional, Tuple, Dict
102
+
103
+ # Set up default system prompt and history/message structures
104
+ default_system = 'You are Andrej Karpathy, the most helpful coding assistant and solves everything with working code.'
105
  History = List[Tuple[str, str]]
106
  Messages = List[Dict[str, str]]
107
 
108
+ # Clear the chat session
109
def clear_session() -> "Tuple[str, History]":
    """Reset the chat UI: empty the input textbox and drop the chat history.

    Returns:
        ('' , []): empty textbox value and empty history, matching the
        Gradio outputs wiring [textbox, chatbot].
    """
    # Fixed: the original annotation claimed `-> History`, but the function
    # actually returns a (textbox_value, history) pair.
    return '', []
111
 
112
+ # Modify the chat system's prompt
113
def modify_system_session(system: str) -> Tuple[str, str, History]:
    """Install a new system prompt and wipe the conversation.

    Falls back to the module-level `default_system` when the supplied
    prompt is None or the empty string. Returns
    (state_value, textbox_value, cleared_history) for the Gradio outputs.
    """
    chosen = default_system if (system is None or system == "") else system
    return chosen, chosen, []
117
 
118
+ # Convert the history of messages into a format suitable for model input
119
def history_to_messages(history: History, system: str) -> Messages:
    """Flatten (user, assistant) turn pairs into a role/content message list,
    prefixed with the system prompt."""
    turns = [
        {'role': role, 'content': text}
        for user_text, assistant_text in history
        for role, text in (('user', user_text), ('assistant', assistant_text))
    ]
    return [{'role': 'system', 'content': system}] + turns
125
 
126
+ # Convert messages back into a history format after model processing
127
def messages_to_history(messages: Messages) -> Tuple[str, History]:
    """Invert history_to_messages: split a message list back into the system
    prompt and a list of [user, assistant] turn pairs.

    The first message must be the system prompt; a trailing unpaired user
    message (if any) is dropped by the pairwise zip, as before.
    """
    head, *rest = messages
    assert head['role'] == 'system'
    # rest[0::2] are the user messages, rest[1::2] the assistant replies.
    history = [
        [user_msg['content'], assistant_msg['content']]
        for user_msg, assistant_msg in zip(rest[0::2], rest[1::2])
    ]
    return head['content'], history
134
 
135
+ # Handle the chat interaction, including processing the input and generating a response
136
def model_chat(query: Optional[str], history: Optional[History], system: str) -> Tuple[str, History, str]:
    """Run one chat turn through the local CodeQwen model.

    Args:
        query: The user's new message (None is treated as '').
        history: Prior [user, assistant] turn pairs (None is treated as []).
        system: The active system prompt.

    Returns:
        ('', updated_history, system) — empty string clears the input
        textbox, matching the Gradio outputs wiring.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Fixed: the bare name "codeqwen1.5-7b-chat" is not a valid Hugging Face
    # repo id; the hosted checkpoint lives under the Qwen organization.
    model_name = "Qwen/CodeQwen1.5-7B-Chat"

    # Fixed: load once and cache on the function object — the original
    # re-downloaded/re-loaded a 7B tokenizer and model on EVERY chat turn.
    if not hasattr(model_chat, "_cached"):
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
        model_chat._cached = (tokenizer, model)
    tokenizer, model = model_chat._cached

    if query is None:
        query = ''
    if history is None:
        history = []
    messages = history_to_messages(history, system)
    messages.append({'role': 'user', 'content': query})

    # Fixed: the original tokenized only the last user message (silently
    # dropping the system prompt and all prior turns) and passed the
    # BatchEncoding dict positionally to generate(), which raises at runtime.
    # Build the full prompt with the tokenizer's chat template instead.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    outputs = model.generate(
        input_ids,
        # Fixed: max_length=100 counted the prompt tokens too, frequently
        # leaving no budget for the reply; cap new tokens instead.
        max_new_tokens=512,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

    system, history = messages_to_history(messages + [{'role': 'assistant', 'content': response}])
    return '', history, system
157
 
158
+ # Setting up the Gradio interface
159
  with gr.Blocks() as demo:
160
  gr.Markdown("""<center><font size=8>CodeQwen1.5-7B-Chat Bot👾</center>""")
161
 
 
170
 
171
  with gr.Row():
172
  clear_history = gr.Button("🧹 Clear History")
173
+ submit = gr.Button("🚀 Send")
174
 
175
+ submit.click(model_chat,
176
  inputs=[textbox, chatbot, system_state],
177
  outputs=[textbox, chatbot, system_input])
178
  clear_history.click(fn=clear_session,
 
182
  inputs=[system_input],
183
  outputs=[system_state, system_input, chatbot])
184
 
185
+ demo.launch(share=True, enable_queue=True)