binker commited on
Commit
855fc19
·
1 Parent(s): a5bbef7

Upload 6 files

Browse files
Files changed (6) hide show
  1. bot_backend.py +232 -0
  2. config.json +16 -0
  3. functional.py +116 -0
  4. jupyter_backend.py +100 -0
  5. response_parser.py +200 -0
  6. web_ui.py +185 -0
bot_backend.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import openai
3
+ import os
4
+ import copy
5
+ import shutil
6
+ from jupyter_backend import *
7
+ from typing import *
8
+
9
# OpenAI function-calling schema: a single `execute_code` function the model
# may call; the code is run in the session's Jupyter kernel.
functions = [
    {
        "name": "execute_code",
        "description": "This function allows you to execute Python code and retrieve the terminal output. If the code "
                       "generates image output, the function will return the text '[image]'. The code is sent to a "
                       "Jupyter kernel for execution. The kernel will remain active after execution, retaining all "
                       "variables in memory.",
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": "The code text"
                }
            },
            "required": ["code"],
        }
    }
]

# System prompt for every conversation; note it promises GPT a
# "User uploaded a file: filename" message on upload (see add_file_message).
system_msg = '''You are an AI code interpreter.
Your goal is to help users do a variety of jobs by executing Python code.

You should:
1. Comprehend the user's requirements carefully & to the letter.
2. Give a brief description for what you plan to do & call the provided function to run code.
3. Provide results analysis based on the execution output.
4. If error occurred, try to fix it.

Note: If the user uploads a file, you will receive a system message "User uploaded a file: filename". Use the filename as the path in the code. '''

with open('config.json') as f:
    config = json.load(f)

if not config['API_KEY']:
    # Fall back to the environment variable, then scrub it so it cannot leak
    # to code executed later in this process.
    # BUGFIX: the original called os.unsetenv(), which does NOT update
    # os.environ (lookups through os.environ would still see the key);
    # os.environ.pop() removes the mapping and unsets the variable in one step.
    config['API_KEY'] = os.environ.pop('OPENAI_API_KEY', None)
46
+
47
+
48
def get_config():
    """Return the module-level configuration dict loaded from config.json."""
    return config
50
+
51
+
52
def config_openai_api(api_type, api_base, api_version, api_key):
    """Point the global `openai` client at the given endpoint and credentials."""
    settings = {
        'api_type': api_type,
        'api_base': api_base,
        'api_version': api_version,
        'api_key': api_key,
    }
    for attr, value in settings.items():
        setattr(openai, attr, value)
57
+
58
+
59
class GPTResponseLog:
    """Accumulator for the pieces of one streamed GPT response.

    Streaming deltas arrive fragment by fragment; this class collects the
    assistant role name, the text content, the function-call name and
    argument string, the markdown code block currently rendered in the UI,
    the finish reason, and a snapshot of the chat history taken when a
    function call begins.
    """

    # Single source of truth for the initial/reset value of every field.
    # (Previously these defaults were duplicated between __init__ and
    # reset_gpt_response_log_values.) All values are immutable, so direct
    # assignment is safe.
    _DEFAULT_VALUES = {
        'assistant_role_name': '',
        'content': '',
        'function_name': None,
        'function_args_str': '',
        'display_code_block': '',
        'finish_reason': 'stop',
        'bot_history': None,
    }

    def __init__(self):
        for attr_name, value in self._DEFAULT_VALUES.items():
            setattr(self, attr_name, value)

    def reset_gpt_response_log_values(self, exclude=None):
        """Reset every log field to its default, except names in `exclude`."""
        if exclude is None:
            exclude = []
        for attr_name, value in self._DEFAULT_VALUES.items():
            if attr_name not in exclude:
                setattr(self, attr_name, value)

    def set_assistant_role_name(self, assistant_role_name: str):
        self.assistant_role_name = assistant_role_name

    def add_content(self, content: str):
        # Content streams in fragments; concatenate them.
        self.content += content

    def set_function_name(self, function_name: str):
        self.function_name = function_name

    def copy_current_bot_history(self, bot_history: List):
        # Deep copy so later UI mutations don't alter the snapshot.
        self.bot_history = copy.deepcopy(bot_history)

    def add_function_args_str(self, function_args_str: str):
        # Function-call arguments also stream in fragments.
        self.function_args_str += function_args_str

    def update_display_code_block(self, display_code_block):
        self.display_code_block = display_code_block

    def update_finish_reason(self, finish_reason: str):
        self.finish_reason = finish_reason
106
+
107
+
108
class BotBackend(GPTResponseLog):
    """Per-session backend: owns the conversation, the Jupyter kernel, the
    upload working directory, and the kwargs used for chat completions.
    """

    def __init__(self):
        super().__init__()
        # Opaque per-session identifier used to namespace cache directories.
        self.unique_id = hash(id(self))
        self.jupyter_work_dir = f'cache/work_dir_{self.unique_id}'
        self.jupyter_kernel = JupyterKernel(work_dir=self.jupyter_work_dir)
        self.gpt_model_choice = "GPT-3.5"
        # Uploads that can still be undone, most recent last.
        self.revocable_files = []
        self._init_conversation()
        self._init_api_config()
        self._init_kwargs_for_chat_completion()

    def _init_conversation(self):
        """(Re)start the conversation containing only the system prompt."""
        first_system_msg = {'role': 'system', 'content': system_msg}
        if hasattr(self, 'conversation'):
            # Clear in place: kwargs_for_chat_completion holds a reference to
            # this very list, so it must not be rebound.
            self.conversation.clear()
            self.conversation.append(first_system_msg)
        else:
            self.conversation: List[Dict] = [first_system_msg]

    def _init_api_config(self):
        """Push the configured endpoint/credentials into the openai module."""
        self.config = get_config()
        api_type = self.config['API_TYPE']
        api_base = self.config['API_base']
        api_version = self.config['API_VERSION']
        # CONSISTENCY FIX: read the key via self.config like the other fields
        # (the original reached for the module-level `config` global here).
        api_key = self.config['API_KEY']
        config_openai_api(api_type, api_base, api_version, api_key)

    def _init_kwargs_for_chat_completion(self):
        """Build kwargs for openai.ChatCompletion.create for the chosen model."""
        self.kwargs_for_chat_completion = {
            'stream': True,
            'messages': self.conversation,
            'functions': functions,
            'function_call': 'auto'
        }

        model_name = self.config['model'][self.gpt_model_choice]['model_name']

        # Azure deployments are addressed via `engine`, plain OpenAI via `model`.
        if self.config['API_TYPE'] == 'azure':
            self.kwargs_for_chat_completion['engine'] = model_name
        else:
            self.kwargs_for_chat_completion['model'] = model_name

    def _clear_all_files_in_work_dir(self):
        """Delete everything inside the kernel working directory."""
        for filename in os.listdir(self.jupyter_work_dir):
            path = os.path.join(self.jupyter_work_dir, filename)
            # ROBUSTNESS FIX: code executed in the kernel may create
            # subdirectories, which os.remove() alone cannot delete.
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

    def add_gpt_response_content_message(self):
        """Append the accumulated assistant text to the conversation."""
        self.conversation.append(
            {'role': self.assistant_role_name, 'content': self.content}
        )

    def add_text_message(self, user_text):
        """Append a user message; new input invalidates pending upload undo."""
        self.conversation.append(
            {'role': 'user', 'content': user_text}
        )
        self.revocable_files.clear()
        # 'new_input' makes the bot loop run at least one completion round.
        self.update_finish_reason(finish_reason='new_input')

    def add_file_message(self, path, bot_msg):
        """Copy an uploaded file into the work dir and record it (undo-ably).

        :param path: temp path of the uploaded file
        :param bot_msg: the chat-history row showing the upload; kept so the
            upload can be revoked together with its UI row
        """
        filename = os.path.basename(path)
        work_dir = self.jupyter_work_dir

        shutil.copy(path, work_dir)

        # BUGFIX: interpolate the real filename — system_msg tells GPT to
        # expect "User uploaded a file: filename", but `filename` was computed
        # and never used; the message contained a literal placeholder.
        gpt_msg = {'role': 'system', 'content': f'User uploaded a file: {filename}'}
        self.conversation.append(gpt_msg)
        self.revocable_files.append(
            {
                'bot_msg': bot_msg,
                'gpt_msg': gpt_msg,
                'path': os.path.join(work_dir, filename)
            }
        )

    def add_function_call_response_message(self, function_response: str, save_tokens=True):
        """Record the function call and its output in the conversation.

        :param save_tokens: when True, truncate long outputs (keep head and
            tail) so the context window isn't flooded.
        """
        self.conversation.append(
            {
                "role": self.assistant_role_name,
                "name": self.function_name,
                "content": self.function_args_str
            }
        )

        if save_tokens and len(function_response) > 500:
            function_response = f'{function_response[:200]}\n[Output too much, the middle part output is omitted]\n ' \
                                f'End part of output:\n{function_response[-200:]}'
        self.conversation.append(
            {
                "role": "function",
                "name": self.function_name,
                "content": function_response,
            }
        )

    def revoke_file(self):
        """Undo the most recent upload.

        :return: the upload's bot_msg row (for UI removal), or None if there
            is nothing left to revoke.
        """
        if self.revocable_files:
            file = self.revocable_files[-1]
            bot_msg = file['bot_msg']
            gpt_msg = file['gpt_msg']
            path = file['path']

            # The upload system message must still be the newest entry;
            # add_text_message clears revocable_files, which guarantees this.
            assert self.conversation[-1] is gpt_msg
            del self.conversation[-1]

            os.remove(path)

            del self.revocable_files[-1]

            return bot_msg
        else:
            return None

    def update_gpt_model_choice(self, model_choice):
        """Switch model and rebuild the completion kwargs accordingly."""
        self.gpt_model_choice = model_choice
        self._init_kwargs_for_chat_completion()

    def restart(self):
        """Full session reset: files, conversation, response log and kernel."""
        self._clear_all_files_in_work_dir()
        self.revocable_files.clear()
        self._init_conversation()
        self.reset_gpt_response_log_values()
        self.jupyter_kernel.restart_jupyter_kernel()
config.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "API_TYPE": "open_ai",
3
+ "API_base": "https://api.zeroai.link/v1",
4
+ "API_VERSION": null,
5
+ "API_KEY": "",
6
+ "model": {
7
+ "GPT-3.5": {
8
+ "model_name": "gpt-3.5-turbo-16k-0613",
9
+ "available": true
10
+ },
11
+ "GPT-4": {
12
+ "model_name": "gpt-4-0613",
13
+ "available": true
14
+ }
15
+ }
16
+ }
functional.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bot_backend import *
2
+ import base64
3
+ import time
4
+
5
+
6
def chat_completion(bot_backend: BotBackend):
    """Start a (streaming) ChatCompletion request for this session's backend."""
    model_choice = bot_backend.gpt_model_choice

    # Refuse models the configured API key cannot use.
    assert bot_backend.config['model'][model_choice]['available'], \
        f"{model_choice} is not available for your API key"

    return openai.ChatCompletion.create(**bot_backend.kwargs_for_chat_completion)
15
+
16
+
17
def add_function_response_to_bot_history(content_to_display, history, unique_id):
    """Render Jupyter execution output into the gradio chat history.

    :param content_to_display: list of (mark, output_str) pairs produced by
        JupyterKernel.execute_code
    :param history: gradio chatbot history, mutated in place
    :param unique_id: per-session id used to namespace the image cache dir
    """
    images, text = [], []

    # Split terminal text from image payloads; ANSI color codes are stripped
    # from error tracebacks so they render cleanly inside the markdown fence.
    error_occurred = False
    for mark, out_str in content_to_display:
        if mark in ('stdout', 'execute_result_text', 'display_text'):
            text.append(out_str)
        elif mark in ('execute_result_png', 'execute_result_jpeg', 'display_png', 'display_jpeg'):
            images.append(('png' if 'png' in mark else 'jpg', out_str))
        elif mark == 'error':
            text.append(delete_color_control_char(out_str))
            error_occurred = True
    text = '\n'.join(text).strip('\n')
    if error_occurred:
        history.append([None, f'❌Terminal output:\n```shell\n\n{text}\n```'])
    else:
        history.append([None, f'✔️Terminal output:\n```shell\n{text}\n```'])

    # Decode base64 image payloads to temp files and embed them in the chat.
    temp_path = f'cache/temp_{unique_id}'
    for i, (filetype, img) in enumerate(images):
        image_bytes = base64.b64decode(img)
        # ROBUSTNESS FIX: exist_ok avoids races and a missing-parent failure
        # that plain os.mkdir() could hit.
        os.makedirs(temp_path, exist_ok=True)
        # BUGFIX: include the loop index — hash(time.time()) alone can yield
        # the same value for several images emitted by one execution, making
        # later images silently overwrite earlier ones.
        path = f'{temp_path}/{hash(time.time())}_{i}.{filetype}'
        with open(path, 'wb') as f:
            f.write(image_bytes)
        history.append(
            [
                None,
                f'<img src=\"file={path}\" style=\'width: 600px; max-width:none; max-height:none\'>'
            ]
        )
54
+
55
+
56
def parse_json(function_args: str, finished: bool):
    """
    GPT may generate non-standard JSON format string, which contains '\n' in string value, leading to error when using
    `json.loads()`.
    Here we implement a parser to extract code directly from non-standard JSON string.

    :param function_args: the function-call argument text streamed so far,
        e.g. '{"code": "print(1)"}' — possibly incomplete and possibly
        containing raw (unescaped) newlines inside the string value.
    :param finished: True once the argument stream is complete; the closing
        '"}' is then expected and the value is sliced out between delimiters.
        False while still streaming; a best-effort prefix is returned instead.
    :return: code string if successfully parsed otherwise None
    """
    # Scan state shared between the forward pass (find the opening quote of
    # the "code" value) and the backward pass (find its closing quote).
    parser_log = {
        'met_begin_{': False,
        'begin_"code"': False,
        'end_"code"': False,
        'met_:': False,
        'met_end_}': False,
        'met_end_code_"': False,
        "code_begin_index": 0,
        "code_end_index": 0
    }
    try:
        # Forward scan: walk '{' -> '"code"' -> ':' -> opening '"' of the value.
        for index, char in enumerate(function_args):
            if char == '{':
                parser_log['met_begin_{'] = True
            elif parser_log['met_begin_{'] and char == '"':
                if parser_log['met_:']:
                    # This quote opens the value itself.
                    if finished:
                        parser_log['code_begin_index'] = index + 1
                        break
                    else:
                        # Still streaming: return a best-effort prefix of the code.
                        if index + 1 == len(function_args):
                            return ''
                        else:
                            temp_code_str = function_args[index + 1:]
                            if '\n' in temp_code_str:
                                # Raw newlines mean non-standard JSON: take the
                                # partial value verbatim.
                                return temp_code_str.strip('\n')
                            else:
                                # Looks like standard JSON so far (escapes
                                # intact): close it and parse properly.
                                return json.loads(function_args + '"}')['code']
                elif parser_log['begin_"code"']:
                    # Closing quote of the "code" key.
                    parser_log['end_"code"'] = True
                else:
                    # Opening quote of the "code" key.
                    parser_log['begin_"code"'] = True
            elif parser_log['end_"code"'] and char == ':':
                parser_log['met_:'] = True
            else:
                continue
        if finished:
            # Backward scan: find the closing quote of the value (the last '"'
            # before the final '}').
            for index, char in enumerate(function_args[::-1]):
                back_index = -1 - index
                if char == '}':
                    parser_log['met_end_}'] = True
                elif parser_log['met_end_}'] and char == '"':
                    parser_log['code_end_index'] = back_index - 1
                    break
                else:
                    continue
            code_str = function_args[parser_log['code_begin_index']: parser_log['code_end_index'] + 1]
            if '\n' in code_str:
                # Non-standard JSON (raw newlines): return the slice directly.
                return code_str.strip('\n')
            else:
                # Standard JSON: let json.loads decode escape sequences.
                return json.loads(function_args)['code']

    except Exception as e:
        # Any parse failure (malformed or truncated args) is signalled as None.
        return None
jupyter_backend.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import jupyter_client
2
+ import re
3
+
4
+
5
def delete_color_control_char(string):
    """Strip ANSI escape sequences (e.g. terminal color codes) from *string*."""
    return re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', string)
8
+
9
+
10
class JupyterKernel:
    """Wrapper around a local `python3` Jupyter kernel used to execute
    GPT-generated code, with its working directory pinned to `work_dir`.
    """

    def __init__(self, work_dir):
        # Spawns a fresh kernel process plus a blocking client connected to it.
        self.kernel_manager, self.kernel_client = jupyter_client.manager.start_new_kernel(kernel_name='python3')
        self.work_dir = work_dir
        self._create_work_dir()
        # Dispatch table for GPT function calls. 'python' is an alias because
        # GPT sometimes emits a hallucinated function call of that name.
        self.available_functions = {
            'execute_code': self.execute_code,
            'python': self.execute_code
        }

    def execute_code_(self, code):
        """Execute `code` in the kernel and collect all of its IOPub output.

        :return: list of (mark, output_str) pairs; mark is one of 'stdout',
            'execute_result_text/_html/_png/_jpeg',
            'display_text/_html/_png/_jpeg', or 'error'. Image payloads are
            base64-encoded strings as delivered by Jupyter.
        """
        msg_id = self.kernel_client.execute(code)

        # Get the output of the code
        # NOTE(review): get_iopub_msg() blocks with no timeout, and messages
        # are not filtered by msg_id — this assumes one execution at a time;
        # confirm before running executions concurrently.
        iopub_msg = self.kernel_client.get_iopub_msg()

        all_output = []
        # Drain IOPub messages until the kernel reports it is idle again.
        # Every branch that consumes a message fetches the next one itself.
        while True:
            if iopub_msg['msg_type'] == 'stream':
                # Terminal output; only stdout is kept.
                if iopub_msg['content'].get('name') == 'stdout':
                    output = iopub_msg['content']['text']
                    all_output.append(('stdout', output))
                iopub_msg = self.kernel_client.get_iopub_msg()
            elif iopub_msg['msg_type'] == 'execute_result':
                # Value of the last expression; may carry several MIME types.
                if 'data' in iopub_msg['content']:
                    if 'text/plain' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['text/plain']
                        all_output.append(('execute_result_text', output))
                    if 'text/html' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['text/html']
                        all_output.append(('execute_result_html', output))
                    if 'image/png' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['image/png']
                        all_output.append(('execute_result_png', output))
                    if 'image/jpeg' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['image/jpeg']
                        all_output.append(('execute_result_jpeg', output))
                iopub_msg = self.kernel_client.get_iopub_msg()
            elif iopub_msg['msg_type'] == 'display_data':
                # Rich output from display() calls (e.g. matplotlib figures).
                if 'data' in iopub_msg['content']:
                    if 'text/plain' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['text/plain']
                        all_output.append(('display_text', output))
                    if 'text/html' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['text/html']
                        all_output.append(('display_html', output))
                    if 'image/png' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['image/png']
                        all_output.append(('display_png', output))
                    if 'image/jpeg' in iopub_msg['content']['data']:
                        output = iopub_msg['content']['data']['image/jpeg']
                        all_output.append(('display_jpeg', output))
                iopub_msg = self.kernel_client.get_iopub_msg()
            elif iopub_msg['msg_type'] == 'error':
                if 'traceback' in iopub_msg['content']:
                    # Traceback lines still contain ANSI color codes here;
                    # callers strip them with delete_color_control_char.
                    output = '\n'.join(iopub_msg['content']['traceback'])
                    all_output.append(('error', output))
                iopub_msg = self.kernel_client.get_iopub_msg()
            elif iopub_msg['msg_type'] == 'status' and iopub_msg['content'].get('execution_state') == 'idle':
                # Execution finished and all output has been delivered.
                break
            else:
                iopub_msg = self.kernel_client.get_iopub_msg()

        return all_output

    def execute_code(self, code):
        """Execute `code`; return (text_for_gpt, full_output_for_display).

        Text outputs are joined for GPT; images are replaced by the literal
        '[image]' marker (as promised in the function schema), and error
        tracebacks have their ANSI color codes stripped.
        """
        text_to_gpt = []
        content_to_display = self.execute_code_(code)
        for mark, out_str in content_to_display:
            if mark in ('stdout', 'execute_result_text', 'display_text'):
                text_to_gpt.append(out_str)
            elif mark in ('execute_result_png', 'execute_result_jpeg', 'display_png', 'display_jpeg'):
                text_to_gpt.append('[image]')
            elif mark == 'error':
                text_to_gpt.append(delete_color_control_char(out_str))

        return '\n'.join(text_to_gpt), content_to_display

    def _create_work_dir(self):
        # set work dir in jupyter environment
        # Runs inside the kernel (not this process): creates the directory,
        # chdirs into it, then removes the helper import from kernel globals.
        # NOTE(review): self.work_dir is interpolated into source code — a
        # quote character in the path would break this; confirm paths are safe.
        init_code = f"import os\n" \
                    f"if not os.path.exists('{self.work_dir}'):\n" \
                    f"    os.mkdir('{self.work_dir}')\n" \
                    f"os.chdir('{self.work_dir}')\n" \
                    f"del os"
        self.execute_code_(init_code)

    def restart_jupyter_kernel(self):
        """Discard the current kernel (and all its state) and start a new one."""
        self.kernel_client.shutdown()
        self.kernel_manager, self.kernel_client = jupyter_client.manager.start_new_kernel(kernel_name='python3')
        self._create_work_dir()
response_parser.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABCMeta, abstractmethod
2
+ from functional import *
3
+
4
+
5
class ChoiceStrategy(metaclass=ABCMeta):
    """Base class for handlers of one streamed `choices[0]` chunk."""

    def __init__(self, choice):
        self.choice = choice
        # Streaming responses carry their incremental payload under 'delta'.
        self.delta = self.choice['delta']

    @abstractmethod
    def support(self):
        """Return True when this strategy applies to the current delta."""

    @abstractmethod
    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        """Apply the delta; return the (possibly new) history and exit flag."""
17
+
18
+
19
class RoleChoiceStrategy(ChoiceStrategy):
    """Consumes the 'role' field announced in the first delta of a message."""

    def support(self):
        return 'role' in self.delta

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        role = self.delta['role']
        bot_backend.set_assistant_role_name(assistant_role_name=role)
        return history, whether_exit
27
+
28
+
29
class ContentChoiceStrategy(ChoiceStrategy):
    """Appends streamed text content to the current assistant message."""

    def support(self):
        # A function-call chunk carries "content": null, e.g.:
        # {
        #     "role": "assistant",
        #     "content": null,
        #     "function_call": {"name": "python", "arguments": ""}
        # }
        # so the null case must be excluded here (absent key -> None too).
        return self.delta.get('content') is not None

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        fragment = self.delta.get('content', '')
        bot_backend.add_content(content=fragment)
        # Mirror the accumulated content into the latest chat bubble.
        history[-1][1] = bot_backend.content
        return history, whether_exit
46
+
47
+
48
class NameFunctionCallChoiceStrategy(ChoiceStrategy):
    """Handles the chunk announcing which function GPT wants to call."""

    def support(self):
        return 'function_call' in self.delta and 'name' in self.delta['function_call']

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        function_dict = bot_backend.jupyter_kernel.available_functions
        bot_backend.set_function_name(function_name=self.delta['function_call']['name'])
        # Snapshot the history now; the streamed code display is re-rendered
        # on top of this snapshot while arguments arrive.
        bot_backend.copy_current_bot_history(bot_history=history)

        # Known function: nothing else to do for this chunk.
        if bot_backend.function_name in function_dict:
            return history, whether_exit

        # Unknown function: surface the problem and request shutdown.
        history.append(
            [
                None,
                f'GPT attempted to call a function that does '
                f'not exist: {bot_backend.function_name}\n '
            ]
        )
        return history, True
67
+
68
+
69
class ArgumentsFunctionCallChoiceStrategy(ChoiceStrategy):
    """Accumulates streamed function-call arguments and live-renders the code."""

    def support(self):
        function_call = self.delta.get('function_call', {})
        return 'arguments' in function_call

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        bot_backend.add_function_args_str(function_args_str=self.delta['function_call']['arguments'])

        if bot_backend.function_name == 'python':
            # Hallucinatory call: GPT (especially GPT-3.5) occasionally
            # "calls" a non-existent function named `python` whose arguments
            # are raw code text rather than JSON — display the text as-is.
            history = self._show_code(bot_backend, bot_backend.function_args_str)
        else:
            code_fragment = parse_json(function_args=bot_backend.function_args_str, finished=False)
            if code_fragment is not None:
                # Unparseable fragment -> leave the history untouched.
                history = self._show_code(bot_backend, code_fragment)

        return history, whether_exit

    @staticmethod
    def _show_code(bot_backend, code_text):
        """Re-render the 🔴Working code block on the pre-call history snapshot."""
        bot_backend.update_display_code_block(
            display_code_block="\n🔴Working:\n```python\n{}\n```".format(code_text)
        )
        updated_history = copy.deepcopy(bot_backend.bot_history)
        updated_history[-1][1] += bot_backend.display_code_block
        return updated_history
101
+
102
+
103
class FinishReasonChoiceStrategy(ChoiceStrategy):
    """Finalizes a message once the stream reports a finish_reason.

    For 'function_call' finishes, the accumulated code is executed in the
    Jupyter kernel and both the conversation and the UI history are updated
    with the result.
    """

    def support(self):
        return self.choice['finish_reason'] is not None

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        function_dict = bot_backend.jupyter_kernel.available_functions

        if bot_backend.content:
            # Persist any assistant text produced before the function call.
            bot_backend.add_gpt_response_content_message()

        bot_backend.update_finish_reason(finish_reason=self.choice['finish_reason'])
        if bot_backend.finish_reason == 'function_call':
            try:

                code_str = self.get_code_str(bot_backend)

                # Switch the UI code block from 🔴 (streaming) to 🟢 (running).
                bot_backend.update_display_code_block(
                    display_code_block="\n🟢Working:\n```python\n{}\n```".format(code_str)
                )
                history = copy.deepcopy(bot_backend.bot_history)
                history[-1][1] += bot_backend.display_code_block

                # function response: execute the code in the Jupyter kernel.
                text_to_gpt, content_to_display = function_dict[
                    bot_backend.function_name
                ](code_str)

                # add function call to conversion (output truncated to save tokens)
                bot_backend.add_function_call_response_message(function_response=text_to_gpt, save_tokens=True)

                add_function_response_to_bot_history(
                    content_to_display=content_to_display, history=history, unique_id=bot_backend.unique_id
                )

            except json.JSONDecodeError:
                history.append(
                    [None, f"GPT generate wrong function args: {bot_backend.function_args_str}"]
                )
                whether_exit = True
                return history, whether_exit

            except Exception as e:
                history.append([None, f'Backend error: {e}'])
                whether_exit = True
                return history, whether_exit

        # Keep finish_reason so the outer bot loop can decide whether to
        # run another completion round.
        bot_backend.reset_gpt_response_log_values(exclude=['finish_reason'])

        return history, whether_exit

    @staticmethod
    def get_code_str(bot_backend) -> str:
        """Recover the code text from the accumulated function arguments.

        :raises json.JSONDecodeError: when the arguments cannot be parsed.
        """
        if bot_backend.function_name == 'python':
            # Hallucinatory `python` call: arguments are already raw code.
            code_str = bot_backend.function_args_str
        else:
            code_str = parse_json(function_args=bot_backend.function_args_str, finished=True)
            if code_str is None:
                # BUGFIX: json.JSONDecodeError requires (msg, doc, pos); the
                # original bare `raise json.JSONDecodeError` raised a
                # TypeError instead, which fell into the generic
                # `except Exception` branch and was misreported as a backend
                # error rather than a wrong-function-args parse failure.
                raise json.JSONDecodeError(
                    'Cannot parse code from the function call arguments',
                    bot_backend.function_args_str, 0
                )
        return code_str
162
+
163
+
164
class ChoiceHandler:
    """Runs every applicable ChoiceStrategy against one streamed choice."""

    # Order matters: role/content/name/arguments arrive before finish_reason.
    strategies = [
        RoleChoiceStrategy, ContentChoiceStrategy, NameFunctionCallChoiceStrategy,
        ArgumentsFunctionCallChoiceStrategy, FinishReasonChoiceStrategy
    ]

    def __init__(self, choice):
        self.choice = choice

    def handle(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        for strategy_cls in self.strategies:
            strategy = strategy_cls(choice=self.choice)
            if strategy.support():
                history, whether_exit = strategy.execute(
                    bot_backend=bot_backend,
                    history=history,
                    whether_exit=whether_exit
                )
        return history, whether_exit
184
+
185
+
186
def parse_response(chunk, history, bot_backend: BotBackend):
    """
    Dispatch one streamed response chunk to the choice strategies.
    :return: history, whether_exit
    """
    whether_exit = False
    choices = chunk['choices']
    if choices:
        handler = ChoiceHandler(choice=choices[0])
        history, whether_exit = handler.handle(
            history=history,
            bot_backend=bot_backend,
            whether_exit=whether_exit
        )

    return history, whether_exit
web_ui.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from response_parser import *
2
+ import gradio as gr
3
+
4
+
5
def initialization(state_dict: Dict) -> None:
    """One-time per-session setup: cache dir plus a BotBackend instance.

    Also scrubs OPENAI_API_KEY from the environment once the backend (which
    has already captured the key via its config) has been created.
    """
    if not os.path.exists('cache'):
        os.mkdir('cache')
    if state_dict["bot_backend"] is None:
        state_dict["bot_backend"] = BotBackend()
        os.environ.pop('OPENAI_API_KEY', None)
12
+
13
+
14
def get_bot_backend(state_dict: Dict) -> BotBackend:
    """Fetch this browser session's BotBackend from the gradio state dict."""
    return state_dict["bot_backend"]
16
+
17
+
18
def switch_to_gpt4(state_dict: Dict, whether_switch: bool) -> None:
    """Toggle the session between GPT-4 and GPT-3.5."""
    model = "GPT-4" if whether_switch else "GPT-3.5"
    get_bot_backend(state_dict).update_gpt_model_choice(model)
24
+
25
+
26
def add_text(state_dict: Dict, history: List, text: str) -> Tuple[List, Dict]:
    """Append the user's message to both the conversation and the chat UI."""
    get_bot_backend(state_dict).add_text_message(user_text=text)

    history = history + [(text, None)]

    # Clear and lock the textbox until the bot has finished responding.
    return history, gr.update(value="", interactive=False)
33
+
34
+
35
def add_file(state_dict: Dict, history: List, file) -> List:
    """Copy an uploaded file into the kernel work dir and show it in chat.

    :param file: gradio upload object; `file.name` is the temp-file path
    """
    bot_backend = get_bot_backend(state_dict)
    path = file.name
    filename = os.path.basename(path)

    # BUGFIX: interpolate the uploaded file's name into the chat bubble —
    # `filename` was computed but never used, leaving a literal placeholder.
    bot_msg = [f'📁[{filename}]', None]
    history.append(bot_msg)

    bot_backend.add_file_message(path=path, bot_msg=bot_msg)

    return history
46
+
47
+
48
def undo_upload_file(state_dict: Dict, history: List) -> Tuple[List, Dict]:
    """Revoke the most recent upload; return history and undo-button state."""
    bot_backend = get_bot_backend(state_dict)
    bot_msg = bot_backend.revoke_file()

    if bot_msg is None:
        # Nothing left to revoke.
        return history, gr.Button.update(interactive=False)

    # The upload's bubble must still be the newest chat row.
    assert history[-1] == bot_msg
    del history[-1]
    # Keep the undo button enabled only while revocable uploads remain.
    return history, gr.Button.update(interactive=bool(bot_backend.revocable_files))
62
+
63
+
64
def refresh_file_display(state_dict: Dict) -> List[str]:
    """List the paths of every file currently in the session's work dir."""
    work_dir = get_bot_backend(state_dict).jupyter_work_dir
    return [os.path.join(work_dir, name) for name in os.listdir(work_dir)]
74
+
75
+
76
def restart_ui(history: List) -> Tuple[List, Dict, Dict, Dict, Dict]:
    """Clear the chat and lock every control while the backend restarts."""
    history.clear()
    # Lock the textbox and all three buttons; they are re-enabled by the
    # final step of the restart chain.
    return (
        history,
        gr.Textbox.update(value="", interactive=False),
        *(gr.Button.update(interactive=False) for _ in range(3))
    )
85
+
86
+
87
def restart_bot_backend(state_dict: Dict) -> None:
    """Reset conversation, uploaded files and Jupyter kernel for this session."""
    get_bot_backend(state_dict).restart()
90
+
91
+
92
def bot(state_dict: Dict, history: List) -> List:
    """Generator: stream model output into the chat history.

    Keeps running completion rounds while the backend requests one (fresh
    user input, or a function call whose output GPT should see), yielding the
    history after every parsed chunk so gradio re-renders incrementally.
    """
    bot_backend = get_bot_backend(state_dict)

    while bot_backend.finish_reason in ('new_input', 'function_call'):
        # Make sure the newest row has an empty bot slot to stream into.
        if history[-1][0] is None:
            history.append([None, ""])
        else:
            history[-1][1] = ""

        response = chat_completion(bot_backend=bot_backend)
        for chunk in response:
            history, should_exit = parse_response(
                chunk=chunk,
                history=history,
                bot_backend=bot_backend
            )
            yield history
            if should_exit:
                # Unrecoverable parse/backend error: tear the process down.
                exit(-1)

    yield history
115
+
116
+
117
if __name__ == '__main__':
    config = get_config()
    with gr.Blocks(theme=gr.themes.Base()) as block:
        """
        Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
        """
        # UI components
        # Per-browser-session state; the BotBackend is created lazily on load.
        state = gr.State(value={"bot_backend": None})
        with gr.Tab("Chat"):
            chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750)
            with gr.Row():
                with gr.Column(scale=0.85):
                    text_box = gr.Textbox(
                        show_label=False,
                        placeholder="Enter text and press enter, or upload a file",
                        container=False
                    )
                with gr.Column(scale=0.15, min_width=0):
                    file_upload_button = gr.UploadButton("📁", file_types=['file'])
            with gr.Row(equal_height=True):
                with gr.Column(scale=0.7):
                    # GPT-4 toggle is only interactive when config allows it.
                    check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available'])
                    check_box.change(fn=switch_to_gpt4, inputs=[state, check_box])
                with gr.Column(scale=0.15, min_width=0):
                    restart_button = gr.Button(value='🔄 Restart')
                with gr.Column(scale=0.15, min_width=0):
                    undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False)
        with gr.Tab("Files"):
            # Lists the files currently present in the kernel work dir.
            file_output = gr.Files()

        # Components function binding
        # Text submit: add the message, stream the bot reply, refresh the
        # file list, re-enable the textbox, and disable upload-undo (new
        # input invalidates pending undos in the backend).
        txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
            bot, [state, chatbot], chatbot
        )
        txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
        txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
        txt_msg.then(lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False)

        # File upload: show it in chat, let the bot react, enable undo,
        # refresh the file list.
        file_msg = file_upload_button.upload(
            add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
        ).then(
            bot, [state, chatbot], chatbot
        )
        file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False)
        file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])

        # Undo upload: revoke the last upload, then refresh the file list.
        undo_file_button.click(
            fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button]
        ).then(
            fn=refresh_file_display, inputs=[state], outputs=[file_output]
        )

        # Restart: lock the UI, reset the backend (conversation, files,
        # kernel), refresh the file list, then unlock the controls.
        restart_button.click(
            fn=restart_ui, inputs=[chatbot],
            outputs=[chatbot, text_box, restart_button, file_upload_button, undo_file_button]
        ).then(
            fn=restart_bot_backend, inputs=[state], queue=False
        ).then(
            fn=refresh_file_display, inputs=[state], outputs=[file_output]
        ).then(
            fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True),
                        gr.Button.update(interactive=True)),
            inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False
        )

        # Create the cache dir and this session's BotBackend on page load.
        block.load(fn=initialization, inputs=[state])

    block.queue()
    block.launch(inbrowser=True)