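"""Backend state for an AI code-interpreter bot.

Defines the OpenAI function schema and system prompt, loads the API
configuration from config.json, and implements BotBackend, which keeps the
conversation history, uploaded files, and the Jupyter kernel used to run
model-generated code for a single session.
"""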
import json
import openai
import os
import copy
import shutil
from jupyter_backend import *
from typing import *
functions = [
    {
        "name": "execute_code",
        "description": "This function allows you to execute Python code and retrieve the terminal output. If the code "
                       "generates image output, the function will return the text '[image]'. The code is sent to a "
                       "Jupyter kernel for execution. The kernel will remain active after execution, retaining all "
                       "variables in memory.",
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": "The code text"
                }
            },
            "required": ["code"],
        }
    }
]
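# Illustrative example of the JSON arguments the model is expected to supply
# when it calls execute_code (a single required "code" string, per the schema
# above), e.g. {"code": "import math\nprint(math.sqrt(2))"}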
system_msg = '''You are an AI code interpreter.
Your goal is to help users do a variety of jobs by executing Python code.
You should:
1. Comprehend the user's requirements carefully & to the letter.
2. Give a brief description for what you plan to do & call the provided function to run code.
3. Provide results analysis based on the execution output.
4. If an error occurred, try to fix it.
Note: If the user uploads a file, you will receive a system message "User uploaded a file: filename". Use the filename as the path in the code. '''
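# Sketch of config.json as this module reads it (keys taken from the lookups
# in this file; values below are placeholders and the real file may hold more):
#   {
#       "API_TYPE": "open_ai",              # or "azure"
#       "API_base": "https://api.openai.com/v1",
#       "API_VERSION": null,
#       "API_KEY": "",                      # empty -> fall back to OPENAI_API_KEY env var
#       "model": {
#           "GPT-3.5": {"model_name": "gpt-3.5-turbo"}
#       }
#   }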
with open('config.json') as f:
    config = json.load(f)
    if not config['API_KEY']:
        config['API_KEY'] = os.getenv('OPENAI_API_KEY')
        os.unsetenv('OPENAI_API_KEY')

def get_config():
    return config

def config_openai_api(api_type, api_base, api_version, api_key):
    openai.api_type = api_type
    openai.api_base = api_base
    openai.api_version = api_version
    openai.api_key = api_key

class GPTResponseLog:
    """Accumulates the pieces of a streamed GPT response: the assistant role,
    text content, function-call name and raw argument string, plus the code
    block being displayed, the finish reason, and a snapshot of bot history."""

    def __init__(self):
        self.assistant_role_name = ''
        self.content = ''
        self.function_name = None
        self.function_args_str = ''
        self.display_code_block = ''
        self.finish_reason = 'stop'
        self.bot_history = None

    def reset_gpt_response_log_values(self, exclude=None):
        if exclude is None:
            exclude = []
        attributes = {'assistant_role_name': '',
                      'content': '',
                      'function_name': None,
                      'function_args_str': '',
                      'display_code_block': '',
                      'finish_reason': 'stop',
                      'bot_history': None}
        for attr_name in exclude:
            del attributes[attr_name]
        for attr_name, value in attributes.items():
            setattr(self, attr_name, value)

    def set_assistant_role_name(self, assistant_role_name: str):
        self.assistant_role_name = assistant_role_name

    def add_content(self, content: str):
        self.content += content

    def set_function_name(self, function_name: str):
        self.function_name = function_name

    def copy_current_bot_history(self, bot_history: List):
        self.bot_history = copy.deepcopy(bot_history)

    def add_function_args_str(self, function_args_str: str):
        self.function_args_str += function_args_str

    def update_display_code_block(self, display_code_block):
        self.display_code_block = display_code_block

    def update_finish_reason(self, finish_reason: str):
        self.finish_reason = finish_reason

class BotBackend(GPTResponseLog):
    """Per-session backend: owns the conversation history, the OpenAI API
    configuration and chat-completion kwargs, uploaded (revocable) files,
    and the Jupyter kernel that executes model-generated code."""

    def __init__(self):
        super().__init__()
        self.unique_id = hash(id(self))
        self.jupyter_work_dir = f'cache/work_dir_{self.unique_id}'
        self.jupyter_kernel = JupyterKernel(work_dir=self.jupyter_work_dir)
        self.gpt_model_choice = "GPT-3.5"
        self.revocable_files = []
        self._init_conversation()
        self._init_api_config()
        self._init_kwargs_for_chat_completion()

    def _init_conversation(self):
        first_system_msg = {'role': 'system', 'content': system_msg}
        if hasattr(self, 'conversation'):
            self.conversation.clear()
            self.conversation.append(first_system_msg)
        else:
            self.conversation: List[Dict] = [first_system_msg]

    def _init_api_config(self):
        self.config = get_config()
        api_type = self.config['API_TYPE']
        api_base = self.config['API_base']
        api_version = self.config['API_VERSION']
        api_key = self.config['API_KEY']
        config_openai_api(api_type, api_base, api_version, api_key)
    def _init_kwargs_for_chat_completion(self):
        self.kwargs_for_chat_completion = {
            'stream': True,
            'messages': self.conversation,
            'functions': functions,
            'function_call': 'auto'
        }

        model_name = self.config['model'][self.gpt_model_choice]['model_name']

        if self.config['API_TYPE'] == 'azure':
            self.kwargs_for_chat_completion['engine'] = model_name
        else:
            self.kwargs_for_chat_completion['model'] = model_name
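    # The kwargs assembled above are presumably unpacked by the caller into the
    # legacy (openai < 1.0) streaming API, roughly:
    #   response = openai.ChatCompletion.create(**bot_backend.kwargs_for_chat_completion)
    #   for chunk in response:
    #       delta = chunk['choices'][0]['delta']   # incremental content / function_call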
    def _clear_all_files_in_work_dir(self):
        for filename in os.listdir(self.jupyter_work_dir):
            os.remove(
                os.path.join(self.jupyter_work_dir, filename)
            )
    def add_gpt_response_content_message(self):
        self.conversation.append(
            {'role': self.assistant_role_name, 'content': self.content}
        )

    def add_text_message(self, user_text):
        self.conversation.append(
            {'role': 'user', 'content': user_text}
        )
        self.revocable_files.clear()
        self.update_finish_reason(finish_reason='new_input')

    def add_file_message(self, path, bot_msg):
        filename = os.path.basename(path)
        work_dir = self.jupyter_work_dir
        shutil.copy(path, work_dir)

        gpt_msg = {'role': 'system', 'content': f'User uploaded a file: {filename}'}
        self.conversation.append(gpt_msg)
        self.revocable_files.append(
            {
                'bot_msg': bot_msg,
                'gpt_msg': gpt_msg,
                'path': os.path.join(work_dir, filename)
            }
        )
    def add_function_call_response_message(self, function_response: str, save_tokens=True):
        # Record the assistant's function call (name and raw JSON argument string).
        self.conversation.append(
            {
                "role": self.assistant_role_name,
                "name": self.function_name,
                "content": self.function_args_str
            }
        )

        # Keep token usage down: for long outputs, keep only the first and last
        # 200 characters and note that the middle was omitted.
        if save_tokens and len(function_response) > 500:
            function_response = f'{function_response[:200]}\n[Output too much, the middle part output is omitted]\n ' \
                                f'End part of output:\n{function_response[-200:]}'
        self.conversation.append(
            {
                "role": "function",
                "name": self.function_name,
                "content": function_response,
            }
        )
    def revoke_file(self):
        if self.revocable_files:
            file = self.revocable_files[-1]
            bot_msg = file['bot_msg']
            gpt_msg = file['gpt_msg']
            path = file['path']

            assert self.conversation[-1] is gpt_msg
            del self.conversation[-1]

            os.remove(path)
            del self.revocable_files[-1]

            return bot_msg
        else:
            return None

    def update_gpt_model_choice(self, model_choice):
        self.gpt_model_choice = model_choice
        self._init_kwargs_for_chat_completion()

    def restart(self):
        self._clear_all_files_in_work_dir()
        self.revocable_files.clear()
        self._init_conversation()
        self.reset_gpt_response_log_values()
        self.jupyter_kernel.restart_jupyter_kernel()
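

if __name__ == '__main__':
    # Minimal usage sketch (manual smoke test): assumes a valid config.json and
    # a working JupyterKernel provided by jupyter_backend.
    backend = BotBackend()
    backend.add_text_message('Compute 2 ** 10 with Python.')
    print(backend.kwargs_for_chat_completion['messages'][-1])
    backend.restart()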