'''
Reference: https://github.com/shroominic/codeinterpreter-api
1. The uploaded file can be saved locally and then loaded back. Working.
2. Images can be read out of memory directly.
'''
# TODO: read the uploaded file purely in memory (see the read_uploaded_in_memory sketch after the imports).
from codeinterpreterapi import CodeInterpreterSession, File
import streamlit as st
import openai
import os
import matplotlib.pyplot as plt
import pandas as pd
from io import StringIO
import csv
import tempfile
from tempfile import NamedTemporaryFile
import pathlib
from pathlib import Path
import matplotlib
from matplotlib.font_manager import FontProperties
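
# --- Sketch: reading the uploaded file purely in memory (the TODO above). ---
# This helper is illustrative only and is not wired into the app below; it assumes the
# upload is a UTF-8 CSV. st.file_uploader returns a file-like UploadedFile, so pandas
# can parse it without ever writing to disk.
def read_uploaded_in_memory(uploaded):
    """Return a DataFrame from a Streamlit UploadedFile without creating a temp file."""
    from io import BytesIO
    raw_bytes = uploaded.getvalue()         # raw bytes, kept in memory
    return pd.read_csv(BytesIO(raw_bytes))  # pandas accepts any file-like object
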
| os.environ["OPENAI_API_KEY"] = os.environ['user_token'] | |
| openai.api_key = os.environ['user_token'] | |
| os.environ["VERBOSE"] = "True" # 可以看到具体的错误? | |
| # # #* 如果碰到接口问题,可以启用如下设置。 | |
| # openai.proxy = { | |
| # "http": "http://127.0.0.1:7890", | |
| # "https": "http://127.0.0.1:7890" | |
| # } | |
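# Alternative sketch (an assumption, not from the original code): the same proxy can be
# supplied through the standard environment variables instead of openai.proxy:
# os.environ["HTTP_PROXY"] = "http://127.0.0.1:7890"
# os.environ["HTTPS_PROXY"] = "http://127.0.0.1:7890"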

# st.title("ChatGPT-like clone")
st.title("大语言模型商业数据分析中心")
st.subheader("Business Data Analytics Based Upon LLM")

uploaded_file = st.file_uploader("Choose a file", type=["csv", "txt", "xlsx", "xls"])
# uploaded_file = st.file_uploader("选择一个文件", type=(["csv","txt","xlsx","xls"]))
# st.write(uploaded_file)

if uploaded_file is not None:
    # csv_file = csv.reader(uploaded_file)
    csv_file = pd.read_csv(uploaded_file)
    st.write(csv_file[:5])  ## Preview only; the file's absolute path is resolved below.
    uploaded_file_name = "File_provided"
    temp_dir = tempfile.TemporaryDirectory()
    uploaded_file_path = pathlib.Path(temp_dir.name) / uploaded_file_name  #! working.
    with open(uploaded_file_path, 'wb') as output_temporary_file:
        # output_temporary_file.write(uploaded_file.read())
        output_temporary_file.write(uploaded_file.getvalue())  #! getvalue() must be used to pull the raw bytes before they can be written into the temporary directory.
    # st.write(uploaded_file_path)  #* Uncomment to check that the file really exists at this path.
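    # Alternative sketch (not used by the app): NamedTemporaryFile, imported above, can
    # create the on-disk copy in one step. delete=False keeps the file around so the
    # interpreter session can still read it; suffix=".csv" is an assumption so the file
    # keeps a recognizable extension, and alternative_file_path is a hypothetical name.
    # with NamedTemporaryFile(suffix=".csv", delete=False) as tmp:
    #     tmp.write(uploaded_file.getvalue())
    #     alternative_file_path = tmp.name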

### How to read data inside Streamlit.
# # files = pd.read_csv(uploaded_file)
# bytes_data = uploaded_file.getvalue()
# # st.write(bytes_data)
# # To convert to a string based IO:
# stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
# # st.write(stringio)
# # To read file as string:
# string_data = stringio.read()
# # st.write(string_data)
# # Can be used wherever a "file-like" object is accepted:
# # dataframe = pd.read_csv(uploaded_file)
# files = pd.read_csv(uploaded_file, encoding='utf-8')

# openai.api_key = st.secrets["OPENAI_API_KEY"]
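# Sketch (an assumption, not how this app is configured): the key could instead live in
# Streamlit's secrets store, i.e. a .streamlit/secrets.toml file containing
#     OPENAI_API_KEY = "sk-..."
# which would then be read with st.secrets["OPENAI_API_KEY"], as the commented line above shows.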


async def main():
    if "openai_model" not in st.session_state:
        # st.session_state["openai_model"] = "gpt-3.5-turbo"
        st.session_state["openai_model"] = "gpt-4"  ## NOTE: the data analysis module must use GPT-4.

    if "messages" not in st.session_state:
        st.session_state.messages = []

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("What is up?"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            ### Original example: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
            # for response in openai.ChatCompletion.create(
            #     model=st.session_state["openai_model"],
            #     messages=[
            #         {"role": m["role"], "content": m["content"]}
            #         for m in st.session_state.messages
            #     ],
            #     stream=True,
            # ):
            #     full_response += response.choices[0].delta.get("content", "")
            #     message_placeholder.markdown(full_response + "▌")

            async with CodeInterpreterSession() as session:
                # user_request = "对于文件中的'SepalLengthCm’数据给我一个'直方图',提供图表,并给出分析结果"
                #! Setting dpi=300 produces high-quality charts (the default prompt below fixes the figure dpi at 300).
                # environ_settings = "【<默认要求> 如果我没有告诉你任何定制化的要求,那么请按照以下的默认要求来回答:1. 你需要用提问的语言来回答(即:如果我用中文提问,你就用中文来回答;我如果用英文提问吗,你就用英文来回答)。2. 如果要求你输出图表,那么图的解析度dpi需要设定为300。图尽量使用seaborn库。seaborn库的参数设定:sns.set(rc={'axes.facecolor':'#FFF9ED','figure.facecolor':'#FFF9ED'}, palette='deep')。】"
                environ_settings = """【背景要求】如果我没有告诉你任何定制化的要求,那么请按照以下的默认要求来回答:
-------------------------------------------------------------------------
1. 你需要用提问的语言来回答(如:中文提问你就用中文来回答,英文提问你就用英文来回答)。
2. 如果要求你输出图表,那么图的解析度dpi需要设定为300。图尽量使用seaborn库。seaborn库的参数设定:sns.set(rc={'axes.facecolor':'#FFF9ED','figure.facecolor':'#FFF9ED'}, palette='deep')。
3. 图表上的字体需要能显示中文,执行以下命令:
myfont=FontProperties(fname='./YaHei.ttf')
-------------------------------------------------------------------------
"""  ## The seaborn palette argument sets the chart colors; options include deep, muted, pastel, bright, dark, colorblind and Spectral. See https://seaborn.pydata.org/generated/seaborn.color_palette.html for more.
                myfont = FontProperties(fname='./YaHei.ttf')
                print('now font is:', myfont)
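
                # Sketch (an assumption, not part of the original flow): to render Chinese
                # labels in locally generated matplotlib figures, the font can be registered
                # globally instead of being passed call by call:
                # matplotlib.font_manager.fontManager.addfont('./YaHei.ttf')
                # matplotlib.rcParams['font.sans-serif'] = [FontProperties(fname='./YaHei.ttf').get_name()]
                # matplotlib.rcParams['axes.unicode_minus'] = False  # keep the minus sign readable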

                user_request = environ_settings + "\n\n" + "你需要完成以下任务:\n\n" + prompt
                # print('user_request: \n', user_request)

                ### Load the uploaded file; its path was prepared in the code above.
                files = [File.from_path(str(uploaded_file_path))]

                ### Generate the response.
                response = await session.generate_response(
                    user_request, files=files
                )

                # Output to the user.
                print("AI: ", response.content)
                full_response = response.content
                ### full_response = "this is full response"

                # for file in response.files:
                for i, file in enumerate(response.files):
                    # await file.asave(f"/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/output{i}.png")  ## working.
                    # st.image(file.get_image())  #! working.
                    # file.show_image()
                    # st.image(file.get_image(), width=500, output_format='png')
                    st.image(file.get_image(), width=None, output_format='PNG')  #* width=None keeps the native resolution, which preserves the detail of the chart.

                # message_placeholder.markdown(full_response + "▌")  ## original code.
                # message_placeholder.markdown(full_response)  ## original code.
                st.write(full_response)

                await session.astop()  #! Make sure the session is shut down.

        st.session_state.messages.append(
            {"role": "assistant", "content": full_response})


if __name__ == "__main__":
    import asyncio
    # * You can also launch this app from the command line: 'streamlit run frontend/app.py'
    asyncio.run(main())