epalvarez committed
Commit 4c20ecd (verified)
Parent(s): 225d9ba

Original upload of app.py, requirements.txt, and the tesla_db (vector database) folder from a local computer folder (not directly from a Google Drive folder)

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tesla_db/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
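
The added rule ensures that tesla_db/chroma.sqlite3 (about 46 MB, per the LFS pointer below) is stored via Git LFS rather than as a regular file. For reference, a minimal sketch of how such a local-folder upload can be done programmatically with huggingface_hub (the local path and Space id are hypothetical; the web UI or a plain git push works just as well):

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a valid token is configured, e.g., via `huggingface-cli login`
api.upload_folder(
    folder_path="./local_project_folder",  # hypothetical: local folder holding app.py, requirements.txt, tesla_db/
    repo_id="epalvarez/tesla-10k-qna",     # hypothetical Space id
    repo_type="space",
)
```

Files matching the .gitattributes LFS rules are uploaded as LFS objects automatically.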
app.py ADDED
@@ -0,0 +1,181 @@
+ import os
+ import uuid
+ import json
+ from pathlib import Path
+
+ # ATTENTION: some versions in the requirements.txt file are more recent than the ones used in the notebook
+
+ # GUI components
+ import gradio as gr
+
+ # Accessing an external environment through an endpoint and a secret.
+ # The openai client library is used as a convenient wrapper to connect to the endpoint and send HTTP requests
+ # (the Python "requests" package could be used instead).
+ from openai import OpenAI, OpenAIError
+
+ # Embedding operations & vector DB access
+ from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
+ from langchain_community.vectorstores import Chroma
+
+ # HuggingFace platform (used here for scheduled log commits)
+ from huggingface_hub import CommitScheduler
+
+ # Set working directory (HuggingFace Space)
+ hf_space_dir = os.getcwd()
+
+ # The Anyscale model hosting platform is NOT USED in this app; the OpenAI hosting platform is used instead.
+ # client = OpenAI(
+ #     base_url="https://api.endpoints.anyscale.com/v1",
+ #     api_key=os.environ['ANYSCALE_API_KEY']
+ # )
+
+ #----------------------------------------------------------------------
+ # The OpenAI API key is stored as a "secret" of the HuggingFace Space, so app.py
+ # can read it from the environment variables, e.g.:
+ # openai_api_key = os.getenv("OpenAI_API_key_GL_Adv_Python_Project")
+ # or:
+ # openai_api_key = os.environ.get("OpenAI_API_key_GL_Adv_Python_Project")
+ # or, since os.environ is a dictionary of all environment variables, by indexing with the key:
+ openai_api_key = os.environ["OpenAI_API_key_GL_Adv_Python_Project"]
+
+ # You'll need an API key from OpenAI; you can obtain one by signing up on the OpenAI website and
+ # navigating to the API keys page in the dashboard. Reading the key from a secret keeps it out of the code.
+
+ # OpenAI models:
+
+ # model_name = "gpt-4o"  # Cost: Input: $5 / 1M tokens; Output: $15 / 1M tokens
+ # GPT-4o is OpenAI's most advanced multimodal model; it is faster and cheaper than GPT-4 Turbo and has stronger vision capabilities.
+ # The model has 128K context and an October 2023 knowledge cutoff.
+
+ model_name = "gpt-4o-mini"  # Cost: Input: $0.15 / 1M tokens; Output: $0.60 / 1M tokens
+ # GPT-4o mini is OpenAI's most cost-efficient small model; it is smarter and cheaper than GPT-3.5 Turbo and has vision capabilities.
+ # The model has 128K context and an October 2023 knowledge cutoff.
+
+ # Create an OpenAI client: setup for version 1.0.0 and above of the OpenAI Python library
+ client = OpenAI(
+     # api_key defaults to os.environ.get("OPENAI_API_KEY") and can be omitted if that variable is set
+     api_key=openai_api_key,
+ )
+ #---------------------------------------------------------------------
+
+ embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-small')
+
+ tesla_10k_collection = 'tesla-10k-2019-to-2023'
+
+ # Load the persisted vector database with the Chroma() constructor
+ vectorstore_persisted = Chroma(
+     collection_name=tesla_10k_collection,
+     persist_directory='./tesla_db',
+     embedding_function=embedding_model
+ )
+
+ retriever = vectorstore_persisted.as_retriever(
+     search_type='similarity',
+     search_kwargs={'k': 5}
+ )
+
+ # Prepare the logging functionality
+
+ log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
+ log_folder = log_file.parent
+
+ scheduler = CommitScheduler(
+     repo_id="document-qna-chroma-openai-logs",  # HuggingFace dataset that receives the JSON logs (old name: "document-qna-chroma-anyscale-logs")
+     repo_type="dataset",
+     folder_path=log_folder,
+     path_in_repo="data",
+     every=2  # push pending logs to the Hub every 2 minutes
+ )
+
+ qna_system_message = """
+ You are an assistant to a financial services firm who answers user queries on annual reports.
+ Users will ask questions delimited by triple backticks, that is, ```.
+ User input will have the context required by you to answer user questions.
+ This context will begin with the token: ###Context.
+ The context contains references to specific portions of a document relevant to the user query.
+ Please answer only using the context provided in the input. However, do not mention anything about the context in your answer.
+ If the answer is not found in the context, respond "I don't know".
+ """
+
+ qna_user_message_template = """
+ ###Context
+ Here are some documents that are relevant to the question.
+ {context}
+ ```
+ {question}
+ ```
+ """
+
+ # Define the predict function that runs when 'Submit' is clicked or when an API request is made
+ def predict(user_input):
+
+     relevant_document_chunks = retriever.invoke(user_input)
+     # relevant_document_chunks = retriever.get_relevant_documents(query=user_input)  # older, deprecated API
+     context_list = [doc_chunk.page_content for doc_chunk in relevant_document_chunks]
+     context_for_query = ". ".join(context_list)
+
+     prompt = [
+         {'role': 'system', 'content': qna_system_message},
+         {'role': 'user', 'content': qna_user_message_template.format(
+             context=context_for_query,
+             question=user_input
+             )
+         }
+     ]
+
+     try:
+         response = client.chat.completions.create(
+             model=model_name,  # previous model used: 'mlabonne/NeuralHermes-2.5-Mistral-7B'
+             messages=prompt,
+             temperature=0,  # 0 yields deterministic, fact-grounded answers; values > 0 (e.g., 0.7) encourage more creative output
+             # max_tokens=200  # optionally limit the number of tokens in the response
+         )
+
+         prediction = response.choices[0].message.content.strip()  # access response attributes directly
+
+     except Exception as e:
+         prediction = str(e)  # return the error message as text so it can be displayed and logged
+
+     # After the prediction is made, log both the inputs and outputs to a local log file that the
+     # CommitScheduler pushes to a HuggingFace dataset. Hold the scheduler's lock while writing to avoid
+     # parallel access, e.g., when users are entering queries while a commit operation is in progress.
+     # Note: the log file is in JSON Lines format.
+
+     with scheduler.lock:
+         with log_file.open("a") as f:
+             # json.dumps turns the dictionary into a JSON string with the keys 'user_input', 'retrieved_context', and 'model_response'
+             f.write(json.dumps(
+                 {
+                     'user_input': user_input,
+                     'retrieved_context': context_for_query,
+                     'model_response': prediction
+                 }
+             ))
+             f.write("\n")  # write a newline to prepare for the next observation to be logged
+
+     return prediction
+
+
+ textbox = gr.Textbox(placeholder="Enter your query here", lines=6)
+
+ # Create the interface
+ demo = gr.Interface(
+     inputs=textbox, fn=predict, outputs="text",
+     title="Ask Me Anything (AMA) on Tesla 10-K statements",
+     description="This web app presents an interface to ask questions on the contents of the Tesla 10-K reports for the period 2019 - 2023.",
+     article="Note that questions that are not relevant to the Tesla 10-K reports will not be answered.",
+     # Each example row must hold exactly one value per input component; the expected answer is kept as a comment.
+     examples=[["What was the total revenue of the company in 2022?"],  # expected answer: $81.46 billion
+               ["Summarize the Management Discussion and Analysis section of the 2021 report in 50 words."],
+               ["What was the company's debt level in 2020?"],
+               ["Identify 5 key risks identified in the 2019 10-K report? Respond with bullet point summaries."]
+              ],
+     concurrency_limit=16
+ )
+
+ demo.queue()
+ demo.launch()
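
Once the Space is running, the predict endpoint can also be called programmatically. A minimal sketch using gradio_client, assuming a public Space (the Space id is hypothetical):

```python
from gradio_client import Client

client = Client("epalvarez/tesla-10k-qna")  # hypothetical Space id
answer = client.predict(
    "What was the total revenue of the company in 2022?",
    api_name="/predict",
)
print(answer)
```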
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ openai==1.23.2
+ chromadb==0.4.22
+ langchain==0.1.9
+ langchain-community==0.0.32
+ sentence-transformers==2.3.1
tesla_db/3b5f5183-1f48-4873-ae05-297f75d85ed2/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9ff9f9ee0095bf3d2a87e98c76eb56a3f34fb13c041cd7fcfc43dc6b85cc4d3
+ size 12708000
tesla_db/3b5f5183-1f48-4873-ae05-297f75d85ed2/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0e8e145fcc03c24c5125dd89060c5951b0abcf71bb72771d6bab0efefa08aa0
+ size 100
tesla_db/3b5f5183-1f48-4873-ae05-297f75d85ed2/index_metadata.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:373446c85bb79dec763010e5b33a241e2b9cfacc8c1a119dca0fdc45adc0d1e7
+ size 172004
tesla_db/3b5f5183-1f48-4873-ae05-297f75d85ed2/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69f0dec43639a3ed312ef382157c5d9611ae2ee5a872375b6d5fd8d00622c750
+ size 12000
tesla_db/3b5f5183-1f48-4873-ae05-297f75d85ed2/link_lists.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2d324b759e78b46a63ae38998cec57587475ed8ea1c2a2e688b87a957072c21
+ size 25736
tesla_db/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06ed87d12e6ee1b0b1f1f81fdfa4576e32f095c86fb7a2a66d0200a7640da76a
+ size 46223360
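
The pointer files above are the Git LFS placeholders for the persisted Chroma index that app.py loads from ./tesla_db. The build step is not part of this commit; a minimal sketch of how such a persisted database could be created with the same embedding model (document_chunks is a hypothetical stand-in for the pre-split 10-K chunks):

```python
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document

embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-small')

# Hypothetical stand-in: in practice, load the 2019-2023 Tesla 10-K filings and split them into chunks.
document_chunks = [Document(page_content="Example 10-K excerpt ...", metadata={"year": 2022})]

vectorstore = Chroma.from_documents(
    documents=document_chunks,
    embedding=embedding_model,
    collection_name='tesla-10k-2019-to-2023',
    persist_directory='./tesla_db',  # writes chroma.sqlite3 plus the binary index files listed above
)
```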