Revert to last version with streaming
- app.py +15 -2
- climateqa/engine/rag.py +1 -1
app.py
CHANGED
@@ -148,7 +148,8 @@ async def chat(query,history,audience,sources,reports):
 
     reformulated_question_path_id = "/logs/flatten_dict/final_output"
     retriever_path_id = "/logs/Retriever/final_output"
-
+    streaming_output_path_id = "/logs/AzureChatOpenAI:2/streamed_output_str/-"
+    final_output_path_id = "/streamed_output/-"
 
     docs_html = ""
     output_query = ""
@@ -158,6 +159,7 @@ async def chat(query,history,audience,sources,reports):
     async for op in result:
 
         op = op.ops[0]
+        print(op)
 
         if op['path'] == reformulated_question_path_id: # reformulated question
             output_language = op['value']["language"] # str
@@ -175,12 +177,23 @@ async def chat(query,history,audience,sources,reports):
             print("op: ",op)
             continue
 
-        elif op['path'] ==
+        elif op['path'] == streaming_output_path_id: # final answer
             new_token = op['value'] # str
             time.sleep(0.03)
             answer_yet = history[-1][1] + new_token
             answer_yet = parse_output_llm_with_sources(answer_yet)
             history[-1] = (query,answer_yet)
+
+        # elif op['path'] == final_output_path_id:
+        #     final_output = op['value']
+
+        #     if "answer" in final_output:
+
+        #         final_output = final_output["answer"]
+        #         print(final_output)
+        #         answer = history[-1][1] + final_output
+        #         answer = parse_output_llm_with_sources(answer)
+        #         history[-1] = (query,answer)
 
         else:
             continue
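For context, `result` in the loop above is the stream returned by a LangChain runnable's astream_log, and each op is a JSONPatch-style entry with "op", "path" and "value" keys. A minimal sketch of consuming such a stream, assuming a chain like the one built by make_rag_chain (the chain argument, input key and printing are illustrative placeholders, not the app's real wiring):

# Minimal sketch (not the app's code): filter astream_log patches for
# streamed tokens. The path matches the one added in this commit.
async def stream_tokens(chain):
    streaming_output_path_id = "/logs/AzureChatOpenAI:2/streamed_output_str/-"

    # astream_log yields RunLogPatch objects; each wraps a list of
    # JSONPatch-style ops with "op", "path" and "value" keys.
    async for patch in chain.astream_log({"query": "Is sea level rising?"}):
        for op in patch.ops:
            if op["path"] == streaming_output_path_id:
                # each matching op carries one newly generated token
                print(op["value"], end="", flush=True)

Filtering on patch paths this way is what lets the chat handler update the history token by token instead of waiting for the chain's final output.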
climateqa/engine/rag.py
CHANGED
@@ -72,7 +72,7 @@ def make_rag_chain(retriever,llm):
 
     # ------- FINAL CHAIN
     # Build the final chain
-    rag_chain = reformulation | find_documents
+    rag_chain = reformulation | find_documents | answer_with_docs
 
     return rag_chain
 
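The | operators restored here are LangChain Expression Language (LCEL) composition: each stage is a Runnable whose output becomes the next stage's input, and the result is itself a Runnable. A minimal sketch of the same pattern with toy stages (the three RunnableLambda placeholders stand in for the real reformulation, find_documents and answer_with_docs, whose internals this diff does not show):

# Minimal sketch of the LCEL pipe pattern behind make_rag_chain;
# the stages are placeholders, not the project's real components.
from langchain_core.runnables import RunnableLambda

reformulation = RunnableLambda(lambda x: {"question": x["query"].strip()})
find_documents = RunnableLambda(lambda x: {**x, "docs": ["[doc A]", "[doc B]"]})
answer_with_docs = RunnableLambda(lambda x: {**x, "answer": f"Answer based on {len(x['docs'])} documents"})

# "|" chains Runnables left to right into a single sequence
rag_chain = reformulation | find_documents | answer_with_docs

print(rag_chain.invoke({"query": " Is sea level rising? "}))

Because the composed chain is itself a Runnable, it exposes the streaming interfaces (including astream_log) that the app.py changes above consume.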