|
import os |
|
|
|
import numpy as np |
|
import pandas as pd |
|
import openai |
|
from haystack.schema import Document |
|
import streamlit as st |
|
from tenacity import retry, stop_after_attempt, wait_random_exponential |
|
|
|
|
|
|
|
# Fail fast at import time: raises KeyError if OPENAI_API_KEY is not set.
openai.api_key = os.environ["OPENAI_API_KEY"]

# Chat model used for all completion calls below.
model_select = "gpt-3.5-turbo-1106"
|
|
|
|
|
|
|
def get_prompt(context):
    """Build the summarization prompt sent to the chat model.

    Args:
        context: The text to summarize (document content as a string).

    Returns:
        str: Fixed bullet-point summarization instructions followed by the
        context and an ``Answer:`` cue for the model.
    """
    # Adjacent string literals inside parentheses are concatenated at compile
    # time -- safer than backslash line-continuations inside one literal,
    # which break as soon as a blank line or trailing space sneaks in.
    base_prompt = (
        "Summarize the following context efficiently in bullet points, the less the better. "
        "Summarize only activities that address the vulnerability of the given context to climate change. "
        "Formatting example: "
        "- Collect and utilize gender-disaggregated data to inform and improve climate change adaptation efforts. "
        "- Prioritize gender sensitivity in adaptation options, ensuring participation and benefits for women, who are more vulnerable to climate impacts. "
    )

    prompt = f"{base_prompt}; Context: {context}; Answer:"

    return prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
    """Call the OpenAI ChatCompletion endpoint, retrying on failure.

    Retries up to 6 attempts with randomized exponential backoff (1-60 s)
    to ride out rate limits and transient API errors. All keyword
    arguments are forwarded unchanged to ``openai.ChatCompletion.create``.
    """
    response = openai.ChatCompletion.create(**kwargs)
    return response
|
|
|
|
|
|
|
def run_query(df):
    """Summarize *df* with the chat model and stream the result to Streamlit.

    Sends the prompt built from ``df`` to the OpenAI API with ``stream=True``
    and incrementally re-renders the accumulated summary in a Streamlit
    placeholder as tokens arrive.

    Args:
        df: The context to summarize (passed straight to ``get_prompt``;
            presumably pre-joined document text -- TODO confirm against caller).

    Returns:
        None. Output is rendered via Streamlit as a side effect.
    """
    docs = df

    # NOTE: for a non-streamed completion, drop ``stream=True`` and read
    # ``response['choices'][0]['message']['content']`` instead of looping.
    response = completion_with_backoff(
        model=model_select,
        messages=[{"role": "user", "content": get_prompt(docs)}],
        stream=True,
    )

    report = []
    res_box = st.empty()
    for chunk in response:
        delta = chunk["choices"][0]["delta"]

        # Use consistent dict-style access: the original mixed
        # ``'content' in delta`` with ``delta.content`` attribute access.
        content = delta.get("content")
        if content is not None:
            report.append(content)

        # Re-render the full summary so far on every chunk (streaming effect).
        result = "".join(report).strip()

        res_box.success(result)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|