Spaces — build error.
Commit: "Update actions/actions.py" (actions/actions.py: +34 −23, changed).
@@ -14,7 +14,7 @@ import sys
|
|
14 |
import openai
|
15 |
|
16 |
# Add "/app/actions" to the sys.path
|
17 |
-
actions_path = os.path.abspath("/
|
18 |
sys.path.insert(0, actions_path)
|
19 |
|
20 |
print("-#-System-path-#-")
|
@@ -32,43 +32,54 @@ openai.api_key = secret_value_0
|
|
32 |
# Provide your OpenAI API key
|
33 |
|
34 |
#model_engine="text-davinci-002"
|
35 |
-
def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tokens=
|
36 |
"""Generate a response using the OpenAI API."""
|
37 |
|
38 |
|
39 |
|
40 |
-
# Run the main function from search_content.py and store the results in a variable
|
41 |
-
#results = main_search(query)
|
42 |
-
#results = main_search(user_queries[-1])
|
43 |
-
|
44 |
-
# Create context from the results
|
45 |
-
#context = "".join([f"#{str(i)}" for i in results])[:2014] # Trim the context to 2014 characters - Modify as necessory
|
46 |
|
47 |
-
|
48 |
-
|
49 |
-
# Create the prompt template
|
50 |
-
#prompt_template = f"Using Relevant context:{context}\n\n and Previous User Query: {previous_user_query}\n\n Answer the next question in detail:{current_user_query}"
|
51 |
-
messages=[
|
52 |
-
{"role": "system", "content": f"You are tasked to find out the latest user intent and generate the question related to omdena"},
|
53 |
]
|
54 |
-
max_user_queries_to_include =
|
55 |
-
|
56 |
-
for i
|
57 |
if i<max_user_queries_to_include:
|
58 |
-
|
59 |
|
60 |
# Generate a response using the OpenAI API
|
61 |
-
|
62 |
model="gpt-3.5-turbo",
|
63 |
-
messages=
|
64 |
-
max_tokens=
|
65 |
-
temperature=
|
66 |
top_p=1,
|
67 |
frequency_penalty=0,
|
68 |
presence_penalty=0
|
69 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
|
71 |
-
return
|
72 |
|
73 |
class GetOpenAIResponse(Action):
|
74 |
|
|
|
14 |
import openai
|
15 |
|
16 |
# Add "/app/actions" to the sys.path
|
17 |
+
# Make the bundled action modules importable no matter which working
# directory the action server was launched from.
actions_path = os.path.abspath(
    "/workspaces/Omdena-faq-chatbot-RASA/RASA_OpenAI_actions_server/actions"
)
sys.path.insert(0, actions_path)

print("-#-System-path-#-")
|
|
|
32 |
# Provide your OpenAI API key
|
33 |
|
34 |
#model_engine="text-davinci-002"
|
35 |
+
def generate_openai_response(user_queries, model_engine="gpt-3.5-turbo", max_tokens=100, temperature=0.5):
    """Answer the latest user query with OpenAI, grounded in search results.

    Performs two ChatCompletion round-trips:
      1. condense the recent chat history into one standalone question;
      2. answer that question using context retrieved by ``main_search``.

    Args:
        user_queries: Chronological list of user messages; the newest is last.
        model_engine: OpenAI model name. NOTE(review): currently unused —
            both calls hardcode "gpt-3.5-turbo"; confirm intent before
            wiring it through.
        max_tokens: Currently unused (calls hardcode 20 and 254).
        temperature: Currently unused (calls hardcode 0).

    Returns:
        The assistant's answer text. Debug details are printed to the
        server log instead of being appended to the returned string.
    """
    # Step 1: ask the model to extract the user's real question, giving it
    # up to the three most recent user messages as context.
    messages_0 = [
        {"role": "system", "content": "Extract and return the question the user is trying to ask, use the previous questions as context only if necessory"}
    ]
    max_user_queries_to_include = min(3, len(user_queries))
    # The latest query is at the end of the list. Guard the empty-history
    # case explicitly: a [-0:] slice would return the entire list.
    recent_queries = user_queries[-max_user_queries_to_include:] if max_user_queries_to_include else []
    for query in recent_queries:
        messages_0.append({"role": "user", "content": query})

    response_0 = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages_0,
        max_tokens=20,
        temperature=0,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    extracted_query = response_0.choices[0]['message']['content'].strip()

    # Step 2: run the retrieval step from search_content.py and build the
    # grounding context from its results.
    results = main_search(extracted_query)
    # Trim the context to 2014 characters - Modify as necessory
    context = "".join(f"#{i}" for i in results)[:2014]

    messages_1 = [
        {"role": "system", "content": f"You are tasked to answer user queries using the following Relevant context: {context}"},
        {"role": "user", "content": extracted_query},
    ]
    response_1 = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages_1,
        max_tokens=254,
        temperature=0,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # FIX: the previous version appended the prompts and the raw user query
    # history onto the returned answer ("...Debug Info: ..."), leaking
    # internal state to end users. Log it instead and return only the answer.
    print("Debug Info: Openai-input_1:", messages_1,
          "Openai_input_0:", messages_0,
          "user_queries:", user_queries)
    return response_1.choices[0]['message']['content'].strip()
|
83 |
|
84 |
class GetOpenAIResponse(Action):
|
85 |
|