Kevin Hu committed
Commit 93e3725 · 1 Parent(s): 0fa6297

Add support to iframe chatbot (#3929)

### What problem does this PR solve?

#3909

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
- agent/canvas.py +11 -0
- agent/component/base.py +2 -5
- agent/component/begin.py +1 -0
- agent/templates/customer_service.json +3 -3
- api/apps/conversation_app.py +3 -1
- api/apps/sdk/session.py +68 -248
- api/apps/system_app.py +1 -0
- api/db/db_models.py +7 -0
- api/db/services/canvas_service.py +118 -0
- api/db/services/conversation_service.py +221 -0
- api/db/services/dialog_service.py +1 -22
- api/db/services/document_service.py +2 -1
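
Before the per-file changes, here is a rough sketch of how the new iframe chatbot route introduced by this PR is meant to be called. The base URL, the `/api/v1` prefix, and the `BETA_TOKEN` value are assumptions for illustration (the token comes from the `beta` field added to `/system/new_token` below); only the route path, the `Authorization` header lookup, and the SSE framing come from the diff itself.

```python
# Hypothetical client for the new /chatbots/<dialog_id>/completions endpoint (sketch only).
import json
import requests

BASE_URL = "http://localhost:9380/api/v1"        # assumed deployment URL/prefix
BETA_TOKEN = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # 32-char `beta` value, not the regular API key
DIALOG_ID = "your-dialog-id"                     # placeholder

resp = requests.post(
    f"{BASE_URL}/chatbots/{DIALOG_ID}/completions",
    headers={"Authorization": f"Bearer {BETA_TOKEN}"},
    json={"question": "What is RAGFlow?", "stream": True},
    stream=True,
)

for line in resp.iter_lines(decode_unicode=True):
    # The server yields SSE frames of the form "data:{...json...}"
    if not line or not line.startswith("data:"):
        continue
    payload = json.loads(line[len("data:"):])
    if payload.get("data") is True:              # end-of-stream marker
        break
    print(payload["data"].get("answer", ""))
```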
agent/canvas.py
CHANGED
@@ -21,6 +21,7 @@ from functools import partial
 from agent.component import component_class
 from agent.component.base import ComponentBase
 
+
 class Canvas(ABC):
     """
     dsl = {
@@ -320,3 +321,13 @@ class Canvas(ABC):
 
     def get_prologue(self):
        return self.components["begin"]["obj"]._param.prologue
+
+    def set_global_param(self, **kwargs):
+        for k, v in kwargs.items():
+            for q in self.components["begin"]["obj"]._param.query:
+                if k != q["key"]:
+                    continue
+                q["value"] = v
+
+    def get_preset_param(self):
+        return self.components["begin"]["obj"]._param.query
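
The two new Canvas helpers above expose the `begin` component's `query` parameters so an embedding page can inspect and pre-fill them before the first turn. A minimal usage sketch, assuming `dsl_json` is an agent DSL string whose begin component declares a `customer_name` parameter (both the DSL and the key are hypothetical):

```python
from agent.canvas import Canvas

canvas = Canvas(dsl_json, tenant_id)   # dsl_json and tenant_id assumed to exist

# Inspect the preset parameters the agent expects.
for param in canvas.get_preset_param():
    print(param["key"], param.get("value"))

# Fill a matching parameter by key; keys that don't match are silently skipped.
canvas.set_global_param(customer_name="Alice")
```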
agent/component/base.py
CHANGED
@@ -383,9 +383,6 @@ class ComponentBase(ABC):
         "params": {}
         }
         """
-        out = json.loads(str(self._param)).get("output", {})
-        if isinstance(out, dict) and "vector" in out:
-            del out["vector"]
         return """{{
         "component_name": "{}",
         "params": {},
@@ -393,7 +390,7 @@
         "inputs": {}
         }}""".format(self.component_name,
                      self._param,
-                     json.dumps(out, ensure_ascii=False),
+                     json.dumps(json.loads(str(self._param)).get("output", {}), ensure_ascii=False),
                      json.dumps(json.loads(str(self._param)).get("inputs", []), ensure_ascii=False)
                      )
@@ -462,7 +459,7 @@
         self._param.inputs = []
         outs = []
         for q in self._param.query:
-            if q["component_id"]:
+            if q.get("component_id"):
                 if q["component_id"].split("@")[0].lower().find("begin") >= 0:
                     cpn_id, key = q["component_id"].split("@")
                     for p in self._canvas.get_component(cpn_id)["obj"]._param.query:
agent/component/begin.py
CHANGED
@@ -26,6 +26,7 @@ class BeginParam(ComponentParamBase):
     def __init__(self):
         super().__init__()
         self.prologue = "Hi! I'm your smart assistant. What can I do for you?"
+        self.query = []
 
     def check(self):
         return True
agent/templates/customer_service.json
CHANGED
@@ -336,7 +336,7 @@
           "parameters": [],
           "presencePenaltyEnabled": true,
           "presence_penalty": 0.4,
-          "prompt": "Role: You are a customer support. \n\nTask: Please answer the question based on content of knowledge base. \n\
+          "prompt": "Role: You are a customer support. \n\nTask: Please answer the question based on content of knowledge base. \n\nReuirements & restrictions:\n - DO NOT make things up when all knowledge base content is irrelevant to the question. \n - Answers need to consider chat history.\n - Request about customer's contact information like, Wechat number, LINE number, twitter, discord, etc,. , when knowlegebase content can't answer his question. So, product expert could contact him soon to solve his problem.\n\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
           "temperature": 0.1,
           "temperatureEnabled": true,
           "topPEnabled": true,
@@ -603,7 +603,7 @@
     {
       "data": {
         "form": {
-          "text": "Static messages.\nDefine
+          "text": "Static messages.\nDefine replys after recieve user's contact information."
         },
         "label": "Note",
         "name": "N: What else?"
@@ -691,7 +691,7 @@
     {
       "data": {
        "form": {
-          "text": "Complete questions by conversation history.\nUser: What's RAGFlow?\nAssistant: RAGFlow is xxx.\nUser: How to
+          "text": "Complete questions by conversation history.\nUser: What's RAGFlow?\nAssistant: RAGFlow is xxx.\nUser: How to deloy it?\n\nRefine it: How to deploy RAGFlow?"
         },
         "label": "Note",
         "name": "N: Refine Question"
api/apps/conversation_app.py
CHANGED
@@ -17,12 +17,14 @@ import json
 import re
 import traceback
 from copy import deepcopy
+
+from api.db.services.conversation_service import ConversationService
 from api.db.services.user_service import UserTenantService
 from flask import request, Response
 from flask_login import login_required, current_user
 
 from api.db import LLMType
-from api.db.services.dialog_service import DialogService, ConversationService, chat, ask
+from api.db.services.dialog_service import DialogService, chat, ask
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMBundle, TenantService, TenantLLMService
 from api import settings
api/apps/sdk/session.py
CHANGED
@@ -15,17 +15,19 @@
 #
 import re
 import json
-from copy import deepcopy
-from uuid import uuid4
 from api.db import LLMType
 from flask import request, Response
+
+from api.db.services.conversation_service import ConversationService, iframe_completion
+from api.db.services.conversation_service import completion as rag_completion
+from api.db.services.canvas_service import completion as agent_completion
 from api.db.services.dialog_service import ask
 from agent.canvas import Canvas
 from api.db import StatusEnum
-from api.db.db_models import API4Conversation
+from api.db.db_models import APIToken
 from api.db.services.api_service import API4ConversationService
 from api.db.services.canvas_service import UserCanvasService
-from api.db.services.dialog_service import DialogService
+from api.db.services.dialog_service import DialogService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.utils import get_uuid
 from api.utils.api_utils import get_error_data_result
@@ -66,8 +68,6 @@ def create_agent_session(tenant_id, agent_id):
     e, cvs = UserCanvasService.get_by_id(agent_id)
     if not e:
         return get_error_data_result("Agent not found.")
-    if cvs.user_id != tenant_id:
-        return get_error_data_result(message="You do not own the agent.")
 
     if not isinstance(cvs.dsl, str):
         cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
@@ -110,98 +110,10 @@ def update(tenant_id, chat_id, session_id):
 
 @manager.route('/chats/<chat_id>/completions', methods=['POST'])  # noqa: F821
 @token_required
-def completion(tenant_id, chat_id):
-    dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
-    if not dia:
-        return get_error_data_result(message="You do not own the chat")
+def chat_completion(tenant_id, chat_id):
     req = request.json
-    if not req.get("session_id"):
-        conv = {
-            "id": get_uuid(),
-            "dialog_id": chat_id,
-            "name": req.get("name", "New session"),
-            "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
-        }
-        if not conv.get("name"):
-            return get_error_data_result(message="`name` can not be empty.")
-        ConversationService.save(**conv)
-        e, conv = ConversationService.get_by_id(conv["id"])
-        session_id = conv.id
-    else:
-        session_id = req.get("session_id")
-    if not req.get("question"):
-        return get_error_data_result(message="Please input your question.")
-    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
-    if not conv:
-        return get_error_data_result(message="Session does not exist")
-    conv = conv[0]
-    msg = []
-    question = {
-        "content": req.get("question"),
-        "role": "user",
-        "id": str(uuid4())
-    }
-    conv.message.append(question)
-    for m in conv.message:
-        if m["role"] == "system":
-            continue
-        if m["role"] == "assistant" and not msg:
-            continue
-        msg.append(m)
-    message_id = msg[-1].get("id")
-    e, dia = DialogService.get_by_id(conv.dialog_id)
-
-    if not conv.reference:
-        conv.reference = []
-    conv.message.append({"role": "assistant", "content": "", "id": message_id})
-    conv.reference.append({"chunks": [], "doc_aggs": []})
-
-    def fillin_conv(ans):
-        reference = ans["reference"]
-        temp_reference = deepcopy(ans["reference"])
-        nonlocal conv, message_id
-        if not conv.reference:
-            conv.reference.append(temp_reference)
-        else:
-            conv.reference[-1] = temp_reference
-        conv.message[-1] = {"role": "assistant", "content": ans["answer"],
-                            "id": message_id, "prompt": ans.get("prompt", "")}
-        if "chunks" in reference:
-            chunks = reference.get("chunks")
-            chunk_list = []
-            for chunk in chunks:
-                new_chunk = {
-                    "id": chunk["chunk_id"],
-                    "content": chunk["content_with_weight"],
-                    "document_id": chunk["doc_id"],
-                    "document_name": chunk["docnm_kwd"],
-                    "dataset_id": chunk["kb_id"],
-                    "image_id": chunk.get("image_id", ""),
-                    "similarity": chunk["similarity"],
-                    "vector_similarity": chunk["vector_similarity"],
-                    "term_similarity": chunk["term_similarity"],
-                    "positions": chunk.get("positions", []),
-                }
-                chunk_list.append(new_chunk)
-            reference["chunks"] = chunk_list
-        ans["id"] = message_id
-        ans["session_id"] = session_id
-
-    def stream():
-        nonlocal dia, msg, req, conv
-        try:
-            for ans in chat(dia, msg, **req):
-                fillin_conv(ans)
-                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
-            ConversationService.update_by_id(conv.id, conv.to_dict())
-        except Exception as e:
-            yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                       ensure_ascii=False) + "\n\n"
-        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
-
     if req.get("stream", True):
-        resp = Response(stream(), mimetype="text/event-stream")
+        resp = Response(rag_completion(tenant_id, chat_id, **req), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
         resp.headers.add_header("Connection", "keep-alive")
         resp.headers.add_header("X-Accel-Buffering", "no")
@@ -211,172 +123,26 @@ def completion(tenant_id, chat_id):
 
     else:
         answer = None
-        for ans in chat(dia, msg, **req):
+        for ans in rag_completion(tenant_id, chat_id, **req):
             answer = ans
-            fillin_conv(ans)
-            ConversationService.update_by_id(conv.id, conv.to_dict())
             break
         return get_result(data=answer)
 
 
 @manager.route('/agents/<agent_id>/completions', methods=['POST'])  # noqa: F821
 @token_required
-def agent_completion(tenant_id, agent_id):
+def agent_completions(tenant_id, agent_id):
     req = request.json
-
-    e, cvs = UserCanvasService.get_by_id(agent_id)
-    if not e:
-        return get_error_data_result("Agent not found.")
-    if cvs.user_id != tenant_id:
-        return get_error_data_result(message="You do not own the agent.")
-    if not isinstance(cvs.dsl, str):
-        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
-    canvas = Canvas(cvs.dsl, tenant_id)
-
-    if not req.get("session_id"):
-        session_id = get_uuid()
-        conv = {
-            "id": session_id,
-            "dialog_id": cvs.id,
-            "user_id": req.get("user_id", ""),
-            "message": [{"role": "assistant", "content": canvas.get_prologue()}],
-            "source": "agent",
-            "dsl": json.loads(cvs.dsl)
-        }
-        API4ConversationService.save(**conv)
-        conv = API4Conversation(**conv)
-    else:
-        session_id = req.get("session_id")
-        e, conv = API4ConversationService.get_by_id(req["session_id"])
-        if not e:
-            return get_error_data_result(message="Session not found!")
-        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
-
-    messages = conv.message
-    question = req.get("question")
-    if not question:
-        return get_error_data_result("`question` is required.")
-    question = {
-        "role": "user",
-        "content": question,
-        "id": str(uuid4())
-    }
-    messages.append(question)
-    msg = []
-    for m in messages:
-        if m["role"] == "system":
-            continue
-        if m["role"] == "assistant" and not msg:
-            continue
-        msg.append(m)
-    if not msg[-1].get("id"):
-        msg[-1]["id"] = get_uuid()
-    message_id = msg[-1]["id"]
-
-    stream = req.get("stream", True)
-
-    def fillin_conv(ans):
-        reference = ans["reference"]
-        temp_reference = deepcopy(ans["reference"])
-        nonlocal conv, message_id
-        if not conv.reference:
-            conv.reference.append(temp_reference)
-        else:
-            conv.reference[-1] = temp_reference
-        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
-        if "chunks" in reference:
-            chunks = reference.get("chunks")
-            chunk_list = []
-            for chunk in chunks:
-                new_chunk = {
-                    "id": chunk["chunk_id"],
-                    "content": chunk["content"],
-                    "document_id": chunk["doc_id"],
-                    "document_name": chunk["docnm_kwd"],
-                    "dataset_id": chunk["kb_id"],
-                    "image_id": chunk["image_id"],
-                    "similarity": chunk["similarity"],
-                    "vector_similarity": chunk["vector_similarity"],
-                    "term_similarity": chunk["term_similarity"],
-                    "positions": chunk["positions"],
-                }
-                chunk_list.append(new_chunk)
-            reference["chunks"] = chunk_list
-        ans["id"] = message_id
-        ans["session_id"] = session_id
-
-    def rename_field(ans):
-        reference = ans['reference']
-        if not isinstance(reference, dict):
-            return
-        for chunk_i in reference.get('chunks', []):
-            if 'docnm_kwd' in chunk_i:
-                chunk_i['doc_name'] = chunk_i['docnm_kwd']
-                chunk_i.pop('docnm_kwd')
-
-    if not conv.reference:
-        conv.reference = []
-    conv.message.append({"role": "assistant", "content": "", "id": message_id})
-    conv.reference.append({"chunks": [], "doc_aggs": []})
-
-    final_ans = {"reference": [], "content": ""}
-
-    canvas.add_user_input(msg[-1]["content"])
-
-    if stream:
-        def sse():
-            nonlocal answer, cvs
-            try:
-                for ans in canvas.run(stream=stream):
-                    if ans.get("running_status"):
-                        yield "data:" + json.dumps({"code": 0, "message": "",
-                                                    "data": {"answer": ans["content"],
-                                                             "running_status": True}},
-                                                   ensure_ascii=False) + "\n\n"
-                        continue
-                    for k in ans.keys():
-                        final_ans[k] = ans[k]
-                    ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
-                    fillin_conv(ans)
-                    rename_field(ans)
-                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
-                                               ensure_ascii=False) + "\n\n"
-
-                canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
-                canvas.history.append(("assistant", final_ans["content"]))
-                if final_ans.get("reference"):
-                    canvas.reference.append(final_ans["reference"])
-                conv.dsl = json.loads(str(canvas))
-                API4ConversationService.append_message(conv.id, conv.to_dict())
-            except Exception as e:
-                conv.dsl = json.loads(str(canvas))
-                API4ConversationService.append_message(conv.id, conv.to_dict())
-                yield "data:" + json.dumps({"code": 500, "message": str(e),
-                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
-                                           ensure_ascii=False) + "\n\n"
-            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
-
-        resp = Response(sse(), mimetype="text/event-stream")
+    if req.get("stream", True):
+        resp = Response(agent_completion(tenant_id, agent_id, **req), mimetype="text/event-stream")
         resp.headers.add_header("Cache-control", "no-cache")
         resp.headers.add_header("Connection", "keep-alive")
         resp.headers.add_header("X-Accel-Buffering", "no")
         resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
         return resp
 
-    for answer in canvas.run(stream=False):
-        if answer.get("running_status"):
-            continue
-        final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
-        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
-        if final_ans.get("reference"):
-            canvas.reference.append(final_ans["reference"])
-        conv.dsl = json.loads(str(canvas))
-
-        result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
-        fillin_conv(result)
-        API4ConversationService.append_message(conv.id, conv.to_dict())
-        rename_field(result)
-        return get_result(data=result)
+    for answer in agent_completion(tenant_id, agent_id, **req):
+        return get_result(data=answer)
 
 
 @manager.route('/chats/<chat_id>/sessions', methods=['GET'])  # noqa: F821
@@ -590,3 +356,57 @@ Keywords: {question}
 Related search terms:
     """}], {"temperature": 0.9})
     return get_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
+
+
+@manager.route('/chatbots/<dialog_id>/completions', methods=['POST'])  # noqa: F821
+def chatbot_completions(dialog_id):
+    req = request.json
+
+    token = request.headers.get('Authorization').split()
+    if len(token) != 2:
+        return get_error_data_result(message='Authorization is not valid!"')
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_error_data_result(message='Token is not valid!"')
+
+    if "quote" not in req:
+        req["quote"] = False
+
+    if req.get("stream", True):
+        resp = Response(iframe_completion(objs[0].tenant_id, dialog_id, **req), mimetype="text/event-stream")
+        resp.headers.add_header("Cache-control", "no-cache")
+        resp.headers.add_header("Connection", "keep-alive")
+        resp.headers.add_header("X-Accel-Buffering", "no")
+        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
+        return resp
+
+    for answer in agent_completion(objs[0].tenant_id, dialog_id, **req):
+        return get_result(data=answer)
+
+
+@manager.route('/agentbots/<agent_id>/completions', methods=['POST'])  # noqa: F821
+def agent_bot_completions(agent_id):
+    req = request.json
+
+    token = request.headers.get('Authorization').split()
+    if len(token) != 2:
+        return get_error_data_result(message='Authorization is not valid!"')
+    token = token[1]
+    objs = APIToken.query(beta=token)
+    if not objs:
+        return get_error_data_result(message='Token is not valid!"')
+
+    if "quote" not in req:
+        req["quote"] = False
+
+    if req.get("stream", True):
+        resp = Response(agent_completion(objs[0].tenant_id, agent_id, **req), mimetype="text/event-stream")
+        resp.headers.add_header("Cache-control", "no-cache")
+        resp.headers.add_header("Connection", "keep-alive")
+        resp.headers.add_header("X-Accel-Buffering", "no")
+        resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
+        return resp
+
+    for answer in agent_completion(objs[0].tenant_id, agent_id, **req):
+        return get_result(data=answer)
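
After this refactor the `@token_required` routes simply wrap the shared service-layer generators, so a non-streaming call returns the first yielded answer. A hedged sketch of calling `/agents/<agent_id>/completions` with the regular API key (base URL, `/api/v1` prefix and key value are assumptions; the very first call, which creates the session, may return SSE-style frames instead of a plain dict, so inspect the shape of `data`):

```python
import requests

BASE_URL = "http://localhost:9380/api/v1"  # assumed deployment URL/prefix
API_KEY = "ragflow-..."                    # regular API token, not the beta token
AGENT_ID = "your-agent-id"                 # placeholder

r = requests.post(
    f"{BASE_URL}/agents/{AGENT_ID}/completions",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={"question": "Hello", "stream": False, "session_id": "existing-session-id"},
)
print(r.json())   # {"code": 0, "data": {"answer": ..., "reference": ..., ...}} on success
```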
api/apps/system_app.py
CHANGED
@@ -205,6 +205,7 @@ def new_token():
     obj = {
         "tenant_id": tenant_id,
         "token": generate_confirmation_token(tenant_id),
+        "beta": generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32],
         "create_time": current_timestamp(),
         "create_date": datetime_format(datetime.now()),
         "update_time": None,
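
The `beta` value added here is what the new `/chatbots/...` and `/agentbots/...` routes look up via `APIToken.query(beta=token)`: a 32-character string derived from the confirmation token with the `ragflow-` prefix stripped, safe to embed in an iframe instead of the full API key. A sketch of how a logged-in client might fetch it (the endpoint path and response envelope are assumptions based on `new_token()`):

```python
import requests

BASE_URL = "http://localhost:9380"   # assumed
session = requests.Session()         # assumed to already carry a logged-in web session

record = session.post(f"{BASE_URL}/v1/system/new_token").json()["data"]
api_key = record["token"]   # full API key, for @token_required routes
beta = record["beta"]       # 32-char value, for the embedded chatbot/agentbot routes
```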
api/db/db_models.py
CHANGED
@@ -934,6 +934,7 @@ class APIToken(DataBaseModel):
     token = CharField(max_length=255, null=False, index=True)
     dialog_id = CharField(max_length=32, null=False, index=True)
     source = CharField(max_length=16, null=True, help_text="none|agent|dialog", index=True)
+    beta = CharField(max_length=255, null=True, index=True)
 
     class Meta:
         db_table = "api_token"
@@ -1083,4 +1084,10 @@ def migrate_db():
         )
     except Exception:
         pass
+    try:
+        migrate(
+            migrator.add_column("api_token", "beta", CharField(max_length=255, null=True, index=True))
+        )
+    except Exception:
+        pass
 
api/db/services/canvas_service.py
CHANGED
@@ -13,8 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+import json
+from uuid import uuid4
+from agent.canvas import Canvas
 from api.db.db_models import DB, CanvasTemplate, UserCanvas
+from api.db.services.api_service import API4ConversationService
 from api.db.services.common_service import CommonService
+from api.db.services.conversation_service import structure_answer
+from api.utils import get_uuid
 
 
 class CanvasTemplateService(CommonService):
@@ -42,3 +48,115 @@ class UserCanvasService(CommonService):
         agents = agents.paginate(page_number, items_per_page)
 
         return list(agents.dicts())
+
+
+def completion(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
+    e, cvs = UserCanvasService.get_by_id(agent_id)
+    assert e, "Agent not found."
+    assert cvs.user_id == tenant_id, "You do not own the agent."
+
+    if not isinstance(cvs.dsl, str):
+        cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
+    canvas = Canvas(cvs.dsl, tenant_id)
+
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": cvs.id,
+            "user_id": kwargs.get("user_id", ""),
+            "message": [{"role": "assistant", "content": canvas.get_prologue()}],
+            "source": "agent",
+            "dsl": json.loads(cvs.dsl)
+        }
+        API4ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0,
+                                    "message": "",
+                                    "data": {
+                                        "session_id": session_id,
+                                        "answer": canvas.get_prologue(),
+                                        "reference": [],
+                                        "param": canvas.get_preset_param()
+                                    }
+                                    },
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+    else:
+        session_id = session_id
+        e, conv = API4ConversationService.get_by_id(session_id)
+        assert e, "Session not found!"
+        canvas = Canvas(json.dumps(conv.dsl), tenant_id)
+
+    messages = conv.message
+    question = {
+        "role": "user",
+        "content": question,
+        "id": str(uuid4())
+    }
+    messages.append(question)
+    msg = []
+    for m in messages:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
+    message_id = msg[-1]["id"]
+
+    if not conv.reference:
+        conv.reference = []
+    conv.message.append({"role": "assistant", "content": "", "id": message_id})
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    final_ans = {"reference": [], "content": ""}
+
+    canvas.add_user_input(msg[-1]["content"])
+
+    if stream:
+        try:
+            for ans in canvas.run(stream=stream):
+                if ans.get("running_status"):
+                    yield "data:" + json.dumps({"code": 0, "message": "",
+                                                "data": {"answer": ans["content"],
+                                                         "running_status": True}},
+                                               ensure_ascii=False) + "\n\n"
+                    continue
+                for k in ans.keys():
+                    final_ans[k] = ans[k]
+                ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"
+
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            canvas.history.append(("assistant", final_ans["content"]))
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            conv.dsl = json.loads(str(canvas))
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

+    else:
+        for answer in canvas.run(stream=False):
+            if answer.get("running_status"):
+                continue
+            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
+            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
+            if final_ans.get("reference"):
+                canvas.reference.append(final_ans["reference"])
+            conv.dsl = json.loads(str(canvas))
+
+            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
+            result = structure_answer(conv, result, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            yield result
+            break
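
Because `completion()` in `canvas_service.py` is now a plain generator rather than view code, it can also be exercised outside Flask, which is handy when debugging an agent DSL. A rough sketch (the tenant/agent/session ids are placeholders, and this assumes the RAGFlow database connection has already been initialised):

```python
from api.db.services.canvas_service import completion as agent_completion

# First call without a session: the generator yields SSE-formatted strings
# ("data:{...}\n\n") carrying the prologue, the preset params, and an end marker.
for frame in agent_completion("tenant-id", "agent-id", question="Hi there",
                              session_id=None, stream=True):
    print(frame, end="")

# Non-streamed run against an existing session yields a single answer dict instead.
for answer in agent_completion("tenant-id", "agent-id", question="Hi there",
                               session_id="existing-session-id", stream=False):
    print(answer["answer"])
    break
```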
api/db/services/conversation_service.py
ADDED
@@ -0,0 +1,221 @@
+#
+#  Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+from uuid import uuid4
+from api.db import StatusEnum
+from api.db.db_models import Conversation, DB
+from api.db.services.api_service import API4ConversationService
+from api.db.services.common_service import CommonService
+from api.db.services.dialog_service import DialogService, chat
+from api.utils import get_uuid
+import json
+from copy import deepcopy
+
+
+class ConversationService(CommonService):
+    model = Conversation
+
+    @classmethod
+    @DB.connection_context()
+    def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
+        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
+        if id:
+            sessions = sessions.where(cls.model.id == id)
+        if name:
+            sessions = sessions.where(cls.model.name == name)
+        if desc:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
+        else:
+            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
+
+        sessions = sessions.paginate(page_number, items_per_page)
+
+        return list(sessions.dicts())
+
+
+def structure_answer(conv, ans, message_id, session_id):
+    reference = ans["reference"]
+    temp_reference = deepcopy(ans["reference"])
+    if not conv.reference:
+        conv.reference.append(temp_reference)
+    else:
+        conv.reference[-1] = temp_reference
+    conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
+
+    chunk_list = [{
+        "id": chunk["chunk_id"],
+        "content": chunk["content"],
+        "document_id": chunk["doc_id"],
+        "document_name": chunk["docnm_kwd"],
+        "dataset_id": chunk["kb_id"],
+        "image_id": chunk["image_id"],
+        "similarity": chunk["similarity"],
+        "vector_similarity": chunk["vector_similarity"],
+        "term_similarity": chunk["term_similarity"],
+        "positions": chunk["positions"],
+    } for chunk in reference.get("chunks", [])]
+
+    reference["chunks"] = chunk_list
+    ans["id"] = message_id
+    ans["session_id"] = session_id
+
+    return ans
+
+
+def completion(tenant_id, chat_id, question, name="New session", session_id=None, stream=True, **kwargs):
+    assert name, "`name` can not be empty."
+    dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
+    assert dia, "You do not own the chat."
+
+    if not session_id:
+        conv = {
+            "id": get_uuid(),
+            "dialog_id": chat_id,
+            "name": name,
+            "message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue")}]
+        }
+        ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+
+    conv = ConversationService.query(id=session_id, dialog_id=chat_id)
+    if not conv:
+        raise LookupError("Session does not exist")
+
+    conv = conv[0]
+    msg = []
+    question = {
+        "content": question,
+        "role": "user",
+        "id": str(uuid4())
+    }
+    conv.message.append(question)
+    for m in conv.message:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    message_id = msg[-1].get("id")
+    e, dia = DialogService.get_by_id(conv.dialog_id)
+
+    if not conv.reference:
+        conv.reference = []
+    conv.message.append({"role": "assistant", "content": "", "id": message_id})
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
+
+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            ConversationService.update_by_id(conv.id, conv.to_dict())
+            break
+        yield answer
+
+
+def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwargs):
+    e, dia = DialogService.get_by_id(dialog_id)
+    assert e, "Dialog not found"
+    if not session_id:
+        session_id = get_uuid()
+        conv = {
+            "id": session_id,
+            "dialog_id": dialog_id,
+            "user_id": kwargs.get("user_id", ""),
+            "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
+        }
+        API4ConversationService.save(**conv)
+        yield "data:" + json.dumps({"code": 0, "message": "",
+                                    "data": {
+                                        "answer": conv["message"][0]["content"],
+                                        "reference": {},
+                                        "audio_binary": None,
+                                        "id": None,
+                                        "session_id": session_id
+                                    }},
+                                   ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+        return
+    else:
+        session_id = session_id
+        e, conv = API4ConversationService.get_by_id(session_id)
+        assert e, "Session not found!"
+
+    messages = conv.message
+    question = {
+        "role": "user",
+        "content": question,
+        "id": str(uuid4())
+    }
+    messages.append(question)
+
+    msg = []
+    for m in messages:
+        if m["role"] == "system":
+            continue
+        if m["role"] == "assistant" and not msg:
+            continue
+        msg.append(m)
+    if not msg[-1].get("id"):
+        msg[-1]["id"] = get_uuid()
+    message_id = msg[-1]["id"]
+
+    if not conv.reference:
+        conv.reference = []
+    conv.message.append({"role": "assistant", "content": "", "id": message_id})
+    conv.reference.append({"chunks": [], "doc_aggs": []})
+
+    if stream:
+        try:
+            for ans in chat(dia, msg, True, **kwargs):
+                ans = structure_answer(conv, ans, message_id, session_id)
+                yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
+                                           ensure_ascii=False) + "\n\n"
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+        except Exception as e:
+            yield "data:" + json.dumps({"code": 500, "message": str(e),
+                                        "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
+                                       ensure_ascii=False) + "\n\n"
+        yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
+
+    else:
+        answer = None
+        for ans in chat(dia, msg, False, **kwargs):
+            answer = structure_answer(conv, ans, message_id, session_id)
+            API4ConversationService.append_message(conv.id, conv.to_dict())
+            break
+        yield answer
+
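
`structure_answer()` is the piece the chat, agent, and iframe flows now share: it copies the reference into the conversation, rewrites each chunk into the public field names (`document_id`, `dataset_id`, and so on) and stamps the message and session ids onto the answer. A tiny, self-contained illustration of the transformation; the input dict and the `SimpleNamespace` stand-in for the conversation object are made up for the example, and importing the module assumes a working RAGFlow environment:

```python
from types import SimpleNamespace
from api.db.services.conversation_service import structure_answer

# Stand-in conversation object with the two attributes structure_answer touches.
conv = SimpleNamespace(reference=[], message=[{"role": "assistant", "content": "", "id": "m1"}])

ans = {
    "answer": "RAGFlow is ...",
    "reference": {
        "chunks": [{
            "chunk_id": "c1", "content": "some text", "doc_id": "d1", "docnm_kwd": "guide.pdf",
            "kb_id": "kb1", "image_id": "", "similarity": 0.9,
            "vector_similarity": 0.8, "term_similarity": 0.95, "positions": [],
        }],
        "doc_aggs": [],
    },
}

ans = structure_answer(conv, ans, message_id="m1", session_id="s1")
# ans["reference"]["chunks"][0] now exposes "id", "document_id", "document_name",
# "dataset_id", ... and ans itself carries "id" (the message id) and "session_id".
```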
api/db/services/dialog_service.py
CHANGED
@@ -23,7 +23,7 @@ from timeit import default_timer as timer
 import datetime
 from datetime import timedelta
 from api.db import LLMType, ParserType,StatusEnum
-from api.db.db_models import Dialog, Conversation, DB
+from api.db.db_models import Dialog, DB
 from api.db.services.common_service import CommonService
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.db.services.llm_service import LLMService, TenantLLMService, LLMBundle
@@ -60,27 +60,6 @@ class DialogService(CommonService):
         return list(chats.dicts())
 
 
-class ConversationService(CommonService):
-    model = Conversation
-
-    @classmethod
-    @DB.connection_context()
-    def get_list(cls,dialog_id,page_number, items_per_page, orderby, desc, id , name):
-        sessions = cls.model.select().where(cls.model.dialog_id ==dialog_id)
-        if id:
-            sessions = sessions.where(cls.model.id == id)
-        if name:
-            sessions = sessions.where(cls.model.name == name)
-        if desc:
-            sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
-        else:
-            sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
-
-        sessions = sessions.paginate(page_number, items_per_page)
-
-        return list(sessions.dicts())
-
-
 def message_fit_in(msg, max_length=4000):
     def count():
         nonlocal msg
api/db/services/document_service.py
CHANGED
@@ -425,11 +425,12 @@ def queue_raptor_tasks(doc):
 
 def doc_upload_and_parse(conversation_id, file_objs, user_id):
     from rag.app import presentation, picture, naive, audio, email
-    from api.db.services.dialog_service import DialogService, ConversationService
+    from api.db.services.dialog_service import DialogService
     from api.db.services.file_service import FileService
     from api.db.services.llm_service import LLMBundle
     from api.db.services.user_service import TenantService
     from api.db.services.api_service import API4ConversationService
+    from api.db.services.conversation_service import ConversationService
 
     e, conv = ConversationService.get_by_id(conversation_id)
     if not e: