KevinHuSh committed · Commit a3ebd45 · Parent: e4c23fc

support graph (#1152)

### What problem does this PR solve?
#918
### Type of change
- [x] New Feature (non-breaking change which adds functionality)
- api/db/db_models.py +25 -0
- graph/__init__.py +0 -0
- graph/canvas.py +243 -0
- graph/component/__init__.py +16 -0
- graph/component/answer.py +77 -0
- graph/component/base.py +466 -0
- graph/component/begin.py +49 -0
- graph/component/categorize.py +87 -0
- graph/component/cite.py +75 -0
- graph/component/generate.py +156 -0
- graph/component/message.py +52 -0
- graph/component/relevant.py +78 -0
- graph/component/retrieval.py +88 -0
- graph/component/rewrite.py +72 -0
- graph/component/switch.py +80 -0
- graph/settings.py +33 -0
- graph/test/client.py +48 -0
- graph/test/dsl_examples/categorize.json +45 -0
- graph/test/dsl_examples/customer_service.json +157 -0
- graph/test/dsl_examples/headhunter_zh.json +194 -0
- graph/test/dsl_examples/retrieval_and_generate.json +54 -0
- graph/test/dsl_examples/retrieval_categorize_and_generate.json +88 -0
- graph/test/dsl_examples/retrieval_relevant_and_generate.json +82 -0
- graph/test/dsl_examples/retrieval_relevant_rewrite_and_generate.json +79 -0
api/db/db_models.py
CHANGED
@@ -833,6 +833,31 @@ class API4Conversation(DataBaseModel):
         db_table = "api_4_conversation"


+class UserCanvas(DataBaseModel):
+    id = CharField(max_length=32, primary_key=True)
+    avatar = TextField(null=True, help_text="avatar base64 string")
+    user_id = CharField(max_length=255, null=False, help_text="user_id")
+    title = CharField(max_length=255, null=True, help_text="Canvas title")
+    description = TextField(null=True, help_text="Canvas description")
+    canvas_type = CharField(max_length=32, null=True, help_text="Canvas type")
+    dsl = JSONField(null=True, default={})
+
+    class Meta:
+        db_table = "user_canvas"
+
+
+class CanvasTemplate(DataBaseModel):
+    id = CharField(max_length=32, primary_key=True)
+    avatar = TextField(null=True, help_text="avatar base64 string")
+    title = CharField(max_length=255, null=True, help_text="Canvas title")
+    description = TextField(null=True, help_text="Canvas description")
+    canvas_type = CharField(max_length=32, null=True, help_text="Canvas type")
+    dsl = JSONField(null=True, default={})
+
+    class Meta:
+        db_table = "canvas_template"
+
+
 def migrate_db():
     with DB.transaction():
         migrator = MySQLMigrator(DB)
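
A minimal read-side sketch of how the new `user_canvas` table might be queried from the service layer (this helper is illustrative and not part of the PR; the query API is standard peewee, and bookkeeping fields inherited from `DataBaseModel` such as timestamps are omitted):

```python
# Hypothetical usage sketch, not part of this commit.
from api.db.db_models import UserCanvas

def list_user_canvases(user_id: str):
    # user_id and title match the UserCanvas fields added above.
    rows = UserCanvas.select().where(UserCanvas.user_id == user_id)
    # dsl is stored via JSONField, so it should come back already deserialized.
    return [{"id": r.id, "title": r.title, "dsl": r.dsl} for r in rows]
```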
graph/__init__.py
ADDED
File without changes
graph/canvas.py
ADDED
@@ -0,0 +1,243 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import json
import traceback
from abc import ABC
from copy import deepcopy
from functools import partial

import pandas as pd

from graph.component import component_class
from graph.component.base import ComponentBase
from graph.settings import flow_logger


class Canvas(ABC):
    """
    dsl = {
        "components": {
            "begin": {
                "obj":{
                    "component_name": "Begin",
                    "params": {},
                },
                "downstream": ["answer_0"],
                "upstream": [],
            },
            "answer_0": {
                "obj": {
                    "component_name": "Answer",
                    "params": {}
                },
                "downstream": ["retrieval_0"],
                "upstream": ["begin", "generate_0"],
            },
            "retrieval_0": {
                "obj": {
                    "component_name": "Retrieval",
                    "params": {}
                },
                "downstream": ["generate_0"],
                "upstream": ["answer_0"],
            },
            "generate_0": {
                "obj": {
                    "component_name": "Generate",
                    "params": {}
                },
                "downstream": ["answer_0"],
                "upstream": ["retrieval_0"],
            }
        },
        "history": [],
        "messages": [],
        "reference": [],
        "path": [["begin"]],
        "answer": []
    }
    """

    def __init__(self, dsl: str, tenant_id=None):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.components = {}
        self.dsl = json.loads(dsl) if dsl else {
            "components": {
                "begin": {
                    "obj": {
                        "component_name": "Begin",
                        "params": {
                            "prologue": "Hi there!"
                        }
                    },
                    "downstream": [],
                    "upstream": []
                }
            },
            "history": [],
            "messages": [],
            "reference": [],
            "path": [],
            "answer": []
        }
        self._tenant_id = tenant_id
        self._embed_id = ""
        self.load()

    def load(self):
        assert self.dsl.get("components", {}).get("begin"), "There have to be a 'Begin' component."

        self.components = self.dsl["components"]
        for k, cpn in self.components.items():
            param = component_class(cpn["obj"]["component_name"] + "Param")()
            param.update(cpn["obj"]["params"])
            param.check()
            cpn["obj"] = component_class(cpn["obj"]["component_name"])(self, k, param)
            if cpn["obj"].component_name == "Categorize":
                for _, desc in param.category_description.items():
                    if desc["to"] not in cpn["downstream"]:
                        cpn["downstream"].append(desc["to"])

        self.path = self.dsl["path"]
        self.history = self.dsl["history"]
        self.messages = self.dsl["messages"]
        self.answer = self.dsl["answer"]
        self.reference = self.dsl["reference"]
        self._embed_id = self.dsl.get("embed_id", "")

    def __str__(self):
        self.dsl["path"] = self.path
        self.dsl["history"] = self.history
        self.dsl["messages"] = self.messages
        self.dsl["answer"] = self.answer
        self.dsl["reference"] = self.reference
        self.dsl["embed_id"] = self._embed_id
        dsl = deepcopy(self.dsl)
        for k, cpn in self.components.items():
            dsl["components"][k]["obj"] = json.loads(str(cpn["obj"]))
        return json.dumps(dsl, ensure_ascii=False)

    def reset(self):
        self.path = []
        self.history = []
        self.messages = []
        self.answer = []
        self.reference = []
        self.components = {}
        self._embed_id = ""

    def run(self, **kwargs):
        ans = ""
        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            try:
                ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            except Exception as e:
                ans = ComponentBase.be_output(str(e))
            self.path[-1].append(cpn_id)
            self.history.append(("assistant", ans.to_dict("records")))
            return ans

        if not self.path:
            self.components["begin"]["obj"].run(self.history, **kwargs)
            self.path.append(["begin"])

        self.path.append([])
        ran = -1

        def prepare2run(cpns):
            nonlocal ran, ans
            for c in cpns:
                cpn = self.components[c]["obj"]
                if cpn.component_name == "Answer":
                    self.answer.append(c)
                else:
                    print("RUN: ", c)
                    ans = cpn.run(self.history, **kwargs)
                    self.path[-1].append(c)

            ran += 1

        prepare2run(self.components[self.path[-2][-1]]["downstream"])
        while ran < len(self.path[-1]):
            print(ran, self.path)
            cpn_id = self.path[-1][ran]
            cpn = self.get_component(cpn_id)
            if not cpn["downstream"]: break

            if cpn["obj"].component_name.lower() in ["switch", "categorize", "relevant"]:
                switch_out = cpn["obj"].output()[1].iloc[0, 0]
                assert switch_out in self.components, \
                    "{}'s output: {} not valid.".format(cpn_id, switch_out)
                try:
                    prepare2run([switch_out])
                except Exception as e:
                    for p in [c for p in self.path for c in p][::-1]:
                        if p.lower().find("answer") >= 0:
                            self.get_component(p)["obj"].set_exception(e)
                            prepare2run([p])
                            break
                    traceback.print_exc()
                continue

            try:
                prepare2run(cpn["downstream"])
            except Exception as e:
                for p in [c for p in self.path for c in p][::-1]:
                    if p.lower().find("answer") >= 0:
                        self.get_component(p)["obj"].set_exception(e)
                        prepare2run([p])
                        break
                traceback.print_exc()

        if self.answer:
            cpn_id = self.answer[0]
            self.answer.pop(0)
            ans = self.components[cpn_id]["obj"].run(self.history, **kwargs)
            self.path[-1].append(cpn_id)
            if kwargs.get("stream"):
                assert isinstance(ans, partial)
                return ans

            self.history.append(("assistant", ans.to_dict("records")))

        return ans

    def get_component(self, cpn_id):
        return self.components[cpn_id]

    def get_tenant_id(self):
        return self._tenant_id

    def get_history(self, window_size):
        convs = []
        for role, obj in self.history[window_size * -2:]:
            convs.append({"role": role, "content": (obj if role == "user" else
                                                    '\n'.join(pd.DataFrame(obj)['content']))})
        return convs

    def add_user_input(self, question):
        self.history.append(("user", question))

    def set_embedding_model(self, embed_id):
        self._embed_id = embed_id

    def get_embedding_model(self):
        return self._embed_id
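
To make the execution model concrete, here is a minimal, hypothetical driver in the spirit of graph/test/client.py (whose content is not shown in this diff); it assumes a valid tenant_id and one of the DSL examples added by this PR:

```python
# Hypothetical driver sketch, not part of this commit. Assumes a real tenant id and
# the retrieval_and_generate.json example listed in the changed-files section above.
import json
from graph.canvas import Canvas

if __name__ == "__main__":
    dsl = open("graph/test/dsl_examples/retrieval_and_generate.json").read()
    canvas = Canvas(dsl, tenant_id="<your_tenant_id>")

    ans = canvas.run(stream=False)            # first call surfaces the Begin prologue
    print(ans)                                # a DataFrame with a "content" column

    canvas.add_user_input("What is RAGFlow?")
    ans = canvas.run(stream=False)            # walks answer -> retrieval -> generate
    print(ans.to_dict("records"))

    # str(canvas) serializes components, history and path back to DSL JSON,
    # which can be stored in UserCanvas.dsl and reloaded later.
    saved = str(canvas)
    canvas = Canvas(saved, tenant_id="<your_tenant_id>")
```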
graph/component/__init__.py
ADDED
@@ -0,0 +1,16 @@
import importlib
from .begin import Begin, BeginParam
from .generate import Generate, GenerateParam
from .retrieval import Retrieval, RetrievalParam
from .answer import Answer, AnswerParam
from .categorize import Categorize, CategorizeParam
from .switch import Switch, SwitchParam
from .relevant import Relevant, RelevantParam
from .message import Message, MessageParam
from .rewrite import RewriteQuestion, RewriteQuestionParam


def component_class(class_name):
    m = importlib.import_module("graph.component")
    c = getattr(m, class_name)
    return c
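
This registry is what Canvas.load() relies on: component names stored in the DSL are resolved to classes purely by attribute lookup on this package, so any "component_name" used in a DSL must be exported here. A small illustration (the kb id is a placeholder):

```python
# Illustration of the name-based lookup used by Canvas.load(): "<Name>Param" and "<Name>"
# are both resolved from graph.component.
from graph.component import component_class

param = component_class("RetrievalParam")()        # -> RetrievalParam instance
param.update({"kb_ids": ["<kb_id>"], "top_n": 6})   # fill params from the DSL dict
param.check()                                       # raises if required fields are missing
cpn_cls = component_class("Retrieval")              # -> Retrieval class, instantiated by Canvas
```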
graph/component/answer.py
ADDED
@@ -0,0 +1,77 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial

import pandas as pd

from graph.component.base import ComponentBase, ComponentParamBase


class AnswerParam(ComponentParamBase):

    """
    Define the Answer component parameters.
    """
    def __init__(self):
        super().__init__()
        self.post_answers = []

    def check(self):
        return True


class Answer(ComponentBase, ABC):
    component_name = "Answer"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        ans = self.get_input()
        if self._param.post_answers:
            ans = pd.concat([ans, pd.DataFrame([{"content": random.choice(self._param.post_answers)}])], ignore_index=False)
        return ans

    def stream_output(self):
        res = None
        if hasattr(self, "exception") and self.exception:
            res = {"content": str(self.exception)}
            self.exception = None
            yield res
            self.set_output(res)
            return

        stream = self.get_stream_input()
        if isinstance(stream, pd.DataFrame):
            res = stream
            for ii, row in stream.iterrows():
                yield row.to_dict()
        else:
            for st in stream():
                res = st
                yield st
        if self._param.post_answers:
            res["content"] += random.choice(self._param.post_answers)
            yield res

        self.set_output(res)

    def set_exception(self, e):
        self.exception = e
graph/component/base.py
ADDED
@@ -0,0 +1,466 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import builtins
import json
import os
from copy import deepcopy
from functools import partial
from typing import List, Dict

import pandas as pd

from graph import settings
from graph.settings import flow_logger

_FEEDED_DEPRECATED_PARAMS = "_feeded_deprecated_params"
_DEPRECATED_PARAMS = "_deprecated_params"
_USER_FEEDED_PARAMS = "_user_feeded_params"
_IS_RAW_CONF = "_is_raw_conf"


class ComponentParamBase(ABC):
    def __init__(self):
        self.output_var_name = "output"
        self.message_history_window_size = 4

    def set_name(self, name: str):
        self._name = name
        return self

    def check(self):
        raise NotImplementedError("Parameter Object should be checked.")

    @classmethod
    def _get_or_init_deprecated_params_set(cls):
        if not hasattr(cls, _DEPRECATED_PARAMS):
            setattr(cls, _DEPRECATED_PARAMS, set())
        return getattr(cls, _DEPRECATED_PARAMS)

    def _get_or_init_feeded_deprecated_params_set(self, conf=None):
        if not hasattr(self, _FEEDED_DEPRECATED_PARAMS):
            if conf is None:
                setattr(self, _FEEDED_DEPRECATED_PARAMS, set())
            else:
                setattr(
                    self,
                    _FEEDED_DEPRECATED_PARAMS,
                    set(conf[_FEEDED_DEPRECATED_PARAMS]),
                )
        return getattr(self, _FEEDED_DEPRECATED_PARAMS)

    def _get_or_init_user_feeded_params_set(self, conf=None):
        if not hasattr(self, _USER_FEEDED_PARAMS):
            if conf is None:
                setattr(self, _USER_FEEDED_PARAMS, set())
            else:
                setattr(self, _USER_FEEDED_PARAMS, set(conf[_USER_FEEDED_PARAMS]))
        return getattr(self, _USER_FEEDED_PARAMS)

    def get_user_feeded(self):
        return self._get_or_init_user_feeded_params_set()

    def get_feeded_deprecated_params(self):
        return self._get_or_init_feeded_deprecated_params_set()

    @property
    def _deprecated_params_set(self):
        return {name: True for name in self.get_feeded_deprecated_params()}

    def __str__(self):
        return json.dumps(self.as_dict(), ensure_ascii=False)

    def as_dict(self):
        def _recursive_convert_obj_to_dict(obj):
            ret_dict = {}
            for attr_name in list(obj.__dict__):
                if attr_name in [_FEEDED_DEPRECATED_PARAMS, _DEPRECATED_PARAMS, _USER_FEEDED_PARAMS, _IS_RAW_CONF]:
                    continue
                # get attr
                attr = getattr(obj, attr_name)
                if isinstance(attr, pd.DataFrame):
                    ret_dict[attr_name] = attr.to_dict()
                    continue
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[attr_name] = _recursive_convert_obj_to_dict(attr)
                else:
                    ret_dict[attr_name] = attr

            return ret_dict

        return _recursive_convert_obj_to_dict(self)

    def update(self, conf, allow_redundant=False):
        update_from_raw_conf = conf.get(_IS_RAW_CONF, True)
        if update_from_raw_conf:
            deprecated_params_set = self._get_or_init_deprecated_params_set()
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set()
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set()
            setattr(self, _IS_RAW_CONF, False)
        else:
            feeded_deprecated_params_set = (
                self._get_or_init_feeded_deprecated_params_set(conf)
            )
            user_feeded_params_set = self._get_or_init_user_feeded_params_set(conf)

        def _recursive_update_param(param, config, depth, prefix):
            if depth > settings.PARAM_MAXDEPTH:
                raise ValueError("Param define nesting too deep!!!, can not parse it")

            inst_variables = param.__dict__
            redundant_attrs = []
            for config_key, config_value in config.items():
                # redundant attr
                if config_key not in inst_variables:
                    if not update_from_raw_conf and config_key.startswith("_"):
                        setattr(param, config_key, config_value)
                    else:
                        setattr(param, config_key, config_value)
                        # redundant_attrs.append(config_key)
                    continue

                full_config_key = f"{prefix}{config_key}"

                if update_from_raw_conf:
                    # add user feeded params
                    user_feeded_params_set.add(full_config_key)

                    # update user feeded deprecated param set
                    if full_config_key in deprecated_params_set:
                        feeded_deprecated_params_set.add(full_config_key)

                # supported attr
                attr = getattr(param, config_key)
                if type(attr).__name__ in dir(builtins) or attr is None:
                    setattr(param, config_key, config_value)

                else:
                    # recursive set obj attr
                    sub_params = _recursive_update_param(
                        attr, config_value, depth + 1, prefix=f"{prefix}{config_key}."
                    )
                    setattr(param, config_key, sub_params)

            if not allow_redundant and redundant_attrs:
                raise ValueError(
                    f"cpn `{getattr(self, '_name', type(self))}` has redundant parameters: `{[redundant_attrs]}`"
                )

            return param

        return _recursive_update_param(param=self, config=conf, depth=0, prefix="")

    def extract_not_builtin(self):
        def _get_not_builtin_types(obj):
            ret_dict = {}
            for variable in obj.__dict__:
                attr = getattr(obj, variable)
                if attr and type(attr).__name__ not in dir(builtins):
                    ret_dict[variable] = _get_not_builtin_types(attr)

            return ret_dict

        return _get_not_builtin_types(self)

    def validate(self):
        self.builtin_types = dir(builtins)
        self.func = {
            "ge": self._greater_equal_than,
            "le": self._less_equal_than,
            "in": self._in,
            "not_in": self._not_in,
            "range": self._range,
        }
        home_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
        param_validation_path_prefix = home_dir + "/param_validation/"

        param_name = type(self).__name__
        param_validation_path = "/".join(
            [param_validation_path_prefix, param_name + ".json"]
        )

        validation_json = None

        try:
            with open(param_validation_path, "r") as fin:
                validation_json = json.loads(fin.read())
        except BaseException:
            return

        self._validate_param(self, validation_json)

    def _validate_param(self, param_obj, validation_json):
        default_section = type(param_obj).__name__
        var_list = param_obj.__dict__

        for variable in var_list:
            attr = getattr(param_obj, variable)

            if type(attr).__name__ in self.builtin_types or attr is None:
                if variable not in validation_json:
                    continue

                validation_dict = validation_json[default_section][variable]
                value = getattr(param_obj, variable)
                value_legal = False

                for op_type in validation_dict:
                    if self.func[op_type](value, validation_dict[op_type]):
                        value_legal = True
                        break

                if not value_legal:
                    raise ValueError(
                        "Plase check runtime conf, {} = {} does not match user-parameter restriction".format(
                            variable, value
                        )
                    )

            elif variable in validation_json:
                self._validate_param(attr, validation_json)

    @staticmethod
    def check_string(param, descr):
        if type(param).__name__ not in ["str"]:
            raise ValueError(
                descr + " {} not supported, should be string type".format(param)
            )

    @staticmethod
    def check_empty(param, descr):
        if not param:
            raise ValueError(
                descr + " {} not supported empty value."
            )

    @staticmethod
    def check_positive_integer(param, descr):
        if type(param).__name__ not in ["int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive integer".format(param)
            )

    @staticmethod
    def check_positive_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param <= 0:
            raise ValueError(
                descr + " {} not supported, should be positive numeric".format(param)
            )

    @staticmethod
    def check_nonnegative_number(param, descr):
        if type(param).__name__ not in ["float", "int", "long"] or param < 0:
            raise ValueError(
                descr
                + " {} not supported, should be non-negative numeric".format(param)
            )

    @staticmethod
    def check_decimal_float(param, descr):
        if type(param).__name__ not in ["float", "int"] or param < 0 or param > 1:
            raise ValueError(
                descr
                + " {} not supported, should be a float number in range [0, 1]".format(
                    param
                )
            )

    @staticmethod
    def check_boolean(param, descr):
        if type(param).__name__ != "bool":
            raise ValueError(
                descr + " {} not supported, should be bool type".format(param)
            )

    @staticmethod
    def check_open_unit_interval(param, descr):
        if type(param).__name__ not in ["float"] or param <= 0 or param >= 1:
            raise ValueError(
                descr + " should be a numeric number between 0 and 1 exclusively"
            )

    @staticmethod
    def check_valid_value(param, descr, valid_values):
        if param not in valid_values:
            raise ValueError(
                descr
                + " {} is not supported, it should be in {}".format(param, valid_values)
            )

    @staticmethod
    def check_defined_type(param, descr, types):
        if type(param).__name__ not in types:
            raise ValueError(
                descr + " {} not supported, should be one of {}".format(param, types)
            )

    @staticmethod
    def check_and_change_lower(param, valid_list, descr=""):
        if type(param).__name__ != "str":
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

        lower_param = param.lower()
        if lower_param in valid_list:
            return lower_param
        else:
            raise ValueError(
                descr
                + " {} not supported, should be one of {}".format(param, valid_list)
            )

    @staticmethod
    def _greater_equal_than(value, limit):
        return value >= limit - settings.FLOAT_ZERO

    @staticmethod
    def _less_equal_than(value, limit):
        return value <= limit + settings.FLOAT_ZERO

    @staticmethod
    def _range(value, ranges):
        in_range = False
        for left_limit, right_limit in ranges:
            if (
                    left_limit - settings.FLOAT_ZERO
                    <= value
                    <= right_limit + settings.FLOAT_ZERO
            ):
                in_range = True
                break

        return in_range

    @staticmethod
    def _in(value, right_value_list):
        return value in right_value_list

    @staticmethod
    def _not_in(value, wrong_value_list):
        return value not in wrong_value_list

    def _warn_deprecated_param(self, param_name, descr):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} is deprecated and ignored in this version."
            )

    def _warn_to_deprecate_param(self, param_name, descr, new_param):
        if self._deprecated_params_set.get(param_name):
            flow_logger.warning(
                f"{descr} {param_name} will be deprecated in future release; "
                f"please use {new_param} instead."
            )
            return True
        return False


class ComponentBase(ABC):
    component_name: str

    def __str__(self):
        """
        {
            "component_name": "Begin",
            "params": {}
        }
        """
        return """{{
            "component_name": "{}",
            "params": {}
        }}""".format(self.component_name,
                     self._param
                     )

    def __init__(self, canvas, id, param: ComponentParamBase):
        self._canvas = canvas
        self._id = id
        self._param = param
        self._param.check()

    def run(self, history, **kwargs):
        flow_logger.info("{}, history: {}, kwargs: {}".format(self, json.dumps(history, ensure_ascii=False),
                                                              json.dumps(kwargs, ensure_ascii=False)))
        try:
            res = self._run(history, **kwargs)
            self.set_output(res)
        except Exception as e:
            self.set_output(pd.DataFrame([{"content": str(e)}]))
            raise e

        return res

    def _run(self, history, **kwargs):
        raise NotImplementedError()

    def output(self) -> pd.DataFrame:
        o = getattr(self._param, self._param.output_var_name)
        if not isinstance(o, partial) and not isinstance(o, pd.DataFrame):
            if not isinstance(o, list): o = [o]
            o = pd.DataFrame(o)
        return self._param.output_var_name, o

    def set_output(self, v: pd.DataFrame):
        setattr(self._param, self._param.output_var_name, v)

    def get_input(self):
        upstream_outs = []
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        print(self.component_name, reversed_cpnts[::-1])
        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch"]: continue
            if self.component_name.lower().find("switch") < 0 \
                    and self.get_component_name(u) in ["relevant", "categorize"]:
                continue
            if u.lower().find("answer") >= 0:
                for r, c in self._canvas.history[::-1]:
                    if r == "user":
                        upstream_outs.append(pd.DataFrame([{"content": c}]))
                        break
                break
            if self.component_name.lower().find("answer") >= 0:
                if self.get_component_name(u) in ["relevant"]: continue

            upstream_outs.append(self._canvas.get_component(u)["obj"].output()[1])
            break

        return pd.concat(upstream_outs, ignore_index=False)

    def get_stream_input(self):
        reversed_cpnts = []
        if len(self._canvas.path) > 1:
            reversed_cpnts.extend(self._canvas.path[-2])
        reversed_cpnts.extend(self._canvas.path[-1])

        for u in reversed_cpnts[::-1]:
            if self.get_component_name(u) in ["switch", "answer"]: continue
            return self._canvas.get_component(u)["obj"].output()[1]

    @staticmethod
    def be_output(v):
        return pd.DataFrame([{"content": v}])

    def get_component_name(self, cpn_id):
        return self._canvas.get_component(cpn_id)["obj"].component_name.lower()
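
As a quick illustration of how these base classes are meant to be extended, here is a hypothetical component (not one shipped in this PR) that follows the contract above: a Param subclass declares and validates its fields, and the component implements _run() returning a DataFrame with a "content" column.

```python
# Hypothetical component sketch, not part of this commit.
from abc import ABC

from graph.component.base import ComponentBase, ComponentParamBase


class EchoParam(ComponentParamBase):
    def __init__(self):
        super().__init__()
        self.prefix = "You said: "

    def check(self):
        self.check_string(self.prefix, "Prefix")


class Echo(ComponentBase, ABC):
    component_name = "Echo"

    def _run(self, history, **kwargs):
        # Echo back the latest user turn from the canvas history.
        for role, content in history[::-1]:
            if role == "user":
                return Echo.be_output(self._param.prefix + str(content))
        return Echo.be_output(self._param.prefix)
```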
graph/component/begin.py
ADDED
@@ -0,0 +1,49 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from functools import partial

import pandas as pd
from graph.component.base import ComponentBase, ComponentParamBase

class BeginParam(ComponentParamBase):

    """
    Define the Begin component parameters.
    """
    def __init__(self):
        super().__init__()
        self.prologue = "Hi! I'm your smart assistant. What can I do for you?"

    def check(self):
        return True


class Begin(ComponentBase):
    component_name = "Begin"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)
        return pd.DataFrame([{"content": self._param.prologue}])

    def stream_output(self):
        res = {"content": self._param.prologue}
        yield res
        self.set_output(res)
graph/component/categorize.py
ADDED
@@ -0,0 +1,87 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate


class CategorizeParam(GenerateParam):

    """
    Define the Categorize component parameters.
    """
    def __init__(self):
        super().__init__()
        self.category_description = {}
        self.prompt = ""

    def check(self):
        super().check()
        self.check_empty(self.category_description, "Category examples")

    def get_prompt(self):
        cate_lines = []
        for c, desc in self.category_description.items():
            for l in desc["examples"].split("\n"):
                if not l: continue
                cate_lines.append("Question: {}\tCategory: {}".format(l, c))
        descriptions = []
        for c, desc in self.category_description.items():
            if desc.get("description"):
                descriptions.append(
                    "--------------------\nCategory: {}\nDescription: {}\n".format(c, desc["description"]))

        self.prompt = """
        You're a text classifier. You need to categorize the user’s questions into {} categories,
        namely: {}
        Here's description of each category:
        {}

        You could learn from the following examples:
        {}
        You could learn from the above examples.
        Just mention the category names, no need for any additional words.
        """.format(
            len(self.category_description.keys()),
            "/".join(list(self.category_description.keys())),
            "\n".join(descriptions),
            "- ".join(cate_lines)
        )
        return self.prompt


class Categorize(Generate, ABC):
    component_name = "Categorize"

    def _run(self, history, **kwargs):
        input = self.get_input()
        print(input, "DDDDDDDDDDDDDDDDDDDDDDDDDDDDD")
        input = "Question: " + ("; ".join(input["content"]) if "content" in input else "") + "Category: "
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": input}],
                            self._param.gen_conf())
        print(ans, ":::::::::::::::::::::::::::::::::")
        for c in self._param.category_description.keys():
            if ans.lower().find(c.lower()) >= 0:
                return Categorize.be_output(self._param.category_description[c]["to"])

        return Categorize.be_output(self._param.category_description.items()[-1][1]["to"])
graph/component/cite.py
ADDED
@@ -0,0 +1,75 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC

import pandas as pd

from api.db import LLMType
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class CiteParam(ComponentParamBase):

    """
    Define the Retrieval component parameters.
    """
    def __init__(self):
        super().__init__()
        self.cite_sources = []

    def check(self):
        self.check_empty(self.cite_source, "Please specify where you want to cite from.")


class Cite(ComponentBase, ABC):
    component_name = "Cite"

    def _run(self, history, **kwargs):
        input = "\n- ".join(self.get_input()["content"])
        sources = [self._canvas.get_component(cpn_id).output()[1] for cpn_id in self._param.cite_source]
        query = []
        for role, cnt in history[::-1][:self._param.message_history_window_size]:
            if role != "user":continue
            query.append(cnt)
        query = "\n".join(query)

        kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
        if not kbs:
            raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        assert len(embd_nms) == 1, "Knowledge bases use different embedding models."

        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, embd_nms[0])

        rerank_mdl = None
        if self._param.rerank_id:
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)

        kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
                                        1, self._param.top_n,
                                        self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
                                        aggs=False, rerank_mdl=rerank_mdl)

        if not kbinfos["chunks"]: return pd.DataFrame()
        df = pd.DataFrame(kbinfos["chunks"])
        df["content"] = df["content_with_weight"]
        del df["content_with_weight"]
        return df
graph/component/generate.py
ADDED
@@ -0,0 +1,156 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from functools import partial

import pandas as pd

from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from api.settings import retrievaler
from graph.component.base import ComponentBase, ComponentParamBase


class GenerateParam(ComponentParamBase):
    """
    Define the Generate component parameters.
    """

    def __init__(self):
        super().__init__()
        self.llm_id = ""
        self.prompt = ""
        self.max_tokens = 256
        self.temperature = 0.1
        self.top_p = 0.3
        self.presence_penalty = 0.4
        self.frequency_penalty = 0.7
        self.cite = True
        #self.parameters = []

    def check(self):
        self.check_decimal_float(self.temperature, "Temperature")
        self.check_decimal_float(self.presence_penalty, "Presence penalty")
        self.check_decimal_float(self.frequency_penalty, "Frequency penalty")
        self.check_positive_number(self.max_tokens, "Max tokens")
        self.check_decimal_float(self.top_p, "Top P")
        self.check_empty(self.llm_id, "LLM")
        #self.check_defined_type(self.parameters, "Parameters", ["list"])

    def gen_conf(self):
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
        }


class Generate(ComponentBase):
    component_name = "Generate"

    def _run(self, history, **kwargs):
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
        prompt = self._param.prompt

        retrieval_res = self.get_input()
        input = "\n- ".join(retrieval_res["content"])

        kwargs["input"] = input
        for n, v in kwargs.items():
            #prompt = re.sub(r"\{%s\}"%n, re.escape(str(v)), prompt)
            prompt = re.sub(r"\{%s\}"%n, str(v), prompt)

        if kwargs.get("stream"):
            return partial(self.stream_output, chat_mdl, prompt, retrieval_res)

        if "empty_response" in retrieval_res.columns:
            return Generate.be_output(input)

        ans = chat_mdl.chat(prompt, self._canvas.get_history(self._param.message_history_window_size), self._param.gen_conf())

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            ans, idx = retrievaler.insert_citations(ans,
                                                    [ck["content_ltks"]
                                                     for _, ck in retrieval_res.iterrows()],
                                                    [ck["vector"]
                                                     for _,ck in retrieval_res.iterrows()],
                                                    LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, self._canvas.get_embedding_model()),
                                                    tkweight=0.7,
                                                    vtweight=0.3)
            del retrieval_res["vector"]
            retrieval_res = retrieval_res.to_dict("records")
            df = []
            for i in idx:
                df.append(retrieval_res[int(i)])
                r = re.search(r"^((.|[\r\n])*? ##%s\$\$)"%str(i), ans)
                assert r, f"{i} => {ans}"
                df[-1]["content"] = r.group(1)
                ans = re.sub(r"^((.|[\r\n])*? ##%s\$\$)" % str(i), "", ans)
            if ans: df.append({"content": ans})
            return pd.DataFrame(df)

        return Generate.be_output(ans)

    def stream_output(self, chat_mdl, prompt, retrieval_res):
        res = None
        if "empty_response" in retrieval_res.columns and "\n- ".join(retrieval_res["content"]):
            res = {"content": "\n- ".join(retrieval_res["content"]), "reference": []}
            yield res
            self.set_output(res)
            return

        answer = ""
        for ans in chat_mdl.chat_streamly(prompt, self._canvas.get_history(self._param.message_history_window_size), self._param.gen_conf()):
            res = {"content": ans, "reference": []}
            answer = ans
            yield res

        if self._param.cite and "content_ltks" in retrieval_res.columns and "vector" in retrieval_res.columns:
            answer, idx = retrievaler.insert_citations(answer,
                                                       [ck["content_ltks"]
                                                        for _, ck in retrieval_res.iterrows()],
                                                       [ck["vector"]
                                                        for _, ck in retrieval_res.iterrows()],
                                                       LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, self._canvas.get_embedding_model()),
                                                       tkweight=0.7,
                                                       vtweight=0.3)
            doc_ids = set([])
            recall_docs = []
            for i in idx:
                did = retrieval_res.loc[int(i), "doc_id"]
                if did in doc_ids: continue
                doc_ids.add(did)
                recall_docs.append({"doc_id": did, "doc_name": retrieval_res.loc[int(i), "docnm_kwd"]})

            del retrieval_res["vector"]
            del retrieval_res["content_ltks"]

            reference = {
                "chunks": [ck.to_dict() for _, ck in retrieval_res.iterrows()],
                "doc_aggs": recall_docs
            }

            if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
                answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
            res = {"content": answer, "reference": reference}
            yield res

        self.set_output(res)
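
Since _run() substitutes every kwarg (plus the gathered retrieval text as input) into the prompt with re.sub over {name} placeholders, a typical Generate params block in a DSL would look roughly like this; the wording and the model id are illustrative, not taken from this PR's examples:

```python
# Illustrative Generate params: "{input}" is replaced by the joined retrieval chunks,
# and any other "{name}" placeholder is filled from the kwargs passed to Canvas.run().
generate_params = {
    "llm_id": "<chat_model_id>",
    "prompt": (
        "You are a helpful assistant. Answer the question using only the passages below.\n"
        "Passages:\n- {input}\n"
        "If the passages are not relevant, say you don't know."
    ),
    "cite": True,
    "temperature": 0.1,
}
```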
graph/component/message.py
ADDED
@@ -0,0 +1,52 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from abc import ABC
from functools import partial

import pandas as pd

from graph.component.base import ComponentBase, ComponentParamBase


class MessageParam(ComponentParamBase):

    """
    Define the Message component parameters.
    """
    def __init__(self):
        super().__init__()
        self.messages = []

    def check(self):
        self.check_empty(self.messages, "Message")
        return True


class Message(ComponentBase, ABC):
    component_name = "Message"

    def _run(self, history, **kwargs):
        if kwargs.get("stream"):
            return partial(self.stream_output)

        return Message.be_output(random.choice(self._param.messages))

    def stream_output(self):
        if self._param.messages:
            yield {"content": random.choice(self._param.messages)}
graph/component/relevant.py
ADDED
@@ -0,0 +1,78 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from api.db import LLMType
from api.db.services.llm_service import LLMBundle
from graph.component import GenerateParam, Generate
from rag.utils import num_tokens_from_string, encoder


class RelevantParam(GenerateParam):

    """
    Define the Relevant component parameters.
    """
    def __init__(self):
        super().__init__()
        self.prompt = ""
        self.yes = ""
        self.no = ""

    def check(self):
        super().check()

    def get_prompt(self):
        self.prompt = """
        You are a grader assessing relevance of a retrieved document to a user question.
        It does not need to be a stringent test. The goal is to filter out erroneous retrievals.
        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant.
        Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question.
        No other words needed except 'yes' or 'no'.
        """
        return self.prompt


class Relevant(Generate, ABC):
    component_name = "Relevant"

    def _run(self, history, **kwargs):
        q = ""
        for r, c in self._canvas.history[::-1]:
            if r == "user":
                q = c
                break
        ans = self.get_input()
        ans = " - ".join(ans["content"]) if "content" in ans else ""
        if not ans:
            return Relevant.be_output(self._param.no)
        ans = "Documents: \n" + ans
        ans = f"Question: {q}\n" + ans
        chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)

        if num_tokens_from_string(ans) >= chat_mdl.max_length - 4:
            ans = encoder.decode(encoder.encode(ans)[:chat_mdl.max_length - 4])

        ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": ans}],
                            self._param.gen_conf())

        print(ans, ":::::::::::::::::::::::::::::::::")
        if ans.lower().find("yes") >= 0:
            return Relevant.be_output(self._param.yes)
        if ans.lower().find("no") >= 0:
            return Relevant.be_output(self._param.no)
        assert False, f"Relevant component got: {ans}"
graph/component/retrieval.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
from abc import ABC
|
17 |
+
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
from api.db import LLMType
|
21 |
+
from api.db.services.knowledgebase_service import KnowledgebaseService
|
22 |
+
from api.db.services.llm_service import LLMBundle
|
23 |
+
from api.settings import retrievaler
|
24 |
+
from graph.component.base import ComponentBase, ComponentParamBase
|
25 |
+
|
26 |
+
|
27 |
+
class RetrievalParam(ComponentParamBase):
|
28 |
+
|
29 |
+
"""
|
30 |
+
Define the Retrieval component parameters.
|
31 |
+
"""
|
32 |
+
def __init__(self):
|
33 |
+
super().__init__()
|
34 |
+
self.similarity_threshold = 0.2
|
35 |
+
self.keywords_similarity_weight = 0.5
|
36 |
+
self.top_n = 8
|
37 |
+
self.top_k = 1024
|
38 |
+
self.kb_ids = []
|
39 |
+
self.rerank_id = ""
|
40 |
+
self.empty_response = ""
|
41 |
+
|
42 |
+
def check(self):
|
43 |
+
self.check_decimal_float(self.similarity_threshold, "Similarity threshold")
|
44 |
+
self.check_decimal_float(self.keywords_similarity_weight, "Keywords similarity weight")
|
45 |
+
self.check_positive_number(self.top_n, "Top N")
|
46 |
+
self.check_empty(self.kb_ids, "Knowledge bases")
|
47 |
+
|
48 |
+
|
49 |
+
class Retrieval(ComponentBase, ABC):
|
50 |
+
component_name = "Retrieval"
|
51 |
+
|
52 |
+
def _run(self, history, **kwargs):
|
53 |
+
query = []
|
54 |
+
for role, cnt in history[::-1][:self._param.message_history_window_size]:
|
55 |
+
if role != "user":continue
|
56 |
+
query.append(cnt)
|
57 |
+
query = "\n".join(query)
|
58 |
+
|
59 |
+
kbs = KnowledgebaseService.get_by_ids(self._param.kb_ids)
|
60 |
+
if not kbs:
|
61 |
+
raise ValueError("Can't find knowledgebases by {}".format(self._param.kb_ids))
|
62 |
+
embd_nms = list(set([kb.embd_id for kb in kbs]))
|
63 |
+
assert len(embd_nms) == 1, "Knowledge bases use different embedding models."
|
64 |
+
|
65 |
+
embd_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.EMBEDDING, embd_nms[0])
|
66 |
+
self._canvas.set_embedding_model(embd_nms[0])
|
67 |
+
|
68 |
+
rerank_mdl = None
|
69 |
+
if self._param.rerank_id:
|
70 |
+
rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, self._param.rerank_id)
|
71 |
+
|
72 |
+
kbinfos = retrievaler.retrieval(query, embd_mdl, kbs[0].tenant_id, self._param.kb_ids,
|
73 |
+
1, self._param.top_n,
|
74 |
+
self._param.similarity_threshold, 1 - self._param.keywords_similarity_weight,
|
75 |
+
aggs=False, rerank_mdl=rerank_mdl)
|
76 |
+
|
77 |
+
if not kbinfos["chunks"]:
|
78 |
+
df = Retrieval.be_output(self._param.empty_response)
|
79 |
+
df["empty_response"] = True
|
80 |
+
return df
|
81 |
+
|
82 |
+
df = pd.DataFrame(kbinfos["chunks"])
|
83 |
+
df["content"] = df["content_with_weight"]
|
84 |
+
del df["content_with_weight"]
|
85 |
+
print(">>>>>>>>>>>>>>>>>>>>>>>>>>\n", query, df)
|
86 |
+
return df
|
87 |
+
|
88 |
+
|
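As a quick illustration of the parameter class added above, the Retrieval settings can be built and validated directly in Python. This is only a sketch under assumptions, not part of the commit; the knowledge-base id is a placeholder.

# Sketch (not part of this commit): configuring RetrievalParam by hand.
from graph.component.retrieval import RetrievalParam

param = RetrievalParam()
param.kb_ids = ["<kb_id>"]                 # placeholder knowledge-base id
param.top_n = 6
param.similarity_threshold = 0.2
param.keywords_similarity_weight = 0.3
param.check()                              # validates thresholds and rejects an empty kb_ids list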
graph/component/rewrite.py
ADDED
@@ -0,0 +1,72 @@
1 |
+
#
|
2 |
+
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
from abc import ABC
|
17 |
+
from api.db import LLMType
|
18 |
+
from api.db.services.llm_service import LLMBundle
|
19 |
+
from graph.component import GenerateParam, Generate
|
20 |
+
|
21 |
+
|
22 |
+
class RewriteQuestionParam(GenerateParam):
|
23 |
+
|
24 |
+
"""
|
25 |
+
Define the QuestionRewrite component parameters.
|
26 |
+
"""
|
27 |
+
def __init__(self):
|
28 |
+
super().__init__()
|
29 |
+
self.temperature = 0.9
|
30 |
+
self.prompt = ""
|
31 |
+
self.loop = 1
|
32 |
+
|
33 |
+
def check(self):
|
34 |
+
super().check()
|
35 |
+
|
36 |
+
def get_prompt(self):
|
37 |
+
self.prompt = """
|
38 |
+
You are an expert at query expansion to generate a paraphrasing of a question.
|
39 |
+
I can't retrieve relevant information from the knowledge base by using the user's question directly.
|
40 |
+
You need to expand or paraphrase the user's question in multiple ways, such as using synonym words/phrases,
|
41 |
+
writing the abbreviation in its entirety, adding some extra descriptions or explanations,
|
42 |
+
changing the way of expression, translating the original question into another language (English/Chinese), etc.
|
43 |
+
Return 5 versions of the question, one of which comes from translation.
|
44 |
+
Just list the question. No other words are needed.
|
45 |
+
"""
|
46 |
+
return self.prompt
|
47 |
+
|
48 |
+
|
49 |
+
class RewriteQuestion(Generate, ABC):
|
50 |
+
component_name = "RewriteQuestion"
|
51 |
+
|
52 |
+
def _run(self, history, **kwargs):
|
53 |
+
if not hasattr(self, "_loop"):
|
54 |
+
setattr(self, "_loop", 0)
|
55 |
+
if self._loop >= self._param.loop:
|
56 |
+
self._loop = 0
|
57 |
+
raise Exception("Can't find relevant information.")
|
58 |
+
self._loop += 1
|
59 |
+
q = "Question: "
|
60 |
+
for r, c in self._canvas.history[::-1]:
|
61 |
+
if r == "user":
|
62 |
+
q += c
|
63 |
+
break
|
64 |
+
|
65 |
+
chat_mdl = LLMBundle(self._canvas.get_tenant_id(), LLMType.CHAT, self._param.llm_id)
|
66 |
+
ans = chat_mdl.chat(self._param.get_prompt(), [{"role": "user", "content": q}],
|
67 |
+
self._param.gen_conf())
|
68 |
+
|
69 |
+
print(ans, ":::::::::::::::::::::::::::::::::")
|
70 |
+
return RewriteQuestion.be_output(ans)
|
71 |
+
|
72 |
+
|
graph/component/switch.py
ADDED
@@ -0,0 +1,80 @@
1 |
+
#
|
2 |
+
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
from abc import ABC
|
17 |
+
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
from api.db import LLMType
|
21 |
+
from api.db.services.knowledgebase_service import KnowledgebaseService
|
22 |
+
from api.db.services.llm_service import LLMBundle
|
23 |
+
from api.settings import retrievaler
|
24 |
+
from graph.component.base import ComponentBase, ComponentParamBase
|
25 |
+
|
26 |
+
|
27 |
+
class SwitchParam(ComponentParamBase):
|
28 |
+
|
29 |
+
"""
|
30 |
+
Define the Switch component parameters.
|
31 |
+
"""
|
32 |
+
def __init__(self):
|
33 |
+
super().__init__()
|
34 |
+
"""
|
35 |
+
{
|
36 |
+
"cpn_id": "categorize:0",
|
37 |
+
"not": False,
|
38 |
+
"operator": "gt/gte/lt/lte/eq/in",
|
39 |
+
"value": "",
|
40 |
+
"to": ""
|
41 |
+
}
|
42 |
+
"""
|
43 |
+
self.conditions = []
|
44 |
+
self.default = ""
|
45 |
+
|
46 |
+
def check(self):
|
47 |
+
self.check_empty(self.conditions, "Switch conditions")
|
48 |
+
self.check_empty(self.default, "Default path")
|
49 |
+
|
50 |
+
def operators(self, field, op, value):
|
51 |
+
if op == "gt":
|
52 |
+
return float(field) > float(value)
|
53 |
+
if op == "gte":
|
54 |
+
return float(field) >= float(value)
|
55 |
+
if op == "lt":
|
56 |
+
return float(field) < float(value)
|
57 |
+
if op == "lte":
|
58 |
+
return float(field) <= float(value)
|
59 |
+
if op == "eq":
|
60 |
+
return str(field) == str(value)
|
61 |
+
if op == "in":
|
62 |
+
return str(field).find(str(value)) >= 0
|
63 |
+
return False
|
64 |
+
|
65 |
+
|
66 |
+
class Switch(ComponentBase, ABC):
|
67 |
+
component_name = "Switch"
|
68 |
+
|
69 |
+
def _run(self, history, **kwargs):
|
70 |
+
for cond in self._param.conditions:
|
71 |
+
input = self._canvas.get_component(cond["cpn_id"])["obj"].output()[1]
|
72 |
+
if self._param.operators(input.iloc[0, 0], cond["operator"], cond["value"]):
|
73 |
+
if not cond["not"]:
|
74 |
+
return pd.DataFrame([{"content": cond["to"]}])
|
75 |
+
|
76 |
+
return pd.DataFrame([{"content": self._param.default}])
|
77 |
+
|
78 |
+
|
79 |
+
|
80 |
+
|
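For reference, a conditions list matching the structure documented in SwitchParam could look like the Python literal below. The component ids and values are hypothetical and not taken from this commit.

# Hypothetical Switch configuration (sketch only).
conditions = [
    {
        "cpn_id": "relevant:0",    # component whose first output cell is tested
        "not": False,
        "operator": "eq",          # one of gt/gte/lt/lte/eq/in
        "value": "yes",
        "to": "generate:0"         # downstream component id to route to
    }
]
default = "message:0"              # fallback route when no condition matches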
graph/settings.py
ADDED
@@ -0,0 +1,33 @@
1 |
+
#
|
2 |
+
# Copyright 2019 The FATE Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
# Logger
|
17 |
+
import os
|
18 |
+
|
19 |
+
from api.utils.file_utils import get_project_base_directory
|
20 |
+
from api.utils.log_utils import LoggerFactory, getLogger
|
21 |
+
|
22 |
+
LoggerFactory.set_directory(
|
23 |
+
os.path.join(
|
24 |
+
get_project_base_directory(),
|
25 |
+
"logs",
|
26 |
+
"flow"))
|
27 |
+
# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
|
28 |
+
LoggerFactory.LEVEL = 30
|
29 |
+
|
30 |
+
flow_logger = getLogger("flow")
|
31 |
+
database_logger = getLogger("database")
|
32 |
+
FLOAT_ZERO = 1e-8
|
33 |
+
PARAM_MAXDEPTH = 5
|
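The module mainly wires up the flow logger; other graph code can then pick it up as in this small sketch (not part of the commit):

# Sketch: emitting a message through the logger configured above.
from graph.settings import flow_logger

flow_logger.warning("retrieval returned no chunks")   # level 30+ is emitted given LoggerFactory.LEVEL = 30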
graph/test/client.py
ADDED
@@ -0,0 +1,48 @@
1 |
+
#
|
2 |
+
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
#
|
16 |
+
import argparse
|
17 |
+
import os
|
18 |
+
from functools import partial
|
19 |
+
import readline
|
20 |
+
from graph.canvas import Canvas
|
21 |
+
|
22 |
+
if __name__ == '__main__':
|
23 |
+
parser = argparse.ArgumentParser()
|
24 |
+
dsl_default_path = os.path.join(
|
25 |
+
os.path.dirname(os.path.realpath(__file__)),
|
26 |
+
"dsl_examples",
|
27 |
+
"retrieval_and_generate.json",
|
28 |
+
)
|
29 |
+
parser.add_argument('-s', '--dsl', default=dsl_default_path, help="input dsl", action='store', required=False)
|
30 |
+
parser.add_argument('-t', '--tenant_id', default=False, help="Tenant ID", action='store', required=True)
|
31 |
+
parser.add_argument('-m', '--stream', default=False, help="Stream output", action='store_true', required=False)
|
32 |
+
args = parser.parse_args()
|
33 |
+
|
34 |
+
canvas = Canvas(open(args.dsl, "r").read(), args.tenant_id)
|
35 |
+
while True:
|
36 |
+
ans = canvas.run(stream=args.stream)
|
37 |
+
print("==================== Bot =====================\n> ")
|
38 |
+
if args.stream and isinstance(ans, partial):
|
39 |
+
cont = ""
|
40 |
+
for an in ans():
|
41 |
+
print(an["content"][len(cont):], end='')
|
42 |
+
cont = an["content"]
|
43 |
+
else:
|
44 |
+
print(ans["content"])
|
45 |
+
|
46 |
+
print(canvas.path)
|
47 |
+
question = input("==================== User =====================\n> ")
|
48 |
+
canvas.add_user_input(question)
|
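For context, the same loop can be driven programmatically instead of interactively; the sketch below mirrors what client.py does, with a placeholder tenant id and one of the bundled DSL examples.

# Sketch (not part of this commit): driving a Canvas without the CLI.
from graph.canvas import Canvas

dsl = open("graph/test/dsl_examples/retrieval_and_generate.json").read()
canvas = Canvas(dsl, "<tenant_id>")        # placeholder tenant id

ans = canvas.run(stream=False)             # first run yields the Begin prologue
print(ans["content"])

canvas.add_user_input("How to install it onto the wall?")
print(canvas.run(stream=False)["content"])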
graph/test/dsl_examples/categorize.json
ADDED
@@ -0,0 +1,45 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["categorize:0"],
|
19 |
+
"upstream": ["begin"]
|
20 |
+
},
|
21 |
+
"categorize:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Categorize",
|
24 |
+
"params": {
|
25 |
+
"llm_id": "deepseek-chat",
|
26 |
+
"category_description": {
|
27 |
+
"product_related": {
|
28 |
+
"description": "The question is about the product usage, appearance and how it works.",
|
29 |
+
"examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?"
|
30 |
+
},
|
31 |
+
"others": {
|
32 |
+
"description": "The question is not about the product usage, appearance and how it works.",
|
33 |
+
"examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?"
|
34 |
+
}
|
35 |
+
}
|
36 |
+
}
|
37 |
+
},
|
38 |
+
"downstream": [],
|
39 |
+
"upstream": ["answer:0"]
|
40 |
+
}
|
41 |
+
},
|
42 |
+
"history": [],
|
43 |
+
"path": [],
|
44 |
+
"answer": []
|
45 |
+
}
|
graph/test/dsl_examples/customer_service.json
ADDED
@@ -0,0 +1,157 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["categorize:0"],
|
19 |
+
"upstream": ["begin", "generate:0", "generate:casual", "generate:answer", "generate:complain", "generate:ask_contact", "message:get_contact"]
|
20 |
+
},
|
21 |
+
"categorize:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Categorize",
|
24 |
+
"params": {
|
25 |
+
"llm_id": "deepseek-chat",
|
26 |
+
"category_description": {
|
27 |
+
"product_related": {
|
28 |
+
"description": "The question is about the product usage, appearance and how it works.",
|
29 |
+
"examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?\nException: Can't connect to ES cluster\nHow to build the RAGFlow image from scratch",
|
30 |
+
"to": "retrieval:0"
|
31 |
+
},
|
32 |
+
"casual": {
|
33 |
+
"description": "The question is not about the product usage, appearance and how it works. Just casual chat.",
|
34 |
+
"examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
|
35 |
+
"to": "generate:casual"
|
36 |
+
},
|
37 |
+
"complain": {
|
38 |
+
"description": "Complain even curse about the product or service you provide. But the comment is not specific enough.",
|
39 |
+
"examples": "How bad is it.\nIt's really sucks.\nDamn, for God's sake, can it be more steady?\nShit, I just can't use this shit.\nI can't stand it anymore.",
|
40 |
+
"to": "generate:complain"
|
41 |
+
},
|
42 |
+
"answer": {
|
43 |
+
"description": "This answer provide a specific contact information, like e-mail, phone number, wechat number, line number, twitter, discord, etc,.",
|
44 |
+
"examples": "My phone number is 203921\[email protected]\nThis is my discord number: johndowson_29384",
|
45 |
+
"to": "message:get_contact"
|
46 |
+
}
|
47 |
+
},
|
48 |
+
"message_history_window_size": 8
|
49 |
+
}
|
50 |
+
},
|
51 |
+
"downstream": ["retrieval:0", "generate:casual", "generate:complain", "message:get_contact"],
|
52 |
+
"upstream": ["answer:0"]
|
53 |
+
},
|
54 |
+
"generate:casual": {
|
55 |
+
"obj": {
|
56 |
+
"component_name": "Generate",
|
57 |
+
"params": {
|
58 |
+
"llm_id": "deepseek-chat",
|
59 |
+
"prompt": "You are a customer support. But the customer wants to have a casual chat with you instead of consulting about the product. Be nice, funny, enthusiasm and concern.",
|
60 |
+
"temperature": 0.9,
|
61 |
+
"message_history_window_size": 12,
|
62 |
+
"cite": false
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"downstream": ["answer:0"],
|
66 |
+
"upstream": ["categorize:0"]
|
67 |
+
},
|
68 |
+
"generate:complain": {
|
69 |
+
"obj": {
|
70 |
+
"component_name": "Generate",
|
71 |
+
"params": {
|
72 |
+
"llm_id": "deepseek-chat",
|
73 |
+
"prompt": "You are a customer support. the Customers complain even curse about the products but not specific enough. You need to ask him/her what's the specific problem with the product. Be nice, patient and concern to soothe your customers’ emotions at first place.",
|
74 |
+
"temperature": 0.9,
|
75 |
+
"message_history_window_size": 12,
|
76 |
+
"cite": false
|
77 |
+
}
|
78 |
+
},
|
79 |
+
"downstream": ["answer:0"],
|
80 |
+
"upstream": ["categorize:0"]
|
81 |
+
},
|
82 |
+
"retrieval:0": {
|
83 |
+
"obj": {
|
84 |
+
"component_name": "Retrieval",
|
85 |
+
"params": {
|
86 |
+
"similarity_threshold": 0.2,
|
87 |
+
"keywords_similarity_weight": 0.3,
|
88 |
+
"top_n": 6,
|
89 |
+
"top_k": 1024,
|
90 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
91 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"]
|
92 |
+
}
|
93 |
+
},
|
94 |
+
"downstream": ["relevant:0"],
|
95 |
+
"upstream": ["categorize:0"]
|
96 |
+
},
|
97 |
+
"relevant:0": {
|
98 |
+
"obj": {
|
99 |
+
"component_name": "Relevant",
|
100 |
+
"params": {
|
101 |
+
"llm_id": "deepseek-chat",
|
102 |
+
"temperature": 0.02,
|
103 |
+
"yes": "generate:answer",
|
104 |
+
"no": "generate:ask_contact"
|
105 |
+
}
|
106 |
+
},
|
107 |
+
"downstream": ["generate:answer", "generate:ask_contact"],
|
108 |
+
"upstream": ["retrieval:0"]
|
109 |
+
},
|
110 |
+
"generate:answer": {
|
111 |
+
"obj": {
|
112 |
+
"component_name": "Generate",
|
113 |
+
"params": {
|
114 |
+
"llm_id": "deepseek-chat",
|
115 |
+
"prompt": "You are an intelligent assistant. Please answer the question based on content of knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
|
116 |
+
"temperature": 0.02
|
117 |
+
}
|
118 |
+
},
|
119 |
+
"downstream": ["answer:0"],
|
120 |
+
"upstream": ["relevant:0"]
|
121 |
+
},
|
122 |
+
"generate:ask_contact": {
|
123 |
+
"obj": {
|
124 |
+
"component_name": "Generate",
|
125 |
+
"params": {
|
126 |
+
"llm_id": "deepseek-chat",
|
127 |
+
"prompt": "You are a customer support. But you can't answer to customers' question. You need to request their contact like E-mail, phone number, Wechat number, LINE number, twitter, discord, etc,. Product experts will contact them later. Please do not ask the same question twice.",
|
128 |
+
"temperature": 0.9,
|
129 |
+
"message_history_window_size": 12,
|
130 |
+
"cite": false
|
131 |
+
}
|
132 |
+
},
|
133 |
+
"downstream": ["answer:0"],
|
134 |
+
"upstream": ["categorize:0"]
|
135 |
+
},
|
136 |
+
"message:get_contact": {
|
137 |
+
"obj":{
|
138 |
+
"component_name": "Message",
|
139 |
+
"params": {
|
140 |
+
"messages": [
|
141 |
+
"Okay, I've already write this down. What else I can do for you?",
|
142 |
+
"Get it. What else I can do for you?",
|
143 |
+
"Thanks for your trust! Our expert will contact ASAP. So, anything else I can do for you?",
|
144 |
+
"Thanks! So, anything else I can do for you?"
|
145 |
+
]
|
146 |
+
}
|
147 |
+
},
|
148 |
+
"downstream": ["answer:0"],
|
149 |
+
"upstream": ["categorize:0"]
|
150 |
+
}
|
151 |
+
},
|
152 |
+
"history": [],
|
153 |
+
"messages": [],
|
154 |
+
"path": [],
|
155 |
+
"reference": {},
|
156 |
+
"answer": []
|
157 |
+
}
|
graph/test/dsl_examples/headhunter_zh.json
ADDED
@@ -0,0 +1,194 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "您好!我是AGI方向的猎头,了解到您是这方面的大佬,然后冒昧的就联系到您。这边有个机会想和您分享,RAGFlow正在招聘您这个岗位的资深的工程师不知道您那边是不是感兴趣?"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["categorize:0"],
|
19 |
+
"upstream": ["begin", "message:reject"]
|
20 |
+
},
|
21 |
+
"categorize:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Categorize",
|
24 |
+
"params": {
|
25 |
+
"llm_id": "deepseek-chat",
|
26 |
+
"category_description": {
|
27 |
+
"about_job": {
|
28 |
+
"description": "该问题关于职位本身或公司的信息。",
|
29 |
+
"examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
|
30 |
+
"to": "retrieval:0"
|
31 |
+
},
|
32 |
+
"casual": {
|
33 |
+
"description": "该问题不关于职位本身或公司的信息,属于闲聊。",
|
34 |
+
"examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
|
35 |
+
"to": "generate:casual"
|
36 |
+
},
|
37 |
+
"interested": {
|
38 |
+
"description": "该回答表示他对于该职位感兴趣。",
|
39 |
+
"examples": "嗯\n说吧\n说说看\n还好吧\n是的\n哦\nyes\n具体说说",
|
40 |
+
"to": "message:introduction"
|
41 |
+
},
|
42 |
+
"answer": {
|
43 |
+
"description": "该回答表示他对于该职位不感兴趣,或感觉受到骚扰。",
|
44 |
+
"examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n我已经不干这个了\n我不是这个方向的",
|
45 |
+
"to": "message:reject"
|
46 |
+
}
|
47 |
+
}
|
48 |
+
}
|
49 |
+
},
|
50 |
+
"downstream": ["message:introduction", "generate:casual", "message:reject", "retrieval:0"],
|
51 |
+
"upstream": ["answer:0"]
|
52 |
+
},
|
53 |
+
"message:introduction": {
|
54 |
+
"obj":{
|
55 |
+
"component_name": "Message",
|
56 |
+
"params": {
|
57 |
+
"messages": [
|
58 |
+
"我简单介绍以下:\nRAGFlow 是一款基于深度文档理解构建的开源 RAG(Retrieval-Augmented Generation)引擎。RAGFlow 可以为各种规模的企业及个人提供一套精简的 RAG 工作流程,结合大语言模型(LLM)针对用户各类不同的复杂格式数据提供可靠的问答以及有理有据的引用。https://github.com/infiniflow/ragflow\n您那边还有什么要了解的?"
|
59 |
+
]
|
60 |
+
}
|
61 |
+
},
|
62 |
+
"downstream": ["answer:1"],
|
63 |
+
"upstream": ["categorize:0"]
|
64 |
+
},
|
65 |
+
"answer:1": {
|
66 |
+
"obj": {
|
67 |
+
"component_name": "Answer",
|
68 |
+
"params": {}
|
69 |
+
},
|
70 |
+
"downstream": ["categorize:1"],
|
71 |
+
"upstream": ["message:introduction", "generate:aboutJob", "generate:casual", "generate:get_wechat", "generate:nowechat"]
|
72 |
+
},
|
73 |
+
"categorize:1": {
|
74 |
+
"obj": {
|
75 |
+
"component_name": "Categorize",
|
76 |
+
"params": {
|
77 |
+
"llm_id": "deepseek-chat",
|
78 |
+
"category_description": {
|
79 |
+
"about_job": {
|
80 |
+
"description": "该问题关于职位本身或公司的信息。",
|
81 |
+
"examples": "什么岗位?\n汇报对象是谁?\n公司多少人?\n公司有啥产品?\n具体工作内容是啥?\n地点哪里?\n双休吗?",
|
82 |
+
"to": "retrieval:0"
|
83 |
+
},
|
84 |
+
"casual": {
|
85 |
+
"description": "该问题不关于职位本身或公司的信息,属于闲聊。",
|
86 |
+
"examples": "你好\n好久不见\n你男的女的?\n你是猴子派来的救兵吗?\n上午开会了?\n你叫啥?\n最近市场如何?生意好做吗?",
|
87 |
+
"to": "generate:casual"
|
88 |
+
},
|
89 |
+
"wechat": {
|
90 |
+
"description": "该回答表示他愿意加微信,或者已经报了微信号。",
|
91 |
+
"examples": "嗯\n可以\n是的\n哦\nyes\n15002333453\nwindblow_2231",
|
92 |
+
"to": "generate:get_wechat"
|
93 |
+
},
|
94 |
+
"giveup": {
|
95 |
+
"description": "该回答表示他不愿意加微信。",
|
96 |
+
"examples": "不需要\n不感兴趣\n暂时不看\n不要\nno\n不方便\n不知道还要加我微信",
|
97 |
+
"to": "generate:nowechat"
|
98 |
+
}
|
99 |
+
},
|
100 |
+
"message_history_window_size": 8
|
101 |
+
}
|
102 |
+
},
|
103 |
+
"downstream": ["retrieval:0", "generate:casual", "generate:get_wechat", "generate:nowechat"],
|
104 |
+
"upstream": ["answer:0"]
|
105 |
+
},
|
106 |
+
"generate:casual": {
|
107 |
+
"obj": {
|
108 |
+
"component_name": "Generate",
|
109 |
+
"params": {
|
110 |
+
"llm_id": "deepseek-chat",
|
111 |
+
"prompt": "你是AGI方向的猎头,现在候选人的聊了和职位无关的话题,请耐心的回应候选人,并将话题往该AGI的职位上带,最好能要到候选人微信号以便后面保持联系。",
|
112 |
+
"temperature": 0.9,
|
113 |
+
"message_history_window_size": 12,
|
114 |
+
"cite": false
|
115 |
+
}
|
116 |
+
},
|
117 |
+
"downstream": ["answer:1"],
|
118 |
+
"upstream": ["categorize:0", "categorize:1"]
|
119 |
+
},
|
120 |
+
"retrieval:0": {
|
121 |
+
"obj": {
|
122 |
+
"component_name": "Retrieval",
|
123 |
+
"params": {
|
124 |
+
"similarity_threshold": 0.2,
|
125 |
+
"keywords_similarity_weight": 0.3,
|
126 |
+
"top_n": 6,
|
127 |
+
"top_k": 1024,
|
128 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
129 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"]
|
130 |
+
}
|
131 |
+
},
|
132 |
+
"downstream": ["generate:aboutJob"],
|
133 |
+
"upstream": ["categorize:0", "categorize:1"]
|
134 |
+
},
|
135 |
+
"generate:aboutJob": {
|
136 |
+
"obj": {
|
137 |
+
"component_name": "Generate",
|
138 |
+
"params": {
|
139 |
+
"llm_id": "deepseek-chat",
|
140 |
+
"prompt": "你是AGI方向的猎头,候选人问了有关职位或公司的问题,你根据以下职位信息回答。如果职位信息中不包含候选人的问题就回答不清楚、不知道、有待确认等。回答完后引导候选人加微信号,如:\n - 方便加一下微信吗,我把JD发您看看?\n - 微信号多少,我把详细职位JD发您?\n 职位信息如下:\n {input}\n 职位信息如上。",
|
141 |
+
"temperature": 0.02
|
142 |
+
}
|
143 |
+
},
|
144 |
+
"downstream": ["answer:1"],
|
145 |
+
"upstream": ["relevant:0"]
|
146 |
+
},
|
147 |
+
"generate:get_wechat": {
|
148 |
+
"obj": {
|
149 |
+
"component_name": "Generate",
|
150 |
+
"params": {
|
151 |
+
"llm_id": "deepseek-chat",
|
152 |
+
"prompt": "你是AGI方向的猎头,候选人表示不反感加微信,如果对方已经报了微信号,表示感谢和信任并表示马上会加上;如果没有,则问对方微信号多少。你的微信号是weixin_kevin,E-mail是[email protected]。说话不要重复。不要总是您好。",
|
153 |
+
"temperature": 0.1,
|
154 |
+
"message_history_window_size": 12,
|
155 |
+
"cite": false
|
156 |
+
}
|
157 |
+
},
|
158 |
+
"downstream": ["answer:1"],
|
159 |
+
"upstream": ["categorize:1"]
|
160 |
+
},
|
161 |
+
"generate:nowechat": {
|
162 |
+
"obj":{
|
163 |
+
"component_name": "Generate",
|
164 |
+
"params": {
|
165 |
+
"llm_id": "deepseek-chat",
|
166 |
+
"prompt": "你是AGI方向的猎头,当你提出加微信时对方表示拒绝。你需要耐心礼貌的回应候选人,表示对于保护隐私信息给予理解,也可以询问他对该职位的看法和顾虑。并在恰当的时机再次询问微信联系方式。也可以鼓励候选人主动与你取得联系。你的微信号是weixin_kevin,E-mail是[email protected]。说话不要重复。不要总是您好。",
|
167 |
+
"temperature": 0.1,
|
168 |
+
"message_history_window_size": 12,
|
169 |
+
"cite": false
|
170 |
+
}
|
171 |
+
},
|
172 |
+
"downstream": ["answer:1"],
|
173 |
+
"upstream": ["categorize:1"]
|
174 |
+
},
|
175 |
+
"message:reject": {
|
176 |
+
"obj":{
|
177 |
+
"component_name": "Message",
|
178 |
+
"params": {
|
179 |
+
"messages": [
|
180 |
+
"好的,祝您生活愉快,���作顺利。",
|
181 |
+
"哦,好的,感谢您宝贵的时间!"
|
182 |
+
]
|
183 |
+
}
|
184 |
+
},
|
185 |
+
"downstream": ["answer:0"],
|
186 |
+
"upstream": ["categorize:0"]
|
187 |
+
}
|
188 |
+
},
|
189 |
+
"history": [],
|
190 |
+
"messages": [],
|
191 |
+
"path": [],
|
192 |
+
"reference": {},
|
193 |
+
"answer": []
|
194 |
+
}
|
graph/test/dsl_examples/retrieval_and_generate.json
ADDED
@@ -0,0 +1,54 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["retrieval:0"],
|
19 |
+
"upstream": ["begin", "generate:0"]
|
20 |
+
},
|
21 |
+
"retrieval:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Retrieval",
|
24 |
+
"params": {
|
25 |
+
"similarity_threshold": 0.2,
|
26 |
+
"keywords_similarity_weight": 0.3,
|
27 |
+
"top_n": 6,
|
28 |
+
"top_k": 1024,
|
29 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
30 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"]
|
31 |
+
}
|
32 |
+
},
|
33 |
+
"downstream": ["generate:0"],
|
34 |
+
"upstream": ["answer:0"]
|
35 |
+
},
|
36 |
+
"generate:0": {
|
37 |
+
"obj": {
|
38 |
+
"component_name": "Generate",
|
39 |
+
"params": {
|
40 |
+
"llm_id": "deepseek-chat",
|
41 |
+
"prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
|
42 |
+
"temperature": 0.2
|
43 |
+
}
|
44 |
+
},
|
45 |
+
"downstream": ["answer:0"],
|
46 |
+
"upstream": ["retrieval:0"]
|
47 |
+
}
|
48 |
+
},
|
49 |
+
"history": [],
|
50 |
+
"messages": [],
|
51 |
+
"reference": {},
|
52 |
+
"path": [],
|
53 |
+
"answer": []
|
54 |
+
}
|
graph/test/dsl_examples/retrieval_categorize_and_generate.json
ADDED
@@ -0,0 +1,88 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["categorize:0"],
|
19 |
+
"upstream": ["begin", "generate:0", "switch:0"]
|
20 |
+
},
|
21 |
+
"categorize:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Categorize",
|
24 |
+
"params": {
|
25 |
+
"llm_id": "deepseek-chat",
|
26 |
+
"category_description": {
|
27 |
+
"product_related": {
|
28 |
+
"description": "The question is about the product usage, appearance and how it works.",
|
29 |
+
"examples": "Why it always beaming?\nHow to install it onto the wall?\nIt leaks, what to do?",
|
30 |
+
"to": "retrieval:0"
|
31 |
+
},
|
32 |
+
"others": {
|
33 |
+
"description": "The question is not about the product usage, appearance and how it works.",
|
34 |
+
"examples": "How are you doing?\nWhat is your name?\nAre you a robot?\nWhat's the weather?\nWill it rain?",
|
35 |
+
"to": "message:0"
|
36 |
+
}
|
37 |
+
}
|
38 |
+
}
|
39 |
+
},
|
40 |
+
"downstream": ["retrieval:0", "message:0"],
|
41 |
+
"upstream": ["answer:0"]
|
42 |
+
},
|
43 |
+
"message:0": {
|
44 |
+
"obj":{
|
45 |
+
"component_name": "Message",
|
46 |
+
"params": {
|
47 |
+
"messages": [
|
48 |
+
"Sorry, I don't know. I'm an AI bot."
|
49 |
+
]
|
50 |
+
}
|
51 |
+
},
|
52 |
+
"downstream": ["answer:0"],
|
53 |
+
"upstream": ["categorize:0"]
|
54 |
+
},
|
55 |
+
"retrieval:0": {
|
56 |
+
"obj": {
|
57 |
+
"component_name": "Retrieval",
|
58 |
+
"params": {
|
59 |
+
"similarity_threshold": 0.2,
|
60 |
+
"keywords_similarity_weight": 0.3,
|
61 |
+
"top_n": 6,
|
62 |
+
"top_k": 1024,
|
63 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
64 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"]
|
65 |
+
}
|
66 |
+
},
|
67 |
+
"downstream": ["generate:0"],
|
68 |
+
"upstream": ["switch:0"]
|
69 |
+
},
|
70 |
+
"generate:0": {
|
71 |
+
"obj": {
|
72 |
+
"component_name": "Generate",
|
73 |
+
"params": {
|
74 |
+
"llm_id": "deepseek-chat",
|
75 |
+
"prompt": "You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\" Answers need to consider chat history.\n Here is the knowledge base:\n {input}\n The above is the knowledge base.",
|
76 |
+
"temperature": 0.2
|
77 |
+
}
|
78 |
+
},
|
79 |
+
"downstream": ["answer:0"],
|
80 |
+
"upstream": ["retrieval:0"]
|
81 |
+
}
|
82 |
+
},
|
83 |
+
"history": [],
|
84 |
+
"messages": [],
|
85 |
+
"reference": {},
|
86 |
+
"path": [],
|
87 |
+
"answer": []
|
88 |
+
}
|
graph/test/dsl_examples/retrieval_relevant_and_generate.json
ADDED
@@ -0,0 +1,82 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["retrieval:0"],
|
19 |
+
"upstream": ["begin", "generate:0", "switch:0"]
|
20 |
+
},
|
21 |
+
"retrieval:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Retrieval",
|
24 |
+
"params": {
|
25 |
+
"similarity_threshold": 0.2,
|
26 |
+
"keywords_similarity_weight": 0.3,
|
27 |
+
"top_n": 6,
|
28 |
+
"top_k": 1024,
|
29 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
30 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"],
|
31 |
+
"empty_response": "Sorry, knowledge base has noting related information."
|
32 |
+
}
|
33 |
+
},
|
34 |
+
"downstream": ["relevant:0"],
|
35 |
+
"upstream": ["answer:0"]
|
36 |
+
},
|
37 |
+
"relevant:0": {
|
38 |
+
"obj": {
|
39 |
+
"component_name": "Relevant",
|
40 |
+
"params": {
|
41 |
+
"llm_id": "deepseek-chat",
|
42 |
+
"temperature": 0.02,
|
43 |
+
"yes": "generate:0",
|
44 |
+
"no": "message:0"
|
45 |
+
}
|
46 |
+
},
|
47 |
+
"downstream": ["message:0", "generate:0"],
|
48 |
+
"upstream": ["retrieval:0"]
|
49 |
+
},
|
50 |
+
"generate:0": {
|
51 |
+
"obj": {
|
52 |
+
"component_name": "Generate",
|
53 |
+
"params": {
|
54 |
+
"llm_id": "deepseek-chat",
|
55 |
+
"prompt": "You are an intelligent assistant. Please answer the question based on content of knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
|
56 |
+
"temperature": 0.2
|
57 |
+
}
|
58 |
+
},
|
59 |
+
"downstream": ["answer:0"],
|
60 |
+
"upstream": ["relevant:0"]
|
61 |
+
},
|
62 |
+
"message:0": {
|
63 |
+
"obj":{
|
64 |
+
"component_name": "Message",
|
65 |
+
"params": {
|
66 |
+
"messages": [
|
67 |
+
"Sorry, I don't know. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
|
68 |
+
"I'm an AI bot and not quite sure about this question. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?",
|
69 |
+
"Can't find answer in my knowledge base. Please leave your contact, our experts will contact you later. What's your e-mail/phone/wechat?"
|
70 |
+
]
|
71 |
+
}
|
72 |
+
},
|
73 |
+
"downstream": ["answer:0"],
|
74 |
+
"upstream": ["relevant:0"]
|
75 |
+
}
|
76 |
+
},
|
77 |
+
"history": [],
|
78 |
+
"path": [],
|
79 |
+
"messages": [],
|
80 |
+
"reference": {},
|
81 |
+
"answer": []
|
82 |
+
}
|
graph/test/dsl_examples/retrieval_relevant_rewrite_and_generate.json
ADDED
@@ -0,0 +1,79 @@
1 |
+
{
|
2 |
+
"components": {
|
3 |
+
"begin": {
|
4 |
+
"obj":{
|
5 |
+
"component_name": "Begin",
|
6 |
+
"params": {
|
7 |
+
"prologue": "Hi there!"
|
8 |
+
}
|
9 |
+
},
|
10 |
+
"downstream": ["answer:0"],
|
11 |
+
"upstream": []
|
12 |
+
},
|
13 |
+
"answer:0": {
|
14 |
+
"obj": {
|
15 |
+
"component_name": "Answer",
|
16 |
+
"params": {}
|
17 |
+
},
|
18 |
+
"downstream": ["retrieval:0"],
|
19 |
+
"upstream": ["begin", "generate:0", "switch:0"]
|
20 |
+
},
|
21 |
+
"retrieval:0": {
|
22 |
+
"obj": {
|
23 |
+
"component_name": "Retrieval",
|
24 |
+
"params": {
|
25 |
+
"similarity_threshold": 0.2,
|
26 |
+
"keywords_similarity_weight": 0.3,
|
27 |
+
"top_n": 6,
|
28 |
+
"top_k": 1024,
|
29 |
+
"rerank_id": "BAAI/bge-reranker-v2-m3",
|
30 |
+
"kb_ids": ["869a236818b811ef91dffa163e197198"],
|
31 |
+
"empty_response": "Sorry, knowledge base has noting related information."
|
32 |
+
}
|
33 |
+
},
|
34 |
+
"downstream": ["relevant:0"],
|
35 |
+
"upstream": ["answer:0"]
|
36 |
+
},
|
37 |
+
"relevant:0": {
|
38 |
+
"obj": {
|
39 |
+
"component_name": "Relevant",
|
40 |
+
"params": {
|
41 |
+
"llm_id": "deepseek-chat",
|
42 |
+
"temperature": 0.02,
|
43 |
+
"yes": "generate:0",
|
44 |
+
"no": "rewrite:0"
|
45 |
+
}
|
46 |
+
},
|
47 |
+
"downstream": ["generate:0", "rewrite:0"],
|
48 |
+
"upstream": ["retrieval:0"]
|
49 |
+
},
|
50 |
+
"generate:0": {
|
51 |
+
"obj": {
|
52 |
+
"component_name": "Generate",
|
53 |
+
"params": {
|
54 |
+
"llm_id": "deepseek-chat",
|
55 |
+
"prompt": "You are an intelligent assistant. Please answer the question based on content of knowledge base. When all knowledge base content is irrelevant to the question, your answer must include the sentence \"The answer you are looking for is not found in the knowledge base!\". Answers need to consider chat history.\n Knowledge base content is as following:\n {input}\n The above is the content of knowledge base.",
|
56 |
+
"temperature": 0.02
|
57 |
+
}
|
58 |
+
},
|
59 |
+
"downstream": ["answer:0"],
|
60 |
+
"upstream": ["relevant:0"]
|
61 |
+
},
|
62 |
+
"rewrite:0": {
|
63 |
+
"obj":{
|
64 |
+
"component_name": "RewriteQuestion",
|
65 |
+
"params": {
|
66 |
+
"llm_id": "deepseek-chat",
|
67 |
+
"temperature": 0.8
|
68 |
+
}
|
69 |
+
},
|
70 |
+
"downstream": ["retrieval:0"],
|
71 |
+
"upstream": ["relevant:0"]
|
72 |
+
}
|
73 |
+
},
|
74 |
+
"history": [],
|
75 |
+
"messages": [],
|
76 |
+
"path": [],
|
77 |
+
"reference": {},
|
78 |
+
"answer": []
|
79 |
+
}
|