KevinHuSh
committed on
Commit · 5e0a689
1 Parent(s): 0a903c7
refactor retrieval_test, add SQL retrieval methods (#61)
- api/apps/chunk_app.py +4 -1
- api/apps/conversation_app.py +2 -0
- api/apps/document_app.py +13 -4
- api/db/__init__.py +2 -0
- api/db/db_models.py +1 -1
- api/db/init_data.py +36 -3
- api/settings.py +1 -1
- rag/app/naive.py +2 -1
- rag/app/qa.py +32 -16
- rag/app/resume.py +37 -18
- rag/app/table.py +40 -18
- rag/llm/chat_model.py +18 -0
- rag/llm/cv_model.py +20 -1
- rag/llm/embedding_model.py +18 -2
- rag/nlp/search.py +5 -3
- rag/svr/task_executor.py +7 -5
api/apps/chunk_app.py
CHANGED
@@ -227,7 +227,7 @@ def retrieval_test():
     doc_ids = req.get("doc_ids", [])
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
-    top = int(req.get("
+    top = int(req.get("top_k", 1024))
     try:
         e, kb = KnowledgebaseService.get_by_id(kb_id)
         if not e:
@@ -237,6 +237,9 @@ def retrieval_test():
                                           kb.tenant_id, LLMType.EMBEDDING.value)
         ranks = retrievaler.retrieval(question, embd_mdl, kb.tenant_id, [kb_id], page, size, similarity_threshold,
                                       vector_similarity_weight, top, doc_ids)
+        for c in ranks["chunks"]:
+            if "vector" in c:
+                del c["vector"]

         return get_json_result(data=ranks)
     except Exception as e:
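A minimal sketch of how a client could exercise the updated retrieval_test endpoint after this change. The host, route, and kb_id below are assumptions for illustration; only the top_k field and the vector-free chunks in the response come from this diff.

import requests

payload = {
    "kb_id": "kb-0001",                      # hypothetical knowledge base id
    "question": "What is the refund policy?",
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,                           # new: read server-side and passed to retrievaler.retrieval() as `top`
}
resp = requests.post("http://localhost:9380/v1/chunk/retrieval_test", json=payload)  # assumed host/route
ranks = resp.json()["data"]
assert all("vector" not in c for c in ranks["chunks"])  # embeddings are now stripped before returning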
api/apps/conversation_app.py
CHANGED
@@ -229,6 +229,7 @@ def use_sql(question,field_map, tenant_id, chat_mdl):
     sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_promt}], {"temperature": 0.1})
     sql = re.sub(r".*?select ", "select ", sql, flags=re.IGNORECASE)
     sql = re.sub(r" +", " ", sql)
+    sql = re.sub(r"[;;].*", "", sql)
     if sql[:len("select ")].lower() != "select ":
         return None, None
     if sql[:len("select *")].lower() != "select *":
@@ -241,6 +242,7 @@ def use_sql(question,field_map, tenant_id, chat_mdl):
     docnm_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
     clmn_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx|docnm_idx)]

+    # compose markdown table
     clmns = "|".join([re.sub(r"/.*", "", field_map.get(tbl["columns"][i]["name"], f"C{i}")) for i in clmn_idx]) + "|原文"
     line = "|".join(["------" for _ in range(len(clmn_idx))]) + "|------"
     rows = ["|".join([str(r[i]) for i in clmn_idx])+"|" for r in tbl["rows"]]
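A small, self-contained sketch of the SQL sanitation chain used by use_sql(), including the new step that truncates everything after the first semicolon; the sample model reply is invented.

import re

sql = "Sure! SELECT doc_id, docnm_kwd FROM resumes; here is why ..."  # hypothetical LLM output
sql = re.sub(r".*?select ", "select ", sql, flags=re.IGNORECASE)      # keep from the first SELECT on
sql = re.sub(r" +", " ", sql)                                         # collapse runs of spaces
sql = re.sub(r"[;;].*", "", sql)                                      # new: drop trailing chatter after ';'
print(sql)  # -> select doc_id, docnm_kwd FROM resumes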
api/apps/document_app.py
CHANGED
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 #

 import base64
 import pathlib
+import re

 import flask
 from elasticsearch_dsl import Q
@@ -27,7 +28,7 @@ from api.db.services import duplicate_name
 from api.db.services.knowledgebase_service import KnowledgebaseService
 from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
 from api.utils import get_uuid
-from api.db import FileType, TaskStatus
+from api.db import FileType, TaskStatus, ParserType
 from api.db.services.document_service import DocumentService
 from api.settings import RetCode
 from api.utils.api_utils import get_json_result
@@ -66,7 +67,7 @@ def upload():
             location += "_"
         blob = request.files['file'].read()
         MINIO.put(kb_id, location, blob)
-        doc =
+        doc = {
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
@@ -77,7 +78,12 @@ def upload():
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob)
-        }
+        }
+        if doc["type"] == FileType.VISUAL:
+            doc["parser_id"] = ParserType.PICTURE.value
+        if re.search(r"\.(ppt|pptx|pages)$", filename):
+            doc["parser_id"] = ParserType.PRESENTATION.value
+        doc = DocumentService.insert(doc)
         return get_json_result(data=doc.to_json())
     except Exception as e:
         return server_error_response(e)
@@ -283,6 +289,9 @@ def change_parser():
     if doc.parser_id.lower() == req["parser_id"].lower():
         return get_json_result(data=True)

+    if doc.type == FileType.VISUAL or re.search(r"\.(ppt|pptx|pages)$", doc.name):
+        return get_data_error_result(retmsg="Not supported yet!")
+
     e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress":0, "progress_msg": ""})
     if not e:
         return get_data_error_result(retmsg="Document not found!")
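A stand-alone sketch of the new parser-selection rule applied at upload time. The string values for FileType/ParserType are hard-coded assumptions; the regex and the precedence (knowledge base default, then picture for visual files, then presentation for slide decks) come from the diff.

import re

def pick_parser(kb_default, file_type, filename):
    parser_id = kb_default                           # inherit the knowledge base default
    if file_type == "visual":                        # assumed FileType.VISUAL value
        parser_id = "picture"                        # assumed ParserType.PICTURE.value
    if re.search(r"\.(ppt|pptx|pages)$", filename):  # slide decks override the visual rule
        parser_id = "presentation"                   # assumed ParserType.PRESENTATION.value
    return parser_id

print(pick_parser("naive", "visual", "scan.png"))  # -> picture
print(pick_parser("naive", "pdf", "deck.pptx"))    # -> presentation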
api/db/__init__.py
CHANGED
@@ -78,3 +78,5 @@ class ParserType(StrEnum):
     BOOK = "book"
     QA = "qa"
     TABLE = "table"
+    NAIVE = "naive"
+    PICTURE = "picture"
api/db/db_models.py
CHANGED
@@ -381,7 +381,7 @@ class Tenant(DataBaseModel):
     embd_id = CharField(max_length=128, null=False, help_text="default embedding model ID")
     asr_id = CharField(max_length=128, null=False, help_text="default ASR model ID")
     img2txt_id = CharField(max_length=128, null=False, help_text="default image to text model ID")
-    parser_ids = CharField(max_length=
+    parser_ids = CharField(max_length=256, null=False, help_text="document processors")
     credit = IntegerField(default=512)
     status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted,1: validate)", default="1")
api/db/init_data.py
CHANGED
@@ -63,7 +63,9 @@ def init_llm_factory():
             "status": "1",
         },
     ]
-    llm_infos = [
+    llm_infos = [
+        # ---------------------- OpenAI ------------------------
+        {
             "fid": factory_infos[0]["name"],
             "llm_name": "gpt-3.5-turbo",
             "tags": "LLM,CHAT,4K",
@@ -105,7 +107,9 @@ def init_llm_factory():
             "tags": "LLM,CHAT,IMAGE2TEXT",
             "max_tokens": 765,
             "model_type": LLMType.IMAGE2TEXT.value
-        },
+        },
+        # ----------------------- Qwen -----------------------
+        {
             "fid": factory_infos[1]["name"],
             "llm_name": "qwen-turbo",
             "tags": "LLM,CHAT,8K",
@@ -135,7 +139,9 @@ def init_llm_factory():
             "tags": "LLM,CHAT,IMAGE2TEXT",
             "max_tokens": 765,
             "model_type": LLMType.IMAGE2TEXT.value
-        },
+        },
+        # ----------------------- Infiniflow -----------------------
+        {
             "fid": factory_infos[2]["name"],
             "llm_name": "gpt-3.5-turbo",
             "tags": "LLM,CHAT,4K",
@@ -160,6 +166,33 @@ def init_llm_factory():
             "max_tokens": 765,
             "model_type": LLMType.IMAGE2TEXT.value
         },
+        # ---------------------- ZhipuAI ----------------------
+        {
+            "fid": factory_infos[3]["name"],
+            "llm_name": "glm-3-turbo",
+            "tags": "LLM,CHAT,",
+            "max_tokens": 128 * 1000,
+            "model_type": LLMType.CHAT.value
+        }, {
+            "fid": factory_infos[3]["name"],
+            "llm_name": "glm-4",
+            "tags": "LLM,CHAT,",
+            "max_tokens": 128 * 1000,
+            "model_type": LLMType.CHAT.value
+        }, {
+            "fid": factory_infos[3]["name"],
+            "llm_name": "glm-4v",
+            "tags": "LLM,CHAT,IMAGE2TEXT",
+            "max_tokens": 2000,
+            "model_type": LLMType.IMAGE2TEXT.value
+        },
+        {
+            "fid": factory_infos[3]["name"],
+            "llm_name": "embedding-2",
+            "tags": "TEXT EMBEDDING",
+            "max_tokens": 512,
+            "model_type": LLMType.SPEECH2TEXT.value
+        },
     ]
     for info in factory_infos:
         LLMFactoriesService.save(**info)
api/settings.py
CHANGED
@@ -47,7 +47,7 @@ LLM = get_base_config("llm", {})
 CHAT_MDL = LLM.get("chat_model", "gpt-3.5-turbo")
 EMBEDDING_MDL = LLM.get("embedding_model", "text-embedding-ada-002")
 ASR_MDL = LLM.get("asr_model", "whisper-1")
-PARSERS = LLM.get("parsers", "general:General,resume:
+PARSERS = LLM.get("parsers", "general:General,qa:Q&A,resume:Resume,naive:Naive,table:Table,laws:Laws,manual:Manual,book:Book,paper:Paper,presentation:Presentation,picture:Picture")
 IMAGE2TEXT_MDL = LLM.get("image2text_model", "gpt-4-vision-preview")

 # distribution
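The PARSERS default is a comma-separated list of parser_id:DisplayName pairs; a quick sketch of splitting it into a lookup dict (the string is copied from the new default above).

PARSERS = "general:General,qa:Q&A,resume:Resume,naive:Naive,table:Table,laws:Laws,manual:Manual,book:Book,paper:Paper,presentation:Presentation,picture:Picture"
parser_names = dict(p.split(":", 1) for p in PARSERS.split(","))
print(parser_names["qa"])   # -> Q&A
print(len(parser_names))    # -> 11 registered parsers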
rag/app/naive.py
CHANGED
@@ -57,7 +57,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **k
         callback(0.8, "Finish parsing.")
     else: raise NotImplementedError("file type not supported yet(docx, pdf, txt supported)")

-
+    parser_config = kwargs.get("parser_config", {"chunk_token_num": 128, "delimer": "\n。;!?"})
+    cks = naive_merge(sections, parser_config["chunk_token_num"], parser_config["delimer"])
     eng = is_english(cks)
     res = []
     # wrap up to es documents
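A hypothetical call showing how the new parser_config is consumed by rag.app.naive.chunk; only the chunk_token_num and delimer keys (and their defaults) are taken from the diff, the filename and callback are made up.

from rag.app import naive

def progress(prog, msg=""):
    print(prog, msg)

# chunk size and sentence delimiters can now be tuned per knowledge base
cks = naive.chunk("handbook.txt",
                  callback=progress,
                  parser_config={"chunk_token_num": 128, "delimer": "\n。;!?"})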
rag/app/qa.py
CHANGED
@@ -24,31 +24,45 @@ class Excel(object):
         for i, r in enumerate(rows):
             q, a = "", ""
             for cell in r:
-                if not cell.value:
+                if not cell.value:
+                    continue
+                if not q:
+                    q = str(cell.value)
+                elif not a:
+                    a = str(cell.value)
+                else:
+                    break
+            if q and a:
+                res.append((q, a))
+            else:
+                fails.append(str(i + 1))
             if len(res) % 999 == 0:
-                callback(len(res)*
+                callback(len(res) *
+                         0.6 /
+                         total, ("Extract Q&A: {}".format(len(res)) +
+                                 (f"{len(fails)} failure, line: %s..." %
+                                  (",".join(fails[:3])) if fails else "")))

         callback(0.6, ("Extract Q&A: {}. ".format(len(res)) + (
             f"{len(fails)} failure, line: %s..." % (",".join(fails[:3])) if fails else "")))
-        self.is_english = is_english(
+        self.is_english = is_english(
+            [rmPrefix(q) for q, _ in random_choices(res, k=30) if len(q) > 1])
         return res


 def rmPrefix(txt):
-    return re.sub(
+    return re.sub(
+        r"^(问题|答案|回答|user|assistant|Q|A|Question|Answer|问|答)[\t:: ]+", "", txt.strip(), flags=re.IGNORECASE)


 def beAdoc(d, q, a, eng):
     qprefix = "Question: " if eng else "问题:"
     aprefix = "Answer: " if eng else "回答:"
-    d["content_with_weight"] = "\t".join(
+    d["content_with_weight"] = "\t".join(
+        [qprefix + rmPrefix(q), aprefix + rmPrefix(a)])
     if eng:
-        d["content_ltks"] = " ".join([stemmer.stem(w)
+        d["content_ltks"] = " ".join([stemmer.stem(w)
+                                      for w in word_tokenize(q)])
     else:
         d["content_ltks"] = huqie.qie(q)
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
@@ -61,7 +75,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
     if re.search(r"\.xlsx?$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         excel_parser = Excel()
-        for q,a in excel_parser(filename, binary, callback):
+        for q, a in excel_parser(filename, binary, callback):
             res.append(beAdoc({}, q, a, excel_parser.is_english))
         return res
     elif re.search(r"\.(txt|csv)$", filename, re.IGNORECASE):
@@ -73,7 +87,8 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         with open(filename, "r") as f:
             while True:
                 l = f.readline()
-                if not l:
+                if not l:
+                    break
                 txt += l
         lines = txt.split("\n")
         eng = is_english([rmPrefix(l) for l in lines[:100]])
@@ -93,12 +108,13 @@ def chunk(filename, binary=None, callback=None, **kwargs):

         return res

-    raise NotImplementedError(
+    raise NotImplementedError(
+        "file type not supported yet(pptx, pdf supported)")


-if __name__== "__main__":
+if __name__ == "__main__":
     import sys
+
     def dummy(a, b):
         pass
     chunk(sys.argv[1], callback=dummy)
-
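rmPrefix strips the leading question/answer markers (Chinese or English) before a Q&A pair is stored; a quick, self-contained check of the regex as it appears in this diff, with sample strings invented for illustration.

import re

def rmPrefix(txt):
    return re.sub(
        r"^(问题|答案|回答|user|assistant|Q|A|Question|Answer|问|答)[\t:: ]+", "", txt.strip(), flags=re.IGNORECASE)

print(rmPrefix("Question: What does the QA parser do?"))  # -> What does the QA parser do?
print(rmPrefix("答案: 把每行拆成问答对"))                    # -> 把每行拆成问答对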
rag/app/resume.py
CHANGED
@@ -11,15 +11,22 @@ from rag.utils import rmSpace


 def chunk(filename, binary=None, callback=None, **kwargs):
-    if not re.search(r"\.(pdf|doc|docx|txt)$", filename, flags=re.IGNORECASE):
+    if not re.search(r"\.(pdf|doc|docx|txt)$", filename, flags=re.IGNORECASE):
+        raise NotImplementedError("file type not supported yet(pdf supported)")

     url = os.environ.get("INFINIFLOW_SERVER")
-    if not url:
+    if not url:
+        raise EnvironmentError(
+            "Please set environment variable: 'INFINIFLOW_SERVER'")
     token = os.environ.get("INFINIFLOW_TOKEN")
-    if not token:
+    if not token:
+        raise EnvironmentError(
+            "Please set environment variable: 'INFINIFLOW_TOKEN'")

     if not binary:
-        with open(filename, "rb") as f:
+        with open(filename, "rb") as f:
+            binary = f.read()
+
     def remote_call():
         nonlocal filename, binary
         for _ in range(3):
@@ -27,14 +34,17 @@ def chunk(filename, binary=None, callback=None, **kwargs):
             res = requests.post(url + "/v1/layout/resume/", files=[(filename, binary)],
                                 headers={"Authorization": token}, timeout=180)
             res = res.json()
-            if res["retcode"] != 0:
+            if res["retcode"] != 0:
+                raise RuntimeError(res["retmsg"])
             return res["data"]
         except RuntimeError as e:
             raise e
         except Exception as e:
             cron_logger.error("resume parsing:" + str(e))

+    callback(0.2, "Resume parsing is going on...")
     resume = remote_call()
+    callback(0.6, "Done parsing. Chunking...")
     print(json.dumps(resume, ensure_ascii=False, indent=2))

     field_map = {
@@ -45,19 +55,19 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         "email_tks": "email/e-mail/邮箱",
         "position_name_tks": "职位/职能/岗位/职责",
         "expect_position_name_tks": "期望职位/期望职能/期望岗位",

         "hightest_degree_kwd": "最高学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
         "first_degree_kwd": "第一学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
         "first_major_tks": "第一学历专业",
         "first_school_name_tks": "第一学历毕业学校",
         "edu_first_fea_kwd": "第一学历标签(211,留学,双一流,985,海外知名,重点大学,中专,专升本,专科,本科,大专)",

         "degree_kwd": "过往学历(高中,职高,硕士,本科,博士,初中,中技,中专,专科,专升本,MPA,MBA,EMBA)",
         "major_tks": "学过的专业/过往专业",
         "school_name_tks": "学校/毕业院校",
         "sch_rank_kwd": "学校标签(顶尖学校,精英学校,优质学校,一般学校)",
         "edu_fea_kwd": "教育标签(211,留学,双一流,985,海外知名,重点大学,中专,专升本,专科,本科,大专)",

         "work_exp_flt": "工作年限/工作年份/N年经验/毕业了多少年",
         "birth_dt": "生日/出生年份",
         "corp_nm_tks": "就职过的公司/之前的公司/上过班的公司",
@@ -69,34 +79,43 @@ def chunk(filename, binary=None, callback=None, **kwargs):
     titles = []
     for n in ["name_kwd", "gender_kwd", "position_name_tks", "age_int"]:
         v = resume.get(n, "")
-        if isinstance(v, list):
+        if isinstance(v, list):
+            v = v[0]
+        if n.find("tks") > 0:
+            v = rmSpace(v)
         titles.append(str(v))
     doc = {
         "docnm_kwd": filename,
-        "title_tks": huqie.qie("-".join(titles)+"-简历")
+        "title_tks": huqie.qie("-".join(titles) + "-简历")
     }
     doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
     pairs = []
-    for n,m in field_map.items():
-        if not resume.get(n):
+    for n, m in field_map.items():
+        if not resume.get(n):
+            continue
         v = resume[n]
-        if isinstance(v, list):
+        if isinstance(v, list):
+            v = " ".join(v)
+        if n.find("tks") > 0:
+            v = rmSpace(v)
         pairs.append((m, str(v)))

-    doc["content_with_weight"] = "\n".join(
+    doc["content_with_weight"] = "\n".join(
+        ["{}: {}".format(re.sub(r"([^()]+)", "", k), v) for k, v in pairs])
     doc["content_ltks"] = huqie.qie(doc["content_with_weight"])
     doc["content_sm_ltks"] = huqie.qieqie(doc["content_ltks"])
-    for n, _ in field_map.items():
+    for n, _ in field_map.items():
+        doc[n] = resume[n]

     print(doc)
-    KnowledgebaseService.update_parser_config(
+    KnowledgebaseService.update_parser_config(
+        kwargs["kb_id"], {"field_map": field_map})
     return [doc]


 if __name__ == "__main__":
     import sys
+
     def dummy(a, b):
         pass
     chunk(sys.argv[1], callback=dummy)
rag/app/table.py
CHANGED
@@ -28,10 +28,15 @@ class Excel(object):
             rows = list(ws.rows)
             headers = [cell.value for cell in rows[0]]
             missed = set([i for i, h in enumerate(headers) if h is None])
-            headers = [
+            headers = [
+                cell.value for i,
+                cell in enumerate(
+                    rows[0]) if i not in missed]
             data = []
             for i, r in enumerate(rows[1:]):
-                row = [
+                row = [
+                    cell.value for ii,
+                    cell in enumerate(r) if ii not in missed]
                 if len(row) != len(headers):
                     fails.append(str(i))
                     continue
@@ -55,8 +60,10 @@ def trans_datatime(s):


 def trans_bool(s):
-    if re.match(r"(true|yes|是)$", str(s).strip(), flags=re.IGNORECASE):
+    if re.match(r"(true|yes|是)$", str(s).strip(), flags=re.IGNORECASE):
+        return ["yes", "是"]
+    if re.match(r"(false|no|否)$", str(s).strip(), flags=re.IGNORECASE):
+        return ["no", "否"]


 def column_data_type(arr):
@@ -65,7 +72,8 @@ def column_data_type(arr):
     trans = {t: f for f, t in
              [(int, "int"), (float, "float"), (trans_datatime, "datetime"), (trans_bool, "bool"), (str, "text")]}
     for a in arr:
-        if a is None:
+        if a is None:
+            continue
         if re.match(r"[+-]?[0-9]+(\.0+)?$", str(a).replace("%%", "")):
             counts["int"] += 1
         elif re.match(r"[+-]?[0-9.]+$", str(a).replace("%%", "")):
@@ -79,7 +87,8 @@ def column_data_type(arr):
     counts = sorted(counts.items(), key=lambda x: x[1] * -1)
     ty = counts[0][0]
     for i in range(len(arr)):
-        if arr[i] is None:
+        if arr[i] is None:
+            continue
         try:
             arr[i] = trans[ty](str(arr[i]))
         except Exception as e:
@@ -105,7 +114,8 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         with open(filename, "r") as f:
             while True:
                 l = f.readline()
-                if not l:
+                if not l:
+                    break
                 txt += l
         lines = txt.split("\n")
         fails = []
@@ -127,14 +137,22 @@ def chunk(filename, binary=None, callback=None, **kwargs):
         dfs = [pd.DataFrame(np.array(rows), columns=headers)]

     else:
-        raise NotImplementedError(
+        raise NotImplementedError(
+            "file type not supported yet(excel, text, csv supported)")

     res = []
     PY = Pinyin()
-    fieds_map = {
+    fieds_map = {
+        "text": "_tks",
+        "int": "_int",
+        "keyword": "_kwd",
+        "float": "_flt",
+        "datetime": "_dt",
+        "bool": "_kwd"}
     for df in dfs:
         for n in ["id", "_id", "index", "idx"]:
-            if n in df.columns:
+            if n in df.columns:
+                del df[n]
         clmns = df.columns.values
         txts = list(copy.deepcopy(clmns))
         py_clmns = [PY.get_pinyins(n)[0].replace("-", "_") for n in clmns]
@@ -143,23 +161,29 @@ def chunk(filename, binary=None, callback=None, **kwargs):
             cln, ty = column_data_type(df[clmns[j]])
             clmn_tys.append(ty)
             df[clmns[j]] = cln
-            if ty == "text":
+            if ty == "text":
+                txts.extend([str(c) for c in cln if c])
+        clmns_map = [(py_clmns[j] + fieds_map[clmn_tys[j]], clmns[j])
+                     for i in range(len(clmns))]

         eng = is_english(txts)
         for ii, row in df.iterrows():
             d = {}
             row_txt = []
             for j in range(len(clmns)):
-                if row[clmns[j]] is None:
+                if row[clmns[j]] is None:
+                    continue
                 fld = clmns_map[j][0]
-                d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else huqie.qie(
+                d[fld] = row[clmns[j]] if clmn_tys[j] != "text" else huqie.qie(
+                    row[clmns[j]])
                 row_txt.append("{}:{}".format(clmns[j], row[clmns[j]]))
-            if not row_txt:
+            if not row_txt:
+                continue
             tokenize(d, "; ".join(row_txt), eng)
             res.append(d)

-        KnowledgebaseService.update_parser_config(
+        KnowledgebaseService.update_parser_config(
+            kwargs["kb_id"], {"field_map": {k: v for k, v in clmns_map}})
         callback(0.6, "")

     return res
@@ -168,9 +192,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
 if __name__ == "__main__":
     import sys

-
     def dummy(a, b):
         pass
-
     chunk(sys.argv[1], callback=dummy)
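A stand-alone sketch of the type-voting idea behind column_data_type(), including the new rule that empty cells no longer vote or get coerced; datetime and bool handling are left out to keep it short, and the sample data is invented.

import re

def guess_type(values):
    counts = {"int": 0, "float": 0, "text": 0}
    for v in values:
        if v is None:
            continue                      # new in this commit: empty cells are skipped
        s = str(v)
        if re.match(r"[+-]?[0-9]+(\.0+)?$", s):
            counts["int"] += 1
        elif re.match(r"[+-]?[0-9.]+$", s):
            counts["float"] += 1
        else:
            counts["text"] += 1
    return max(counts, key=counts.get)    # majority type wins

print(guess_type([1, 2, None, "3"]))       # -> int
print(guess_type(["1.5", "2.25", "n/a"]))  # -> float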
rag/llm/chat_model.py
CHANGED
@@ -58,3 +58,21 @@ class QWenChat(Base):
         if response.status_code == HTTPStatus.OK:
             return response.output.choices[0]['message']['content'], response.usage.output_tokens
         return response.message, 0
+
+
+from zhipuai import ZhipuAI
+class ZhipuChat(Base):
+    def __init__(self, key, model_name="glm-3-turbo"):
+        self.client = ZhipuAI(api_key=key)
+        self.model_name = model_name
+
+    def chat(self, system, history, gen_conf):
+        from http import HTTPStatus
+        history.insert(0, {"role": "system", "content": system})
+        response = self.client.chat.completions.create(
+            self.model_name,
+            messages=history
+        )
+        if response.status_code == HTTPStatus.OK:
+            return response.output.choices[0]['message']['content'], response.usage.completion_tokens
+        return response.message, 0
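A hedged usage sketch for the new ZhipuChat wrapper; the API key and messages are placeholders, and note that gen_conf is accepted but not yet forwarded to the ZhipuAI client in this version of the code.

from rag.llm.chat_model import ZhipuChat

mdl = ZhipuChat("YOUR_ZHIPU_API_KEY", model_name="glm-3-turbo")
answer, out_tokens = mdl.chat(
    "You are a helpful assistant.",                       # system prompt, prepended to history
    [{"role": "user", "content": "用一句话介绍检索增强生成"}],
    {"temperature": 0.1},                                 # gen_conf, currently unused by the wrapper
)
print(answer, out_tokens)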
rag/llm/cv_model.py
CHANGED
@@ -61,7 +61,7 @@ class Base(ABC):

 class GptV4(Base):
     def __init__(self, key, model_name="gpt-4-vision-preview"):
-        self.client = OpenAI(api_key
+        self.client = OpenAI(api_key=key)
         self.model_name = model_name

     def describe(self, image, max_tokens=300):
@@ -89,3 +89,22 @@ class QWenCV(Base):
         if response.status_code == HTTPStatus.OK:
             return response.output.choices[0]['message']['content'], response.usage.output_tokens
         return response.message, 0
+
+
+from zhipuai import ZhipuAI
+
+
+class Zhipu4V(Base):
+    def __init__(self, key, model_name="glm-4v"):
+        self.client = ZhipuAI(api_key=key)
+        self.model_name = model_name
+
+    def describe(self, image, max_tokens=1024):
+        b64 = self.image2base64(image)
+
+        res = self.client.chat.completions.create(
+            model=self.model_name,
+            messages=self.prompt(b64),
+            max_tokens=max_tokens,
+        )
+        return res.choices[0].message.content.strip(), res.usage.total_tokens
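A hedged usage sketch for the new Zhipu4V wrapper; the key and image file are placeholders, and whether raw bytes are accepted depends on Base.image2base64, which is not shown in this diff.

from rag.llm.cv_model import Zhipu4V

mdl = Zhipu4V("YOUR_ZHIPU_API_KEY", model_name="glm-4v")
with open("figure.png", "rb") as f:          # hypothetical image file
    caption, used_tokens = mdl.describe(f.read(), max_tokens=1024)
print(caption)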
rag/llm/embedding_model.py
CHANGED
@@ -19,7 +19,6 @@ import dashscope
 from openai import OpenAI
 from FlagEmbedding import FlagModel
 import torch
-import os
 import numpy as np

 from rag.utils import num_tokens_from_string
@@ -114,4 +113,21 @@ class QWenEmbed(Base):
             input=text[:2048],
             text_type="query"
         )
-        return np.array(resp["output"]["embeddings"][0]["embedding"]), resp["usage"]["input_tokens"]
+        return np.array(resp["output"]["embeddings"][0]["embedding"]), resp["usage"]["input_tokens"]
+
+
+from zhipuai import ZhipuAI
+class ZhipuEmbed(Base):
+    def __init__(self, key, model_name="embedding-2"):
+        self.client = ZhipuAI(api_key=key)
+        self.model_name = model_name
+
+    def encode(self, texts: list, batch_size=32):
+        res = self.client.embeddings.create(input=texts,
+                                            model=self.model_name)
+        return np.array([d.embedding for d in res.data]), res.usage.total_tokens
+
+    def encode_queries(self, text):
+        res = self.client.embeddings.create(input=text,
+                                            model=self.model_name)
+        return np.array(res["data"][0]["embedding"]), res.usage.total_tokens
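A hedged usage sketch for the new ZhipuEmbed wrapper; the key and input texts are placeholders.

from rag.llm.embedding_model import ZhipuEmbed

embd = ZhipuEmbed("YOUR_ZHIPU_API_KEY", model_name="embedding-2")
vects, used_tokens = embd.encode(["第一段文本", "a second passage"])
print(vects.shape, used_tokens)   # one embedding row per input text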
rag/nlp/search.py
CHANGED
@@ -268,9 +268,9 @@ class Dealer:
         dim = len(sres.query_vector)
         start_idx = (page - 1) * page_size
         for i in idx:
-            ranks["total"] += 1
             if sim[i] < similarity_threshold:
                 break
+            ranks["total"] += 1
             start_idx -= 1
             if start_idx >= 0:
                 continue
@@ -280,6 +280,7 @@ class Dealer:
                 break
             id = sres.ids[i]
             dnm = sres.field[id]["docnm_kwd"]
+            did = sres.field[id]["doc_id"]
             d = {
                 "chunk_id": id,
                 "content_ltks": sres.field[id]["content_ltks"],
@@ -296,8 +297,9 @@ class Dealer:
             }
             ranks["chunks"].append(d)
             if dnm not in ranks["doc_aggs"]:
-                ranks["doc_aggs"][dnm] = 0
-            ranks["doc_aggs"][dnm] += 1
+                ranks["doc_aggs"][dnm] = {"doc_id": did, "count": 0}
+            ranks["doc_aggs"][dnm]["count"] += 1
+        ranks["doc_aggs"] = [{"doc_name": k, "doc_id": v["doc_id"], "count": v["count"]} for k,v in sorted(ranks["doc_aggs"].items(), key=lambda x:x[1]["count"]*-1)]

         return ranks
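A small illustration of the doc_aggs change: the per-document counters now carry doc_id and the final value is a list sorted by count (descending) instead of a {doc_name: count} dict. The hit data below is invented.

doc_aggs = {}
hits = [("handbook.pdf", "d1"), ("faq.docx", "d2"), ("handbook.pdf", "d1")]  # (docnm_kwd, doc_id)
for dnm, did in hits:
    if dnm not in doc_aggs:
        doc_aggs[dnm] = {"doc_id": did, "count": 0}
    doc_aggs[dnm]["count"] += 1
doc_aggs = [{"doc_name": k, "doc_id": v["doc_id"], "count": v["count"]}
            for k, v in sorted(doc_aggs.items(), key=lambda x: x[1]["count"] * -1)]
print(doc_aggs)  # [{'doc_name': 'handbook.pdf', 'doc_id': 'd1', 'count': 2}, {'doc_name': 'faq.docx', 'doc_id': 'd2', 'count': 1}]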
rag/svr/task_executor.py
CHANGED
@@ -36,7 +36,7 @@ from rag.nlp import search
 from io import BytesIO
 import pandas as pd

-from rag.app import laws, paper, presentation, manual, qa, table,book
+from rag.app import laws, paper, presentation, manual, qa, table, book, resume

 from api.db import LLMType, ParserType
 from api.db.services.document_service import DocumentService
@@ -55,6 +55,7 @@ FACTORY = {
     ParserType.LAWS.value: laws,
     ParserType.QA.value: qa,
     ParserType.TABLE.value: table,
+    ParserType.RESUME.value: resume,
 }
@@ -119,7 +120,7 @@ def build(row, cvmdl):
     try:
         cron_logger.info("Chunkking {}/{}".format(row["location"], row["name"]))
         cks = chunker.chunk(row["name"], MINIO.get(row["kb_id"], row["location"]), row["from_page"], row["to_page"],
-                            callback, kb_id=row["kb_id"])
+                            callback, kb_id=row["kb_id"], parser_config=row["parser_config"])
     except Exception as e:
         if re.search("(No such file|not found)", str(e)):
             callback(-1, "Can not find file <%s>" % row["doc_name"])
@@ -171,7 +172,7 @@ def init_kb(row):
         open(os.path.join(get_project_base_directory(), "conf", "mapping.json"), "r")))


-def embedding(docs, mdl):
+def embedding(docs, mdl, parser_config={}):
     tts, cnts = [rmSpace(d["title_tks"]) for d in docs if d.get("title_tks")], [d["content_with_weight"] for d in docs]
     tk_count = 0
     if len(tts) == len(cnts):
@@ -180,7 +181,8 @@ def embedding(docs, mdl):

     cnts, c = mdl.encode(cnts)
     tk_count += c
-
+    title_w = float(parser_config.get("filename_embd_weight", 0.1))
+    vects = (title_w * tts + (1-title_w) * cnts) if len(tts) == len(cnts) else cnts

     assert len(vects) == len(docs)
     for i, d in enumerate(docs):
@@ -216,7 +218,7 @@ def main(comm, mod):
         # TODO: exception handler
         ## set_progress(r["did"], -1, "ERROR: ")
         try:
-            tk_count = embedding(cks, embd_mdl)
+            tk_count = embedding(cks, embd_mdl, r["parser_config"])
         except Exception as e:
             callback(-1, "Embedding error:{}".format(str(e)))
             cron_logger.error(str(e))
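A numeric sketch of the new filename/title weighting in embedding(): the final chunk vector is a convex combination of the title embedding and the content embedding, with the weight read from parser_config. The toy vectors below are invented.

import numpy as np

title_w = 0.1                                 # parser_config.get("filename_embd_weight", 0.1)
tts = np.array([[1.0, 0.0], [0.0, 1.0]])      # title embeddings, one row per chunk
cnts = np.array([[0.0, 2.0], [2.0, 0.0]])     # content embeddings
vects = title_w * tts + (1 - title_w) * cnts  # same formula as in the diff
print(vects)                                  # [[0.1 1.8] [1.8 0.1]]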