KevinHuSh committed
Commit 7f98e24 · 1 Parent(s): 69fc4f3

refine page ranges (#147)

Files changed:
- api/db/db_models.py +2 -2
- deepdoc/parser/pdf_parser.py +3 -3
- deepdoc/vision/layout_recognizer.py +3 -1
- rag/app/manual.py +5 -50
- rag/app/one.py +2 -3
- rag/app/paper.py +1 -1
- rag/app/presentation.py +2 -2
- rag/nlp/__init__.py +11 -4
- rag/svr/task_broker.py +5 -4
- rag/svr/task_executor.py +1 -1
- requirements.txt +133 -0
- web/src/pages/add-knowledge/components/knowledge-file/chunk-method-modal.tsx +1 -1
- web/src/pages/add-knowledge/components/knowledge-setting/utils.ts +1 -1
api/db/db_models.py CHANGED

@@ -477,7 +477,7 @@ class Knowledgebase(DataBaseModel):
     vector_similarity_weight = FloatField(default=0.3)
 
     parser_id = CharField(max_length=32, null=False, help_text="default parser ID", default=ParserType.NAIVE.value)
-    parser_config = JSONField(null=False, default={"pages":[[
+    parser_config = JSONField(null=False, default={"pages":[[1,1000000]]})
     status = CharField(max_length=1, null=True, help_text="is it validate(0: wasted,1: validate)", default="1")
 
     def __str__(self):

@@ -492,7 +492,7 @@ class Document(DataBaseModel):
     thumbnail = TextField(null=True, help_text="thumbnail base64 string")
     kb_id = CharField(max_length=256, null=False, index=True)
     parser_id = CharField(max_length=32, null=False, help_text="default parser ID")
-    parser_config = JSONField(null=False, default={"pages":[[
+    parser_config = JSONField(null=False, default={"pages":[[1,1000000]]})
     source_type = CharField(max_length=128, null=False, default="local", help_text="where dose this document from")
     type = CharField(max_length=32, null=False, help_text="file extension")
     created_by = CharField(max_length=32, null=False, help_text="who created it")
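Both defaults now describe page ranges as a list of 1-based, inclusive [start, end] pairs, with [[1,1000000]] effectively meaning "the whole document". A minimal sketch of how such a pair maps onto the 0-based indices the broker works with (helper name hypothetical; the clamping mirrors rag/svr/task_broker.py below):

def normalize_range(s, e, total_pages):
    s = max(0, s - 1)             # 1-based start -> 0-based index
    e = min(e - 1, total_pages)   # cap the end at the real page count
    return s, e                   # feeds range(s, e, page_size)

print(normalize_range(1, 1000000, 30))  # (0, 30)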
deepdoc/parser/pdf_parser.py CHANGED

@@ -1074,15 +1074,15 @@ class HuParser:
 
 
 class PlainParser(object):
-    def __call__(self, filename, **kwargs):
+    def __call__(self, filename, from_page=0, to_page=100000, **kwargs):
         self.outlines = []
         lines = []
         try:
            self.pdf = pdf2_read(filename if isinstance(filename, str) else BytesIO(filename))
-
-            for page in self.pdf.pages:
+            for page in self.pdf.pages[from_page:to_page]:
                lines.extend([t for t in page.extract_text().split("\n")])
 
+            outlines = self.pdf.outline
             def dfs(arr, depth):
                 for a in arr:
                     if isinstance(a, dict):
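PlainParser now honors the task's page window by slicing PdfReader.pages, and it reads self.pdf.outline so the plain path can still recover heading levels. A minimal standalone sketch of the slicing, assuming a local sample.pdf (pdf2_read is PyPDF2's PdfReader in this codebase; its page list accepts slices and clamps out-of-range bounds):

from PyPDF2 import PdfReader as pdf2_read

reader = pdf2_read("sample.pdf")
lines = []
for page in reader.pages[0:3]:               # only the first three pages
    lines.extend(page.extract_text().split("\n"))
print(len(lines))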
deepdoc/vision/layout_recognizer.py CHANGED

@@ -15,6 +15,7 @@ import re
 from collections import Counter
 from copy import deepcopy
 import numpy as np
+from huggingface_hub import snapshot_download
 
 from api.db import ParserType
 from api.utils.file_utils import get_project_base_directory

@@ -36,7 +37,8 @@ class LayoutRecognizer(Recognizer):
         "Equation",
     ]
     def __init__(self, domain):
-
+        model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc")
+        super().__init__(self.labels, domain, model_dir)#os.path.join(get_project_base_directory(), "rag/res/deepdoc/"))
         self.garbage_layouts = ["footer", "header", "reference"]
 
     def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16, drop=True):
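The layout model weights are now fetched from the Hugging Face Hub instead of a directory bundled under rag/res/deepdoc/ (the commented-out path). snapshot_download pulls the repo once and returns the local cache path on every later call, so the constructor stays cheap after the first run. In isolation:

from huggingface_hub import snapshot_download

model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc")
print(model_dir)  # local cache path, e.g. under ~/.cache/huggingface/hub/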
rag/app/manual.py CHANGED

@@ -30,8 +30,6 @@ class Pdf(PdfParser):
         # print(b)
         print("OCR:", timer()-start)
 
-
-
         self._layouts_rec(zoomin)
         callback(0.65, "Layout analysis finished.")
         print("paddle layouts:", timer() - start)

@@ -47,53 +45,8 @@ class Pdf(PdfParser):
         for b in self.boxes:
             b["text"] = re.sub(r"([\t ]|\u3000){2,}", " ", b["text"].strip())
 
-        return [(b["text"], b.get("layout_no", ""), self.get_position(b, zoomin)) for i, b in enumerate(self.boxes)]
-
-        # set pivot using the most frequent type of title,
-        # then merge between 2 pivot
-        if len(self.boxes)>0 and len(self.outlines)/len(self.boxes) > 0.1:
-            max_lvl = max([lvl for _, lvl in self.outlines])
-            most_level = max(0, max_lvl-1)
-            levels = []
-            for b in self.boxes:
-                for t,lvl in self.outlines:
-                    tks = set([t[i]+t[i+1] for i in range(len(t)-1)])
-                    tks_ = set([b["text"][i]+b["text"][i+1] for i in range(min(len(t), len(b["text"])-1))])
-                    if len(set(tks & tks_))/max([len(tks), len(tks_), 1]) > 0.8:
-                        levels.append(lvl)
-                        break
-                else:
-                    levels.append(max_lvl + 1)
-        else:
-            bull = bullets_category([b["text"] for b in self.boxes])
-            most_level, levels = title_frequency(bull, [(b["text"], b.get("layout_no","")) for b in self.boxes])
-
-        assert len(self.boxes) == len(levels)
-        sec_ids = []
-        sid = 0
-        for i, lvl in enumerate(levels):
-            if lvl <= most_level and i > 0 and lvl != levels[i-1]: sid += 1
-            sec_ids.append(sid)
-            #print(lvl, self.boxes[i]["text"], most_level, sid)
-
-        sections = [(b["text"], sec_ids[i], self.get_position(b, zoomin)) for i, b in enumerate(self.boxes)]
-        for (img, rows), poss in tbls:
-            sections.append((rows if isinstance(rows, str) else rows[0], -1, [(p[0]+1-from_page, p[1], p[2], p[3], p[4]) for p in poss]))
-
-        chunks = []
-        last_sid = -2
-        tk_cnt = 0
-        for txt, sec_id, poss in sorted(sections, key=lambda x: (x[-1][0][0], x[-1][0][3], x[-1][0][1])):
-            poss = "\t".join([tag(*pos) for pos in poss])
-            if tk_cnt < 2048 and (sec_id == last_sid or sec_id == -1):
-                if chunks:
-                    chunks[-1] += "\n" + txt + poss
-                    tk_cnt += num_tokens_from_string(txt)
-                    continue
-            chunks.append(txt + poss)
-            tk_cnt = num_tokens_from_string(txt)
-            if sec_id >-1: last_sid = sec_id
-        return chunks, tbls
+        return [(b["text"], b.get("layout_no", ""), self.get_position(b, zoomin)) for i, b in enumerate(self.boxes)], tbls
+
 
 
 def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):

@@ -106,7 +59,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         pdf_parser = Pdf() if kwargs.get("parser_config",{}).get("layout_recognize", True) else PlainParser()
         sections, tbls = pdf_parser(filename if not binary else binary,
                                     from_page=from_page, to_page=to_page, callback=callback)
-        if sections and len(sections[0])<3:
+        if sections and len(sections[0])<3: sections = [(t, l, [[0]*5]) for t, l in sections]
+
     else: raise NotImplementedError("file type not supported yet(pdf supported)")
     doc = {
         "docnm_kwd": filename

@@ -131,6 +85,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
                     break
             else:
                 levels.append(max_lvl + 1)
+
     else:
         bull = bullets_category([txt for txt,_,_ in sections])
         most_level, levels = title_frequency(bull, [(txt, l) for txt, l, poss in sections])
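The title-pivot merging removed from Pdf.__call__ is not lost: chunk() already runs the same outline/bullet leveling (the hunks at new lines 85-91), so OCR output and PlainParser output now share one merging path over uniform (text, layout_no, positions) triples. The [(t, l, [[0]*5]) ...] padding exists because the plain path yields only two-field tuples while downstream sorting expects a 5-field position; a minimal illustration:

sections = [("1 Overview", 0), ("Some body text", 1)]    # plain two-field tuples
sections = [(t, l, [[0] * 5]) for t, l in sections]      # add a dummy position
print(sections[0])  # ('1 Overview', 0, [[0, 0, 0, 0, 0]])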
rag/app/one.py CHANGED

@@ -45,7 +45,7 @@ class Pdf(PdfParser):
         for (img, rows), poss in tbls:
             sections.append((rows if isinstance(rows, str) else rows[0],
                              [(p[0] + 1 - from_page, p[1], p[2], p[3], p[4]) for p in poss]))
-        return [(txt, "") for txt, _ in sorted(sections, key=lambda x: (x[-1][0][0], x[-1][0][3], x[-1][0][1]))]
+        return [(txt, "") for txt, _ in sorted(sections, key=lambda x: (x[-1][0][0], x[-1][0][3], x[-1][0][1]))], None
 
 
 def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", callback=None, **kwargs):

@@ -56,7 +56,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
 
     eng = lang.lower() == "english"#is_english(cks)
 
-    sections = []
     if re.search(r"\.docx?$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         sections = [txt for txt in laws.Docx()(filename, binary) if txt]

@@ -64,7 +63,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
 
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf() if kwargs.get("parser_config",{}).get("layout_recognize", True) else PlainParser()
-        sections = pdf_parser(filename if not binary else binary, to_page=to_page, callback=callback)
+        sections, _ = pdf_parser(filename if not binary else binary, to_page=to_page, callback=callback)
         sections = [s for s, _ in sections if s]
 
     elif re.search(r"\.xlsx?$", filename, re.IGNORECASE):
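The parsers now return a uniform (sections, tables) pair, with None standing in when there are no tables, so every call site can unpack two values the same way. A toy sketch of the convention (parser name hypothetical):

def plain_parse(path):
    return [("page text", "")], None    # (sections, tables)

sections, _ = plain_parse("doc.pdf")    # tables ignored, same shape everywhere
print(sections)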
rag/app/paper.py CHANGED

@@ -136,7 +136,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
             "title": filename,
             "authors": " ",
             "abstract": "",
-            "sections": pdf_parser(filename if not binary else binary),
+            "sections": pdf_parser(filename if not binary else binary, from_page=from_page, to_page=to_page),
             "tables": []
         }
     else:
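Forwarding from_page/to_page matters here because the broker splits a paper into roughly 22-page tasks; without the window, each task would re-parse the entire PDF. A toy illustration (names hypothetical):

def parse(pages, from_page=0, to_page=100000):
    return pages[from_page:to_page]

pages = list(range(100))         # stand-in for a 100-page document
print(len(parse(pages)))         # old behavior: all 100 pages, in every task
print(parse(pages, 22, 44))      # new behavior: only this task's window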
rag/app/presentation.py CHANGED

@@ -65,10 +65,10 @@ class Pdf(PdfParser):
 
 
 class PlainPdf(PlainParser):
-    def __call__(self, filename, binary=None, callback=None, **kwargs):
+    def __call__(self, filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
         self.pdf = pdf2_read(filename if not binary else BytesIO(filename))
         page_txt = []
-        for page in self.pdf.pages:
+        for page in self.pdf.pages[from_page: to_page]:
             page_txt.append(page.extract_text())
         callback(0.9, "Parsing finished")
         return [(txt, None) for txt in page_txt]
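Same page-window treatment as PlainParser above. The large to_page default is safe because Python slices clamp out-of-range bounds rather than raising:

pages = ["p1", "p2", "p3"]
print(pages[0:100000])   # ['p1', 'p2', 'p3'] -- no IndexError
print(pages[2:100000])   # ['p3']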
rag/nlp/__init__.py CHANGED

@@ -16,8 +16,8 @@ BULLET_PATTERN = [[
 ], [
     r"第[0-9]+章",
     r"第[0-9]+节",
-    r"[0-9]{,
-    r"[0-9]{,2}\.[0-9]{,2}",
+    r"[0-9]{,2}[\. 、]",
+    r"[0-9]{,2}\.[0-9]{,2}[^a-zA-Z/%~-]",
     r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
     r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
 ], [

@@ -40,13 +40,20 @@ def random_choices(arr, k):
     return random.choices(arr, k=k)
 
 
+def not_bullet(line):
+    patt = [
+        r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"
+    ]
+    return any([re.match(r, line) for r in patt])
+
+
 def bullets_category(sections):
     global BULLET_PATTERN
     hits = [0] * len(BULLET_PATTERN)
     for i, pro in enumerate(BULLET_PATTERN):
         for sec in sections:
             for p in pro:
-                if re.match(p, sec):
+                if re.match(p, sec) and not not_bullet(sec):
                     hits[i] += 1
                     break
     maxium = 0

@@ -194,7 +201,7 @@ def title_frequency(bull, sections):
 
     for i, (txt, layout) in enumerate(sections):
         for j, p in enumerate(BULLET_PATTERN[bull]):
-            if re.match(p, txt.strip()):
+            if re.match(p, txt.strip()) and not not_bullet(txt):
                 levels[i] = j
                 break
         else:
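not_bullet() rejects lines that start with a digit but are clearly not numbered headings (leading zeros like "0.5mm", number pairs such as measurements or table rows, dotted table-of-contents leaders), while the tightened second-level pattern [^a-zA-Z/%~-] keeps decimals like "3.5%" from matching. The helper in isolation:

import re

def not_bullet(line):
    patt = [r"0", r"[0-9]+ +[0-9~个只-]", r"[0-9]+\.{2,}"]
    return any([re.match(r, line) for r in patt])

print(not_bullet("0.5mm"))        # True  -- leading zero, a measurement
print(not_bullet("12 34"))        # True  -- a pair of numbers, not a heading
print(not_bullet("3...... 12"))   # True  -- dotted table-of-contents leader
print(not_bullet("2.3 Results"))  # False -- a genuine numbered heading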
rag/svr/task_broker.py CHANGED

@@ -81,21 +81,22 @@ def dispatch():
 
         tsks = []
         if r["type"] == FileType.PDF.value:
-
-            tsks.append(new_task())
-            continue
+            do_layout = r["parser_config"].get("layout_recognize", True)
             pages = PdfParser.total_page_number(r["name"], MINIO.get(r["kb_id"], r["location"]))
             page_size = r["parser_config"].get("task_page_size", 12)
             if r["parser_id"] == "paper": page_size = r["parser_config"].get("task_page_size", 22)
             if r["parser_id"] == "one": page_size = 1000000000
+            if not do_layout: page_size = 1000000000
             for s,e in r["parser_config"].get("pages", [(1, 100000)]):
                 s -= 1
-
+                s = max(0, s)
+                e = min(e-1, pages)
                 for p in range(s, e, page_size):
                     task = new_task()
                     task["from_page"] = p
                     task["to_page"] = min(p + page_size, e)
                     tsks.append(task)
+
         elif r["parser_id"] == "table":
             rn = HuExcelParser.row_number(r["name"], MINIO.get(r["kb_id"], r["location"]))
             for i in range(0, rn, 3000):
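The clamping turns the stored 1-based, inclusive ranges into the 0-based half-open windows the tasks carry, and caps the end at the document's real page count; skipping layout analysis also collapses the whole range into one task, since plain text extraction needs no paging. The splitting logic as a standalone sketch (function name hypothetical):

def split_tasks(page_ranges, total_pages, page_size):
    tasks = []
    for s, e in page_ranges:
        s = max(0, s - 1)             # 1-based inclusive start -> 0-based
        e = min(e - 1, total_pages)   # never schedule past the last page
        for p in range(s, e, page_size):
            tasks.append((p, min(p + page_size, e)))
    return tasks

print(split_tasks([(1, 100000)], total_pages=30, page_size=12))
# [(0, 12), (12, 24), (24, 30)]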
rag/svr/task_executor.py CHANGED

@@ -75,7 +75,7 @@ def set_progress(task_id, from_page=0, to_page=-1,
 
     if to_page > 0:
         if msg:
-            msg = f"Page({from_page}~{to_page}): " + msg
+            msg = f"Page({from_page+1}~{to_page+1}): " + msg
     d = {"progress_msg": msg}
     if prog is not None:
         d["progress"] = prog
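from_page/to_page are 0-based inside the executor; the +1 shifts the progress message to the 1-based numbering users type into the page-range settings:

from_page, to_page = 0, 12
print(f"Page({from_page+1}~{to_page+1}): OCR started")  # Page(1~13): OCR started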
requirements.txt ADDED

@@ -0,0 +1,133 @@
+accelerate==0.27.2
+aiohttp==3.9.3
+aiosignal==1.3.1
+annotated-types==0.6.0
+anyio==4.3.0
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+Aspose.Slides==24.2.0
+attrs==23.2.0
+blinker==1.7.0
+cachelib==0.12.0
+cachetools==5.3.3
+certifi==2024.2.2
+cffi==1.16.0
+charset-normalizer==3.3.2
+click==8.1.7
+coloredlogs==15.0.1
+cryptography==42.0.5
+dashscope==1.14.1
+datasets==2.17.1
+datrie==0.8.2
+demjson==2.2.4
+dill==0.3.8
+distro==1.9.0
+elastic-transport==8.12.0
+elasticsearch==8.12.1
+elasticsearch-dsl==8.12.0
+et-xmlfile==1.1.0
+filelock==3.13.1
+FlagEmbedding==1.2.5
+Flask==3.0.2
+Flask-Cors==4.0.0
+Flask-Login==0.6.3
+Flask-Session==0.6.0
+flatbuffers==23.5.26
+frozenlist==1.4.1
+fsspec==2023.10.0
+h11==0.14.0
+hanziconv==0.3.2
+httpcore==1.0.4
+httpx==0.27.0
+huggingface-hub==0.20.3
+humanfriendly==10.0
+idna==3.6
+install==1.3.5
+itsdangerous==2.1.2
+Jinja2==3.1.3
+joblib==1.3.2
+lxml==5.1.0
+MarkupSafe==2.1.5
+minio==7.2.4
+mpi4py==3.1.5
+mpmath==1.3.0
+multidict==6.0.5
+multiprocess==0.70.16
+networkx==3.2.1
+nltk==3.8.1
+numpy==1.26.4
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.19.3
+nvidia-nvjitlink-cu12==12.3.101
+nvidia-nvtx-cu12==12.1.105
+onnxruntime-gpu==1.17.1
+openai==1.12.0
+opencv-python==4.9.0.80
+openpyxl==3.1.2
+packaging==23.2
+pandas==2.2.1
+pdfminer.six==20221105
+pdfplumber==0.10.4
+peewee==3.17.1
+pillow==10.2.0
+protobuf==4.25.3
+psutil==5.9.8
+pyarrow==15.0.0
+pyarrow-hotfix==0.6
+pyclipper==1.3.0.post5
+pycparser==2.21
+pycryptodome==3.20.0
+pycryptodome-test-vectors==1.0.14
+pycryptodomex==3.20.0
+pydantic==2.6.2
+pydantic_core==2.16.3
+PyJWT==2.8.0
+PyMuPDF==1.23.25
+PyMuPDFb==1.23.22
+PyMySQL==1.1.0
+PyPDF2==3.0.1
+pypdfium2==4.27.0
+python-dateutil==2.8.2
+python-docx==1.1.0
+python-dotenv==1.0.1
+python-pptx==0.6.23
+pytz==2024.1
+PyYAML==6.0.1
+regex==2023.12.25
+requests==2.31.0
+ruamel.yaml==0.18.6
+ruamel.yaml.clib==0.2.8
+safetensors==0.4.2
+scikit-learn==1.4.1.post1
+scipy==1.12.0
+sentence-transformers==2.4.0
+shapely==2.0.3
+six==1.16.0
+sniffio==1.3.1
+StrEnum==0.4.15
+sympy==1.12
+threadpoolctl==3.3.0
+tiktoken==0.6.0
+tokenizers==0.15.2
+torch==2.2.1
+tqdm==4.66.2
+transformers==4.38.1
+triton==2.2.0
+typing_extensions==4.10.0
+tzdata==2024.1
+urllib3==2.2.1
+Werkzeug==3.0.1
+xgboost==2.0.3
+XlsxWriter==3.2.0
+xpinyin==0.7.6
+xxhash==3.4.1
+yarl==1.9.4
+zhipuai==2.0.1
web/src/pages/add-knowledge/components/knowledge-file/chunk-method-modal.tsx CHANGED

@@ -193,7 +193,7 @@ const ChunkMethodModal: React.FC<IProps> = ({
       rules={[
         {
           required: true,
-          message: 'Missing end page number(
+          message: 'Missing end page number(excluded)',
         },
         ({ getFieldValue }) => ({
           validator(_, value) {
web/src/pages/add-knowledge/components/knowledge-setting/utils.ts CHANGED

@@ -120,7 +120,7 @@ export const TextMap = {
       </p><p>
       For a document, it will be treated as an entire chunk, no split at all.
       </p><p>
-      If you
+      If you want to summarize something that needs all the context of an article and the selected LLM's context length covers the document length, you can try this method.
       </p>`,
   },
 };