KevinHuSh committed
Commit 51482f3
1 Parent(s): 7b71fb2
Some document API refined. (#53)
Files changed:
- api/apps/document_app.py +8 -10
- api/db/services/document_service.py +2 -1
- rag/app/__init__.py +0 -91
- rag/app/book.py +16 -58
- rag/app/laws.py +26 -84
- rag/app/manual.py +5 -4
- rag/app/naive.py +79 -0
- rag/app/paper.py +5 -4
- rag/app/presentation.py +5 -3
- rag/app/qa.py +4 -4
- rag/parser/__init__.py +217 -0
- rag/parser/docx_parser.py +13 -2
- rag/parser/pdf_parser.py +67 -7
api/apps/document_app.py
CHANGED
@@ -133,9 +133,9 @@ def list():
     orderby = request.args.get("orderby", "create_time")
     desc = request.args.get("desc", True)
     try:
-        docs = DocumentService.get_by_kb_id(
+        docs, tol = DocumentService.get_by_kb_id(
             kb_id, page_number, items_per_page, orderby, desc, keywords)
-        return get_json_result(data=docs)
+        return get_json_result(data={"total":tol, "docs": docs})
     except Exception as e:
         return server_error_response(e)

@@ -228,20 +228,18 @@ def run():

 @manager.route('/rename', methods=['POST'])
 @login_required
-@validate_request("doc_id", "name"
+@validate_request("doc_id", "name")
 def rename():
     req = request.json
-    if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(
-            req["old_name"].lower()).suffix:
-        get_json_result(
-            data=False,
-            retmsg="The extension of file can't be changed",
-            retcode=RetCode.ARGUMENT_ERROR)
-
     try:
         e, doc = DocumentService.get_by_id(req["doc_id"])
         if not e:
             return get_data_error_result(retmsg="Document not found!")
+        if pathlib.Path(req["name"].lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
+            return get_json_result(
+                data=False,
+                retmsg="The extension of file can't be changed",
+                retcode=RetCode.ARGUMENT_ERROR)
         if DocumentService.query(name=req["name"], kb_id=doc.kb_id):
             return get_data_error_result(
                 retmsg="Duplicated document name in the same knowledgebase.")
api/db/services/document_service.py
CHANGED
@@ -36,6 +36,7 @@ class DocumentService(CommonService):
                 cls.model.name.like(f"%%{keywords}%%"))
         else:
             docs = cls.model.select().where(cls.model.kb_id == kb_id)
+        count = docs.count()
         if desc:
             docs = docs.order_by(cls.model.getter_by(orderby).desc())
         else:

@@ -43,7 +44,7 @@ class DocumentService(CommonService):

         docs = docs.paginate(page_number, items_per_page)

-        return list(docs.dicts())
+        return list(docs.dicts()), count

     @classmethod
     @DB.connection_context()
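The refined service call now reports the total match count alongside the requested page. A minimal sketch of a caller, with hypothetical argument values and an already-initialized DB connection assumed:

    # Sketch (assumed setup): get_by_kb_id returns (docs, total); the /list
    # endpoint forwards it as {"total": total, "docs": docs} so clients can
    # render pagination controls while only fetching the current page.
    docs, total = DocumentService.get_by_kb_id(
        kb_id="kb_0",              # hypothetical knowledgebase id
        page_number=1,
        items_per_page=15,
        orderby="create_time",
        desc=True,
        keywords="")
    print(f"{total} matches, {len(docs)} on this page")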
rag/app/__init__.py
CHANGED
@@ -1,91 +0,0 @@
-import re
-
-from nltk import word_tokenize
-
-from rag.nlp import stemmer, huqie
-
-BULLET_PATTERN = [[
-    r"第[零一二三四五六七八九十百]+(编|部分)",
-    r"第[零一二三四五六七八九十百]+章",
-    r"第[零一二三四五六七八九十百]+节",
-    r"第[零一二三四五六七八九十百]+条",
-    r"[\((][零一二三四五六七八九十百]+[\))]",
-], [
-    r"[0-9]{,3}[\. 、]",
-    r"[0-9]{,2}\.[0-9]{,2}",
-    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
-    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
-], [
-    r"第[零一二三四五六七八九十百]+章",
-    r"第[零一二三四五六七八九十百]+节",
-    r"[零一二三四五六七八九十百]+[ 、]",
-    r"[\((][零一二三四五六七八九十百]+[\))]",
-    r"[\((][0-9]{,2}[\))]",
-] ,[
-    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
-    r"Chapter (I+V?|VI*|XI|IX|X)",
-    r"Section [0-9]+",
-    r"Article [0-9]+"
-]
-]
-
-
-def bullets_category(sections):
-    global BULLET_PATTERN
-    hits = [0] * len(BULLET_PATTERN)
-    for i, pro in enumerate(BULLET_PATTERN):
-        for sec in sections:
-            for p in pro:
-                if re.match(p, sec):
-                    hits[i] += 1
-                    break
-    maxium = 0
-    res = -1
-    for i,h in enumerate(hits):
-        if h <= maxium:continue
-        res = i
-        maxium = h
-    return res
-
-def is_english(texts):
-    eng = 0
-    for t in texts:
-        if re.match(r"[a-zA-Z]{2,}", t.strip()):
-            eng += 1
-    if eng / len(texts) > 0.8:
-        return True
-    return False
-
-def tokenize(d, t, eng):
-    d["content_with_weight"] = t
-    if eng:
-        t = re.sub(r"([a-z])-([a-z])", r"\1\2", t)
-        d["content_ltks"] = " ".join([stemmer.stem(w) for w in word_tokenize(t)])
-    else:
-        d["content_ltks"] = huqie.qie(t)
-    d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
-
-
-def remove_contents_table(sections, eng=False):
-    i = 0
-    while i < len(sections):
-        def get(i):
-            nonlocal sections
-            return (sections[i] if type(sections[i]) == type("") else sections[i][0]).strip()
-        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$", re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
-            i += 1
-            continue
-        sections.pop(i)
-        if i >= len(sections): break
-        prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
-        while not prefix:
-            sections.pop(i)
-            if i >= len(sections): break
-            prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
-        sections.pop(i)
-        if i >= len(sections) or not prefix: break
-        for j in range(i, min(i+128, len(sections))):
-            if not re.match(prefix, get(j)):
-                continue
-            for _ in range(i, j):sections.pop(i)
-            break
rag/app/book.py
CHANGED
@@ -1,10 +1,9 @@
 import copy
 import random
 import re
-from io import BytesIO
-from docx import Document
 import numpy as np
-from rag.
+from rag.parser import bullets_category, BULLET_PATTERN, is_english, tokenize, remove_contents_table, \
+    hierarchical_merge, make_colon_as_title, naive_merge
 from rag.nlp import huqie
 from rag.parser.docx_parser import HuDocxParser
 from rag.parser.pdf_parser import HuParser

@@ -28,7 +27,6 @@ class Pdf(HuParser):
         self._table_transformer_job(zoomin)
         callback(0.68, "Table analysis finished")
         self._text_merge()
-        column_width = np.median([b["x1"] - b["x0"] for b in self.boxes])
         self._concat_downward(concat_between_pages=False)
         self._filter_forpages()
         self._merge_with_same_bullet()

@@ -37,10 +35,10 @@ class Pdf(HuParser):

         callback(0.8, "Text extraction finished")

-        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes]
+        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno","")) for b in self.boxes], tbls


-def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
     doc = {
         "docnm_kwd": filename,
         "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))

@@ -52,8 +50,8 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
         callback(0.1, "Start to parse.")
         doc_parser = HuDocxParser()
         # TODO: table of contents need to be removed
-        sections, tbls = doc_parser(binary if binary else filename)
-        remove_contents_table(sections, eng
+        sections, tbls = doc_parser(binary if binary else filename, from_page=from_page, to_page=to_page)
+        remove_contents_table(sections, eng=is_english(random.choices([t for t,_ in sections], k=200)))
         callback(0.8, "Finish parsing.")
     elif re.search(r"\.pdf$", filename, re.IGNORECASE):
         pdf_parser = Pdf()

@@ -75,54 +73,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
         callback(0.8, "Finish parsing.")
     else: raise NotImplementedError("file type not supported yet(docx, pdf, txt supported)")

-
-
-
-
-        for j, p in enumerate(BULLET_PATTERN[bull]):
-            if re.match(p, txt.strip()):
-                projs[i] = j
-                levels[j].append(i)
-                break
-        else:
-            if re.search(r"(title|head)", layout):
-                projs[i] = BULLET_PATTERN[bull]
-                levels[BULLET_PATTERN[bull]].append(i)
-            else:
-                levels[BULLET_PATTERN[bull] + 1].append(i)
-    sections = [t for t,_ in sections]
-
-    def binary_search(arr, target):
-        if target > arr[-1]: return len(arr) - 1
-        if target > arr[0]: return -1
-        s, e = 0, len(arr)
-        while e - s > 1:
-            i = (e + s) // 2
-            if target > arr[i]:
-                s = i
-                continue
-            elif target < arr[i]:
-                e = i
-                continue
-            else:
-                assert False
-        return s
-
-    cks = []
-    readed = [False] * len(sections)
-    levels = levels[::-1]
-    for i, arr in enumerate(levels):
-        for j in arr:
-            if readed[j]: continue
-            readed[j] = True
-            cks.append([j])
-            if i + 1 == len(levels) - 1: continue
-            for ii in range(i + 1, len(levels)):
-                jj = binary_search(levels[ii], j)
-                if jj < 0: break
-                if jj > cks[-1][-1]: cks[-1].pop(-1)
-                cks[-1].append(levels[ii][jj])
+    make_colon_as_title(sections)
+    bull = bullets_category([t for t in random.choices([t for t,_ in sections], k=100)])
+    if bull >= 0: cks = hierarchical_merge(bull, sections, 3)
+    else: cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))

+    sections = [t for t, _ in sections]
     # is it English
     eng = is_english(random.choices(sections, k=218))

@@ -138,11 +94,11 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
             tokenize(d, r, eng)
             d["image"] = img
             res.append(d)
+            print("TABLE", d["content_with_weight"])
     # wrap up to es documents
     for ck in cks:
-        print("\n-".join(ck[::-1]))
-        ck = "\n".join(ck[::-1])
         d = copy.deepcopy(doc)
+        ck = "\n".join(ck)
         if pdf_parser:
             d["image"] = pdf_parser.crop(ck)
             ck = pdf_parser.remove_tag(ck)

@@ -153,4 +109,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__ == "__main__":
     import sys
-
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], from_page=1, to_page=10, callback=dummy)
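For orientation, a small sketch of the chunking decision that book.chunk now delegates to rag.parser; the sample sections are toy data, and the 256-token / "\n。;!?" defaults mirror the kwargs.get calls above:

    from rag.parser import bullets_category, hierarchical_merge, naive_merge

    sections = [("第一章 总则", ""), ("第一条 文档解析应当保证准确。", ""), ("第二条 切分策略由配置决定。", "")]
    bull = bullets_category([t for t, _ in sections])   # index into BULLET_PATTERN, -1 if nothing matches
    if bull >= 0:
        cks = hierarchical_merge(bull, sections, 3)      # groups of related section texts
    else:
        cks = naive_merge(sections, chunk_token_num=256, delimiter="\n。;!?")
    # book.chunk then joins each group into one chunk string before indexing.
    chunks = ["\n".join(ck) if isinstance(ck, list) else ck for ck in cks]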
rag/app/laws.py
CHANGED
@@ -3,10 +3,12 @@ import re
 from io import BytesIO
 from docx import Document
 import numpy as np
-from rag.
+from rag.parser import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
+    make_colon_as_title
 from rag.nlp import huqie
 from rag.parser.docx_parser import HuDocxParser
 from rag.parser.pdf_parser import HuParser
+from rag.settings import cron_logger


 class Docx(HuDocxParser):

@@ -17,10 +19,20 @@ class Docx(HuDocxParser):
         line = re.sub(r"\u3000", " ", line).strip()
         return line

-    def __call__(self, filename, binary=None):
+    def __call__(self, filename, binary=None, from_page=0, to_page=100000):
         self.doc = Document(
             filename) if not binary else Document(BytesIO(binary))
-
+        pn = 0
+        lines = []
+        for p in self.doc.paragraphs:
+            if pn > to_page:break
+            if from_page <= pn < to_page and p.text.strip(): lines.append(self.__clean(p.text))
+            for run in p.runs:
+                if 'lastRenderedPageBreak' in run._element.xml:
+                    pn += 1
+                    continue
+                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
+                    pn += 1
         return [l for l in lines if l]


@@ -38,49 +50,15 @@ class Pdf(HuParser):
         start = timer()
         self._layouts_paddle(zoomin)
         callback(0.77, "Layout analysis finished")
-
-
-        # is it English
-        eng = is_english([b["text"] for b in bxs])
-        # Merge vertically
-        i = 0
-        while i + 1 < len(bxs):
-            b = bxs[i]
-            b_ = bxs[i + 1]
-            if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]):
-                bxs.pop(i)
-                continue
-            concatting_feats = [
-                b["text"].strip()[-1] in ",;:'\",、‘“;:-",
-                len(b["text"].strip())>1 and b["text"].strip()[-2] in ",;:'\",‘“、;:",
-                b["text"].strip()[0] in "。;?!?”)),,、:",
-            ]
-            # features for not concating
-            feats = [
-                b.get("layoutno",0) != b.get("layoutno",0),
-                b["text"].strip()[-1] in "。?!?",
-                eng and b["text"].strip()[-1] in ".!?",
-                b["page_number"] == b_["page_number"] and b_["top"] - \
-                    b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
-                b["page_number"] < b_["page_number"] and abs(
-                    b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4
-            ]
-            if any(feats) and not any(concatting_feats):
-                i += 1
-                continue
-            # merge up and down
-            b["bottom"] = b_["bottom"]
-            b["text"] += b_["text"]
-            b["x0"] = min(b["x0"], b_["x0"])
-            b["x1"] = max(b["x1"], b_["x1"])
-            bxs.pop(i + 1)
+        cron_logger.info("paddle layouts:".format((timer()-start)/(self.total_page+0.1)))
+        self._naive_vertical_merge()

         callback(0.8, "Text extraction finished")

-        return [b["text"] + self._line_tag(b, zoomin) for b in
+        return [b["text"] + self._line_tag(b, zoomin) for b in self.boxes]


-def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
     doc = {
         "docnm_kwd": filename,
         "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))

@@ -116,50 +94,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
     # is it English
     eng = is_english(sections)
     # Remove 'Contents' part
-
-    while i < len(sections):
-        if not re.match(r"(contents|目录|目次|table of contents)$", re.sub(r"( | |\u3000)+", "", sections[i].split("@@")[0], re.IGNORECASE)):
-            i += 1
-            continue
-        sections.pop(i)
-        if i >= len(sections): break
-        prefix = sections[i].strip()[:3] if not eng else " ".join(sections[i].strip().split(" ")[:2])
-        while not prefix:
-            sections.pop(i)
-            if i >= len(sections): break
-            prefix = sections[i].strip()[:3] if not eng else " ".join(sections[i].strip().split(" ")[:2])
-        sections.pop(i)
-        if i >= len(sections) or not prefix: break
-        for j in range(i, min(i+128, len(sections))):
-            if not re.match(prefix, sections[j]):
-                continue
-            for _ in range(i, j):sections.pop(i)
-            break
+    remove_contents_table(sections, eng)

+    make_colon_as_title(sections)
     bull = bullets_category(sections)
-
-
-        for j,p in enumerate(BULLET_PATTERN[bull]):
-            if re.match(p, sec.strip()):
-                projs[i] = j
-                break
-    readed = [0] * len(sections)
-    cks = []
-    for pr in range(len(BULLET_PATTERN[bull])-1, 1, -1):
-        for i in range(len(sections)):
-            if readed[i] or projs[i] < pr:
-                continue
-            # find father and grand-father and grand...father
-            p = projs[i]
-            readed[i] = 1
-            ck = [sections[i]]
-            for j in range(i-1, -1, -1):
-                if projs[j] >= p:continue
-                ck.append(sections[j])
-                readed[j] = 1
-                p = projs[j]
-                if p == 0: break
-            cks.append(ck[::-1])
+    cks = hierarchical_merge(bull, sections, 3)
+    if not cks: callback(0.99, "No chunk parsed out.")

     res = []
     # wrap up to es documents

@@ -177,4 +117,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__ == "__main__":
     import sys
-
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], callback=dummy)
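All of the refined chunk() entry points report progress through a two-argument callback (a fraction and a message), as the dummy(a, b) stubs in the __main__ blocks suggest. One illustrative implementation, purely as a sketch:

    def progress_callback(prog, msg):
        # e.g. called as callback(0.77, "Layout analysis finished")
        print(f"[{prog:.0%}] {msg}")

    # chunk("regulation.docx", callback=progress_callback)   # hypothetical file name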
rag/app/manual.py
CHANGED
@@ -1,6 +1,6 @@
 import copy
 import re
-from rag.
+from rag.parser import tokenize
 from rag.nlp import huqie
 from rag.parser.pdf_parser import HuParser
 from rag.utils import num_tokens_from_string

@@ -57,7 +57,7 @@ class Pdf(HuParser):
         return [b["text"] + self._line_tag(b, zoomin) for b in self.boxes], tbls


-def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
     pdf_parser = None
     paper = {}

@@ -117,5 +117,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__ == "__main__":
     import sys
-
-
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], callback=dummy)
rag/app/naive.py
ADDED
@@ -0,0 +1,79 @@
+import copy
+import re
+from rag.app import laws
+from rag.parser import is_english, tokenize, naive_merge
+from rag.nlp import huqie
+from rag.parser.pdf_parser import HuParser
+from rag.settings import cron_logger
+
+class Pdf(HuParser):
+    def __call__(self, filename, binary=None, from_page=0,
+                 to_page=100000, zoomin=3, callback=None):
+        self.__images__(
+            filename if not binary else binary,
+            zoomin,
+            from_page,
+            to_page)
+        callback(0.1, "OCR finished")
+
+        from timeit import default_timer as timer
+        start = timer()
+        self._layouts_paddle(zoomin)
+        callback(0.77, "Layout analysis finished")
+        cron_logger.info("paddle layouts:".format((timer()-start)/(self.total_page+0.1)))
+        self._naive_vertical_merge()
+        return [(b["text"], self._line_tag(b, zoomin)) for b in self.boxes]
+
+
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
+    doc = {
+        "docnm_kwd": filename,
+        "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))
+    }
+    doc["title_sm_tks"] = huqie.qieqie(doc["title_tks"])
+    pdf_parser = None
+    sections = []
+    if re.search(r"\.docx?$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        for txt in laws.Docx()(filename, binary):
+            sections.append((txt, ""))
+        callback(0.8, "Finish parsing.")
+    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
+        pdf_parser = Pdf()
+        sections = pdf_parser(filename if not binary else binary,
+                              from_page=from_page, to_page=to_page, callback=callback)
+    elif re.search(r"\.txt$", filename, re.IGNORECASE):
+        callback(0.1, "Start to parse.")
+        txt = ""
+        if binary:txt = binary.decode("utf-8")
+        else:
+            with open(filename, "r") as f:
+                while True:
+                    l = f.readline()
+                    if not l:break
+                    txt += l
+        sections = txt.split("\n")
+        sections = [(l,"") for l in sections if l]
+        callback(0.8, "Finish parsing.")
+    else: raise NotImplementedError("file type not supported yet(docx, pdf, txt supported)")
+
+    cks = naive_merge(sections, kwargs.get("chunk_token_num", 128), kwargs.get("delimer", "\n。;!?"))
+    eng = is_english(cks)
+    res = []
+    # wrap up to es documents
+    for ck in cks:
+        print("--", ck)
+        d = copy.deepcopy(doc)
+        if pdf_parser:
+            d["image"] = pdf_parser.crop(ck)
+            ck = pdf_parser.remove_tag(ck)
+        tokenize(d, ck, eng)
+        res.append(d)
+    return res
+
+
+if __name__ == "__main__":
+    import sys
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], from_page=0, to_page=10, callback=dummy)
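A usage sketch for the new generic chunker; the file name is hypothetical, and the keyword names (chunk_token_num, delimer) follow the kwargs.get calls above:

    from rag.app import naive

    def progress(prog, msg):
        pass

    docs = naive.chunk("handbook.txt", callback=progress,
                       chunk_token_num=128, delimer="\n。;!?")
    for d in docs[:3]:
        print(d["content_with_weight"][:80])   # raw chunk text stored by tokenize()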
rag/app/paper.py
CHANGED
@@ -1,7 +1,7 @@
 import copy
 import re
 from collections import Counter
-from rag.
+from rag.parser import tokenize
 from rag.nlp import huqie
 from rag.parser.pdf_parser import HuParser
 import numpy as np

@@ -113,7 +113,7 @@ class Pdf(HuParser):
     }


-def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
     pdf_parser = None
     paper = {}

@@ -232,5 +232,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__ == "__main__":
     import sys
-
-
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], callback=dummy)
rag/app/presentation.py
CHANGED
@@ -3,7 +3,7 @@ import re
 from io import BytesIO
 from pptx import Presentation

-from rag.
+from rag.parser import tokenize, is_english
 from rag.nlp import huqie
 from rag.parser.pdf_parser import HuParser

@@ -93,7 +93,7 @@ class Pdf(HuParser):
         return res


-def chunk(filename, binary=None,
+def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None, **kwargs):
     doc = {
         "docnm_kwd": filename,
         "title_tks": huqie.qie(re.sub(r"\.[a-zA-Z]+$", "", filename))

@@ -122,5 +122,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__== "__main__":
     import sys
-
+    def dummy(a, b):
+        pass
+    chunk(sys.argv[1], callback=dummy)

rag/app/qa.py
CHANGED
@@ -3,7 +3,7 @@ import re
 from io import BytesIO
 from nltk import word_tokenize
 from openpyxl import load_workbook
-from rag.
+from rag.parser import is_english
 from rag.nlp import huqie, stemmer


@@ -55,7 +55,7 @@ def beAdoc(d, q, a, eng):
     return d


-def chunk(filename, binary=None,
+def chunk(filename, binary=None, callback=None, **kwargs):

     res = []
     if re.search(r"\.xlsx?$", filename, re.IGNORECASE):

@@ -98,7 +98,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, callback=None):

 if __name__== "__main__":
     import sys
-    def
+    def dummy(a, b):
         pass
-
+    chunk(sys.argv[1], callback=dummy)

rag/parser/__init__.py
CHANGED
@@ -1,3 +1,220 @@
+import copy
+
 from .pdf_parser import HuParser as PdfParser
 from .docx_parser import HuDocxParser as DocxParser
 from .excel_parser import HuExcelParser as ExcelParser
+
+import re
+
+from nltk import word_tokenize
+
+from rag.nlp import stemmer, huqie
+from ..utils import num_tokens_from_string
+
+BULLET_PATTERN = [[
+    r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
+    r"第[零一二三四五六七八九十百0-9]+章",
+    r"第[零一二三四五六七八九十百0-9]+节",
+    r"第[零一二三四五六七八九十百0-9]+条",
+    r"[\((][零一二三四五六七八九十百]+[\))]",
+], [
+    r"第[0-9]+章",
+    r"第[0-9]+节",
+    r"[0-9]{,3}[\. 、]",
+    r"[0-9]{,2}\.[0-9]{,2}",
+    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
+    r"[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}\.[0-9]{,2}",
+], [
+    r"第[零一二三四五六七八九十百0-9]+章",
+    r"第[零一二三四五六七八九十百0-9]+节",
+    r"[零一二三四五六七八九十百]+[ 、]",
+    r"[\((][零一二三四五六七八九十百]+[\))]",
+    r"[\((][0-9]{,2}[\))]",
+], [
+    r"PART (ONE|TWO|THREE|FOUR|FIVE|SIX|SEVEN|EIGHT|NINE|TEN)",
+    r"Chapter (I+V?|VI*|XI|IX|X)",
+    r"Section [0-9]+",
+    r"Article [0-9]+"
+]
+]
+
+
+def bullets_category(sections):
+    global BULLET_PATTERN
+    hits = [0] * len(BULLET_PATTERN)
+    for i, pro in enumerate(BULLET_PATTERN):
+        for sec in sections:
+            for p in pro:
+                if re.match(p, sec):
+                    hits[i] += 1
+                    break
+    maxium = 0
+    res = -1
+    for i, h in enumerate(hits):
+        if h <= maxium: continue
+        res = i
+        maxium = h
+    return res
+
+
+def is_english(texts):
+    eng = 0
+    for t in texts:
+        if re.match(r"[a-zA-Z]{2,}", t.strip()):
+            eng += 1
+    if eng / len(texts) > 0.8:
+        return True
+    return False
+
+
+def tokenize(d, t, eng):
+    d["content_with_weight"] = t
+    if eng:
+        t = re.sub(r"([a-z])-([a-z])", r"\1\2", t)
+        d["content_ltks"] = " ".join([stemmer.stem(w) for w in word_tokenize(t)])
+    else:
+        d["content_ltks"] = huqie.qie(t)
+    d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
+
+
+def remove_contents_table(sections, eng=False):
+    i = 0
+    while i < len(sections):
+        def get(i):
+            nonlocal sections
+            return (sections[i] if type(sections[i]) == type("") else sections[i][0]).strip()
+
+        if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
+                        re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
+            i += 1
+            continue
+        sections.pop(i)
+        if i >= len(sections): break
+        prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
+        while not prefix:
+            sections.pop(i)
+            if i >= len(sections): break
+            prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
+        sections.pop(i)
+        if i >= len(sections) or not prefix: break
+        for j in range(i, min(i + 128, len(sections))):
+            if not re.match(prefix, get(j)):
+                continue
+            for _ in range(i, j): sections.pop(i)
+            break
+
+
+def make_colon_as_title(sections):
+    if not sections: return []
+    if type(sections[0]) == type(""): return sections
+    i = 0
+    while i < len(sections):
+        txt, layout = sections[i]
+        i += 1
+        txt = txt.split("@")[0].strip()
+        if not txt:
+            continue
+        if txt[-1] not in "::":
+            continue
+        txt = txt[::-1]
+        arr = re.split(r"([。?!!?;;]| .)", txt)
+        if len(arr) < 2 or len(arr[1]) < 32:
+            continue
+        sections.insert(i - 1, (arr[0][::-1], "title"))
+        i += 1
+
+
+def hierarchical_merge(bull, sections, depth):
+    if not sections or bull < 0: return []
+    if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
+    sections = [(t,o) for t, o in sections if t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
+    bullets_size = len(BULLET_PATTERN[bull])
+    levels = [[] for _ in range(bullets_size + 2)]
+
+    def not_title(txt):
+        if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt): return False
+        if len(txt) >= 128: return True
+        return re.search(r"[,;,。;!!]", txt)
+
+    for i, (txt, layout) in enumerate(sections):
+        for j, p in enumerate(BULLET_PATTERN[bull]):
+            if re.match(p, txt.strip()) and not not_title(txt):
+                levels[j].append(i)
+                break
+        else:
+            if re.search(r"(title|head)", layout):
+                levels[bullets_size].append(i)
+            else:
+                levels[bullets_size + 1].append(i)
+    sections = [t for t, _ in sections]
+    for s in sections: print("--", s)
+
+    def binary_search(arr, target):
+        if not arr: return -1
+        if target > arr[-1]: return len(arr) - 1
+        if target < arr[0]: return -1
+        s, e = 0, len(arr)
+        while e - s > 1:
+            i = (e + s) // 2
+            if target > arr[i]:
+                s = i
+                continue
+            elif target < arr[i]:
+                e = i
+                continue
+            else:
+                assert False
+        return s
+
+    cks = []
+    readed = [False] * len(sections)
+    levels = levels[::-1]
+    for i, arr in enumerate(levels[:depth]):
+        for j in arr:
+            if readed[j]: continue
+            readed[j] = True
+            cks.append([j])
+            if i + 1 == len(levels) - 1: continue
+            for ii in range(i + 1, len(levels)):
+                jj = binary_search(levels[ii], j)
+                if jj < 0: continue
+                if jj > cks[-1][-1]: cks[-1].pop(-1)
+                cks[-1].append(levels[ii][jj])
+            for ii in cks[-1]: readed[ii] = True
+    for i in range(len(cks)):
+        cks[i] = [sections[j] for j in cks[i][::-1]]
+        print("--------------\n", "\n* ".join(cks[i]))
+
+    return cks
+
+
+def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
+    if not sections: return []
+    if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
+    cks = [""]
+    tk_nums = [0]
+    def add_chunk(t, pos):
+        nonlocal cks, tk_nums, delimiter
+        tnum = num_tokens_from_string(t)
+        if tnum < 8: pos = ""
+        if tk_nums[-1] > chunk_token_num:
+            cks.append(t + pos)
+            tk_nums.append(tnum)
+        else:
+            cks[-1] += t + pos
+            tk_nums[-1] += tnum
+
+    for sec, pos in sections:
+        s, e = 0, 1
+        while e < len(sec):
+            if sec[e] in delimiter:
+                add_chunk(sec[s: e+1], pos)
+                s = e + 1
+                e = s + 1
+            else:
+                e += 1
+        if s < e: add_chunk(sec[s: e], pos)
+
+    return cks
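To make the token-budget behavior of naive_merge concrete, a quick sketch on toy input (the sample strings are illustrative only):

    from rag.parser import naive_merge, bullets_category

    sections = [("第一条 文档解析服务应当保证准确性。", ""),
                ("第二条 切分粒度由知识库配置决定。", "")]
    # Text accumulates in the current chunk until it exceeds chunk_token_num
    # tokens, then a new chunk starts; the position tag is dropped for pieces
    # shorter than 8 tokens.
    cks = naive_merge(sections, chunk_token_num=32, delimiter="\n。;!?")
    print(len(cks), cks[0])
    print(bullets_category([t for t, _ in sections]))   # index of the matched bullet group, -1 if none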
rag/parser/docx_parser.py
CHANGED
@@ -98,8 +98,19 @@ class HuDocxParser:
             return lines
         return ["\n".join(lines)]

-    def __call__(self, fnm):
+    def __call__(self, fnm, from_page=0, to_page=100000):
         self.doc = Document(fnm) if isinstance(fnm, str) else Document(BytesIO(fnm))
-
+        pn = 0
+        secs = []
+        for p in self.doc.paragraphs:
+            if pn > to_page: break
+            if from_page <= pn < to_page and p.text.strip(): secs.append((p.text, p.style.name))
+            for run in p.runs:
+                if 'lastRenderedPageBreak' in run._element.xml:
+                    pn += 1
+                    continue
+                if 'w:br' in run._element.xml and 'type="page"' in run._element.xml:
+                    pn += 1
+
         tbls = [self.__extract_table_content(tb) for tb in self.doc.tables]
         return secs, tbls
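A sketch of driving the page-ranged docx parser; the file name is hypothetical, and page counting relies on Word having stored rendered page breaks ('lastRenderedPageBreak') or explicit page-break runs in the document:

    from rag.parser.docx_parser import HuDocxParser

    parser = HuDocxParser()
    # Returns (text, style-name) pairs for paragraphs on pages [0, 5), plus tables.
    secs, tbls = parser("contract.docx", from_page=0, to_page=5)   # hypothetical file
    print(len(secs), "paragraphs,", len(tbls), "tables")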
rag/parser/pdf_parser.py
CHANGED
@@ -650,6 +650,41 @@ class HuParser:
             i += 1
         self.boxes = bxs

+    def _naive_vertical_merge(self):
+        bxs = self.sort_Y_firstly(self.boxes, np.median(self.mean_height) / 3)
+        i = 0
+        while i + 1 < len(bxs):
+            b = bxs[i]
+            b_ = bxs[i + 1]
+            if b["page_number"] < b_["page_number"] and re.match(r"[0-9 •一—-]+$", b["text"]):
+                bxs.pop(i)
+                continue
+            concatting_feats = [
+                b["text"].strip()[-1] in ",;:'\",、‘“;:-",
+                len(b["text"].strip()) > 1 and b["text"].strip()[-2] in ",;:'\",‘“、;:",
+                b["text"].strip()[0] in "。;?!?”)),,、:",
+            ]
+            # features for not concating
+            feats = [
+                b.get("layoutno", 0) != b.get("layoutno", 0),
+                b["text"].strip()[-1] in "。?!?",
+                self.is_english and b["text"].strip()[-1] in ".!?",
+                b["page_number"] == b_["page_number"] and b_["top"] - \
+                    b["bottom"] > self.mean_height[b["page_number"] - 1] * 1.5,
+                b["page_number"] < b_["page_number"] and abs(
+                    b["x0"] - b_["x0"]) > self.mean_width[b["page_number"] - 1] * 4
+            ]
+            if any(feats) and not any(concatting_feats):
+                i += 1
+                continue
+            # merge up and down
+            b["bottom"] = b_["bottom"]
+            b["text"] += b_["text"]
+            b["x0"] = min(b["x0"], b_["x0"])
+            b["x1"] = max(b["x1"], b_["x1"])
+            bxs.pop(i + 1)
+        self.boxes = bxs
+
     def _concat_downward(self, concat_between_pages=True):
         # count boxes in the same row as a feature
         for i in range(len(self.boxes)):

@@ -761,11 +796,13 @@ class HuParser:
     def _filter_forpages(self):
         if not self.boxes:
             return
+        findit = False
         i = 0
         while i < len(self.boxes):
             if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$", re.sub(r"( | |\u3000)+", "", self.boxes[i]["text"].lower())):
                 i += 1
                 continue
+            findit = True
             eng = re.match(r"[0-9a-zA-Z :'.-]{5,}", self.boxes[i]["text"].strip())
             self.boxes.pop(i)
             if i >= len(self.boxes): break

@@ -781,14 +818,36 @@ class HuParser:
                 continue
             for k in range(i, j): self.boxes.pop(i)
             break
+        if findit:return
+
+        page_dirty = [0] * len(self.page_images)
+        for b in self.boxes:
+            if re.search(r"(··|··|··)", b["text"]):
+                page_dirty[b["page_number"]-1] += 1
+        page_dirty = set([i+1 for i, t in enumerate(page_dirty) if t > 3])
+        if not page_dirty: return
+        i = 0
+        while i < len(self.boxes):
+            if self.boxes[i]["page_number"] in page_dirty:
+                self.boxes.pop(i)
+                continue
+            i += 1

     def _merge_with_same_bullet(self):
         i = 0
         while i + 1 < len(self.boxes):
             b = self.boxes[i]
             b_ = self.boxes[i + 1]
+            if not b["text"].strip():
+                self.boxes.pop(i)
+                continue
+            if not b_["text"].strip():
+                self.boxes.pop(i+1)
+                continue
+
             if b["text"].strip()[0] != b_["text"].strip()[0] \
                     or b["text"].strip()[0].lower() in set("qwertyuopasdfghjklzxcvbnm") \
+                    or huqie.is_chinese(b["text"].strip()[0]) \
                     or b["top"] > b_["bottom"]:
                 i += 1
                 continue

@@ -1596,8 +1655,7 @@ class HuParser:
             self.pdf = pdfplumber.open(fnm) if isinstance(fnm, str) else pdfplumber.open(BytesIO(fnm))
             self.page_images = [p.to_image(resolution=72 * zoomin).annotated for i, p in
                                 enumerate(self.pdf.pages[page_from:page_to])]
-            self.page_chars = [[c for c in
-                                range(len(self.page_images))]
+            self.page_chars = [[c for c in page.chars if self._has_color(c)] for page in self.pdf.pages[page_from:page_to]]
             self.total_page = len(self.pdf.pages)
         except Exception as e:
             self.pdf = fitz.open(fnm) if isinstance(fnm, str) else fitz.open(stream=fnm, filetype="pdf")

@@ -1605,15 +1663,17 @@ class HuParser:
             self.page_chars = []
             mat = fitz.Matrix(zoomin, zoomin)
             self.total_page = len(self.pdf)
-            for page in self.pdf
-
+            for i, page in enumerate(self.pdf):
+                if i < page_from:continue
+                if i >= page_to:break
+                pix = page.get_pixmap(matrix=mat)
                 img = Image.frombytes("RGB", [pix.width, pix.height],
                                       pix.samples)
                 self.page_images.append(img)
                 self.page_chars.append([])

         logging.info("Images converted.")
-        self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(random.choices([c["text"] for c in self.page_chars[i]], k=100))) for i in range(len(self.page_chars))]
+        self.is_english = [re.search(r"[a-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join(random.choices([c["text"] for c in self.page_chars[i]], k=min(100, len(self.page_chars[i]))))) for i in range(len(self.page_chars))]
         if sum([1 if e else 0 for e in self.is_english]) > len(self.page_images) / 2:
             self.is_english = True
         else:

@@ -1644,8 +1704,8 @@ class HuParser:
             # np.max([c["bottom"] for c in chars]))
             self.__ocr_paddle(i + 1, img, chars, zoomin)

-        if not self.is_english and not
-            self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join([b["text"] for b in random.choices(self.boxes, k=30)]))
+        if not self.is_english and not any([c for c in self.page_chars]) and self.boxes:
+            self.is_english = re.search(r"[\na-zA-Z0-9,/¸;:'\[\]\(\)!@#$%^&*\"?<>._-]{30,}", "".join([b["text"] for b in random.choices([b for bxs in self.boxes for b in bxs], k=30)]))

         logging.info("Is it English:", self.is_english)
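The fallback added to _filter_forpages drops whole pages that look like a table of contents even when no explicit "contents" heading was found. A standalone sketch of that heuristic against plain dicts, with the >3 dot-leader threshold mirroring the diff above:

    import re

    def drop_toc_like_pages(boxes, num_pages):
        # Count text boxes per page that contain dot leaders ("第一章 ····· 3").
        page_dirty = [0] * num_pages
        for b in boxes:
            if re.search(r"··", b["text"]):
                page_dirty[b["page_number"] - 1] += 1
        dirty = {i + 1 for i, t in enumerate(page_dirty) if t > 3}
        # Drop every box that sits on a page judged to be a TOC.
        return [b for b in boxes if b["page_number"] not in dirty]

    boxes = [{"page_number": 1, "text": "第一章 总则 ········ 3"} for _ in range(5)]
    print(len(drop_toc_like_pages(boxes, 1)))   # 0: all boxes on the dirty page are removed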