Add support for HTML file (#973)
### What problem does this PR solve?

Add support for HTML files.
### Type of change
- [x] New Feature (non-breaking change which adds functionality)
- api/utils/file_utils.py +1 -1
- deepdoc/parser/__init__.py +1 -0
- deepdoc/parser/html_parser.py +27 -0
- rag/app/book.py +9 -1
- rag/app/laws.py +7 -1
- rag/app/naive.py +7 -1
- rag/app/one.py +7 -1
- requirements.txt +2 -0
- requirements_arm.txt +3 -1
- requirements_dev.txt +2 -0
api/utils/file_utils.py
CHANGED
|
@@ -156,7 +156,7 @@ def filename_type(filename):
|
|
| 156 |
return FileType.PDF.value
|
| 157 |
|
| 158 |
if re.match(
|
| 159 |
-
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt)$", filename):
|
| 160 |
return FileType.DOC.value
|
| 161 |
|
| 162 |
if re.match(
|
|
|
|
| 156 |
return FileType.PDF.value
|
| 157 |
|
| 158 |
if re.match(
|
| 159 |
+
r".*\.(doc|docx|ppt|pptx|yml|xml|htm|json|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html)$", filename):
|
| 160 |
return FileType.DOC.value
|
| 161 |
|
| 162 |
if re.match(
|
deepdoc/parser/__init__.py
CHANGED
|
@@ -4,3 +4,4 @@ from .pdf_parser import RAGFlowPdfParser as PdfParser, PlainParser
|
|
| 4 |
from .docx_parser import RAGFlowDocxParser as DocxParser
|
| 5 |
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
| 6 |
from .ppt_parser import RAGFlowPptParser as PptParser
|
|
|
|
|
|
| 4 |
from .docx_parser import RAGFlowDocxParser as DocxParser
|
| 5 |
from .excel_parser import RAGFlowExcelParser as ExcelParser
|
| 6 |
from .ppt_parser import RAGFlowPptParser as PptParser
|
| 7 |
+
from .html_parser import RAGFlowHtmlParser as HtmlParser
|
deepdoc/parser/html_parser.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
from rag.nlp import find_codec
|
| 3 |
+
import readability
|
| 4 |
+
import html_text
|
| 5 |
+
import chardet
|
| 6 |
+
|
| 7 |
+
def get_encoding(file):
|
| 8 |
+
with open(file,'rb') as f:
|
| 9 |
+
tmp = chardet.detect(f.read())
|
| 10 |
+
return tmp['encoding']
|
| 11 |
+
|
| 12 |
+
class RAGFlowHtmlParser:
|
| 13 |
+
def __call__(self, fnm, binary=None):
|
| 14 |
+
txt = ""
|
| 15 |
+
if binary:
|
| 16 |
+
encoding = find_codec(binary)
|
| 17 |
+
txt = binary.decode(encoding, errors="ignore")
|
| 18 |
+
else:
|
| 19 |
+
with open(fnm, "r",encoding=get_encoding(fnm)) as f:
|
| 20 |
+
txt = f.read()
|
| 21 |
+
|
| 22 |
+
html_doc = readability.Document(txt)
|
| 23 |
+
title = html_doc.title()
|
| 24 |
+
content = html_text.extract_text(html_doc.summary(html_partial=True))
|
| 25 |
+
txt = f'{title}\n{content}'
|
| 26 |
+
sections = txt.split("\n")
|
| 27 |
+
return sections
|
rag/app/book.py
CHANGED
|
@@ -19,7 +19,7 @@ from rag.nlp import bullets_category, is_english, tokenize, remove_contents_tabl
|
|
| 19 |
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
|
| 20 |
tokenize_chunks, find_codec
|
| 21 |
from rag.nlp import rag_tokenizer
|
| 22 |
-
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
| 23 |
|
| 24 |
|
| 25 |
class Pdf(PdfParser):
|
|
@@ -105,6 +105,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
| 105 |
random_choices([t for t, _ in sections], k=200)))
|
| 106 |
callback(0.8, "Finish parsing.")
|
| 107 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 109 |
callback(0.1, "Start to parse.")
|
| 110 |
binary = BytesIO(binary)
|
|
|
|
| 19 |
hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, add_positions, \
|
| 20 |
tokenize_chunks, find_codec
|
| 21 |
from rag.nlp import rag_tokenizer
|
| 22 |
+
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
| 23 |
|
| 24 |
|
| 25 |
class Pdf(PdfParser):
|
|
|
|
| 105 |
random_choices([t for t, _ in sections], k=200)))
|
| 106 |
callback(0.8, "Finish parsing.")
|
| 107 |
|
| 108 |
+
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
| 109 |
+
callback(0.1, "Start to parse.")
|
| 110 |
+
sections = HtmlParser()(filename, binary)
|
| 111 |
+
sections = [(l, "") for l in sections if l]
|
| 112 |
+
remove_contents_table(sections, eng=is_english(
|
| 113 |
+
random_choices([t for t, _ in sections], k=200)))
|
| 114 |
+
callback(0.8, "Finish parsing.")
|
| 115 |
+
|
| 116 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 117 |
callback(0.1, "Start to parse.")
|
| 118 |
binary = BytesIO(binary)
|
rag/app/laws.py
CHANGED
|
@@ -20,7 +20,7 @@ from api.db import ParserType
|
|
| 20 |
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
|
| 21 |
make_colon_as_title, add_positions, tokenize_chunks, find_codec
|
| 22 |
from rag.nlp import rag_tokenizer
|
| 23 |
-
from deepdoc.parser import PdfParser, DocxParser, PlainParser
|
| 24 |
from rag.settings import cron_logger
|
| 25 |
|
| 26 |
|
|
@@ -125,6 +125,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
| 125 |
sections = [l for l in sections if l]
|
| 126 |
callback(0.8, "Finish parsing.")
|
| 127 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 129 |
callback(0.1, "Start to parse.")
|
| 130 |
binary = BytesIO(binary)
|
|
|
|
| 20 |
from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, hierarchical_merge, \
|
| 21 |
make_colon_as_title, add_positions, tokenize_chunks, find_codec
|
| 22 |
from rag.nlp import rag_tokenizer
|
| 23 |
+
from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
|
| 24 |
from rag.settings import cron_logger
|
| 25 |
|
| 26 |
|
|
|
|
| 125 |
sections = [l for l in sections if l]
|
| 126 |
callback(0.8, "Finish parsing.")
|
| 127 |
|
| 128 |
+
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
| 129 |
+
callback(0.1, "Start to parse.")
|
| 130 |
+
sections = HtmlParser()(filename, binary)
|
| 131 |
+
sections = [l for l in sections if l]
|
| 132 |
+
callback(0.8, "Finish parsing.")
|
| 133 |
+
|
| 134 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 135 |
callback(0.1, "Start to parse.")
|
| 136 |
binary = BytesIO(binary)
|
rag/app/naive.py
CHANGED
|
@@ -17,7 +17,7 @@ from timeit import default_timer as timer
|
|
| 17 |
import re
|
| 18 |
from deepdoc.parser.pdf_parser import PlainParser
|
| 19 |
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
| 20 |
-
from deepdoc.parser import PdfParser, ExcelParser, DocxParser
|
| 21 |
from rag.settings import cron_logger
|
| 22 |
from rag.utils import num_tokens_from_string
|
| 23 |
|
|
@@ -161,6 +161,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
| 161 |
|
| 162 |
callback(0.8, "Finish parsing.")
|
| 163 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 164 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 165 |
callback(0.1, "Start to parse.")
|
| 166 |
binary = BytesIO(binary)
|
|
|
|
| 17 |
import re
|
| 18 |
from deepdoc.parser.pdf_parser import PlainParser
|
| 19 |
from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
|
| 20 |
+
from deepdoc.parser import PdfParser, ExcelParser, DocxParser, HtmlParser
|
| 21 |
from rag.settings import cron_logger
|
| 22 |
from rag.utils import num_tokens_from_string
|
| 23 |
|
|
|
|
| 161 |
|
| 162 |
callback(0.8, "Finish parsing.")
|
| 163 |
|
| 164 |
+
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
| 165 |
+
callback(0.1, "Start to parse.")
|
| 166 |
+
sections = HtmlParser()(filename, binary)
|
| 167 |
+
sections = [(l, "") for l in sections if l]
|
| 168 |
+
callback(0.8, "Finish parsing.")
|
| 169 |
+
|
| 170 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 171 |
callback(0.1, "Start to parse.")
|
| 172 |
binary = BytesIO(binary)
|
rag/app/one.py
CHANGED
|
@@ -15,7 +15,7 @@ from io import BytesIO
|
|
| 15 |
import re
|
| 16 |
from rag.app import laws
|
| 17 |
from rag.nlp import rag_tokenizer, tokenize, find_codec
|
| 18 |
-
from deepdoc.parser import PdfParser, ExcelParser, PlainParser
|
| 19 |
|
| 20 |
|
| 21 |
class Pdf(PdfParser):
|
|
@@ -97,6 +97,12 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
|
|
| 97 |
sections = [s for s in sections if s]
|
| 98 |
callback(0.8, "Finish parsing.")
|
| 99 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 101 |
callback(0.1, "Start to parse.")
|
| 102 |
binary = BytesIO(binary)
|
|
|
|
| 15 |
import re
|
| 16 |
from rag.app import laws
|
| 17 |
from rag.nlp import rag_tokenizer, tokenize, find_codec
|
| 18 |
+
from deepdoc.parser import PdfParser, ExcelParser, PlainParser, HtmlParser
|
| 19 |
|
| 20 |
|
| 21 |
class Pdf(PdfParser):
|
|
|
|
| 97 |
sections = [s for s in sections if s]
|
| 98 |
callback(0.8, "Finish parsing.")
|
| 99 |
|
| 100 |
+
elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
|
| 101 |
+
callback(0.1, "Start to parse.")
|
| 102 |
+
sections = HtmlParser()(filename, binary)
|
| 103 |
+
sections = [s for s in sections if s]
|
| 104 |
+
callback(0.8, "Finish parsing.")
|
| 105 |
+
|
| 106 |
elif re.search(r"\.doc$", filename, re.IGNORECASE):
|
| 107 |
callback(0.1, "Start to parse.")
|
| 108 |
binary = BytesIO(binary)
|
requirements.txt
CHANGED
|
@@ -137,3 +137,5 @@ loguru==0.7.2
|
|
| 137 |
umap-learn
|
| 138 |
fasttext==0.9.2
|
| 139 |
volcengine
|
|
|
|
|
|
|
|
|
| 137 |
umap-learn
|
| 138 |
fasttext==0.9.2
|
| 139 |
volcengine
|
| 140 |
+
readability-lxml==0.8.1
|
| 141 |
+
html_text==0.6.2
|
requirements_arm.txt
CHANGED
|
@@ -137,4 +137,6 @@ loguru==0.7.2
|
|
| 137 |
umap-learn
|
| 138 |
fasttext==0.9.2
|
| 139 |
volcengine
|
| 140 |
-
opencv-python-headless==4.9.0.80
|
|
|
|
|
|
|
|
|
| 137 |
umap-learn
|
| 138 |
fasttext==0.9.2
|
| 139 |
volcengine
|
| 140 |
+
opencv-python-headless==4.9.0.80
|
| 141 |
+
readability-lxml==0.8.1
|
| 142 |
+
html_text==0.6.2
|
requirements_dev.txt
CHANGED
|
@@ -125,3 +125,5 @@ redis==5.0.4
|
|
| 125 |
fasttext==0.9.2
|
| 126 |
umap-learn
|
| 127 |
volcengine
|
|
|
|
|
|
|
|
|
| 125 |
fasttext==0.9.2
|
| 126 |
umap-learn
|
| 127 |
volcengine
|
| 128 |
+
readability-lxml==0.8.1
|
| 129 |
+
html_text==0.6.2
|