KevinHuSh committed on
Commit bcb7249 · 1 Parent(s): f422a06

solve task execution issues (#90)
api/apps/document_app.py CHANGED
@@ -236,13 +236,16 @@ def run():
     try:
         for id in req["doc_ids"]:
             info = {"run": str(req["run"]), "progress": 0}
-            if str(req["run"]) == TaskStatus.RUNNING.value:info["progress_msg"] = ""
+            if str(req["run"]) == TaskStatus.RUNNING.value:
+                info["progress_msg"] = ""
+                info["chunk_num"] = 0
+                info["token_num"] = 0
             DocumentService.update_by_id(id, info)
-            if str(req["run"]) == TaskStatus.CANCEL.value:
-                tenant_id = DocumentService.get_tenant_id(id)
-                if not tenant_id:
-                    return get_data_error_result(retmsg="Tenant not found!")
-                ELASTICSEARCH.deleteByQuery(Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
+            #if str(req["run"]) == TaskStatus.CANCEL.value:
+            tenant_id = DocumentService.get_tenant_id(id)
+            if not tenant_id:
+                return get_data_error_result(retmsg="Tenant not found!")
+            ELASTICSEARCH.deleteByQuery(Q("match", doc_id=id), idxnm=search.index_name(tenant_id))
 
         return get_json_result(data=True)
     except Exception as e:
@@ -311,13 +314,17 @@ def change_parser():
         if doc.type == FileType.VISUAL or re.search(r"\.(ppt|pptx|pages)$", doc.name):
             return get_data_error_result(retmsg="Not supported yet!")
 
-        e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress":0, "progress_msg": ""})
+        e = DocumentService.update_by_id(doc.id, {"parser_id": req["parser_id"], "progress":0, "progress_msg": "", "run": "0"})
         if not e:
            return get_data_error_result(retmsg="Document not found!")
         if doc.token_num>0:
            e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num*-1, doc.chunk_num*-1, doc.process_duation*-1)
            if not e:
                return get_data_error_result(retmsg="Document not found!")
+        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
+        if not tenant_id:
+            return get_data_error_result(retmsg="Tenant not found!")
+        ELASTICSEARCH.deleteByQuery(Q("match", doc_id=doc.id), idxnm=search.index_name(tenant_id))
 
         return get_json_result(data=True)
     except Exception as e:
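
Note: taken together, the run() changes make restarting ingestion start from a clean slate. Switching a document to RUNNING now zeroes the chunk and token counters along with the progress message, and the Elasticsearch purge runs on every status change instead of only on CANCEL (the old guard is kept as a comment). A sketch of what gets persisted, assuming TaskStatus.RUNNING serializes to "1":

    # counters reset so a re-run cannot stack on top of the previous run's chunks
    info = {"run": "1", "progress": 0, "progress_msg": "", "chunk_num": 0, "token_num": 0}
    DocumentService.update_by_id(id, info)
    # then the document's old chunks leave the tenant's index before re-ingestion:
    # ELASTICSEARCH.deleteByQuery(Q("match", doc_id=id), idxnm=search.index_name(tenant_id))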
api/db/services/task_service.py CHANGED
@@ -65,7 +65,7 @@ class TaskService(CommonService):
         try:
             task = cls.model.get_by_id(id)
             _, doc = DocumentService.get_by_id(task.doc_id)
-            return doc.run == TaskStatus.CANCEL.value
+            return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
         except Exception as e:
             pass
         return True
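
Note: the cancellation check now also short-circuits tasks whose document has already failed, not only ones explicitly cancelled. The negative-progress convention comes from the callback(-1, ...) calls visible elsewhere in this commit; a minimal restatement of the predicate (TaskStatus internals assumed):

    def should_stop(doc_run: str, doc_progress: float) -> bool:
        # explicit cancel, or a failed run recorded as negative progress
        return doc_run == TaskStatus.CANCEL.value or doc_progress < 0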
api/settings.py CHANGED
@@ -98,15 +98,6 @@ PROXY_PROTOCOL = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("protocol")
 
 DATABASE = decrypt_database_config(name="mysql")
 
-# Logger
-LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
-# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
-LoggerFactory.LEVEL = 10
-
-stat_logger = getLogger("stat")
-access_logger = getLogger("access")
-database_logger = getLogger("database")
-
 # Switch
 # upload
 UPLOAD_DATA_FROM_CLIENT = True
@@ -144,6 +135,15 @@ CHECK_NODES_IDENTITY = False
 
 retrievaler = search.Dealer(ELASTICSEARCH)
 
+# Logger
+LoggerFactory.set_directory(os.path.join(get_project_base_directory(), "logs", "api"))
+# {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0}
+LoggerFactory.LEVEL = 10
+
+stat_logger = getLogger("stat")
+access_logger = getLogger("access")
+database_logger = getLogger("database")
+
 class CustomEnum(Enum):
     @classmethod
     def valid(cls, value):
deepdoc/parser/pdf_parser.py CHANGED
@@ -8,7 +8,7 @@ import torch
 import re
 import pdfplumber
 import logging
-from PIL import Image
+from PIL import Image, ImageDraw
 import numpy as np
 
 from api.db import ParserType
@@ -930,13 +930,25 @@ class HuParser:
 
     def crop(self, text, ZM=3):
         imgs = []
+        poss = []
         for tag in re.findall(r"@@[0-9-]+\t[0-9.\t]+##", text):
             pn, left, right, top, bottom = tag.strip(
                 "#").strip("@").split("\t")
             left, right, top, bottom = float(left), float(
                 right), float(top), float(bottom)
+            poss.append(([int(p) - 1 for p in pn.split("-")], left, right, top, bottom))
+        if not poss: return
+
+        max_width = np.max([right-left for (_, left, right, _, _) in poss])
+        GAP = 6
+        pos = poss[0]
+        poss.insert(0, ([pos[0][0]], pos[1], pos[2], max(0, pos[3]-120), max(pos[3]-GAP, 0)))
+        pos = poss[-1]
+        poss.append(([pos[0][-1]], pos[1], pos[2], min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+GAP), min(self.page_images[pos[0][-1]].size[1]/ZM, pos[4]+120)))
+
+        for ii, (pns, left, right, top, bottom) in enumerate(poss):
+            right = left + max_width
             bottom *= ZM
-            pns = [int(p) - 1 for p in pn.split("-")]
             for pn in pns[1:]:
                 bottom += self.page_images[pn - 1].size[1]
             imgs.append(
@@ -959,16 +971,21 @@ class HuParser:
 
         if not imgs:
             return
-        GAP = 2
         height = 0
         for img in imgs:
             height += img.size[1] + GAP
         height = int(height)
+        width = int(np.max([i.size[0] for i in imgs]))
         pic = Image.new("RGB",
-                        (int(np.max([i.size[0] for i in imgs])), height),
+                        (width, height),
                         (245, 245, 245))
         height = 0
-        for img in imgs:
+        for ii, img in enumerate(imgs):
+            if ii == 0 or ii + 1 == len(imgs):
+                img = img.convert('RGBA')
+                overlay = Image.new('RGBA', img.size, (0, 0, 0, 0))
+                overlay.putalpha(128)
+                img = Image.alpha_composite(img, overlay).convert("RGB")
             pic.paste(img, (0, int(height)))
             height += img.size[1] + GAP
         return pic
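
Note: crop() now adds an extra context strip above the first cropped box and below the last one, and dims exactly those first and last strips so the actual hit stands out in the stitched image. The dimming is plain PIL alpha compositing; a self-contained sketch of just that step (the input file name is hypothetical):

    from PIL import Image

    strip = Image.open("context_strip.png").convert("RGBA")   # hypothetical input
    overlay = Image.new("RGBA", strip.size, (0, 0, 0, 0))
    overlay.putalpha(128)                                     # 50%-opaque black layer
    dimmed = Image.alpha_composite(strip, overlay).convert("RGB")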
deepdoc/vision/layout_recognizer.py CHANGED
@@ -34,7 +34,7 @@ class LayoutRecognizer(Recognizer):
         "Equation",
     ]
     def __init__(self, domain):
-        super().__init__(self.labels, domain) #, os.path.join(get_project_base_directory(), "rag/res/deepdoc/"))
+        super().__init__(self.labels, domain, os.path.join(get_project_base_directory(), "rag/res/deepdoc/"))
 
     def __call__(self, image_list, ocr_res, scale_factor=3, thr=0.2, batch_size=16):
         def __is_garbage(b):
deepdoc/vision/table_structure_recognizer.py CHANGED
@@ -33,7 +33,7 @@ class TableStructureRecognizer(Recognizer):
     ]
 
     def __init__(self):
-        super().__init__(self.labels, "tsr")#,os.path.join(get_project_base_directory(), "rag/res/deepdoc/"))
+        super().__init__(self.labels, "tsr",os.path.join(get_project_base_directory(), "rag/res/deepdoc/"))
 
     def __call__(self, images, thr=0.2):
         tbls = super().__call__(images, thr)
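
Note: this and the LayoutRecognizer change above re-enable the third constructor argument that had been commented out, so both recognizers load their model files from the repository's bundled resource directory instead of whatever default the Recognizer base class falls back to. The effective path, assuming get_project_base_directory() resolves to the repo root:

    model_dir = os.path.join(get_project_base_directory(), "rag/res/deepdoc/")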
rag/app/book.py CHANGED
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.nlp import bullets_category, is_english, tokenize, remove_contents_table, \
-    hierarchical_merge, make_colon_as_title, naive_merge, random_choices
+    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table
 from rag.nlp import huqie
 from deepdoc.parser import PdfParser, DocxParser
 
@@ -90,25 +90,16 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     make_colon_as_title(sections)
     bull = bullets_category([t for t in random_choices([t for t,_ in sections], k=100)])
     if bull >= 0: cks = hierarchical_merge(bull, sections, 3)
-    else: cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))
+    else:
+        sections = [s.split("@") for s in sections]
+        sections = [(pr[0], "@"+pr[1]) for pr in sections if len(pr)==2]
+        cks = naive_merge(sections, kwargs.get("chunk_token_num", 256), kwargs.get("delimer", "\n。;!?"))
 
-    sections = [t for t, _ in sections]
     # is it English
-    eng = lang.lower() == "english"#is_english(random_choices(sections, k=218))
+    eng = lang.lower() == "english"#is_english(random_choices([t for t, _ in sections], k=218))
 
-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else "；"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
-            print("TABLE", d["content_with_weight"])
+    res = tokenize_table(tbls, doc, eng)
 
     # wrap up to es documents
     for ck in cks:
         d = copy.deepcopy(doc)
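
Note: two things change here. The inline table loop gives way to the shared tokenize_table helper introduced in rag/nlp/__init__.py below, and the fallback branch now hands naive_merge (text, position-tag) pairs rather than bare strings, matching the `for sec, pos in sections` loop naive_merge gains later in this diff. A sketch of the pair shape, with a placeholder standing in for the real position tag:

    # naive_merge appends each tag to a chunk only if the chunk lacks it already
    sections = [("1.2 Installation steps ...", "@position-tag")]
    cks = naive_merge(sections, chunk_token_num=256, delimiter="\n。;!?")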
rag/app/manual.py CHANGED
@@ -2,7 +2,7 @@ import copy
 import re
 
 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.utils import num_tokens_from_string
 
@@ -81,18 +81,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # is it English
     eng = lang.lower() == "english"#pdf_parser.is_english
 
-    res = []
-    # add tables
-    for img, rows in tbls:
-        bs = 10
-        de = ";" if eng else "；"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r, eng)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(tbls, doc, eng)
 
     i = 0
     chunk = []
rag/app/naive.py CHANGED
@@ -13,7 +13,7 @@
 import copy
 import re
 from rag.app import laws
-from rag.nlp import huqie, is_english, tokenize, naive_merge
+from rag.nlp import huqie, is_english, tokenize, naive_merge, tokenize_table
 from deepdoc.parser import PdfParser
 from rag.settings import cron_logger
 
@@ -72,17 +72,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
         pdf_parser = Pdf()
         sections, tbls = pdf_parser(filename if not binary else binary,
                                     from_page=from_page, to_page=to_page, callback=callback)
-        # add tables
-        for img, rows in tbls:
-            bs = 10
-            de = ";" if eng else "；"
-            for i in range(0, len(rows), bs):
-                d = copy.deepcopy(doc)
-                r = de.join(rows[i:i + bs])
-                r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-                tokenize(d, r, eng)
-                d["image"] = img
-                res.append(d)
+        res = tokenize_table(tbls, doc, eng)
     elif re.search(r"\.txt$", filename, re.IGNORECASE):
         callback(0.1, "Start to parse.")
         txt = ""
@@ -106,6 +96,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     # wrap up to es documents
     for ck in cks:
         print("--", ck)
+        if not ck:continue
         d = copy.deepcopy(doc)
         if pdf_parser:
             d["image"] = pdf_parser.crop(ck)
rag/app/paper.py CHANGED
@@ -15,7 +15,7 @@ import re
 from collections import Counter
 
 from api.db import ParserType
-from rag.nlp import huqie, tokenize
+from rag.nlp import huqie, tokenize, tokenize_table
 from deepdoc.parser import PdfParser
 import numpy as np
 from rag.utils import num_tokens_from_string
@@ -158,18 +158,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
     eng = lang.lower() == "english"#pdf_parser.is_english
     print("It's English.....", eng)
 
-    res = []
-    # add tables
-    for img, rows in paper["tables"]:
-        bs = 10
-        de = ";" if eng else "；"
-        for i in range(0, len(rows), bs):
-            d = copy.deepcopy(doc)
-            r = de.join(rows[i:i + bs])
-            r = re.sub(r"\t——(来自| in ).*”%s" % de, "", r)
-            tokenize(d, r)
-            d["image"] = img
-            res.append(d)
+    res = tokenize_table(paper["tables"], doc, eng)
 
     if paper["abstract"]:
         d = copy.deepcopy(doc)
rag/app/presentation.py CHANGED
@@ -20,7 +20,7 @@ from deepdoc.parser import PdfParser, PptParser
 
 class Ppt(PptParser):
     def __call__(self, fnm, from_page, to_page, callback=None):
-        txts = super.__call__(fnm, from_page, to_page)
+        txts = super().__call__(fnm, from_page, to_page)
 
         callback(0.5, "Text extraction finished.")
         import aspose.slides as slides
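
Note: this one-character fix matters because `super` without parentheses is the built-in type itself, not a bound proxy, so `super.__call__(fnm, ...)` tries to construct a super object from those arguments instead of invoking PptParser.__call__. A minimal repro of the difference:

    class Base:
        def __call__(self, x):
            return x * 2

    class Child(Base):
        def __call__(self, x):
            # bound parent method; the unparenthesized `super.__call__(x)` raises TypeError
            return super().__call__(x) + 1

    assert Child()(3) == 7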
rag/app/resume.py CHANGED
@@ -79,7 +79,7 @@ def chunk(filename, binary=None, callback=None, **kwargs):
     resume = remote_call(filename, binary)
     if len(resume.keys()) < 7:
         callback(-1, "Resume is not successfully parsed.")
-        return []
+        raise Exception("Resume parser remote call fail!")
     callback(0.6, "Done parsing. Chunking...")
     print(json.dumps(resume, ensure_ascii=False, indent=2))
 
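Note: raising instead of returning an empty list changes how the failure is accounted. build() in rag/svr/task_executor.py (the last file in this diff) runs chunkers inside a try/except and reports exceptions via callback(-1, ...), so the task is marked failed rather than completing "successfully" with zero chunks, and that negative progress in turn feeds the new doc.progress < 0 cancellation check above.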
rag/nlp/__init__.py CHANGED
@@ -1,4 +1,4 @@
-
+import copy
 
 from nltk.stem import PorterStemmer
 stemmer = PorterStemmer()
@@ -80,6 +80,20 @@ def tokenize(d, t, eng):
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
 
 
+def tokenize_table(tbls, doc, eng, batch_size=10):
+    res = []
+    # add tables
+    for img, rows in tbls:
+        de = "; " if eng else "；"
+        for i in range(0, len(rows), batch_size):
+            d = copy.deepcopy(doc)
+            r = de.join(rows[i:i + batch_size])
+            tokenize(d, r, eng)
+            d["image"] = img
+            res.append(d)
+    return res
+
+
 def remove_contents_table(sections, eng=False):
     i = 0
     while i < len(sections):
@@ -201,10 +215,12 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
         tnum = num_tokens_from_string(t)
         if tnum < 8: pos = ""
         if tk_nums[-1] > chunk_token_num:
-            cks.append(t + pos)
+            if t.find(pos) < 0: t += pos
+            cks.append(t)
             tk_nums.append(tnum)
         else:
-            cks[-1] += t + pos
+            if cks[-1].find(pos) < 0: t += pos
+            cks[-1] += t
             tk_nums[-1] += tnum
 
     for sec, pos in sections:
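
Note: tokenize_table consolidates the four near-identical table loops removed from rag/app/book.py, manual.py, naive.py and paper.py above. The copies had drifted (paper.py's even called tokenize(d, r) without the eng flag), and the per-copy re.sub citation-stripping step is dropped. A usage sketch with illustrative values, assuming it runs with the rag.nlp module importable:

    from PIL import Image

    table_img = Image.new("RGB", (10, 10))                  # placeholder table image
    doc = {"docnm_kwd": "demo.pdf", "title_tks": "demo"}    # minimal base doc; fields assumed
    tbls = [(table_img, ["Model\tAcc", "ours\t0.91", "baseline\t0.87"])]
    for d in tokenize_table(tbls, doc, eng=True):
        # each d is a copy of doc holding up to batch_size rows joined by "; "
        print(d["content_with_weight"], d["image"] is table_img)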
rag/nlp/search.py CHANGED
@@ -1,6 +1,8 @@
 # -*- coding: utf-8 -*-
 import json
 import re
+from copy import deepcopy
+
 from elasticsearch_dsl import Q, Search
 from typing import List, Optional, Dict, Union
 from dataclasses import dataclass
@@ -98,7 +100,7 @@ class Dealer:
             del s["highlight"]
             q_vec = s["knn"]["query_vector"]
         es_logger.info("【Q】: {}".format(json.dumps(s)))
-        res = self.es.search(s, idxnm=idxnm, timeout="600s", src=src)
+        res = self.es.search(deepcopy(s), idxnm=idxnm, timeout="600s", src=src)
         es_logger.info("TOTAL: {}".format(self.es.getTotal(res)))
         if self.es.getTotal(res) == 0 and "knn" in s:
             bqry, _ = self.qryr.question(qst, min_match="10%")
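
Note: the deepcopy protects the retry path visible two lines below: when the first search returns zero hits, the code re-reads s (e.g. the "knn" key) to rebuild the query, so s must survive the first call unmodified. A minimal repro of the defect class, with a hypothetical mutating callee standing in for self.es.search:

    from copy import deepcopy

    def search_like(body):       # hypothetical stand-in for self.es.search
        body.pop("knn", None)    # a callee that mutates its argument
        return {"hits": {"total": 0}}

    s = {"query": {"match_all": {}}, "knn": {"query_vector": [0.1, 0.2]}}
    search_like(deepcopy(s))     # the first attempt gets a private copy
    assert "knn" in s            # the zero-hit retry can still reuse s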
rag/svr/task_broker.py CHANGED
@@ -90,7 +90,7 @@ def dispatch():
                 tsks.append(task)
             else:
                 tsks.append(new_task())
-        print(tsks)
+
         bulk_insert_into_db(Task, tsks, True)
         set_dispatching(r["id"])
         tmf.write(str(r["update_time"]) + "\n")
rag/svr/task_executor.py CHANGED
@@ -114,7 +114,7 @@ def build(row):
                               kb_id=row["kb_id"], parser_config=row["parser_config"], tenant_id=row["tenant_id"])
     except Exception as e:
         if re.search("(No such file|not found)", str(e)):
-            callback(-1, "Can not find file <%s>" % row["doc_name"])
+            callback(-1, "Can not find file <%s>" % row["name"])
         else:
             callback(-1, f"Internal server error: %s" %
                      str(e).replace("'", ""))
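
Note: the task rows assembled by the dispatcher evidently carry the document name under "name", so the old row["doc_name"] lookup would raise a KeyError inside this very error handler, replacing the useful "file not found" message with a second failure. That reading is inferred from the rename; the diff itself only shows the key change.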