KevinHuSh committed
Commit fda6678 · Parent: 405c9f9

adjust hierarchical_merge strategy (#100)

Files changed (3)
  1. rag/app/laws.py +0 -1
  2. rag/nlp/__init__.py +75 -42
  3. rag/nlp/search.py +2 -2
rag/app/laws.py CHANGED
@@ -103,7 +103,6 @@ def chunk(filename, binary=None, from_page=0, to_page=100000, lang="Chinese", ca
                     if not l:break
                     txt += l
         sections = txt.split("\n")
-        sections = txt.split("\n")
         sections = [l for l in sections if l]
         callback(0.8, "Finish parsing.")
     else: raise NotImplementedError("file type not supported yet(docx, pdf, txt supported)")
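
The removed line was an exact duplicate of the one above it, so the txt branch behaves the same; a minimal check (sample text made up):

    txt = "Article 1\nArticle 2\n"
    sections = txt.split("\n")
    sections = txt.split("\n")  # the deleted duplicate only recomputed the same list
    assert [l for l in sections if l] == ["Article 1", "Article 2"]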
rag/nlp/__init__.py CHANGED
@@ -1,13 +1,14 @@
+import random
+from rag.utils import num_tokens_from_string
+from . import huqie
+from nltk import word_tokenize
+import re
 import copy
 
 from nltk.stem import PorterStemmer
+
 stemmer = PorterStemmer()
 
-import re
-from nltk import word_tokenize
-from . import huqie
-from rag.utils import num_tokens_from_string
-import random
 
 BULLET_PATTERN = [[
     r"第[零一二三四五六七八九十百0-9]+(分?编|部分)",
@@ -54,7 +55,8 @@ def bullets_category(sections):
     maxium = 0
     res = -1
     for i, h in enumerate(hits):
-        if h <= maxium: continue
+        if h <= maxium:
+            continue
         res = i
         maxium = h
     return res
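
bullets_category (shown in part above) is an argmax over per-pattern-group hit counts. A self-contained sketch of the selection loop, with made-up counts:

    hits = [12, 3, 0]    # hypothetical regex-hit counts, one per BULLET_PATTERN group
    maxium, res = 0, -1  # "maxium" follows the source's spelling
    for i, h in enumerate(hits):
        if h <= maxium:
            continue
        res = i
        maxium = h
    assert res == 0      # the group with the most hits wins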
@@ -74,7 +76,8 @@ def tokenize(d, t, eng):
     d["content_with_weight"] = t
     if eng:
         t = re.sub(r"([a-z])-([a-z])", r"\1\2", t)
-        d["content_ltks"] = " ".join([stemmer.stem(w) for w in word_tokenize(t)])
+        d["content_ltks"] = " ".join([stemmer.stem(w)
+                                      for w in word_tokenize(t)])
     else:
         d["content_ltks"] = huqie.qie(t)
     d["content_sm_ltks"] = huqie.qieqie(d["content_ltks"])
@@ -84,7 +87,8 @@ def tokenize_table(tbls, doc, eng, batch_size=10):
     res = []
     # add tables
     for (img, rows), poss in tbls:
-        if not rows:continue
+        if not rows:
+            continue
         if isinstance(rows, str):
             d = copy.deepcopy(doc)
             r = re.sub(r"<[^<>]{,12}>", "", rows)
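
The re.sub on the last context line strips short angle-bracket tags (up to 12 characters between the brackets) out of stringified table rows; a quick check:

    import re
    rows = "<td>Revenue</td> 1,234"
    assert re.sub(r"<[^<>]{,12}>", "", rows) == "Revenue 1,234"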
@@ -106,14 +110,15 @@ def tokenize_table(tbls, doc, eng, batch_size=10):
 
 
 def add_positions(d, poss):
-    if not poss:return
+    if not poss:
+        return
     d["page_num_int"] = []
     d["position_int"] = []
     d["top_int"] = []
     for pn, left, right, top, bottom in poss:
-        d["page_num_int"].append(pn+1)
+        d["page_num_int"].append(pn + 1)
         d["top_int"].append(top)
-        d["position_int"].append((pn+1, left, right, top, bottom))
+        d["position_int"].append((pn + 1, left, right, top, bottom))
     d["top_int"] = d["top_int"][:1]
 
 
@@ -122,31 +127,38 @@ def remove_contents_table(sections, eng=False):
     while i < len(sections):
         def get(i):
             nonlocal sections
-            return (sections[i] if type(sections[i]) == type("") else sections[i][0]).strip()
+            return (sections[i] if isinstance(sections[i],
+                    type("")) else sections[i][0]).strip()
 
         if not re.match(r"(contents|目录|目次|table of contents|致谢|acknowledge)$",
                         re.sub(r"( | |\u3000)+", "", get(i).split("@@")[0], re.IGNORECASE)):
             i += 1
             continue
         sections.pop(i)
-        if i >= len(sections): break
+        if i >= len(sections):
+            break
         prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
         while not prefix:
             sections.pop(i)
-            if i >= len(sections): break
+            if i >= len(sections):
+                break
             prefix = get(i)[:3] if not eng else " ".join(get(i).split(" ")[:2])
         sections.pop(i)
-        if i >= len(sections) or not prefix: break
+        if i >= len(sections) or not prefix:
+            break
         for j in range(i, min(i + 128, len(sections))):
             if not re.match(prefix, get(j)):
                 continue
-            for _ in range(i, j): sections.pop(i)
+            for _ in range(i, j):
+                sections.pop(i)
             break
 
 
 def make_colon_as_title(sections):
-    if not sections: return []
-    if type(sections[0]) == type(""): return sections
+    if not sections:
+        return []
+    if isinstance(sections[0], type("")):
+        return sections
     i = 0
     while i < len(sections):
         txt, layout = sections[i]
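
A usage sketch of remove_contents_table: it finds a contents/目录 heading, then pops the heading and the listing lines after it until the body text resumes, mutating sections in place (sample data made up, expected result inferred from the code above):

    from rag.nlp import remove_contents_table

    sections = ["目录", "第一章 总则", "第二章 附则", "第一章 总则", "正文开始。"]
    remove_contents_table(sections)
    # should leave the real first chapter: ["第一章 总则", "正文开始。"]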
@@ -165,20 +177,25 @@ def make_colon_as_title(sections):
 
 
 def hierarchical_merge(bull, sections, depth):
-    if not sections or bull < 0: return []
-    if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
-    sections = [(t,o) for t, o in sections if t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
+    if not sections or bull < 0:
+        return []
+    if isinstance(sections[0], type("")):
+        sections = [(s, "") for s in sections]
+    sections = [(t, o) for t, o in sections if
+                t and len(t.split("@")[0].strip()) > 1 and not re.match(r"[0-9]+$", t.split("@")[0].strip())]
     bullets_size = len(BULLET_PATTERN[bull])
     levels = [[] for _ in range(bullets_size + 2)]
 
     def not_title(txt):
-        if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt): return False
-        if len(txt.split(" "))>12 or (txt.find(" ")<0 and len(txt)) >= 32: return True
+        if re.match(r"第[零一二三四五六七八九十百0-9]+条", txt):
+            return False
+        if len(txt.split(" ")) > 12 or (txt.find(" ") < 0 and len(txt) >= 32):
+            return True
         return re.search(r"[,;,。;!!]", txt)
 
     for i, (txt, layout) in enumerate(sections):
         for j, p in enumerate(BULLET_PATTERN[bull]):
-            if re.match(p, txt.strip()) and not not_title(txt):
+            if re.match(p, txt.strip()):
                 levels[j].append(i)
                 break
         else:
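
The headline change sits in this hunk: a line matching a bullet pattern is now bucketed as a heading unconditionally, where it previously also had to pass the not_title check. (The reparenthesized length test inside not_title is behavior-preserving, since the old `(txt.find(" ")<0 and len(txt)) >= 32` already reduced to `False >= 32`, i.e. False, whenever the text contained a space.) A rough, self-contained sketch of the bucketing, with made-up patterns and a simplified else branch:

    import re

    BULLET = [r"第[零一二三四五六七八九十百0-9]+章", r"第[零一二三四五六七八九十百0-9]+条"]
    sections = ["第一章 总则", "第一条 本法所称……。", "普通正文段落。"]
    levels = [[] for _ in range(len(BULLET) + 2)]
    for i, txt in enumerate(sections):
        for j, p in enumerate(BULLET):
            if re.match(p, txt.strip()):  # the `and not not_title(txt)` guard is gone
                levels[j].append(i)
                break
        else:
            levels[-1].append(i)          # non-matching text falls to the lowest level
    print(levels)  # [[0], [1], [], [2]]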
@@ -187,12 +204,16 @@ def hierarchical_merge(bull, sections, depth):
     else:
         levels[bullets_size + 1].append(i)
     sections = [t for t, _ in sections]
-    #for s in sections: print("--", s)
+
+    # for s in sections: print("--", s)
 
     def binary_search(arr, target):
-        if not arr: return -1
-        if target > arr[-1]: return len(arr) - 1
-        if target < arr[0]: return -1
+        if not arr:
+            return -1
+        if target > arr[-1]:
+            return len(arr) - 1
+        if target < arr[0]:
+            return -1
         s, e = 0, len(arr)
         while e - s > 1:
             i = (e + s) // 2
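
From its guards, binary_search reads as a predecessor search over a sorted list of section indices: the position of the largest element not exceeding target, or -1. The loop body is cut off by this hunk, so that reading is an inference:

    # assuming the elided loop narrows [s, e) while keeping arr[s] <= target:
    # binary_search([2, 5, 9], 6)  -> 1   (arr[1] == 5 is the last value <= 6)
    # binary_search([2, 5, 9], 12) -> 2   (target past the end)
    # binary_search([2, 5, 9], 1)  -> -1  (target before the start)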
@@ -211,18 +232,24 @@
     levels = levels[::-1]
     for i, arr in enumerate(levels[:depth]):
         for j in arr:
-            if readed[j]: continue
+            if readed[j]:
+                continue
             readed[j] = True
             cks.append([j])
-            if i + 1 == len(levels) - 1: continue
+            if i + 1 == len(levels) - 1:
+                continue
             for ii in range(i + 1, len(levels)):
                 jj = binary_search(levels[ii], j)
-                if jj < 0: continue
-                if jj > cks[-1][-1]: cks[-1].pop(-1)
+                if jj < 0:
+                    continue
+                if jj > cks[-1][-1]:
+                    cks[-1].pop(-1)
                 cks[-1].append(levels[ii][jj])
-            for ii in cks[-1]: readed[ii] = True
+            for ii in cks[-1]:
+                readed[ii] = True
 
-    if not cks:return cks
+    if not cks:
+        return cks
 
     for i in range(len(cks)):
         cks[i] = [sections[j] for j in cks[i][::-1]]
@@ -247,20 +274,26 @@ def hierarchical_merge(bull, sections, depth):
 
 
 def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
-    if not sections: return []
-    if type(sections[0]) == type(""): sections = [(s, "") for s in sections]
+    if not sections:
+        return []
+    if isinstance(sections[0], type("")):
+        sections = [(s, "") for s in sections]
     cks = [""]
     tk_nums = [0]
+
     def add_chunk(t, pos):
         nonlocal cks, tk_nums, delimiter
         tnum = num_tokens_from_string(t)
-        if tnum < 8: pos = ""
+        if tnum < 8:
+            pos = ""
         if tk_nums[-1] > chunk_token_num:
-            if t.find(pos) < 0: t += pos
+            if t.find(pos) < 0:
+                t += pos
             cks.append(t)
             tk_nums.append(tnum)
         else:
-            if cks[-1].find(pos) < 0: t += pos
+            if cks[-1].find(pos) < 0:
+                t += pos
             cks[-1] += t
             tk_nums[-1] += tnum
 
@@ -270,12 +303,12 @@ def naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?"):
         s, e = 0, 1
         while e < len(sec):
             if sec[e] in delimiter:
-                add_chunk(sec[s: e+1], pos)
+                add_chunk(sec[s: e + 1], pos)
                 s = e + 1
                 e = s + 1
            else:
                 e += 1
-        if s < e: add_chunk(sec[s: e], pos)
+        if s < e:
+            add_chunk(sec[s: e], pos)
 
     return cks
-
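
A usage sketch of naive_merge: each section is cut at the delimiter characters and the pieces are packed greedily into chunks of roughly chunk_token_num tokens (sample text made up):

    from rag.nlp import naive_merge

    sections = ["第一条 总则。第二条 定义。", "Appendix text."]
    chunks = naive_merge(sections, chunk_token_num=128, delimiter="\n。;!?")
    # inputs this small land in a single chunk; a new chunk opens only once the
    # running chunk exceeds chunk_token_num tokens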
 
 
rag/nlp/search.py CHANGED
@@ -82,8 +82,8 @@ class Dealer:
             )
         else:
             s = s.sort(
-                {"page_num_int": {"order": "asc", "unmapped_type": "float"}},
-                {"top_int": {"order": "asc", "unmapped_type": "float", "mode" : "avg"}},
+                {"page_num_int": {"order": "asc", "unmapped_type": "float", "mode" : "avg"}},
+                {"top_int": {"order": "asc", "unmapped_type": "float", "mode": "avg"}},
                 {"create_time": {"order": "desc", "unmapped_type": "date"}},
                 {"create_timestamp_flt": {"order": "desc", "unmapped_type": "float"}}
             )
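
With "mode": "avg", Elasticsearch sorts a document whose field holds several values by their average; that matters here because a chunk spanning pages carries several page_num_int values. A minimal sketch with elasticsearch_dsl (client and index setup omitted):

    from elasticsearch_dsl import Search

    s = Search().sort(
        {"page_num_int": {"order": "asc", "unmapped_type": "float", "mode": "avg"}},
        {"top_int": {"order": "asc", "unmapped_type": "float", "mode": "avg"}},
    )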