Commit 8c682a7
Kevin Hu committed
Parent(s): 2bf4151

boost highlight performance (#2419)
### What problem does this PR solve?

#2415

### Type of change

- [x] Performance Improvement
- rag/nlp/query.py +5 -3
- rag/nlp/search.py +2 -3
rag/nlp/query.py CHANGED
@@ -50,7 +50,7 @@ class EsQueryer:
         patts = [
             (r"是*(什么样的|哪家|一下|那家|请问|啥样|咋样了|什么时候|何时|何地|何人|是否|是不是|多少|哪里|怎么|哪儿|怎么样|如何|哪些|是啥|啥是|啊|吗|呢|吧|咋|什么|有没有|呀)是*", ""),
             (r"(^| )(what|who|how|which|where|why)('re|'s)? ", " "),
-            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down) ", " ")
+            (r"(^| )('s|'re|is|are|were|was|do|does|did|don't|doesn't|didn't|has|have|be|there|you|me|your|my|mine|just|please|may|i|should|would|wouldn't|will|won't|done|go|for|with|so|the|a|an|by|i'm|it's|he's|she's|they|they're|you're|as|by|on|in|at|up|out|down|of) ", " ")
         ]
         for r, p in patts:
             txt = re.sub(r, p, txt, flags=re.IGNORECASE)
@@ -80,7 +80,7 @@ class EsQueryer:
                      must=Q("query_string", fields=self.flds,
                             type="best_fields", query=" ".join(q),
                             boost=1)#, minimum_should_match=min_match)
-                     ),
+                     ), list(set([t for t in txt.split(" ") if t]))
 
         def need_fine_grained_tokenize(tk):
             if len(tk) < 3:
@@ -93,8 +93,10 @@ class EsQueryer:
         for tt in self.tw.split(txt)[:256]:  # .split(" "):
             if not tt:
                 continue
+            keywords.append(tt)
             twts = self.tw.weights([tt])
             syns = self.syn.lookup(tt)
+            if syns: keywords.extend(syns)
             logging.info(json.dumps(twts, ensure_ascii=False))
             tms = []
             for tk, w in sorted(twts, key=lambda x: x[1] * -1):
@@ -147,7 +149,7 @@ class EsQueryer:
 
         return Q("bool",
                  must=mst,
-                 ), keywords
+                 ), list(set(keywords))
 
     def hybrid_similarity(self, avec, bvecs, atks, btkss, tkweight=0.3,
                           vtweight=0.7):
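In `rag/nlp/query.py`, the stopword pattern gains `|of`, and `EsQueryer.question` now returns a deduplicated keyword list alongside the ES query: each surviving token is appended to `keywords`, any synonyms from `self.syn.lookup()` are added as well, and `list(set(...))` removes duplicates at the return sites. Below is a minimal, self-contained sketch of that collection pattern; `collect_keywords`, `split`, and `lookup` are hypothetical stand-ins for the tokenizer and synonym-lookup members, and the sample data is invented.

```python
# Hypothetical sketch of the keyword collection above; `split` and
# `lookup` stand in for self.tw.split() / self.syn.lookup().

def collect_keywords(txt, split, lookup):
    keywords = []
    for tt in split(txt)[:256]:  # same 256-token cap as the diff
        if not tt:
            continue
        keywords.append(tt)            # keep the token itself
        syns = lookup(tt)
        if syns:
            keywords.extend(syns)      # plus its synonyms
    return list(set(keywords))         # dedupe; set order is arbitrary

print(collect_keywords(
    "boost highlight performance",
    split=lambda s: s.split(" "),
    lookup=lambda t: {"boost": ["improve", "raise"]}.get(t, []),
))
# e.g. ['highlight', 'improve', 'boost', 'raise', 'performance'] (any order)
```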
rag/nlp/search.py CHANGED
@@ -189,10 +189,9 @@ class Dealer:
             txt = d["_source"][fieldnm]
             txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE|re.MULTILINE)
             txts = []
-            for w in keywords:
-                txt = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])"%re.escape(w), r"\1<em>\2</em>\3", txt, flags=re.IGNORECASE|re.MULTILINE)
-
             for t in re.split(r"[.?!;\n]", txt):
+                for w in keywords:
+                    t = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])"%re.escape(w), r"\1<em>\2</em>\3", t, flags=re.IGNORECASE|re.MULTILINE)
                 if not re.search(r"<em>[^<>]+</em>", t, flags=re.IGNORECASE|re.MULTILINE): continue
                 txts.append(t)
             ans[d["_id"]] = "...".join(txts) if txts else "...".join([a for a in list(hlts.items())[0][1]])
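In `rag/nlp/search.py`, the highlighter no longer rewrites the whole field once per keyword; it splits the text into sentence-like fragments first, runs the tagging substitution on each short fragment, and keeps only fragments where an `<em>` tag actually landed. A self-contained sketch of the reworked loop, with invented sample text and keywords:

```python
import re

# Sketch of the per-fragment highlighting above; text and keywords are made up.
def highlight(txt, keywords):
    txt = re.sub(r"[\r\n]", " ", txt)     # flatten newlines, as the diff does
    txts = []
    for t in re.split(r"[.?!;\n]", txt):  # sentence-like fragments
        for w in keywords:
            # tag the keyword only when delimiter characters surround it
            t = re.sub(r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])" % re.escape(w),
                       r"\1<em>\2</em>\3", t, flags=re.IGNORECASE)
        if re.search(r"<em>[^<>]+</em>", t):
            txts.append(t)                # keep only fragments with a hit
    return "...".join(txts)

print(highlight("ES boosts search. Nothing here. Query boost works well!",
                ["boost", "query"]))
# ->  <em>Query</em> <em>boost</em> works well
```

Each `re.sub` now scans one short fragment rather than the full field, and fragments without any match are dropped before the snippet is joined with `"..."`.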