JobSmithManipulation, Kevin Hu committed
Commit d1a0b33 · 1 parent: bf00d96

rename some attributes in document sdk (#2481)

### What problem does this PR solve?

#1102

### Type of change

- [x] Performance Improvement

---------

Co-authored-by: Kevin Hu <[email protected]>
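
In short, the SDK-facing request and response keys are renamed, while the server-side storage fields (e.g. the `parser_id` column) keep their old names. A minimal sketch of the mapping as it appears in the hunks below; the helper itself is illustrative and not part of the PR:

```python
# Old SDK key             -> new SDK key (taken from the hunks below)
OLD_TO_NEW = {
    "doc_id": "document_id",
    "doc_ids": "document_ids",
    "kb_id": "knowledgebase_id",
    "content_with_weight": "content",
    "important_kwd": "important_keywords",
    "parser_id": "parser_method",  # request key only; the stored field stays parser_id
}

def migrate_payload(payload: dict) -> dict:
    """Rename old request keys to the new ones (illustrative helper)."""
    return {OLD_TO_NEW.get(key, key): value for key, value in payload.items()}
```

For example, `migrate_payload({"doc_id": "x", "page": 1})` yields `{"document_id": "x", "page": 1}`.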

api/apps/sdk/doc.py CHANGED
@@ -99,6 +99,7 @@ def docinfos(tenant_id):
         "chunk_num": "chunk_count",
         "kb_id": "knowledgebase_id",
         "token_num": "token_count",
+        "parser_id": "parser_method",
     }
     renamed_doc = {}
     for key, value in doc.to_dict().items():
@@ -125,10 +126,14 @@ def save_doc(tenant_id):
     if not e:
         return get_data_error_result(retmsg="Document not found!")
     #other value can't be changed
-    if "chunk_num" in req:
-        if req["chunk_num"] != doc.chunk_num:
+    if "chunk_count" in req:
+        if req["chunk_count"] != doc.chunk_num:
             return get_data_error_result(
                 retmsg="Can't change chunk_count.")
+    if "token_count" in req:
+        if req["token_count"] != doc.token_num:
+            return get_data_error_result(
+                retmsg="Can't change token_count.")
     if "progress" in req:
         if req['progress'] != doc.progress:
             return get_data_error_result(
@@ -158,9 +163,9 @@ def save_doc(tenant_id):
             FileService.update_by_id(file.id, {"name": req["name"]})
         except Exception as e:
             return server_error_response(e)
-    if "parser_id" in req:
+    if "parser_method" in req:
         try:
-            if doc.parser_id.lower() == req["parser_id"].lower():
+            if doc.parser_id.lower() == req["parser_method"].lower():
                 if "parser_config" in req:
                     if req["parser_config"] == doc.parser_config:
                         return get_json_result(data=True)
@@ -172,7 +177,7 @@ def save_doc(tenant_id):
                 return get_data_error_result(retmsg="Not supported yet!")

             e = DocumentService.update_by_id(doc.id,
-                                             {"parser_id": req["parser_id"], "progress": 0, "progress_msg": "",
+                                             {"parser_id": req["parser_method"], "progress": 0, "progress_msg": "",
                                               "run": TaskStatus.UNSTART.value})
             if not e:
                 return get_data_error_result(retmsg="Document not found!")
@@ -183,7 +188,7 @@ def save_doc(tenant_id):
                                        doc.process_duation * -1)
             if not e:
                 return get_data_error_result(retmsg="Document not found!")
-            tenant_id = DocumentService.get_tenant_id(req["doc_id"])
+            tenant_id = DocumentService.get_tenant_id(req["id"])
             if not tenant_id:
                 return get_data_error_result(retmsg="Tenant not found!")
             ELASTICSEARCH.deleteByQuery(
@@ -272,7 +277,7 @@ def rename():

 @manager.route("/<document_id>", methods=["GET"])
 @token_required
-def download_document(dataset_id, document_id,tenant_id):
+def download_document(document_id,tenant_id):
     try:
         # Check whether there is this document
         exist, document = DocumentService.get_by_id(document_id)
@@ -304,7 +309,7 @@ def download_document(document_id, tenant_id):
 @manager.route('/dataset/<dataset_id>/documents', methods=['GET'])
 @token_required
 def list_docs(dataset_id, tenant_id):
-    kb_id = request.args.get("kb_id")
+    kb_id = request.args.get("knowledgebase_id")
     if not kb_id:
         return get_json_result(
             data=False, retmsg='Lack of "KB ID"', retcode=RetCode.ARGUMENT_ERROR)
@@ -334,6 +339,7 @@ def list_docs(dataset_id, tenant_id):
         "chunk_num": "chunk_count",
         "kb_id": "knowledgebase_id",
         "token_num": "token_count",
+        "parser_id": "parser_method"
     }
     renamed_doc = {}
     for key, value in doc.items():
@@ -349,10 +355,10 @@ def list_docs(dataset_id, tenant_id):
 @token_required
 def rm(tenant_id):
     req = request.args
-    if "doc_id" not in req:
+    if "document_id" not in req:
         return get_data_error_result(
             retmsg="doc_id is required")
-    doc_ids = req["doc_id"]
+    doc_ids = req["document_id"]
     if isinstance(doc_ids, str): doc_ids = [doc_ids]
     root_folder = FileService.get_root_folder(tenant_id)
     pf_id = root_folder["id"]
@@ -413,7 +419,7 @@ def show_parsing_status(tenant_id, document_id):
 def run(tenant_id):
     req = request.json
     try:
-        for id in req["doc_ids"]:
+        for id in req["document_ids"]:
             info = {"run": str(req["run"]), "progress": 0}
             if str(req["run"]) == TaskStatus.RUNNING.value:
                 info["progress_msg"] = ""
@@ -442,15 +448,15 @@ def run(tenant_id):

 @manager.route('/chunk/list', methods=['POST'])
 @token_required
-@validate_request("doc_id")
+@validate_request("document_id")
 def list_chunk(tenant_id):
     req = request.json
-    doc_id = req["doc_id"]
+    doc_id = req["document_id"]
     page = int(req.get("page", 1))
     size = int(req.get("size", 30))
     question = req.get("keywords", "")
     try:
-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
+        tenant_id = DocumentService.get_tenant_id(req["document_id"])
         if not tenant_id:
             return get_data_error_result(retmsg="Tenant not found!")
         e, doc = DocumentService.get_by_id(doc_id)
@@ -509,15 +515,15 @@ def list_chunk(tenant_id):

 @manager.route('/chunk/create', methods=['POST'])
 @token_required
-@validate_request("doc_id", "content_with_weight")
+@validate_request("document_id", "content")
 def create(tenant_id):
     req = request.json
     md5 = hashlib.md5()
-    md5.update((req["content_with_weight"] + req["doc_id"]).encode("utf-8"))
+    md5.update((req["content"] + req["document_id"]).encode("utf-8"))

     chunk_id = md5.hexdigest()
-    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
-         "content_with_weight": req["content_with_weight"]}
+    d = {"id": chunk_id, "content_ltks": rag_tokenizer.tokenize(req["content"]),
+         "content_with_weight": req["content"]}
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
     d["important_kwd"] = req.get("important_kwd", [])
     d["important_tks"] = rag_tokenizer.tokenize(" ".join(req.get("important_kwd", [])))
@@ -525,22 +531,22 @@ def create(tenant_id):
     d["create_timestamp_flt"] = datetime.datetime.now().timestamp()

     try:
-        e, doc = DocumentService.get_by_id(req["doc_id"])
+        e, doc = DocumentService.get_by_id(req["document_id"])
         if not e:
             return get_data_error_result(retmsg="Document not found!")
         d["kb_id"] = [doc.kb_id]
         d["docnm_kwd"] = doc.name
         d["doc_id"] = doc.id

-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
+        tenant_id = DocumentService.get_tenant_id(req["document_id"])
        if not tenant_id:
             return get_data_error_result(retmsg="Tenant not found!")

-        embd_id = DocumentService.get_embd_id(req["doc_id"])
+        embd_id = DocumentService.get_embd_id(req["document_id"])
         embd_mdl = TenantLLMService.model_instance(
             tenant_id, LLMType.EMBEDDING.value, embd_id)

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content"]])
         v = 0.1 * v[0] + 0.9 * v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
         ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
@@ -568,14 +574,14 @@ def create(tenant_id):

 @manager.route('/chunk/rm', methods=['POST'])
 @token_required
-@validate_request("chunk_ids", "doc_id")
+@validate_request("chunk_ids", "document_id")
 def rm_chunk(tenant_id):
     req = request.json
     try:
         if not ELASTICSEARCH.deleteByQuery(
                 Q("ids", values=req["chunk_ids"]), search.index_name(tenant_id)):
             return get_data_error_result(retmsg="Index updating failure")
-        e, doc = DocumentService.get_by_id(req["doc_id"])
+        e, doc = DocumentService.get_by_id(req["document_id"])
         if not e:
             return get_data_error_result(retmsg="Document not found!")
         deleted_chunk_ids = req["chunk_ids"]
@@ -587,30 +593,30 @@ def rm_chunk(tenant_id):

 @manager.route('/chunk/set', methods=['POST'])
 @token_required
-@validate_request("doc_id", "chunk_id", "content_with_weight",
-                  "important_kwd")
+@validate_request("document_id", "chunk_id", "content",
+                  "important_keywords")
 def set(tenant_id):
     req = request.json
     d = {
         "id": req["chunk_id"],
-        "content_with_weight": req["content_with_weight"]}
-    d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
+        "content_with_weight": req["content"]}
+    d["content_ltks"] = rag_tokenizer.tokenize(req["content"])
     d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
-    d["important_kwd"] = req["important_kwd"]
-    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
+    d["important_kwd"] = req["important_keywords"]
+    d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_keywords"]))
     if "available_int" in req:
         d["available_int"] = req["available_int"]

     try:
-        tenant_id = DocumentService.get_tenant_id(req["doc_id"])
+        tenant_id = DocumentService.get_tenant_id(req["document_id"])
         if not tenant_id:
             return get_data_error_result(retmsg="Tenant not found!")

-        embd_id = DocumentService.get_embd_id(req["doc_id"])
+        embd_id = DocumentService.get_embd_id(req["document_id"])
         embd_mdl = TenantLLMService.model_instance(
             tenant_id, LLMType.EMBEDDING.value, embd_id)

-        e, doc = DocumentService.get_by_id(req["doc_id"])
+        e, doc = DocumentService.get_by_id(req["document_id"])
         if not e:
             return get_data_error_result(retmsg="Document not found!")

@@ -618,7 +624,7 @@ def set(tenant_id):
         arr = [
             t for t in re.split(
                 r"[\n\t]",
-                req["content_with_weight"]) if len(t) > 1]
+                req["content"]) if len(t) > 1]
         if len(arr) != 2:
             return get_data_error_result(
                 retmsg="Q&A must be separated by TAB/ENTER key.")
@@ -626,7 +632,7 @@ def set(tenant_id):
         d = beAdoc(d, arr[0], arr[1], not any(
             [rag_tokenizer.is_chinese(t) for t in q + a]))

-        v, c = embd_mdl.encode([doc.name, req["content_with_weight"]])
+        v, c = embd_mdl.encode([doc.name, req["content"]])
         v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
         d["q_%d_vec" % len(v)] = v.tolist()
         ELASTICSEARCH.upsert([d], search.index_name(tenant_id))
@@ -636,13 +642,13 @@ def set(tenant_id):

 @manager.route('/retrieval_test', methods=['POST'])
 @token_required
-@validate_request("kb_id", "question")
+@validate_request("knowledgebase_id", "question")
 def retrieval_test(tenant_id):
     req = request.json
     page = int(req.get("page", 1))
     size = int(req.get("size", 30))
     question = req["question"]
-    kb_id = req["kb_id"]
+    kb_id = req["knowledgebase_id"]
     if isinstance(kb_id, str): kb_id = [kb_id]
     doc_ids = req.get("doc_ids", [])
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
@@ -693,6 +699,7 @@ def retrieval_test(tenant_id):
         "content_with_weight": "content",
         "doc_id": "document_id",
         "important_kwd": "important_keywords",
+        "docnm_kwd": "document_keyword"
     }
     rename_chunk={}
     for key, value in chunk.items():
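
With these hunks applied, the `/doc/chunk/*` endpoints validate the renamed keys. A sketch of a raw call against `/doc/chunk/create` under the new contract; the base URL, token, and document ID are placeholders, only the JSON keys come from the diff:

```python
import requests

BASE_URL = "http://<host>:<port>/api"  # placeholder; depends on deployment
API_KEY = "<api-key>"                  # placeholder

# /doc/chunk/create now validates "document_id" and "content"
# (formerly "doc_id" and "content_with_weight").
res = requests.post(
    f"{BASE_URL}/doc/chunk/create",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={"document_id": "<document-id>", "content": "text of the new chunk"},
)
print(res.json().get("retmsg"))
```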
sdk/python/ragflow/modules/chunk.py CHANGED
@@ -22,7 +22,7 @@ class Chunk(Base):
         Delete the chunk in the document.
         """
         res = self.post('/doc/chunk/rm',
-                        {"doc_id": self.document_id, 'chunk_ids': [self.id]})
+                        {"document_id": self.document_id, 'chunk_ids': [self.id]})
         res = res.json()
         if res.get("retmsg") == "success":
             return True
@@ -34,13 +34,13 @@ class Chunk(Base):
         """
         res = self.post('/doc/chunk/set',
                         {"chunk_id": self.id,
-                         "kb_id": self.knowledgebase_id,
+                         "knowledgebase_id": self.knowledgebase_id,
                          "name": self.document_name,
-                         "content_with_weight": self.content,
-                         "important_kwd": self.important_keywords,
+                         "content": self.content,
+                         "important_keywords": self.important_keywords,
                          "create_time": self.create_time,
                          "create_timestamp_flt": self.create_timestamp_float,
-                         "doc_id": self.document_id,
+                         "document_id": self.document_id,
                          "status": self.status,
                          })
         res = res.json()
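
Call sites in the SDK are unchanged; only the wire payload differs. A sketch, assuming `chunk` is a `Chunk` instance and that the update method shown above (the one posting to `/doc/chunk/set`) is named `save`:

```python
# Assumes `chunk` was obtained through the SDK (e.g. from a document's chunk list).
chunk.content = "updated chunk text"
chunk.important_keywords = ["updated", "chunk"]
chunk.save()  # now sends "content", "important_keywords", "document_id", "knowledgebase_id"
```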
sdk/python/ragflow/modules/dataset.py CHANGED
@@ -65,7 +65,7 @@ class DataSet(Base):
         """
         # Construct the request payload for listing documents
         payload = {
-            "kb_id": self.id,
+            "knowledgebase_id": self.id,
             "keywords": keywords,
             "offset": offset,
             "limit": limit
sdk/python/ragflow/modules/document.py CHANGED
@@ -34,10 +34,10 @@ class Document(Base):
         Save the document details to the server.
         """
         res = self.post('/doc/save',
-                        {"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "kb_id": self.knowledgebase_id,
-                         "parser_id": self.parser_method, "parser_config": self.parser_config.to_json(),
+                        {"id": self.id, "name": self.name, "thumbnail": self.thumbnail, "knowledgebase_id": self.knowledgebase_id,
+                         "parser_method": self.parser_method, "parser_config": self.parser_config.to_json(),
                          "source_type": self.source_type, "type": self.type, "created_by": self.created_by,
-                         "size": self.size, "token_num": self.token_count, "chunk_num": self.chunk_count,
+                         "size": self.size, "token_count": self.token_count, "chunk_count": self.chunk_count,
                          "progress": self.progress, "progress_msg": self.progress_msg,
                          "process_begin_at": self.process_begin_at, "process_duation": self.process_duration
                          })
@@ -51,7 +51,7 @@ class Document(Base):
         Delete the document from the server.
         """
         res = self.rm('/doc/delete',
-                      {"doc_id": self.id})
+                      {"document_id": self.id})
         res = res.json()
         if res.get("retmsg") == "success":
             return True
@@ -83,7 +83,7 @@ class Document(Base):
         """
         try:
             # Construct request data including document ID and run status (assuming 1 means to run)
-            data = {"doc_ids": [self.id], "run": 1}
+            data = {"document_ids": [self.id], "run": 1}

             # Send a POST request to the specified parsing status endpoint to start parsing
             res = self.post(f'/doc/run', data)
@@ -112,7 +112,7 @@ class Document(Base):
         start_time = time.time()
         while time.time() - start_time < timeout:
             # Check the parsing status
-            res = self.get(f'/doc/{self.id}/status', {"doc_ids": [self.id]})
+            res = self.get(f'/doc/{self.id}/status', {"document_ids": [self.id]})
             res_data = res.json()
             data = res_data.get("data", [])

@@ -133,7 +133,7 @@ class Document(Base):
         """
         try:
             # Construct request data, including document ID and action to cancel (assuming 2 means cancel)
-            data = {"doc_ids": [self.id], "run": 2}
+            data = {"document_ids": [self.id], "run": 2}

             # Send a POST request to the specified parsing status endpoint to cancel parsing
             res = self.post(f'/doc/run', data)
@@ -162,7 +162,7 @@ class Document(Base):
             list: A list of chunks returned from the API.
         """
         data = {
-            "doc_id": self.id,
+            "document_id": self.id,
             "page": page,
             "size": size,
             "keywords": keywords,
@@ -188,7 +188,7 @@ class Document(Base):
             raise Exception(f"API request failed with status code {res.status_code}")

     def add_chunk(self, content: str):
-        res = self.post('/doc/chunk/create', {"doc_id": self.id, "content_with_weight":content})
+        res = self.post('/doc/chunk/create', {"document_id": self.id, "content":content})
         if res.status_code == 200:
             res_data = res.json().get("data")
             chunk_data = res_data.get("chunk")
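
End to end, the renamed `Document` methods can be exercised the same way the test suite does. A short sketch, assuming a reachable server; API_KEY and HOST_ADDRESS are placeholders, as in sdk/python/test/t_document.py:

```python
from ragflow import RAGFlow

rag = RAGFlow("<API_KEY>", "<HOST_ADDRESS>")  # placeholders
doc = rag.get_document(name='story.txt')

# add_chunk now posts {"document_id": ..., "content": ...} to /doc/chunk/create.
chunk = doc.add_chunk(content="hello chunk")
assert chunk is not None
```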
sdk/python/ragflow/ragflow.py CHANGED
@@ -150,14 +150,11 @@ class RAGFlow:
         files = {
             'file': (name, blob)
         }
-        data = {
-            'kb_id': ds.id
-        }
         headers = {
             'Authorization': f"Bearer {ds.rag.user_key}"
         }

-        response = requests.post(self.api_url + url, data=data, files=files,
+        response = requests.post(self.api_url + url, files=files,
                                  headers=headers)

         if response.status_code == 200 and response.json().get('retmsg') == 'success':
@@ -184,7 +181,7 @@ class RAGFlow:
             if not doc_ids or not isinstance(doc_ids, list):
                 raise ValueError("doc_ids must be a non-empty list of document IDs")

-            data = {"doc_ids": doc_ids, "run": 1}
+            data = {"document_ids": doc_ids, "run": 1}

             res = self.post(f'/doc/run', data)

@@ -206,7 +203,7 @@ class RAGFlow:
         try:
             if not doc_ids or not isinstance(doc_ids, list):
                 raise ValueError("doc_ids must be a non-empty list of document IDs")
-            data = {"doc_ids": doc_ids, "run": 2}
+            data = {"document_ids": doc_ids, "run": 2}
             res = self.post(f'/doc/run', data)

             if res.status_code != 200:
@@ -252,7 +249,7 @@ class RAGFlow:
             "similarity_threshold": similarity_threshold,
             "vector_similarity_weight": vector_similarity_weight,
             "top_k": top_k,
-            "kb_id": datasets,
+            "knowledgebase_id": datasets,
         }

         # Send a POST request to the backend service (using requests library as an example, actual implementation may vary)
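
The retrieval payload now carries `knowledgebase_id` instead of `kb_id`. A sketch of the request body as assembled above; only the key names come from the diff, the question and numeric values are example placeholders:

```python
payload = {
    "question": "What is RAGFlow?",
    "similarity_threshold": 0.2,           # example value
    "vector_similarity_weight": 0.3,       # example value
    "top_k": 1024,                         # example value
    "knowledgebase_id": ["<dataset-id>"],  # was "kb_id"; list of dataset IDs
}
```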
sdk/python/test/t_document.py CHANGED
@@ -255,14 +255,14 @@ class TestDocument(TestSdk):
     def test_add_chunk_to_chunk_list(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assss")
+        chunk = doc.add_chunk(content="assssdd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"

     def test_delete_chunk_of_chunk_list(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assss")
+        chunk = doc.add_chunk(content="assssdd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"
         doc = rag.get_document(name='story.txt')
@@ -274,7 +274,7 @@ class TestDocument(TestSdk):
     def test_update_chunk_content(self):
         rag = RAGFlow(API_KEY, HOST_ADDRESS)
         doc = rag.get_document(name='story.txt')
-        chunk = doc.add_chunk(content="assssd")
+        chunk = doc.add_chunk(content="assssddd")
         assert chunk is not None, "Chunk is None"
         assert isinstance(chunk, Chunk), "Chunk was not added to chunk list"
         chunk.content = "ragflow123"