Commit f6252d5 · Parent: 3d9274d

Updated session APIs (#2868)
### What problem does this PR solve?

_Briefly describe what this PR aims to solve. Include background context that will help reviewers understand the purpose of the PR._

### Type of change

- [x] Documentation Update

---------

Signed-off-by: Jin Hai <[email protected]>
Co-authored-by: Jin Hai <[email protected]>
Files changed: api/python_api_reference.md (+177 −163)
@@ -137,7 +137,7 @@ RAGFlow.list_datasets(
desc: bool = True,
id: str = None,
name: str = None
- ) ->
```

Retrieves a list of knowledge bases.

@@ -244,12 +244,12 @@ File management inside knowledge base
## Upload document

```python
- DataSet.upload_documents(document_list:
```

### Parameters

- #### document_list:`
A list composed of dicts containing `name` and `blob`.


@@ -260,7 +260,7 @@ no return
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.create_dataset(name="kb_1")
ds.upload_documents([{name="1.txt", blob="123"}, ...] }
```

@@ -286,7 +286,7 @@ no return
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds=rag.list_datasets(id='id')
ds=ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ -311,7 +311,7 @@ bytes of the document.
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds=rag.list_datasets(id="id")
ds=ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ -325,7 +325,7 @@ print(doc)
## List documents

```python
- Dataset.list_documents(id:str =None, keywords: str=None, offset: int=0, limit:int = 1024,order_by:str = "create_time", desc: bool = True) ->
```

### Parameters
@@ -353,7 +353,7 @@ The field by which the records should be sorted. This specifies the attribute or
|
|
353 |
A boolean flag indicating whether the sorting should be in descending order.
|
354 |
### Returns
|
355 |
|
356 |
-
|
357 |
|
358 |
A document object containing the following attributes:
|
359 |
|
@@ -427,7 +427,7 @@ Duration of the processing in seconds or minutes. Defaults to `0.0`.
|
|
427 |
```python
|
428 |
from ragflow import RAGFlow
|
429 |
|
430 |
-
rag = RAGFlow(api_key="
|
431 |
ds = rag.create_dataset(name="kb_1")
|
432 |
|
433 |
filename1 = "~/ragflow.txt"
|
@@ -443,7 +443,7 @@ for d in ds.list_documents(keywords="rag", offset=0, limit=12):
|
|
443 |
## Delete documents
|
444 |
|
445 |
```python
|
446 |
-
DataSet.delete_documents(ids:
|
447 |
```
|
448 |
### Returns
|
449 |
|
@@ -454,7 +454,7 @@ no return
|
|
454 |
```python
|
455 |
from ragflow import RAGFlow
|
456 |
|
457 |
-
rag = RAGFlow(api_key="
|
458 |
ds = rag.list_datasets(name="kb_1")
|
459 |
ds = ds[0]
|
460 |
ds.delete_documents(ids=["id_1","id_2"])
|
@@ -465,13 +465,13 @@ ds.delete_documents(ids=["id_1","id_2"])
|
|
465 |
## Parse and stop parsing document
|
466 |
|
467 |
```python
|
468 |
-
DataSet.async_parse_documents(document_ids:
|
469 |
-
DataSet.async_cancel_parse_documents(document_ids:
|
470 |
```
|
471 |
|
472 |
### Parameters
|
473 |
|
474 |
-
#### document_ids:`
|
475 |
The ids of the documents to be parsed
|
476 |
????????????????????????????????????????????????????
|
477 |
|
@@ -503,7 +503,7 @@ print("Async bulk parsing cancelled")

## List chunks
```python
- Document.list_chunks(keywords: str = None, offset: int = 0, limit: int = -1, id : str = None) ->
```
### Parameters

@@ -523,13 +523,13 @@ Document.list_chunks(keywords: str = None, offset: int = 0, limit: int = -1, id
The ID of the chunk to be retrieved
default: `None`
### Returns
-

### Examples
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.list_datasets("123")
ds = ds[0]
ds.async_parse_documents(["wdfxb5t547d"])

@@ -546,7 +546,7 @@ Document.add_chunk(content:str) -> Chunk

#### content: `str`, *Required*
Contains the main text or information of the chunk.
- #### important_keywords :`
list the key terms or phrases that are significant or central to the chunk's content.

### Returns

@@ -558,7 +558,7 @@ chunk
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ -571,10 +571,10 @@ chunk = doc.add_chunk(content="xxxxxxx")
## Delete chunk

```python
- Document.delete_chunks(chunk_ids:
```
### Parameters
- #### chunk_ids:`
The list of chunk_id

### Returns

@@ -586,7 +586,7 @@ no return
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")
@@ -606,7 +606,7 @@ Chunk.update(update_message: dict)
- `content`: `str`
Contains the main text or information of the chunk

- - `important_keywords`: `
List the key terms or phrases that are significant or central to the chunk's content

- `available`: `int`

@@ -621,7 +621,7 @@ no return
```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ -635,7 +635,7 @@ chunk.update({"content":"sdfx...})
## Retrieval

```python
- RAGFlow.retrieve(question:str="", datasets:
```

### Parameters

@@ -644,11 +644,11 @@ RAGFlow.retrieve(question:str="", datasets:List[str]=None, document=List[str]=No

The user query or query keywords. Defaults to `""`.

- #### datasets: `

The scope of datasets.

- #### document: `

The scope of document. `None` means no limitation. Defaults to `None`.


@@ -683,14 +683,14 @@ Indicating whether keyword-based matching is enabled (True) or disabled (False).
Specifying whether to enable highlighting of matched terms in the results (True) or not (False).
### Returns

-

### Examples

```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
ds = rag.list_datasets(name="ragflow")
ds = ds[0]
name = 'ragflow_test.txt'
@@ -714,20 +714,20 @@ for c in rag.retrieve(question="What's ragflow?",
Chat APIs
:::

- ## Create chat
-
- Creates a chat assistant.

```python
RAGFlow.create_chat(
name: str = "assistant",
avatar: str = "path",
- knowledgebases:
llm: Chat.LLM = None,
prompt: Chat.Prompt = None
) -> Chat
```

### Returns

- Success: A `Chat` object representing the chat assistant.

@@ -752,7 +752,7 @@ The llm of the created chat. Defaults to `None`. When the value is `None`, a dic
- **model_name**, `str`
The chat model name. If it is `None`, the user's default chat model will be returned.
- **temperature**, `float`
-
- **top_p**, `float`
Also known as “nucleus sampling”, this parameter sets a threshold to select a smaller set of words to sample from. It focuses on the most likely words, cutting off the less probable ones. Defaults to `0.3`
- **presence_penalty**, `float`

@@ -764,14 +764,20 @@ The llm of the created chat. Defaults to `None`. When the value is `None`, a dic

#### Prompt: `str`

- Instructions for LLM
-
-
-
Here is the knowledge base:
{knowledge}
- The above is the knowledge base
- ```

### Examples
@@ -787,12 +793,12 @@ assistant = rag.create_chat("Miss R", knowledgebases=knowledge_base)

## Update chat

- Updates the current chat assistant.
-
```python
Chat.update(update_message: dict)
```

### Parameters

#### update_message: `dict[str, Any]`, *Required*

@@ -800,14 +806,26 @@ Chat.update(update_message: dict)
- `"name"`: `str` The name of the chat assistant to update.
- `"avatar"`: `str` Base64 encoding of the avatar. Defaults to `""`
- `"knowledgebases"`: `list[str]` Knowledge bases to update.
- - `"llm"`: `dict`
- - `"model_name"`, `str` The chat model name.
- - `"temperature"`, `float`
- `"top_p"`, `float` Also known as “nucleus sampling”, this parameter sets a threshold to select a smaller set of words to sample from.
- - `"presence_penalty"`, `float` This discourages the model from repeating the same information by penalizing words that have
- - `"frequency penalty"`, `float` Similar to
- `"max_token"`, `int` This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).
- - `"prompt"` : Instructions for LLM

### Returns

@@ -822,8 +840,7 @@ from ragflow import RAGFlow
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
knowledge_base = rag.list_datasets(name="kb_1")
assistant = rag.create_chat("Miss R", knowledgebases=knowledge_base)
- assistant.update({"llm": {"temperature":0.8}})
-
```

---

@@ -833,14 +850,14 @@ assistant.update({"llm": {"temperature":0.8}})
Deletes specified chat assistants.

```python
- RAGFlow.delete_chats(ids:
```

### Parameters

#### ids

- IDs of the chat assistants to delete.

### Returns
@@ -868,14 +885,14 @@ RAGFlow.list_chats(
desc: bool = True,
id: str = None,
name: str = None
- ) ->
```

### Parameters

#### page

-

#### page_size

@@ -891,15 +908,15 @@ Indicates whether to sort the results in descending order. Defaults to `True`.

#### id: `string`

- The ID of the chat to

#### name: `string`

- The name of the chat to

### Returns

- - Success: A list of `Chat` objects
- Failure: `Exception`.

### Examples
@@ -924,69 +941,63 @@ Chat-session APIs
Chat.create_session(name: str = "New session") -> Session
```

-
-
- A `session` object.
-
- #### id: `str`
-
- The id of the created session is used to identify different sessions.
- - id can not be provided in creating
-
- #### name: `str`
-
- The name of the created session. Defaults to `"New session"`.
-
- #### messages: `List[Message]`
-
- The messages of the created session.
- - messages cannot be provided.

-

-

-
- [{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]
- ```

-

-
- - `

### Examples

```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
-
-
-
```

-
## Update session

```python
- Session.update(update_message:dict)
```

### Returns

-

### Examples

```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
-
-
-
-
```

---
@@ -999,64 +1010,66 @@ Session.ask(question: str, stream: bool = False) -> Optional[Message, iter[Messa

### Parameters

- #### question
-
- The question to start an AI chat. Defaults to `None`. ???????????????????

-

-


### Returns

- [Message, iter[Message]]

#### id: `str`

- The

#### content: `str`

The content of the message. Defaults to `"Hi! I am your assistant, can I help you?"`.

- #### reference: `

The auto-generated reference of the message. Each `chunk` object includes the following attributes:

- **id**: `str`
- The id of the chunk.
- **content**: `str`
- The content of the chunk.
- **document_id**: `str`
- The ID of the document being referenced.
- **document_name**: `str`
- The name of the referenced document being referenced.
- **knowledgebase_id**: `str`
- The id of the knowledge base to which the relevant document belongs.
- **image_id**: `str`
- The id of the image related to the chunk.
- **similarity**: `float`
- A general similarity score, usually a composite score derived from various similarity measures . This score represents the degree of similarity between two objects. The value ranges between 0 and 1, where a value closer to 1 indicates higher similarity.
- **vector_similarity**: `float`
- A similarity score based on vector representations. This score is obtained by converting texts, words, or objects into vectors and then calculating the cosine similarity or other distance measures between these vectors to determine the similarity in vector space. A higher value indicates greater similarity in the vector space.
- **term_similarity**: `float`
- The similarity score based on terms or keywords. This score is calculated by comparing the similarity of key terms between texts or datasets, typically measuring how similar two words or phrases are in meaning or context. A higher value indicates a stronger similarity between terms.
- - **position**: `
- Indicates the position or index of keywords or specific terms within the text. An array is typically used to mark the location of keywords or specific elements, facilitating precise operations or analysis of the text.

### Examples

```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
-
-
- sess =

print("\n==================== Miss R =====================\n")
- print(

while True:
    question = input("\n==================== User =====================\n> ")
@@ -1081,81 +1094,82 @@ Chat.list_sessions(
desc: bool = True,
id: str = None,
name: str = None
- ) ->
```

-

-
- description: the List contains information about multiple assistant object, with each dictionary containing information about one assistant.

-

-
- from ragflow import RAGFlow

-
- assi = rag.list_chats(name="Miss R")
- assi = assi[0]
- for sess in assi.list_sessions():
- print(sess)
- ```

-

- ####

- The
- - `1`

- ####

-
- - `1024`

- ####

- The
- - `"create_time"`

- ####

-
- - `True`

-

-
- - `

-
- The name of the chat to be retrieved.
- - `None`
---

- ## Delete

```python
- Chat.delete_sessions(ids:
```

### Returns

-

### Examples

```python
from ragflow import RAGFlow

- rag = RAGFlow(api_key="
-
-
-
- ```
- ### Parameters
- #### ids: `List[string]`
- IDs of the sessions to be deleted.
- - `None`
-
@@ +137,7 @@
desc: bool = True,
id: str = None,
name: str = None
+ ) -> list[DataSet]
```

Retrieves a list of knowledge bases.

@@ +244,12 @@
## Upload document

```python
+ DataSet.upload_documents(document_list: list[dict])
```

### Parameters

+ #### document_list:`list[dict]`
A list composed of dicts containing `name` and `blob`.


@@ +260,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.create_dataset(name="kb_1")
ds.upload_documents([{name="1.txt", blob="123"}, ...] }
```
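The `upload_documents` example above is reproduced from the diff as-is and is not valid Python: the dicts use `name=` rather than `"name":`, and the brackets are unbalanced. A minimal corrected sketch, using only the `RAGFlow`, `create_dataset`, and `upload_documents` calls documented in this file (the server URL and API key are placeholders):

```python
from ragflow import RAGFlow

# Assumes a running RAGFlow server and a valid API key.
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.create_dataset(name="kb_1")

# document_list is a list of dicts, each carrying a file name and its raw content (blob).
with open("1.txt", "rb") as f:
    ds.upload_documents([{"name": "1.txt", "blob": f.read()}])
```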
@@ +286,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds=rag.list_datasets(id='id')
ds=ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ +311,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds=rag.list_datasets(id="id")
ds=ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ +325,7 @@
## List documents

```python
+ Dataset.list_documents(id:str =None, keywords: str=None, offset: int=0, limit:int = 1024,order_by:str = "create_time", desc: bool = True) -> list[Document]
```

### Parameters

@@ +353,7 @@
A boolean flag indicating whether the sorting should be in descending order.
### Returns

+ list[Document]

A document object containing the following attributes:
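A short usage sketch for `list_documents`, based only on the signature and `list[Document]` return type above; the keyword and paging values are illustrative:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="kb_1")[0]

# Filter by keyword and page through the matching documents.
for doc in ds.list_documents(keywords="rag", offset=0, limit=12):
    print(doc)
```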
@@ +427,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.create_dataset(name="kb_1")

filename1 = "~/ragflow.txt"

@@ +443,7 @@
## Delete documents

```python
+ DataSet.delete_documents(ids: list[str] = None)
```
### Returns


@@ +454,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="kb_1")
ds = ds[0]
ds.delete_documents(ids=["id_1","id_2"])

@@ +465,13 @@
## Parse and stop parsing document

```python
+ DataSet.async_parse_documents(document_ids:list[str]) -> None
+ DataSet.async_cancel_parse_documents(document_ids:list[str])-> None
```

### Parameters

+ #### document_ids:`list[str]`
The ids of the documents to be parsed
????????????????????????????????????????????????????
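A sketch of starting and cancelling bulk parsing with the two signatures above. It assumes each `Document` returned by `list_documents` exposes an `id` attribute; polling for parsing progress is outside the scope of this hunk:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="kb_1")[0]

doc_ids = [doc.id for doc in ds.list_documents()]

# Kick off asynchronous parsing for the whole batch...
ds.async_parse_documents(doc_ids)
# ...and cancel it again if needed.
ds.async_cancel_parse_documents(doc_ids)
print("Async bulk parsing cancelled")
```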
@@ +503,7 @@

## List chunks
```python
+ Document.list_chunks(keywords: str = None, offset: int = 0, limit: int = -1, id : str = None) -> list[Chunk]
```
### Parameters

@@ +523,13 @@
The ID of the chunk to be retrieved
default: `None`
### Returns
+ list[chunk]

### Examples
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets("123")
ds = ds[0]
ds.async_parse_documents(["wdfxb5t547d"])
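The example in this hunk ends before any chunks are actually listed; a minimal continuation based only on the `list_chunks` signature above (the keyword is illustrative):

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets("123")[0]
doc = ds.list_documents(id="wdfxb5t547d")[0]

# list_chunks returns list[Chunk]; limit=-1 (the default) places no cap on the result size.
for chunk in doc.list_chunks(keywords="rag", offset=0):
    print(chunk)
```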
@@ +546,7 @@

#### content: `str`, *Required*
Contains the main text or information of the chunk.
+ #### important_keywords :`list[str]`
list the key terms or phrases that are significant or central to the chunk's content.

### Returns

@@ +558,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ +571,10 @@
## Delete chunk

```python
+ Document.delete_chunks(chunk_ids: list[str])
```
### Parameters
+ #### chunk_ids:`list[str]`
The list of chunk_id

### Returns
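A round-trip sketch combining `add_chunk` and `delete_chunks`, using only the signatures documented above; the content string is illustrative and the returned `Chunk` is assumed to expose an `id` attribute:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(id="123")[0]
doc = ds.list_documents(id="wdfxb5t547d")[0]

# Add a chunk, then remove it again by its ID.
chunk = doc.add_chunk(content="RAGFlow is an open-source RAG engine.")
doc.delete_chunks(chunk_ids=[chunk.id])
```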
@@ +586,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ +606,7 @@
- `content`: `str`
Contains the main text or information of the chunk

+ - `important_keywords`: `list[str]`
List the key terms or phrases that are significant or central to the chunk's content

- `available`: `int`
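A sketch of `Chunk.update` built from the three keys documented in this hunk (`content`, `important_keywords`, `available`); the values are illustrative:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(id="123")[0]
doc = ds.list_documents(id="wdfxb5t547d")[0]
chunk = doc.add_chunk(content="xxxxxxx")

# Only the keys present in update_message are changed.
chunk.update({
    "content": "RAGFlow is an open-source RAG engine.",
    "important_keywords": ["RAGFlow", "RAG"],
    "available": 1,
})
```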
@@ +621,7 @@
```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(id="123")
ds = ds[0]
doc = ds.list_documents(id="wdfxb5t547d")

@@ +635,7 @@
## Retrieval

```python
+ RAGFlow.retrieve(question:str="", datasets:list[str]=None, document=list[str]=None, offset:int=1, limit:int=30, similarity_threshold:float=0.2, vector_similarity_weight:float=0.3, top_k:int=1024,rerank_id:str=None,keyword:bool=False,higlight:bool=False) -> list[Chunk]
```

### Parameters

@@ +644,11 @@

The user query or query keywords. Defaults to `""`.

+ #### datasets: `list[Dataset]`, *Required*

The scope of datasets.

+ #### document: `list[Document]`

The scope of document. `None` means no limitation. Defaults to `None`.


@@ +683,14 @@
Specifying whether to enable highlighting of matched terms in the results (True) or not (False).
### Returns

+ list[Chunk]

### Examples

```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="ragflow")
ds = ds[0]
name = 'ragflow_test.txt'
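A hedged sketch of a `retrieve` call using only the parameters in the signature above. Note that the signature and the parameter list disagree on whether `datasets` takes IDs (`list[str]`) or objects (`list[Dataset]`); the sketch passes IDs, assumes `DataSet` exposes an `id` attribute, and leaves the remaining options at their documented defaults:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
ds = rag.list_datasets(name="ragflow")[0]

# retrieve returns list[Chunk]; scope the search to one dataset.
for chunk in rag.retrieve(question="What's ragflow?",
                          datasets=[ds.id],
                          similarity_threshold=0.2,
                          vector_similarity_weight=0.3,
                          top_k=1024):
    print(chunk)
```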
@@ +714,20 @@
Chat APIs
:::

+ ## Create chat assistant

```python
RAGFlow.create_chat(
name: str = "assistant",
avatar: str = "path",
+ knowledgebases: list[DataSet] = [],
llm: Chat.LLM = None,
prompt: Chat.Prompt = None
) -> Chat
```

+ Creates a chat assistant.
+
### Returns

- Success: A `Chat` object representing the chat assistant.

@@ +752,7 @@
- **model_name**, `str`
The chat model name. If it is `None`, the user's default chat model will be returned.
- **temperature**, `float`
+ Controls the randomness of the model's predictions. A lower temperature increases the model's confidence in its responses; a higher temperature increases creativity and diversity. Defaults to `0.1`.
- **top_p**, `float`
Also known as “nucleus sampling”, this parameter sets a threshold to select a smaller set of words to sample from. It focuses on the most likely words, cutting off the less probable ones. Defaults to `0.3`
- **presence_penalty**, `float`

@@ +764,20 @@

#### Prompt: `str`

+ Instructions for the LLM to follow.
+
+ - `"similarity_threshold"`: `float` A similarity score to evaluate distance between two lines of text. It's weighted keywords similarity and vector cosine similarity. If the similarity between query and chunk is less than this threshold, the chunk will be filtered out. Defaults to `0.2`.
+ - `"keywords_similarity_weight"`: `float` It's weighted keywords similarity and vector cosine similarity or rerank score (0~1). Defaults to `0.7`.
+ - `"top_n"`: `int` Not all the chunks whose similarity score is above the 'similarity threshold' will be fed to LLMs. The LLM can only see these 'Top N' chunks. Defaults to `8`.
+ - `"variables"`: `list[dict[]]` If you use dialog APIs, the variables might help you chat with your clients with different strategies. The variables are used to fill in the 'System' part in prompt in order to give LLM a hint. The 'knowledge' is a very special variable which will be filled-in with the retrieved chunks. All the variables in 'System' should be curly bracketed. Defaults to `[{"key": "knowledge", "optional": True}]`
+ - `"rerank_model"`: `str` If it is not specified, vector cosine similarity will be used; otherwise, reranking score will be used. Defaults to `""`.
+ - `"empty_response"`: `str` If nothing is retrieved in the knowledge base for the user's question, this will be used as the response. To allow the LLM to improvise when nothing is retrieved, leave this blank. Defaults to `None`.
+ - `"opener"`: `str` The opening greeting for the user. Defaults to `"Hi! I am your assistant, can I help you?"`.
+ - `"show_quote"`: `bool` Indicates whether the source of text should be displayed. Defaults to `True`.
+ - `"prompt"`: `str` The prompt content. Defaults to `You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the knowledge base!" Answers need to consider chat history.
Here is the knowledge base:
{knowledge}
+ The above is the knowledge base.`.

### Examples
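A sketch of creating an assistant with the defaults described above; it relies only on `list_datasets` and the `create_chat` signature in this hunk, leaving `llm` and `prompt` as `None` so the documented default settings apply:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
knowledge_base = rag.list_datasets(name="kb_1")

# llm=None and prompt=None fall back to the default LLM and Prompt settings documented above.
assistant = rag.create_chat("Miss R", knowledgebases=knowledge_base)
print(assistant)
```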
@@ +793,12 @@

## Update chat

```python
Chat.update(update_message: dict)
```

+ Updates the current chat assistant.
+
### Parameters

#### update_message: `dict[str, Any]`, *Required*

@@ +806,26 @@
- `"name"`: `str` The name of the chat assistant to update.
- `"avatar"`: `str` Base64 encoding of the avatar. Defaults to `""`
- `"knowledgebases"`: `list[str]` Knowledge bases to update.
+ - `"llm"`: `dict` The LLM settings:
+ - `"model_name"`, `str` The chat model name.
+ - `"temperature"`, `float` Controls the randomness of the model's predictions.
- `"top_p"`, `float` Also known as “nucleus sampling”, this parameter sets a threshold to select a smaller set of words to sample from.
+ - `"presence_penalty"`, `float` This discourages the model from repeating the same information by penalizing words that have appeared in the conversation.
+ - `"frequency penalty"`, `float` Similar to presence penalty, this reduces the model’s tendency to repeat the same words.
- `"max_token"`, `int` This sets the maximum length of the model’s output, measured in the number of tokens (words or pieces of words).
+ - `"prompt"` : Instructions for the LLM to follow.
+ - `"similarity_threshold"`: `float` A score to evaluate distance between two lines of text. It's weighted keywords similarity and vector cosine similarity. If the similarity between query and chunk is less than this threshold, the chunk will be filtered out. Defaults to `0.2`.
+ - `"keywords_similarity_weight"`: `float` It's weighted keywords similarity and vector cosine similarity or rerank score (0~1). Defaults to `0.7`.
+ - `"top_n"`: `int` Not all the chunks whose similarity score is above the 'similarity threshold' will be fed to LLMs. The LLM can only see these 'Top N' chunks. Defaults to `8`.
+ - `"variables"`: `list[dict[]]` If you use dialog APIs, the variables might help you chat with your clients with different strategies. The variables are used to fill in the 'System' part in prompt in order to give LLM a hint. The 'knowledge' is a very special variable which will be filled-in with the retrieved chunks. All the variables in 'System' should be curly bracketed. Defaults to `[{"key": "knowledge", "optional": True}]`
+ - `"rerank_model"`: `str` If it is not specified, vector cosine similarity will be used; otherwise, reranking score will be used. Defaults to `""`.
+ - `"empty_response"`: `str` If nothing is retrieved in the knowledge base for the user's question, this will be used as the response. To allow the LLM to improvise when nothing is retrieved, leave this blank. Defaults to `None`.
+ - `"opener"`: `str` The opening greeting for the user. Defaults to `"Hi! I am your assistant, can I help you?"`.
+ - `"show_quote"`: `bool` Indicates whether the source of text should be displayed. Defaults to `True`.
+ - `"prompt"`: `str` The prompt content. Defaults to `You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the knowledge base!" Answers need to consider chat history.
+ Here is the knowledge base:
+ {knowledge}
+ The above is the knowledge base.`.

### Returns

@@ +840,7 @@
rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
knowledge_base = rag.list_datasets(name="kb_1")
assistant = rag.create_chat("Miss R", knowledgebases=knowledge_base)
+ assistant.update({"name": "Stefan", "llm": {"temperature": 0.8}, "prompt": {"top_n": 8}})
```

---

@@ +850,14 @@
Deletes specified chat assistants.

```python
+ RAGFlow.delete_chats(ids: list[str] = None)
```

### Parameters

#### ids

+ IDs of the chat assistants to delete. If not specified, all chat assistants will be deleted.

### Returns
@@ +885,14 @@
desc: bool = True,
id: str = None,
name: str = None
+ ) -> list[Chat]
```

### Parameters

#### page

+ Specifies the page on which the records will be displayed. Defaults to `1`.

#### page_size

@@ +908,15 @@

#### id: `string`

+ The ID of the chat to retrieve. Defaults to `None`.

#### name: `string`

+ The name of the chat to retrieve. Defaults to `None`.

### Returns

+ - Success: A list of `Chat` objects.
- Failure: `Exception`.

### Examples
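A short paging sketch for `list_chats`, using only the `page`, `page_size`, and `name` parameters documented above; the values are illustrative:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")

# Returns list[Chat]; fetch the first page of assistants.
for assistant in rag.list_chats(page=1, page_size=30):
    print(assistant)
```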
@@ +941,63 @@
Chat.create_session(name: str = "New session") -> Session
```

+ Creates a chat session.

+ ### Parameters

+ #### name

+ The name of the chat session to create.

+ ### Returns

+ - Success: A `Session` object containing the following attributes:
+ - `id`: `str` The auto-generated unique identifier of the created session.
+ - `name`: `str` The name of the created session.
+ - `message`: `list[Message]` The messages of the created session assistant. Default: `[{"role": "assistant", "content": "Hi! I am your assistant,can I help you?"}]`
+ - `chat_id`: `str` The ID of the associated chat assistant.
+ - Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
+ assistant = rag.list_chats(name="Miss R")
+ assistant = assistant[0]
+ session = assistant.create_session()
```

## Update session

```python
+ Session.update(update_message: dict)
```

+ Updates the current session.
+
+ ### Parameters
+
+ #### update_message: `dict[str, Any]`, *Required*
+
+ - `"name"`: `str` The name of the session to update.
+
### Returns

+ - Success: No value is returned.
+ - Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
+ assistant = rag.list_chats(name="Miss R")
+ assistant = assistant[0]
+ session = assistant.create_session("session_name")
+ session.update({"name": "updated_name"})
```

---
@@ +1010,66 @@

### Parameters

+ #### question *Required*

+ The question to start an AI chat. Defaults to `None`.

+ #### stream

+ Indicates whether to output responses in a streaming way. Defaults to `False`.

### Returns

+ Optional[Message, iter[Message]]
+
+ - Message object, if `stream` is set to `False`
+ - iter[Message] object, if `stream` is set to `True`

#### id: `str`

+ The ID of the message. `id` is automatically generated.

#### content: `str`

The content of the message. Defaults to `"Hi! I am your assistant, can I help you?"`.

+ #### reference: `list[Chunk]`

The auto-generated reference of the message. Each `chunk` object includes the following attributes:

- **id**: `str`
+ The id of the chunk.
- **content**: `str`
+ The content of the chunk.
- **document_id**: `str`
+ The ID of the document being referenced.
- **document_name**: `str`
+ The name of the referenced document.
- **knowledgebase_id**: `str`
+ The id of the knowledge base to which the relevant document belongs.
- **image_id**: `str`
+ The id of the image related to the chunk.
- **similarity**: `float`
+ A general similarity score, usually a composite score derived from various similarity measures. This score represents the degree of similarity between two objects. The value ranges between 0 and 1, where a value closer to 1 indicates higher similarity.
- **vector_similarity**: `float`
+ A similarity score based on vector representations. This score is obtained by converting texts, words, or objects into vectors and then calculating the cosine similarity or other distance measures between these vectors to determine the similarity in vector space. A higher value indicates greater similarity in the vector space.
- **term_similarity**: `float`
+ The similarity score based on terms or keywords. This score is calculated by comparing the similarity of key terms between texts or datasets, typically measuring how similar two words or phrases are in meaning or context. A higher value indicates a stronger similarity between terms.
+ - **position**: `list[string]`
+ Indicates the position or index of keywords or specific terms within the text. An array is typically used to mark the location of keywords or specific elements, facilitating precise operations or analysis of the text.

### Examples

```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
+ assistant = rag.list_chats(name="Miss R")
+ assistant = assistant[0]
+ sess = assistant.create_session()

print("\n==================== Miss R =====================\n")
+ print(assistant.get_prologue())

while True:
    question = input("\n==================== User =====================\n> ")
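A hedged streaming sketch for `Session.ask`, based only on the return contract above (`iter[Message]` when `stream=True`, a single `Message` otherwise) and the `content` attribute of `Message`; how partial content accumulates across streamed messages is not specified in this hunk:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
assistant = rag.list_chats(name="Miss R")[0]
sess = assistant.create_session()

# stream=True yields Message objects as the answer is generated.
for message in sess.ask("What is RAGFlow?", stream=True):
    print(message.content)

# stream=False returns a single Message instead.
answer = sess.ask("What is RAGFlow?", stream=False)
print(answer.content)
```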
@@ +1094,82 @@
desc: bool = True,
id: str = None,
name: str = None
+ ) -> list[Session]
```

+ Lists sessions associated with the current chat assistant.

+ ### Parameters

+ #### page

+ Specifies the page on which records will be displayed. Defaults to `1`.

+ #### page_size

+ The number of records on each page. Defaults to `1024`.

+ #### orderby

+ The field by which the records should be sorted. This specifies the attribute or column used to sort the results. Defaults to `"create_time"`.

+ #### desc

+ Whether the sorting should be in descending order. Defaults to `True`.

+ #### id

+ The ID of the chat session to retrieve. Defaults to `None`.

+ #### name

+ The name of the chat session to retrieve. Defaults to `None`.

+ ### Returns

+ - Success: A list of `Session` objects associated with the current chat assistant.
+ - Failure: `Exception`.

+ ### Examples
+
+ ```python
+ from ragflow import RAGFlow
+
+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
+ assistant = rag.list_chats(name="Miss R")
+ assistant = assistant[0]
+ for session in assistant.list_sessions():
+     print(session)
+ ```

---

+ ## Delete sessions

```python
+ Chat.delete_sessions(ids:list[str] = None)
```

+ Deletes specified sessions or all sessions associated with the current chat assistant.
+
+ ### Parameters
+
+ #### ids
+
+ IDs of the sessions to delete. If not specified, all sessions associated with the current chat assistant will be deleted.
+
### Returns

+ - Success: No value is returned.
+ - Failure: `Exception`

### Examples

```python
from ragflow import RAGFlow

+ rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
+ assistant = rag.list_chats(name="Miss R")
+ assistant = assistant[0]
+ assistant.delete_sessions(ids=["id_1","id_2"])
+ ```
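A clean-up sketch combining `list_sessions` and `delete_sessions` from the two hunks above; it assumes each listed `Session` exposes the `id` attribute documented for `create_session`:

```python
from ragflow import RAGFlow

rag = RAGFlow(api_key="<YOUR_API_KEY>", base_url="http://<YOUR_BASE_URL>:9380")
assistant = rag.list_chats(name="Miss R")[0]

# Collect the IDs of every session on this assistant and delete them in one call.
session_ids = [session.id for session in assistant.list_sessions()]
assistant.delete_sessions(ids=session_ids)
```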