UlisesHdz18 committed
Commit 8e8bc53 · verified · 1 Parent(s): a475e95

Update version


Refine agent approach

Files changed (5)
  1. agent.py +156 -57
  2. app.py +3 -2
  3. metadata.jsonl +0 -0
  4. system_prompt.txt +5 -0
  5. test.ipynb +701 -0
agent.py CHANGED
@@ -5,40 +5,143 @@ Created on Fri May 30 11:35:33 2025
5
  @author: p_uli
6
  """
7
 
 
8
  from dotenv import load_dotenv
9
-
10
- load_dotenv()
11
-
12
-
13
-
14
  from langgraph.graph import START, StateGraph, MessagesState
15
  from langgraph.prebuilt import tools_condition
16
  from langgraph.prebuilt import ToolNode
17
- from langchain_core.messages import SystemMessage, HumanMessage
18
  from langchain_groq import ChatGroq
19
- from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
20
- #tools
21
- from search_and_math_tools import wiki_search, web_search, arxiv_search, vector_store
22
- from search_and_math_tools import multiply, add, subtract, divide, modulus,power,square_root
23
- from codetools import execute_code_multilang
24
- from doctools import save_and_read_file,download_file_from_url, extract_text_from_image, analyze_csv_file, analyze_excel_file
25
- from image_tools import analyze_image, transform_image, draw_on_image, generate_simple_image, combine_images
 
 
26

27

28
 
29
  # System message
30
- sys_prompt = '''
31
- You are a helpful assistant tasked with answering questions using a set of tools.
32
- Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
33
- FINAL ANSWER: [YOUR FINAL ANSWER].
34
- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
35
- If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
36
- If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
37
- If you are asked for a comma separated list, Apply the rules above for each element (number or string), ensure there is exactly one space after each comma.
38
- Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
39
- '''
40
-
41
- sys_msg = SystemMessage(content=sys_prompt)
42
 
43
 
44
 
@@ -48,50 +151,47 @@ tools = [
48
  subtract,
49
  divide,
50
  modulus,
51
- power,
52
- square_root,
53
- web_search,
54
  wiki_search,
55
- arxiv_search,
56
- save_and_read_file,
57
- analyze_csv_file,
58
- analyze_excel_file,
59
- download_file_from_url,
60
- extract_text_from_image,
61
- analyze_image,
62
- transform_image,
63
- draw_on_image,
64
- generate_simple_image,
65
- combine_images,
66
- execute_code_multilang
67
  ]
68
 
69
  # Build graph function
70
- def build_graph():
71
  """Build the graph"""
72
  # Load environment variables from .env file
73
- model = ChatGroq(model="qwen-qwq-32b", temperature=0)
74
-
75
- # Adding tools to the model
76
- model_with_tools = model.bind_tools(tools)
77
 
78
  # Node
79
  def assistant(state: MessagesState):
80
  """Assistant node"""
81
- return {"messages": [model_with_tools.invoke(state["messages"])]}
82
-
83
  def retriever(state: MessagesState):
84
  """Retriever node"""
85
  similar_question = vector_store.similarity_search(state["messages"][0].content)
86
-
87
- if similar_question: # Check if the list is not empty
88
- example_msg = HumanMessage(
89
- content=f"Here is a similar question and answer for reference: \n\n{similar_question[0].page_content}",
90
- )
91
- return {"messages": [sys_msg] + state["messages"] + [example_msg]}
92
- else:
93
- # Handle the case when no similar questions are found
94
- return {"messages": [sys_msg] + state["messages"]}
95
 
96
  builder = StateGraph(MessagesState)
97
  builder.add_node("retriever", retriever)
@@ -107,4 +207,3 @@ def build_graph():
107
 
108
  # Compile graph
109
  return builder.compile()
110
-
 
5
  @author: p_uli
6
  """
7
 
8
+ import os
9
  from dotenv import load_dotenv
10
  from langgraph.graph import START, StateGraph, MessagesState
11
  from langgraph.prebuilt import tools_condition
12
  from langgraph.prebuilt import ToolNode
13
+ from langchain_google_genai import ChatGoogleGenerativeAI
14
  from langchain_groq import ChatGroq
15
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
16
+ from langchain_community.tools.tavily_search import TavilySearchResults
17
+ from langchain_community.document_loaders import WikipediaLoader
18
+ from langchain_community.document_loaders import ArxivLoader
19
+ from langchain_community.vectorstores import SupabaseVectorStore
20
+ from langchain_core.messages import SystemMessage, HumanMessage
21
+ from langchain_core.tools import tool
22
+ from langchain.tools.retriever import create_retriever_tool
23
+ from supabase.client import Client, create_client
24
 
25
+ load_dotenv()
26
 
27
+ @tool
28
+ def multiply(a: int, b: int) -> int:
29
+ """Multiply two numbers.
30
+ Args:
31
+ a: first int
32
+ b: second int
33
+ """
34
+ return a * b
35
+
36
+ @tool
37
+ def add(a: int, b: int) -> int:
38
+ """Add two numbers.
39
+
40
+ Args:
41
+ a: first int
42
+ b: second int
43
+ """
44
+ return a + b
45
+
46
+ @tool
47
+ def subtract(a: int, b: int) -> int:
48
+ """Subtract two numbers.
49
+
50
+ Args:
51
+ a: first int
52
+ b: second int
53
+ """
54
+ return a - b
55
+
56
+ @tool
57
+ def divide(a: int, b: int) -> int:
58
+ """Divide two numbers.
59
+
60
+ Args:
61
+ a: first int
62
+ b: second int
63
+ """
64
+ if b == 0:
65
+ raise ValueError("Cannot divide by zero.")
66
+ return a / b
67
+
68
+ @tool
69
+ def modulus(a: int, b: int) -> int:
70
+ """Get the modulus of two numbers.
71
+
72
+ Args:
73
+ a: first int
74
+ b: second int
75
+ """
76
+ return a % b
77
+
78
+ @tool
79
+ def wiki_search(query: str) -> str:
80
+ """Search Wikipedia for a query and return maximum 2 results.
81
+
82
+ Args:
83
+ query: The search query."""
84
+ search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
85
+ formatted_search_docs = "\n\n---\n\n".join(
86
+ [
87
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
88
+ for doc in search_docs
89
+ ])
90
+ return {"wiki_results": formatted_search_docs}
91
+
92
+ @tool
93
+ def web_search(query: str) -> str:
94
+ """Search Tavily for a query and return maximum 3 results.
95
+
96
+ Args:
97
+ query: The search query."""
98
+ search_docs = TavilySearchResults(max_results=3).invoke(query=query)
99
+ formatted_search_docs = "\n\n---\n\n".join(
100
+ [
101
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
102
+ for doc in search_docs
103
+ ])
104
+ return {"web_results": formatted_search_docs}
105
+
106
+ @tool
107
+ def arvix_search(query: str) -> str:
108
+ """Search Arxiv for a query and return maximum 3 result.
109
+
110
+ Args:
111
+ query: The search query."""
112
+ search_docs = ArxivLoader(query=query, load_max_docs=3).load()
113
+ formatted_search_docs = "\n\n---\n\n".join(
114
+ [
115
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
116
+ for doc in search_docs
117
+ ])
118
+ return {"arvix_results": formatted_search_docs}
119
+
120
+
121
+
122
+ # load the system prompt from the file
123
+ with open("system_prompt.txt", "r", encoding="utf-8") as f:
124
+ system_prompt = f.read()
125
 
126
  # System message
127
+ sys_msg = SystemMessage(content=system_prompt)
128
+
129
+ # build a retriever
130
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") # dim=768
131
+ supabase: Client = create_client(
132
+ os.environ.get("SUPABASE_URL"),
133
+ os.environ.get("SUPABASE_SERVICE_KEY"))
134
+ vector_store = SupabaseVectorStore(
135
+ client=supabase,
136
+ embedding= embeddings,
137
+ table_name="documents",
138
+ query_name="match_documents_langchain",
139
+ )
140
+ question_retrieve_tool = create_retriever_tool(
141
+ retriever=vector_store.as_retriever(),
142
+ name="Question Search",
143
+ description="A tool to retrieve similar questions from a vector store.",
144
+ )
145
 
146
 
147
 
 
151
  subtract,
152
  divide,
153
  modulus,
 
 
 
154
  wiki_search,
155
+ web_search,
156
+ arvix_search,
157
+ question_retrieve_tool
158
  ]
159
 
160
  # Build graph function
161
+ def build_graph(provider: str = "groq"):
162
  """Build the graph"""
163
  # Load environment variables from .env file
164
+ if provider == "google":
165
+ # Google Gemini
166
+ llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
167
+ elif provider == "groq":
168
+ # Groq https://console.groq.com/docs/models
169
+ llm = ChatGroq(model="qwen-qwq-32b", temperature=0) # optional : qwen-qwq-32b gemma2-9b-it
170
+ elif provider == "huggingface":
171
+ # TODO: Add huggingface endpoint
172
+ llm = ChatHuggingFace(
173
+ llm=HuggingFaceEndpoint(
174
+ repo_id="Meta-DeepLearning/llama-2-7b-chat-hf",
175
+ temperature=0,
176
+ ),
177
+ )
178
+ else:
179
+ raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
180
+ # Bind tools to LLM
181
+ llm_with_tools = llm.bind_tools(tools)
182
 
183
  # Node
184
  def assistant(state: MessagesState):
185
  """Assistant node"""
186
+ return {"messages": [llm_with_tools.invoke(state["messages"])]}
187
+
188
  def retriever(state: MessagesState):
189
  """Retriever node"""
190
  similar_question = vector_store.similarity_search(state["messages"][0].content)
191
+ example_msg = HumanMessage(
192
+ content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
193
+ )
194
+ return {"messages": [sys_msg] + state["messages"] + [example_msg]}
195
 
196
  builder = StateGraph(MessagesState)
197
  builder.add_node("retriever", retriever)
 
207
 
208
  # Compile graph
209
  return builder.compile()
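
For reference, the refactored build_graph above can be smoke-tested outside the Space roughly as follows. This is only a sketch, not part of the commit: it assumes the Groq, Supabase and Hugging Face credentials referenced above are available via .env, that agent.py is importable, and the question text is purely illustrative.

    from langchain_core.messages import HumanMessage
    from agent import build_graph

    # Build the graph with the default Groq backend ("google" and "huggingface" are the other options).
    graph = build_graph(provider="groq")

    # Invoke it the same way test.ipynb does: a single HumanMessage in the state.
    result = graph.invoke({"messages": [HumanMessage(content="What is 12 multiplied by 7?")]})

    # If the system prompt is followed, the last message should end with a line like "FINAL ANSWER: 84".
    print(result["messages"][-1].content)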
 
app.py CHANGED
@@ -16,7 +16,7 @@ class BasicAgent:
16
  """A langgraph agent."""
17
  def __init__(self):
18
  print("BasicAgent initialized.")
19
- self.graph = build_graph()
20
 
21
  def __call__(self, question: str) -> str:
22
  print(f"Agent received question (first 50 chars): {question[:50]}...")
@@ -32,7 +32,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
32
  and displays the results.
33
  """
34
  # --- Determine HF Space Runtime URL and Repo URL ---
35
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
36
 
37
  if profile:
38
  username= f"{profile.username}"
 
16
  """A langgraph agent."""
17
  def __init__(self):
18
  print("BasicAgent initialized.")
19
+ self.graph = build_graph() #provider = 'huggingface'
20
 
21
  def __call__(self, question: str) -> str:
22
  print(f"Agent received question (first 50 chars): {question[:50]}...")
 
32
  and displays the results.
33
  """
34
  # --- Determine HF Space Runtime URL and Repo URL ---
35
+ # space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
36
+ space_id = 'UlisesHdz18/MyfirstAgent'
37
 
38
  if profile:
39
  username= f"{profile.username}"
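
If the hard-coded repo id above ever needs to coexist with the runtime-provided one, a small environment fallback (not part of this commit, shown only as a sketch) would keep both paths working:

    import os

    # Prefer the SPACE_ID injected by the Hugging Face runtime; fall back to the pinned repo id otherwise.
    space_id = os.getenv("SPACE_ID", "UlisesHdz18/MyfirstAgent")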
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
system_prompt.txt ADDED
@@ -0,0 +1,5 @@
1
+ You are a helpful assistant tasked with answering questions using a set of tools.
2
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
3
+ FINAL ANSWER: [YOUR FINAL ANSWER].
4
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
5
+ Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
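
As an illustration of the answer contract this prompt enforces, the final answer could be pulled out of a model reply with a helper like the one below. This is a hypothetical sketch; the grading pipeline and app.py may post-process replies differently.

    def extract_final_answer(reply: str) -> str:
        """Return the text after the last 'FINAL ANSWER:' marker, stripped of surrounding whitespace."""
        marker = "FINAL ANSWER:"
        if marker not in reply:
            raise ValueError("Reply does not follow the FINAL ANSWER template.")
        return reply.rsplit(marker, 1)[1].strip()

    # Example: extract_final_answer("...reasoning...\nFINAL ANSWER: Indonesia, Myanmar") -> "Indonesia, Myanmar"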
test.ipynb ADDED
@@ -0,0 +1,701 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "d0cc4adf",
6
+ "metadata": {},
7
+ "source": [
8
+ "### Question data"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 2,
14
+ "id": "14e3f417",
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "# Load metadata.jsonl\n",
19
+ "import json\n",
20
+ "# Load the metadata.jsonl file\n",
21
+ "with open('metadata.jsonl', 'r') as jsonl_file:\n",
22
+ " json_list = list(jsonl_file)\n",
23
+ "\n",
24
+ "json_QA = []\n",
25
+ "for json_str in json_list:\n",
26
+ " json_data = json.loads(json_str)\n",
27
+ " json_QA.append(json_data)"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": 5,
33
+ "id": "5e2da6fc",
34
+ "metadata": {},
35
+ "outputs": [
36
+ {
37
+ "name": "stdout",
38
+ "output_type": "stream",
39
+ "text": [
40
+ "==================================================\n",
41
+ "Task ID: 2a649bb1-795f-4a01-b3be-9a01868dae73\n",
42
+ "Question: What are the EC numbers of the two most commonly used chemicals for the virus testing method in the paper about SPFMV and SPCSV in the Pearl Of Africa from 2016? Return the semicolon-separated numbers in the order of the alphabetized chemicals.\n",
43
+ "Level: 2\n",
44
+ "Final Answer: 3.1.3.1; 1.11.1.7\n",
45
+ "Annotator Metadata: \n",
46
+ " ├── Steps: \n",
47
+ " │ ├── 1. Searched \"Pearl of Africa\" on Google.\n",
48
+ " │ ├── 2. Noted the answer from the results.\n",
49
+ " │ ├── 3. Searched \"SPFMV and SPCSV in Uganda 2016 paper\" on Google.\n",
50
+ " │ ├── 4. Opened \"Effects of Sweet Potato Feathery Mottle Virus and ...\" at https://onlinelibrary.wiley.com/doi/full/10.1111/jph.12451.\n",
51
+ " │ ├── 5. Found the section on virus testing.\n",
52
+ " │ ├── 6. Searched \"most commonly used chemicals for ELISA\" on Google.\n",
53
+ " │ ├── 7. Noted horseradish peroxidase and alkaline phosphatase from the results.\n",
54
+ " │ ├── 8. Searched \"horseradish peroxidase EC number\" on Google.\n",
55
+ " │ ├── 9. Noted the answer from the featured text snippet (1.11.1.7).\n",
56
+ " │ ├── 10. Searched \"alkaline phosphatase EC number\" on Google.\n",
57
+ " │ ├── 11. Noted the answer from the featured text snippet (3.1.3.1).\n",
58
+ " │ ├── 12. Alphabetized the chemicals.\n",
59
+ " │ ├── 13. Put the numbers in the order of the chemicals.\n",
60
+ " ├── Number of steps: 13\n",
61
+ " ├── How long did this take?: 15 minutes\n",
62
+ " ├── Tools:\n",
63
+ " │ ├── 1. Web browser\n",
64
+ " │ ├── 2. Search engine\n",
65
+ " └── Number of tools: 2\n",
66
+ "==================================================\n"
67
+ ]
68
+ }
69
+ ],
70
+ "source": [
71
+ "# randomly select 3 samples\n",
72
+ "# {\"task_id\": \"c61d22de-5f6c-4958-a7f6-5e9707bd3466\", \"Question\": \"A paper about AI regulation that was originally submitted to arXiv.org in June 2022 shows a figure with three axes, where each axis has a label word at both ends. Which of these words is used to describe a type of society in a Physics and Society article submitted to arXiv.org on August 11, 2016?\", \"Level\": 2, \"Final answer\": \"egalitarian\", \"file_name\": \"\", \"Annotator Metadata\": {\"Steps\": \"1. Go to arxiv.org and navigate to the Advanced Search page.\\n2. Enter \\\"AI regulation\\\" in the search box and select \\\"All fields\\\" from the dropdown.\\n3. Enter 2022-06-01 and 2022-07-01 into the date inputs, select \\\"Submission date (original)\\\", and submit the search.\\n4. Go through the search results to find the article that has a figure with three axes and labels on each end of the axes, titled \\\"Fairness in Agreement With European Values: An Interdisciplinary Perspective on AI Regulation\\\".\\n5. Note the six words used as labels: deontological, egalitarian, localized, standardized, utilitarian, and consequential.\\n6. Go back to arxiv.org\\n7. Find \\\"Physics and Society\\\" and go to the page for the \\\"Physics and Society\\\" category.\\n8. Note that the tag for this category is \\\"physics.soc-ph\\\".\\n9. Go to the Advanced Search page.\\n10. Enter \\\"physics.soc-ph\\\" in the search box and select \\\"All fields\\\" from the dropdown.\\n11. Enter 2016-08-11 and 2016-08-12 into the date inputs, select \\\"Submission date (original)\\\", and submit the search.\\n12. Search for instances of the six words in the results to find the paper titled \\\"Phase transition from egalitarian to hierarchical societies driven by competition between cognitive and social constraints\\\", indicating that \\\"egalitarian\\\" is the correct answer.\", \"Number of steps\": \"12\", \"How long did this take?\": \"8 minutes\", \"Tools\": \"1. Web browser\\n2. Image recognition tools (to identify and parse a figure with three axes)\", \"Number of tools\": \"2\"}}\n",
73
+ "\n",
74
+ "import random\n",
75
+ "# random.seed(42)\n",
76
+ "random_samples = random.sample(json_QA, 1)\n",
77
+ "for sample in random_samples:\n",
78
+ " print(\"=\" * 50)\n",
79
+ " print(f\"Task ID: {sample['task_id']}\")\n",
80
+ " print(f\"Question: {sample['Question']}\")\n",
81
+ " print(f\"Level: {sample['Level']}\")\n",
82
+ " print(f\"Final Answer: {sample['Final answer']}\")\n",
83
+ " print(f\"Annotator Metadata: \")\n",
84
+ " print(f\" ├── Steps: \")\n",
85
+ " for step in sample['Annotator Metadata']['Steps'].split('\\n'):\n",
86
+ " print(f\" │ ├── {step}\")\n",
87
+ " print(f\" ├── Number of steps: {sample['Annotator Metadata']['Number of steps']}\")\n",
88
+ " print(f\" ├── How long did this take?: {sample['Annotator Metadata']['How long did this take?']}\")\n",
89
+ " print(f\" ├── Tools:\")\n",
90
+ " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
91
+ " print(f\" │ ├── {tool}\")\n",
92
+ " print(f\" └── Number of tools: {sample['Annotator Metadata']['Number of tools']}\")\n",
93
+ "print(\"=\" * 50)"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": 6,
99
+ "id": "4bb02420",
100
+ "metadata": {},
101
+ "outputs": [
102
+ {
103
+ "ename": "SupabaseException",
104
+ "evalue": "supabase_url is required",
105
+ "output_type": "error",
106
+ "traceback": [
107
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
108
+ "\u001b[1;31mSupabaseException\u001b[0m Traceback (most recent call last)",
109
+ "Cell \u001b[1;32mIn[6], line 15\u001b[0m\n\u001b[0;32m 13\u001b[0m supabase_url \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39menviron\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSUPABASE_URL\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 14\u001b[0m supabase_key \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39menviron\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSUPABASE_SERVICE_KEY\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m---> 15\u001b[0m supabase: Client \u001b[38;5;241m=\u001b[39m create_client(supabase_url, supabase_key)\n",
110
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\supabase\\_sync\\client.py:341\u001b[0m, in \u001b[0;36mcreate_client\u001b[1;34m(supabase_url, supabase_key, options)\u001b[0m\n\u001b[0;32m 310\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcreate_client\u001b[39m(\n\u001b[0;32m 311\u001b[0m supabase_url: \u001b[38;5;28mstr\u001b[39m,\n\u001b[0;32m 312\u001b[0m supabase_key: \u001b[38;5;28mstr\u001b[39m,\n\u001b[0;32m 313\u001b[0m options: Optional[ClientOptions] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 314\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m SyncClient:\n\u001b[0;32m 315\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Create client function to instantiate supabase client like JS runtime.\u001b[39;00m\n\u001b[0;32m 316\u001b[0m \n\u001b[0;32m 317\u001b[0m \u001b[38;5;124;03m Parameters\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 339\u001b[0m \u001b[38;5;124;03m Client\u001b[39;00m\n\u001b[0;32m 340\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m--> 341\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m SyncClient\u001b[38;5;241m.\u001b[39mcreate(\n\u001b[0;32m 342\u001b[0m supabase_url\u001b[38;5;241m=\u001b[39msupabase_url, supabase_key\u001b[38;5;241m=\u001b[39msupabase_key, options\u001b[38;5;241m=\u001b[39moptions\n\u001b[0;32m 343\u001b[0m )\n",
111
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\supabase\\_sync\\client.py:103\u001b[0m, in \u001b[0;36mSyncClient.create\u001b[1;34m(cls, supabase_url, supabase_key, options)\u001b[0m\n\u001b[0;32m 95\u001b[0m \u001b[38;5;129m@classmethod\u001b[39m\n\u001b[0;32m 96\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcreate\u001b[39m(\n\u001b[0;32m 97\u001b[0m \u001b[38;5;28mcls\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 100\u001b[0m options: Optional[ClientOptions] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[0;32m 101\u001b[0m ):\n\u001b[0;32m 102\u001b[0m auth_header \u001b[38;5;241m=\u001b[39m options\u001b[38;5;241m.\u001b[39mheaders\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAuthorization\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mif\u001b[39;00m options \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m--> 103\u001b[0m client \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mcls\u001b[39m(supabase_url, supabase_key, options)\n\u001b[0;32m 105\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m auth_header \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m 106\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n",
112
+ "File \u001b[1;32m~\\anaconda3\\Lib\\site-packages\\supabase\\_sync\\client.py:52\u001b[0m, in \u001b[0;36mSyncClient.__init__\u001b[1;34m(self, supabase_url, supabase_key, options)\u001b[0m\n\u001b[0;32m 38\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Instantiate the client.\u001b[39;00m\n\u001b[0;32m 39\u001b[0m \n\u001b[0;32m 40\u001b[0m \u001b[38;5;124;03mParameters\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 48\u001b[0m \u001b[38;5;124;03m `DEFAULT_OPTIONS` dict.\u001b[39;00m\n\u001b[0;32m 49\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 51\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m supabase_url:\n\u001b[1;32m---> 52\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m SupabaseException(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msupabase_url is required\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m 53\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m supabase_key:\n\u001b[0;32m 54\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m SupabaseException(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124msupabase_key is required\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
113
+ "\u001b[1;31mSupabaseException\u001b[0m: supabase_url is required"
114
+ ]
115
+ }
116
+ ],
117
+ "source": [
118
+ "### build a vector database based on the metadata.jsonl\n",
119
+ "# https://python.langchain.com/docs/integrations/vectorstores/supabase/\n",
120
+ "import os\n",
121
+ "from dotenv import load_dotenv\n",
122
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
123
+ "from langchain_community.vectorstores import SupabaseVectorStore\n",
124
+ "from supabase.client import Client, create_client\n",
125
+ "\n",
126
+ "\n",
127
+ "load_dotenv()\n",
128
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\") # dim=768\n",
129
+ "\n",
130
+ "supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
131
+ "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
132
+ "supabase: Client = create_client(supabase_url, supabase_key)"
133
+ ]
134
+ },
135
+ {
136
+ "cell_type": "code",
137
+ "execution_count": null,
138
+ "id": "a070b955",
139
+ "metadata": {},
140
+ "outputs": [],
141
+ "source": [
142
+ "# wrap the metadata.jsonl's questions and answers into a list of document\n",
143
+ "from langchain.schema import Document\n",
144
+ "docs = []\n",
145
+ "for sample in json_QA:\n",
146
+ " content = f\"Question : {sample['Question']}\\n\\nFinal answer : {sample['Final answer']}\"\n",
147
+ " doc = {\n",
148
+ " \"content\" : content,\n",
149
+ " \"metadata\" : { # meatadata的格式必须时source键,否则会报错\n",
150
+ " \"source\" : sample['task_id']\n",
151
+ " },\n",
152
+ " \"embedding\" : embeddings.embed_query(content),\n",
153
+ " }\n",
154
+ " docs.append(doc)\n",
155
+ "\n",
156
+ "# upload the documents to the vector database\n",
157
+ "try:\n",
158
+ " response = (\n",
159
+ " supabase.table(\"documents\")\n",
160
+ " .insert(docs)\n",
161
+ " .execute()\n",
162
+ " )\n",
163
+ "except Exception as exception:\n",
164
+ " print(\"Error inserting data into Supabase:\", exception)\n",
165
+ "\n",
166
+ "# ALTERNATIVE : Save the documents (a list of dict) into a csv file, and manually upload it to Supabase\n",
167
+ "# import pandas as pd\n",
168
+ "# df = pd.DataFrame(docs)\n",
169
+ "# df.to_csv('supabase_docs.csv', index=False)"
170
+ ]
171
+ },
172
+ {
173
+ "cell_type": "code",
174
+ "execution_count": 54,
175
+ "id": "77fb9dbb",
176
+ "metadata": {},
177
+ "outputs": [],
178
+ "source": [
179
+ "# add items to vector database\n",
180
+ "vector_store = SupabaseVectorStore(\n",
181
+ " client=supabase,\n",
182
+ " embedding= embeddings,\n",
183
+ " table_name=\"documents\",\n",
184
+ " query_name=\"match_documents_langchain\",\n",
185
+ ")\n",
186
+ "retriever = vector_store.as_retriever()"
187
+ ]
188
+ },
189
+ {
190
+ "cell_type": "code",
191
+ "execution_count": 55,
192
+ "id": "12a05971",
193
+ "metadata": {},
194
+ "outputs": [
195
+ {
196
+ "name": "stderr",
197
+ "output_type": "stream",
198
+ "text": [
199
+ "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
200
+ "To disable this warning, you can either:\n",
201
+ "\t- Avoid using `tokenizers` before the fork if possible\n",
202
+ "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
203
+ ]
204
+ },
205
+ {
206
+ "data": {
207
+ "text/plain": [
208
+ "Document(metadata={'source': '840bfca7-4f7b-481a-8794-c560c340185d'}, page_content='Question : On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\\n\\nFinal answer : 80GSFC21M0002')"
209
+ ]
210
+ },
211
+ "execution_count": 55,
212
+ "metadata": {},
213
+ "output_type": "execute_result"
214
+ }
215
+ ],
216
+ "source": [
217
+ "query = \"On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\"\n",
218
+ "# matched_docs = vector_store.similarity_search(query, 2)\n",
219
+ "docs = retriever.invoke(query)\n",
220
+ "docs[0]"
221
+ ]
222
+ },
223
+ {
224
+ "cell_type": "code",
225
+ "execution_count": 31,
226
+ "id": "1eae5ba4",
227
+ "metadata": {},
228
+ "outputs": [
229
+ {
230
+ "name": "stdout",
231
+ "output_type": "stream",
232
+ "text": [
233
+ "List of tools used in all samples:\n",
234
+ "Total number of tools used: 83\n",
235
+ " ├── web browser: 107\n",
236
+ " ├── image recognition tools (to identify and parse a figure with three axes): 1\n",
237
+ " ├── search engine: 101\n",
238
+ " ├── calculator: 34\n",
239
+ " ├── unlambda compiler (optional): 1\n",
240
+ " ├── a web browser.: 2\n",
241
+ " ├── a search engine.: 2\n",
242
+ " ├── a calculator.: 1\n",
243
+ " ├── microsoft excel: 5\n",
244
+ " ├── google search: 1\n",
245
+ " ├── ne: 9\n",
246
+ " ├── pdf access: 7\n",
247
+ " ├── file handling: 2\n",
248
+ " ├── python: 3\n",
249
+ " ├── image recognition tools: 12\n",
250
+ " ├── jsonld file access: 1\n",
251
+ " ├── video parsing: 1\n",
252
+ " ├── python compiler: 1\n",
253
+ " ├── video recognition tools: 3\n",
254
+ " ├── pdf viewer: 7\n",
255
+ " ├── microsoft excel / google sheets: 3\n",
256
+ " ├── word document access: 1\n",
257
+ " ├── tool to extract text from images: 1\n",
258
+ " ├── a word reversal tool / script: 1\n",
259
+ " ├── counter: 1\n",
260
+ " ├── excel: 3\n",
261
+ " ├── image recognition: 5\n",
262
+ " ├── color recognition: 3\n",
263
+ " ├── excel file access: 3\n",
264
+ " ├── xml file access: 1\n",
265
+ " ├── access to the internet archive, web.archive.org: 1\n",
266
+ " ├── text processing/diff tool: 1\n",
267
+ " ├── gif parsing tools: 1\n",
268
+ " ├── a web browser: 7\n",
269
+ " ├── a search engine: 7\n",
270
+ " ├── a speech-to-text tool: 2\n",
271
+ " ├── code/data analysis tools: 1\n",
272
+ " ├── audio capability: 2\n",
273
+ " ├── pdf reader: 1\n",
274
+ " ├── markdown: 1\n",
275
+ " ├── a calculator: 5\n",
276
+ " ├── access to wikipedia: 3\n",
277
+ " ├── image recognition/ocr: 3\n",
278
+ " ├── google translate access: 1\n",
279
+ " ├── ocr: 4\n",
280
+ " ├── bass note data: 1\n",
281
+ " ├── text editor: 1\n",
282
+ " ├── xlsx file access: 1\n",
283
+ " ├── powerpoint viewer: 1\n",
284
+ " ├── csv file access: 1\n",
285
+ " ├── calculator (or use excel): 1\n",
286
+ " ├── computer algebra system: 1\n",
287
+ " ├── video processing software: 1\n",
288
+ " ├── audio processing software: 1\n",
289
+ " ├── computer vision: 1\n",
290
+ " ├── google maps: 1\n",
291
+ " ├── access to excel files: 1\n",
292
+ " ├── calculator (or ability to count): 1\n",
293
+ " ├── a file interface: 3\n",
294
+ " ├── a python ide: 1\n",
295
+ " ├── spreadsheet editor: 1\n",
296
+ " ├── tools required: 1\n",
297
+ " ├── b browser: 1\n",
298
+ " ├── image recognition and processing tools: 1\n",
299
+ " ├── computer vision or ocr: 1\n",
300
+ " ├── c++ compiler: 1\n",
301
+ " ├── access to google maps: 1\n",
302
+ " ├── youtube player: 1\n",
303
+ " ├── natural language processor: 1\n",
304
+ " ├── graph interaction tools: 1\n",
305
+ " ├── bablyonian cuniform -> arabic legend: 1\n",
306
+ " ├── access to youtube: 1\n",
307
+ " ├── image search tools: 1\n",
308
+ " ├── calculator or counting function: 1\n",
309
+ " ├── a speech-to-text audio processing tool: 1\n",
310
+ " ├── access to academic journal websites: 1\n",
311
+ " ├── pdf reader/extracter: 1\n",
312
+ " ├── rubik's cube model: 1\n",
313
+ " ├── wikipedia: 1\n",
314
+ " ├── video capability: 1\n",
315
+ " ├── image processing tools: 1\n",
316
+ " ├── age recognition software: 1\n",
317
+ " ├── youtube: 1\n"
318
+ ]
319
+ }
320
+ ],
321
+ "source": [
322
+ "# list of the tools used in all the samples\n",
323
+ "from collections import Counter, OrderedDict\n",
324
+ "\n",
325
+ "tools = []\n",
326
+ "for sample in json_QA:\n",
327
+ " for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
328
+ " tool = tool[2:].strip().lower()\n",
329
+ " if tool.startswith(\"(\"):\n",
330
+ " tool = tool[11:].strip()\n",
331
+ " tools.append(tool)\n",
332
+ "tools_counter = OrderedDict(Counter(tools))\n",
333
+ "print(\"List of tools used in all samples:\")\n",
334
+ "print(\"Total number of tools used:\", len(tools_counter))\n",
335
+ "for tool, count in tools_counter.items():\n",
336
+ " print(f\" ├── {tool}: {count}\")"
337
+ ]
338
+ },
339
+ {
340
+ "cell_type": "markdown",
341
+ "id": "5efee12a",
342
+ "metadata": {},
343
+ "source": [
344
+ "#### Graph"
345
+ ]
346
+ },
347
+ {
348
+ "cell_type": "code",
349
+ "execution_count": 55,
350
+ "id": "7fe573cc",
351
+ "metadata": {},
352
+ "outputs": [],
353
+ "source": [
354
+ "system_prompt = \"\"\"\n",
355
+ "You are a helpful assistant tasked with answering questions using a set of tools.\n",
356
+ "If the tool is not available, you can try to find the information online. You can also use your own knowledge to answer the question. \n",
357
+ "You need to provide a step-by-step explanation of how you arrived at the answer.\n",
358
+ "==========================\n",
359
+ "Here is a few examples showing you how to answer the question step by step.\n",
360
+ "\"\"\"\n",
361
+ "for i, samples in enumerate(random_samples):\n",
362
+ " system_prompt += f\"\\nQuestion {i+1}: {samples['Question']}\\nSteps:\\n{samples['Annotator Metadata']['Steps']}\\nTools:\\n{samples['Annotator Metadata']['Tools']}\\nFinal Answer: {samples['Final answer']}\\n\"\n",
363
+ "system_prompt += \"\\n==========================\\n\"\n",
364
+ "system_prompt += \"Now, please answer the following question step by step.\\n\"\n",
365
+ "\n",
366
+ "# save the system_prompt to a file\n",
367
+ "with open('system_prompt.txt', 'w') as f:\n",
368
+ " f.write(system_prompt)"
369
+ ]
370
+ },
371
+ {
372
+ "cell_type": "code",
373
+ "execution_count": 56,
374
+ "id": "d6beb0da",
375
+ "metadata": {},
376
+ "outputs": [
377
+ {
378
+ "name": "stdout",
379
+ "output_type": "stream",
380
+ "text": [
381
+ "\n",
382
+ "You are a helpful assistant tasked with answering questions using a set of tools.\n",
383
+ "If the tool is not available, you can try to find the information online. You can also use your own knowledge to answer the question. \n",
384
+ "You need to provide a step-by-step explanation of how you arrived at the answer.\n",
385
+ "==========================\n",
386
+ "Here is a few examples showing you how to answer the question step by step.\n",
387
+ "\n",
388
+ "Question 1: In terms of geographical distance between capital cities, which 2 countries are the furthest from each other within the ASEAN bloc according to wikipedia? Answer using a comma separated list, ordering the countries by alphabetical order.\n",
389
+ "Steps:\n",
390
+ "1. Search the web for \"ASEAN bloc\".\n",
391
+ "2. Click the Wikipedia result for the ASEAN Free Trade Area.\n",
392
+ "3. Scroll down to find the list of member states.\n",
393
+ "4. Click into the Wikipedia pages for each member state, and note its capital.\n",
394
+ "5. Search the web for the distance between the first two capitals. The results give travel distance, not geographic distance, which might affect the answer.\n",
395
+ "6. Thinking it might be faster to judge the distance by looking at a map, search the web for \"ASEAN bloc\" and click into the images tab.\n",
396
+ "7. View a map of the member countries. Since they're clustered together in an arrangement that's not very linear, it's difficult to judge distances by eye.\n",
397
+ "8. Return to the Wikipedia page for each country. Click the GPS coordinates for each capital to get the coordinates in decimal notation.\n",
398
+ "9. Place all these coordinates into a spreadsheet.\n",
399
+ "10. Write formulas to calculate the distance between each capital.\n",
400
+ "11. Write formula to get the largest distance value in the spreadsheet.\n",
401
+ "12. Note which two capitals that value corresponds to: Jakarta and Naypyidaw.\n",
402
+ "13. Return to the Wikipedia pages to see which countries those respective capitals belong to: Indonesia, Myanmar.\n",
403
+ "Tools:\n",
404
+ "1. Search engine\n",
405
+ "2. Web browser\n",
406
+ "3. Microsoft Excel / Google Sheets\n",
407
+ "Final Answer: Indonesia, Myanmar\n",
408
+ "\n",
409
+ "Question 2: Review the chess position provided in the image. It is black's turn. Provide the correct next move for black which guarantees a win. Please provide your response in algebraic notation.\n",
410
+ "Steps:\n",
411
+ "Step 1: Evaluate the position of the pieces in the chess position\n",
412
+ "Step 2: Report the best move available for black: \"Rd5\"\n",
413
+ "Tools:\n",
414
+ "1. Image recognition tools\n",
415
+ "Final Answer: Rd5\n",
416
+ "\n",
417
+ "==========================\n",
418
+ "Now, please answer the following question step by step.\n",
419
+ "\n"
420
+ ]
421
+ }
422
+ ],
423
+ "source": [
424
+ "# load the system prompt from the file\n",
425
+ "with open('system_prompt.txt', 'r') as f:\n",
426
+ " system_prompt = f.read()\n",
427
+ "print(system_prompt)"
428
+ ]
429
+ },
430
+ {
431
+ "cell_type": "code",
432
+ "execution_count": null,
433
+ "id": "42fde0f8",
434
+ "metadata": {},
435
+ "outputs": [],
436
+ "source": [
437
+ "import dotenv\n",
438
+ "from langgraph.graph import MessagesState, START, StateGraph\n",
439
+ "from langgraph.prebuilt import tools_condition\n",
440
+ "from langgraph.prebuilt import ToolNode\n",
441
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
442
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
443
+ "from langchain_community.tools.tavily_search import TavilySearchResults\n",
444
+ "from langchain_community.document_loaders import WikipediaLoader\n",
445
+ "from langchain_community.document_loaders import ArxivLoader\n",
446
+ "from langchain_community.vectorstores import SupabaseVectorStore\n",
447
+ "from langchain.tools.retriever import create_retriever_tool\n",
448
+ "from langchain_core.messages import HumanMessage, SystemMessage\n",
449
+ "from langchain_core.tools import tool\n",
450
+ "from supabase.client import Client, create_client\n",
451
+ "\n",
452
+ "# Define the retriever from supabase\n",
453
+ "load_dotenv()\n",
454
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\") # dim=768\n",
455
+ "\n",
456
+ "supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
457
+ "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
458
+ "supabase: Client = create_client(supabase_url, supabase_key)\n",
459
+ "vector_store = SupabaseVectorStore(\n",
460
+ " client=supabase,\n",
461
+ " embedding= embeddings,\n",
462
+ " table_name=\"documents\",\n",
463
+ " query_name=\"match_documents_langchain\",\n",
464
+ ")\n",
465
+ "\n",
466
+ "question_retrieve_tool = create_retriever_tool(\n",
467
+ " vector_store.as_retriever(),\n",
468
+ " \"Question Retriever\",\n",
469
+ " \"Find similar questions in the vector database for the given question.\",\n",
470
+ ")\n",
471
+ "\n",
472
+ "@tool\n",
473
+ "def multiply(a: int, b: int) -> int:\n",
474
+ " \"\"\"Multiply two numbers.\n",
475
+ "\n",
476
+ " Args:\n",
477
+ " a: first int\n",
478
+ " b: second int\n",
479
+ " \"\"\"\n",
480
+ " return a * b\n",
481
+ "\n",
482
+ "@tool\n",
483
+ "def add(a: int, b: int) -> int:\n",
484
+ " \"\"\"Add two numbers.\n",
485
+ " \n",
486
+ " Args:\n",
487
+ " a: first int\n",
488
+ " b: second int\n",
489
+ " \"\"\"\n",
490
+ " return a + b\n",
491
+ "\n",
492
+ "@tool\n",
493
+ "def subtract(a: int, b: int) -> int:\n",
494
+ " \"\"\"Subtract two numbers.\n",
495
+ " \n",
496
+ " Args:\n",
497
+ " a: first int\n",
498
+ " b: second int\n",
499
+ " \"\"\"\n",
500
+ " return a - b\n",
501
+ "\n",
502
+ "@tool\n",
503
+ "def divide(a: int, b: int) -> int:\n",
504
+ " \"\"\"Divide two numbers.\n",
505
+ " \n",
506
+ " Args:\n",
507
+ " a: first int\n",
508
+ " b: second int\n",
509
+ " \"\"\"\n",
510
+ " if b == 0:\n",
511
+ " raise ValueError(\"Cannot divide by zero.\")\n",
512
+ " return a / b\n",
513
+ "\n",
514
+ "@tool\n",
515
+ "def modulus(a: int, b: int) -> int:\n",
516
+ " \"\"\"Get the modulus of two numbers.\n",
517
+ " \n",
518
+ " Args:\n",
519
+ " a: first int\n",
520
+ " b: second int\n",
521
+ " \"\"\"\n",
522
+ " return a % b\n",
523
+ "\n",
524
+ "@tool\n",
525
+ "def wiki_search(query: str) -> str:\n",
526
+ " \"\"\"Search Wikipedia for a query and return maximum 2 results.\n",
527
+ " \n",
528
+ " Args:\n",
529
+ " query: The search query.\"\"\"\n",
530
+ " search_docs = WikipediaLoader(query=query, load_max_docs=2).load()\n",
531
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
532
+ " [\n",
533
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content}\\n</Document>'\n",
534
+ " for doc in search_docs\n",
535
+ " ])\n",
536
+ " return {\"wiki_results\": formatted_search_docs}\n",
537
+ "\n",
538
+ "@tool\n",
539
+ "def web_search(query: str) -> str:\n",
540
+ " \"\"\"Search Tavily for a query and return maximum 3 results.\n",
541
+ " \n",
542
+ " Args:\n",
543
+ " query: The search query.\"\"\"\n",
544
+ " search_docs = TavilySearchResults(max_results=3).invoke(query=query)\n",
545
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
546
+ " [\n",
547
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content}\\n</Document>'\n",
548
+ " for doc in search_docs\n",
549
+ " ])\n",
550
+ " return {\"web_results\": formatted_search_docs}\n",
551
+ "\n",
552
+ "@tool\n",
553
+ "def arvix_search(query: str) -> str:\n",
554
+ " \"\"\"Search Arxiv for a query and return maximum 3 result.\n",
555
+ " \n",
556
+ " Args:\n",
557
+ " query: The search query.\"\"\"\n",
558
+ " search_docs = ArxivLoader(query=query, load_max_docs=3).load()\n",
559
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
560
+ " [\n",
561
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
562
+ " for doc in search_docs\n",
563
+ " ])\n",
564
+ " return {\"arvix_results\": formatted_search_docs}\n",
565
+ "\n",
566
+ "@tool\n",
567
+ "def similar_question_search(question: str) -> str:\n",
568
+ " \"\"\"Search the vector database for similar questions and return the first results.\n",
569
+ " \n",
570
+ " Args:\n",
571
+ " question: the question human provided.\"\"\"\n",
572
+ " matched_docs = vector_store.similarity_search(query, 3)\n",
573
+ " formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
574
+ " [\n",
575
+ " f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
576
+ " for doc in matched_docs\n",
577
+ " ])\n",
578
+ " return {\"similar_questions\": formatted_search_docs}\n",
579
+ "\n",
580
+ "tools = [\n",
581
+ " multiply,\n",
582
+ " add,\n",
583
+ " subtract,\n",
584
+ " divide,\n",
585
+ " modulus,\n",
586
+ " wiki_search,\n",
587
+ " web_search,\n",
588
+ " arvix_search,\n",
589
+ " question_retrieve_tool\n",
590
+ "]\n",
591
+ "\n",
592
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
593
+ "llm_with_tools = llm.bind_tools(tools)"
594
+ ]
595
+ },
596
+ {
597
+ "cell_type": "code",
598
+ "execution_count": null,
599
+ "id": "7dd0716c",
600
+ "metadata": {},
601
+ "outputs": [],
602
+ "source": [
603
+ "# load the system prompt from the file\n",
604
+ "with open('system_prompt.txt', 'r') as f:\n",
605
+ " system_prompt = f.read()\n",
606
+ "\n",
607
+ "\n",
608
+ "# System message\n",
609
+ "sys_msg = SystemMessage(content=system_prompt)\n",
610
+ "\n",
611
+ "# Node\n",
612
+ "def assistant(state: MessagesState):\n",
613
+ " \"\"\"Assistant node\"\"\"\n",
614
+ " return {\"messages\": [llm_with_tools.invoke([sys_msg] + state[\"messages\"])]}\n",
615
+ "\n",
616
+ "# Build graph\n",
617
+ "builder = StateGraph(MessagesState)\n",
618
+ "builder.add_node(\"assistant\", assistant)\n",
619
+ "builder.add_node(\"tools\", ToolNode(tools))\n",
620
+ "builder.add_edge(START, \"assistant\")\n",
621
+ "builder.add_conditional_edges(\n",
622
+ " \"assistant\",\n",
623
+ " # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools\n",
624
+ " # If the latest message (result) from assistant is a not a tool call -> tools_condition routes to END\n",
625
+ " tools_condition,\n",
626
+ ")\n",
627
+ "builder.add_edge(\"tools\", \"assistant\")\n",
628
+ "\n",
629
+ "# Compile graph\n",
630
+ "graph = builder.compile()\n"
631
+ ]
632
+ },
633
+ {
634
+ "cell_type": "code",
635
+ "execution_count": 49,
636
+ "id": "f4e77216",
637
+ "metadata": {},
638
+ "outputs": [
639
+ {
640
+ "data": {
641
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAANgAAAD5CAIAAADKsmwpAAAQAElEQVR4nOydB1wUR9vA5zrcwdGOXqRIFRC7gkZsxK7YguU1xhgTJcVXjVETNSYajCbGYCxYYuJnjYliYq+xRo2xIIqAgNI7HFzh+vfo5UVEQEzYuzl2/r/7HXu7e7dX/jwz88zsLFun0yECwdiwEYGAAUREAhYQEQlYQEQkYAERkYAFREQCFpikiAq5pixfKavWyKrVarVOrTSBDBTPnMnmMviWbL6Q5ehuhgjPYkoiSqtU6TekmcmSqjKVpS2Hb8mC31Voy0GmkArValDRQ4WsWsrhMbPvy7yCBd4hcLNAhCcwTCKhrdXoLv9WVpqvsHPhegdbuLY1R6ZMjUyTlSzNTZflZ9aED7Xz7WCJaI8JiHj3ivj3fSXhw+w6RNqg1gWE9suHyhQyTdR/nMwtWIjG4C7i7/uKzfjM7kNEqPVSWqBIXJc38HUnN18+oitYi3hyR5GTl1lIhBWiAQfW5fWKFolceIiW4Cti4vq8tmEWweG0sFDPgXW5IRHW8KkR/WAiLLmQWOIZJKCVhUB0rNuVo2UVRUpEP3AUMfVGNZvDDIu0RvRj4nyPs/uKaTg2D0cRz+0r6diXjhYCDAYDigLIVSGagZ2If52qCI4Q8szpm8vo2Nfm3tWqGqkG0Qm8RIQiKTtVFj60NSdrmsMro+xvnatEdAIvETPvSKFPFtEeD39+8mUxohN4/erQ8QWdsMiwfPTRR7/99ht6efr375+fn48oAHpZrEXcgodyRBvwErGyROUdYmgRU1JS0MtTWFhYWUlh6enX2SInTYZoA0YiQvW8olhJXTMlMTFx3LhxERER/fr1+/DDD4uKimBl586dIaotXbo0MjISHmo0mo0bN44cOTI8PHzQoEErVqyQy/8OSxD/du3a9f777/fo0ePChQtDhw6FlcOHD58zZw6iAIGQXZpLo4QiRiJKq9Tw7SNquHnz5rJly8aPH793795vv/0Wgtn8+fNh/ZEjR+AevDx48CAsgGo//PDDzJkz9+zZs2TJknPnzq1bt07/Cmw2e//+/W3btk1ISOjSpUtcXBys3LFjx2effYYoAL4K+EIQbcBoPKK0SiMQUhUOMzIyeDzesGHDwCc3NzcIdQUFBbDeyupx5w2fz9cvQBSEgAe2wbKHh0dUVNSlS5f0rwAZPjMzM4iI+ocCweMqhFAo1C+0OAIrllRMowwORiLqtDouZU1mKILBpGnTpo0YMaJbt24uLi52dnbP72ZtbX348GGIncXFxWq1WiaTgaO1W0NDQ5GhYLEZXDMaJRAw+qh8IVtcokLU4OnpuW3bNoiFa9euhYrdlClTkpOTn99t1apVW7Zsgark5s2boZiOjo6uu9XCwnDDESSVanAR0QaMRIRyGUpnRBm+vr4Q6k6ePAmVPBaLNWvWLKXymdYAtFSgpvj6668PHjzY1dVVJBJJJBJkJCitqGAIThHRkm3rxNFqKenvh/iXlJQEC6Bgp06dZsyYAe2VsrK/u3T1gwy0Wi24qK8sAlKp9Pz5802PP6BudIJCprF3p9HYRLxqIWZ8FnSuIAq4fPny7NmzT58+nZubm5qaCo1iZ2dnJycn3hNu3LgBK6ES6e/vf+jQIdgnPT0dQibkeqqqqh4+fAj1xXovCM0UuL948WJmZiaigNS/qp09TfvUnJcCLxE92wke3qVExKlTp0KFb82aNWPGjImNjYVIFh8fD+bBJqgvnjp1ClI2kDJcvHgxBEWoIy5YsCAmJgb2BFknT54MbZd6LxgYGAi5xm+++WblypWopdGodXkP5B4BNDpzAK8R2nKJ+sSOohHvuCJ6k3VXkpMmfyXaHtEGvCKiuQXbxpF7m2YDT57n8q9ldBudjt0J9hHDRAnzM9r3bnhgLJSb0EHX4CZoAnO53AY3eXl5Qe4GUcMPT2hwE6R7Gmt3Q8m+YcOGBjfdv17l4G5m69jwZ2mt4Hjy1K1zlQyGrv0rDZ/FXF1d3eB6hUIBIuqrffVgMpkU9X/oj1svDVSLSqXicDgNboLGe91UeV0ObcnvPcbe0rrhJ7ZWMD2LD36Mdt2tDD8kzOjQ9oNj2ok0dJrL+f0lZYUKRCfO7C128jSjoYUI5/Oaoet579c5r4yyd/GhRTrt7E/Fbr7mtJ0HB99udQaTEfOhxx9HylKuVaFWjVajO7Auz9aJS+fZmExgEqbLh0qzU2Thw0StMsH754ny1OvVkWPt6TzxDTKVaelK8hSXfysVCNlQTEMVylxg8qMBinNqslNl109UhEVadx1oy2TSaKBNg5iGiHpy02UQPLKSpfbuPCsRB7yEG1/I0moR/rAYSFyukoo1OqS7/2c1vPO27QWhr1hzuOSsxceYkoi1FGTJS/OU0io13JgMhkzSkoPHZDLZo0ePIOGMWhRLGw581QIrlqUtx83HXGBFZi9/BpMUkVJSUlKWL1++Y8cORDAg5P+SgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARKwPg8Gwt6fR5NWYQESsj06nKykpQQTDQkQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFpAL/vzN+PHjJRIJg8FQKpVisVgkEsGyQqE4fvw4IlAPuRDc3wwaNKi4uDg/P7+0tFSlUhUUFMCypSV9r1trYIiIfxMTE+Pu7l53DUTE3r17I4JBICL+DZfLHTlyJIv19AK8Hh4eY8aMQQSDQER8yrhx41xdXfXLEA779Onj7OyMCAaBiPgUCIqjR4/WB0UIh2PHjkUEQ0FEfAYIii4uLvpw6OjoiAiGAsc8olyiKStQKBXGySuNGDD9999/79lxdGayFBkcBtIJrNm2jlw2h14xAq88orJGe2pXUV6G3N1foJRrEf3g8hgVxSqtVuvfybLzAFtEGzASUS7V7F+b132YvYObOaI9fx4rMeMzw4fZIXqAUfzfvTK730QXYqGeLgPta+TaP0+UI3qAi4i3z1cGdLUSCEnf91O6vGr/8K5MLlUjGoCLiEWPavhCDiLUg4EqClWIBuAiokqpE9oSEetj52xWXU6LiIhLUVgj0eg0iFAPpUKjpcfwKFInI2ABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWkHNWUGbmgz79Ot+5cwsRjAcREYnsHWZ9MN/Fxa2JfbKyMmImDEX/jpGj+hcU5iNCQ5CiGQkthSOGv+BE+rS0FPTvKCoqFIsrEaERTFjE+6n3tmz5Lv1BqlKp8Gzj/eabsZ07ddNvOnwk8edfdhUU5PF4Zu1DO74bO9fBwbGx9VA0v/lWTPyaLSEhYaDLxoQ1t27/JZNJnZxcxoyeMGzoqB9+TPhx+2Z4OpTgsTNnw8rGDn3
w15+3/bAxbvma+O9W5eQ8FFpaTZr05uBBI27euj57zjuww4SJwyf/Z9obU95BhGcx1aJZoVB8NP89Dpf71ar1G9ZtD2oXumjxnJKSYtiUlHTzq6+XjR41fuuWvXFffCuuqlz6+fwm1tdl5aqlpWUlXyxf8/3Wn0ZFx6z5dsWf16/EvPb6qFExoGzi/lPDho5u4tBsNlsqlWzfsWXpkpW/Hfw9KmrIN2viYFNIcNjiRXGwQ8LGHeNjpiDCc5hqRGSxWN98nWBnJ7KysoaHU6fM2L9/T/Ld230iB2Q9zODxeANfHQZauLq4LVm0orCoAPZpbH1dMrMeRI98LTCgHSy7Dh/j5xvg6OhsZmbG4/IYDIb+WGq1urFD67dOiJmiD8CDBo6AUJqRkda9e08+XwBrLC2F8GqI8BymKiLIpFKr4teufJCRJpFU60+KraoSw32HsM4gzfuzpkGZ2KlTN2cnF1tbuybW1yW8xyu79/wAL9itW0RoSIfAwOCXOrQeb29f/QJoB/fVkmpEeBGmWjTn5mbPmfuOUqlcuODzTRt3JmzYUbvJw8Pzu/ht0AretHkt1MlmvjvlXkpyE+vr8t9ZC6ZNjU1KujH3w5nRo/vDnhDhmn9oPRB3n3lMpkJtBqYaEc+cPaHRaD75eLn+V4dGRt2tPj6+nyxcBjtAdnDrtvULP571054jXC63wfV1nwjRbvTo8XArLy87cfLw1u/XW1vbjBs7qfmHJvwzTDUiqlRKaPnWxp6Tp576lJKSfPduEnpSjwwL6zT1jRmQNwGxGltf+0SJRHLy1FF9CIRSO+a1yUFBIdCmbv6hXwiZKLoxTFXEwIBg0OjosV/LykoTD+67n3oXQlfG40qb5Oq1yx8vmn3u/Om8/FzIsEBLwsnR2dHRqbH1ta8JNcj4tV9Cyxq25hfknTp9DNKHoCxssrCwhANBu7uwsKCJQzfxhoVP6otXrlyEV0CE5zDVojk8/JXXxv0nYVP8+g2ru3WNmD9v6c+/7Ny950cmkwnZQbVatXHjGkjECAQWwcHtV8TFg2STJk5tcH3tawoEgi9XfAcJwtlz3oYqIOQRIeEHrWzY1K/vwOMnDs35cMaE8VNgZWOH9vUNaOwN+/kFdu0avmHjN0VFBTPemYUIz4LLJEy/fJsb1kfk0IakNp7h0sGiNgHmgV2FqLVDuvgIWEBEJGABEZGABUREAhYQEQlYQEQkYAERkYAFREQCFhARCVhARCRgARGRgAVERAIWEBEJWICLiFYiro5BBo3Wh8dncXm0mAQBlw/JEzBL82oQ4VlyUqW2zlxEA3AR0TOQLy5WIkIdJGKV0JZj40BENCDu/nwLa9bVoyWI8D/O7i7oFS1C9ACv6zVfOVpeWaxy8jIXuZrR7crZehgMXVW5uqpMeeVwyaQFbaxEdLksHF4iAll3pek3JTUyTXlBoyW1UqlkPQFRgFajUapUBpuPQS6Xc7nc2s9iJmBxuAxnH7NuA+1YLAaiDdiJ+EKys7MPHDjwwQcfIGpYunTp+fPnly9f3r17d0Q9EokkLi4ODofojSmJKBaLCwsLnZycrKysEDXcu3fvk08+AdfDw8Pj4+ORAdm7d29oaGhgYCCiJSZTDystLY2Ojvby8qLOQmD37t1gIXo8IWLapUuXkAEZMmQIxMXKSprOoWgaIkJFCvw4c+YMVKcQZaSkpNy4cUO/DN7v2rULGRALC4sdOx5Po/Pw4cPc3FxEM0xAxDlz5kD9oWPHjohidu7cWVRUVPsQimkDB0XA2tra2dk5NjYWjo7oBO4i7tmzZ9iwYXw+H1EM/PC14VAPVEn1IcrA8Hi8gwcPQiEAy/QpqfEV8eLFi3APFkZGRiLq2b59O4RDrVar+x+w8v79+8hIdOr0eM4dCI3nzp1DNADTVjN8+8ePH//iiy+QwYGaIjQajBILGwT+QyZPnqxWq9ns1jxUCtOIyGQyjWIhhoCFcL969Wr41d9QSQAAD6ZJREFUz0StF7xELC8vnz59Oiz06tULEeowb948KCVqalrtACW8oj38369atQoRGgKKCCig9Q35iIgI1LrAJSIePnwY7pctW0ZpvtrUgWpijx49oA8mOTkZtS6wEHHhwoUCgQARmgHUnqHvEdKNsHzrVuu5fqCRRayoqID78ePHGyZH02pwc3t85cANGzYcPXoUtQqMKeKxY8cSExNhISQkBBFenoSEBOgYhIX8fJO/1qQxRbxw4cIbb7yBCP8CfXph9+7d27ZtQ6aMcUQ8ffo03JNBeC2FvjseFmQyGTJNDC2iSqXq1q1bWFgYIrQoU6dORU/6RXfu3IlMEIOKCJ25ZWVlkAmzs7NDBAqIioqCLxl6KU1u4L3hRIyLi6uqqnJycmrdfaZGZ/bs2e7u7pCOOHjwIDIdDOQEJGB9n4AI1KNvSt++fRvi4siRI5EpQLmIUExwuVwvL6/g4GBEMCCLFy/OzMyEhWvXrnXt2hXhDbVFM3wR0DT28fEhHSdGwdvbG+6vX7/+9ddfI7yhUETooTfWIOd/yfPXaDZpZs6cCZkK9OTUVYQrVIm4b9++v/76q0OHDsjUuHPnzvDhw1HromfPnuhJTwy2p2VRJSI0jaEHD5ka+oEtEyZMQK0R+B/Td+5jCFWnCkDiGlKGkKxBpsP3339fWlo6b9481EqBTycUCik9JfcfY3pTjlBEfHw8i8WKjY1FBGNAYWMFMqtGPAvupYBku5WVVau3cO7cudj+IhSK6OzsbBIjNxctWgSZ9tdffx21dqBohioTwhIKi2b1Eww2v9s/A8J2//79Bw8ejGgAqSNiyttvvw0N5N69eyOCsaG2ZyUyMlKpxHRm7IkTJ06fPp1WFtK0jgj4+flBXzPCj+joaKga6qf1oA80rSNiS1RU1JYtWzw8PBDNoG8dERorWq0Wn08O7wfK4l9//ZWMzMUNaovm7OxsqIohPBCLxREREadPn6athfStI3p7eysUChxmbCkoKIB64dWrVzFPJ1EKqSMamQcPHsyaNevQoUOI3tA6j1hVVcVkMvWD140C9O5AD97evXsRAWMoP3nq0qVLK1asQEYCjr527VpioR761hGB0NDQM2fODB06FJqrBpiQvS4nT54EBbdu3YoIT6BjHRE6LZKSkuqNube1tYXoaBgdExMTr1y5YsRgjCE41xGpioibNm1ycXGptxJarBAgEfXs3Lnzzp07xMJ6iEQiPC1ElBbN7777ro2NTe1DCL3t2rUzwNn1CQkJRUVF0IOHCM9C0zpi3759hwwZwuH8faFXUFB/LhmlrF69msFgzJ49GxGeg9Z5xBkzZly7dg3kgP6M9evX+/j4IMr4/PPPIYWOT18ObtCxjlhLfHy8h4cH9DhbW1tTauH8+fNDQkKIhU2Acx2xWTU2tUorl2jRP4Tx8UfLlixZ0ql9z+oKqk5cX7J4yaDh/QYMGIAIjQN1xGnTpgUEBCD8eEHRnHKtKumCuLxQaW5ByeXiWwT4CFyBtiJf5xUs6NjX2tnLHBHqAPkyqBrBtwT3+jWw7Ofnt2fPHoQNTUXEayfKS/NVvU
Y5WdpyEPbAlysuUf3+S1H4ELs2gZRfRNKE8Pf3T01NhY7W2jXQ4/rWW28hnGi0jnj1WLm4RN0r2tEkLATg393agTv0LXd4549STHUGXyqIiYkxN3+mlGjTpk2/fv0QTjQsYkWxsjRP0X2oAzJB+k10vnkW04k1jMKIESNcXV1rH/L5fAzn0G9YRLAQahTINOHyWJUlqqpyTBNmRgGSCbXtZchw9enTB2FGwyJKxBp7dxMeQOruL6goJiI+BYKi/hpBAoFgypQpCD8aFlGl0Kpq/nG+xvhIKlU6DZnT5xkgKEIvF4RDPC/yReZVx5FH96WQc5VVaZRybY1cg1oCAeoe2e496O4/tbsItQQCIVur0cG9QMhy8jKztPlXjVoiIkakXq9Kuyl9dE/q4idUqXQsNovFYSNmi2UtuvYYAvfVLZRRkNYw1EqVNlup0+qq9peaC1htwwTtwoUWVv/kDRMRsSD9ZvWFxDIbFwGLJ2g3wL4282wqOPgiebUiJ0t271q+VxC/50g7Nufleo+JiEZGo9Ed3loorUZu7Z255ib8c5hb8uAm8rIpzxFvWpAVOdY+qJuw+U8nIhqT4pyafWtyfbq5CN15qLVg624Ftzt/lJTkKXqPsm/ms3C5gj0NEZcpj2wrbtcf6vmtx8JaHP3ty0qZUN9o5v5ERONQ+KgmcX2hZxdX1HqxdbcuLkRHfyxszs5ERCOgVmn3r81r07k1W6jHro21TMq8furFPa5ERCNw+Psin+6t30I9dl52j1IVOenSpncjIhqau3+IpVIGT2AaY5paBL5IeO6XF1QWiYiG5tJv5Q7etohOmAt5TDYbcqVN7IORiEs+nTdn7gzUqkm+LLZrY8nmYTrc/Xby6bmLukmllailsfOyvXulqSsBtpiIBxJ/WrHyU0RokvvXJTwBHefF4/E55YXKiqJGJ1RvMRHT0nCcKxsrVAptSU6NhR1NT6kRiPiZdxoNii3TszJr9vTbt2/AwvHjhzYl7PRt63/nzq3NW78DO6HbNDAg+K233gsMaKff+fCRxJ/27cjPzzU353frGj7jnf/a2tafwhX2+fmXXQUFeTyeWfvQju/GznVwcEQmzsMUqcjLElHGzaQT5y7tKirJ4vH4HUKiBvWfweU+jr7b9yyEvmt/3x5nz28XV5c4iNpED53bxj0EPe5gVB888s2NpGM6rTbIv2db786IMizt+YXZjVYTWyYiLvtstZ9vQN8+UYn7T3l7tc3JeTR33kx7kcO6tT98F7/NnM+f++GM4uLHo49OnDj81dfLogYM+X7L3s8+XZWWfn/Bwg/qnUmYlHQT9hk9avzWLXvjvvhWXFW59PP5yPQRl6g1KqpGMyTfO7dz3yK/tl3nxO54LXpR0t0zP/8ap9/EYrGzHt3Ozrk7a+b2Tz86xudb7d2/TL/pzPkfr15PHD5o1n9nbvfyDDt17ntEGRweuyBT3tjWlhHRwsKCxWZzuFwrK2sWi3Xw158h2i2Y/5mPjy/cPl6wTK1WHz/xeMLWfT/vjIjoPXHCG+7ubcLCOr337ofgYnLy7bqvlvUwg8fjDXx1mKuLW1Bg8JJFK2JnzkGmj6RSTV0z5cyF7d6eHQcPmCmycw/0Cx8SFXvj9rFK8d9DD5VKOdjG45pDjOwYOrC49KFS+Xg+6b9uHw0O6t214zB4VnjX0X4+FM4JwzFj10gbHVtJSas5LT0FAmTtfEt8Ph+0y8hIAx0zMtODAkNq9/T3D4L7BxlpdZ/eIawzFOjvz5p26PCBgsJ8KLhBR2T6yCQaikTUarW5+SkQDmvXgJRwX1D4QP8QPNMX0wDf/PGgGJm8Sq1WlZbluLsG1T7Lw60dohKegCWtavgUDkpG38hkUjtbUd01fL4AVspr5FAKw/LT9eaPT0CWy58Zq+nh4QkF+u69P27avLZ69fLAwGCoI7YCF6mbZUilqtFqNSfObD559plZSauqS/ULbPbz4yp0ECbhD6fOJqhcIirRaXSNDbWkRESBwEIqfaZ9BA9BTXMzcyaTCUY+Xf9kGfav9wpQoH+ycJlGo4FGz9Zt6xd+POunPUewnbelmVhYsUpKWmbcfz04HDOoCPbs/lq3TsOfOaKgqcw550mMlCue/lJyeVM5538JxCBljZZv2bByLVk017Y5/P2CUtNSamdAq5ZUZ2c/DAh4PDliWx+/O8lPr517724S+l8BXUtKSvLdJ+uhugn1yKlvzBCLK8vLmzugCFssrNlqJSUiwr+3q3NARWWBg72n/mZr48pksvn8poamcthcG2vngsL02jVpGdcQZagVGjNBozWTFhPR0sLywYPU9AepIM2IEWMVipqVX30GzefMzAfLln8MMe/VqKGw29ixk65cuQjpm8LCgpu3rq9d91X79h0DnhXx6rXLHy+afe786bz8XHjB/fv3ODk6Ozo6IRPH2p7DZlF1bmRkz0l37p2FVnBxyaO8/NRdPy9Zt2V6Tc0LhhpAlgea21euJ0Jt8tylnfkFaYgylHK1s3ejOdQWK5qjo2PiVix+/4M3l366qmuXHqu+XLdpy9pp08dDVAsJDvvm6wRr68ezx/bvNxAcBRE3b/kO7OwZEfn22x/Ue6lJE6dCPXrjxjWlZSWwT3Bw+xVx8SZ3GsfzeLYTHPuxUOQtQhQQ2q7P+NFLz17Yfvz0JjMzC0+P0BlT15uZCZp+1oC+06SyykPH4rU6baBfxJCod7fvXQDLiAKkpVLf0EaHADc8G9i14+XQum8faap982d257fvZQU/PMKMA+vy2UJLSxEd54jKuJwzZparlV3Dw47I6BuDEtDVQiFRIPpRI1GK3HiNWYjIyVMGJrCL8I9DD4WOFlzzhn+S5JTze/YvbXCTwNxKKhc3uKl7p5FDB76HWoisR7e27mi4BwGSREwGEzVUTerRZRRk0VEjlGaW9xxmjRqHiGhoeo20+/N0hUu7hmda8/PpOnvm/zW4CfpCapPS9eDxWrIS4uYS2Nh7UKkULBan7lSLzXkP0ooaDkfnGdTUmyQiGhrfDpbpt6Q11YoGT94D1Wy5LsiocDg8W5uWfA81FdV9xr6giUbqiEZg8BtOmdfytVpaTBNVlFbi38Hc4UWTyxERjcP4eR6ZV3JRa6covczemRkcbvXCPYmIxsHGgTvhI9f0i9katQlP/9c0JRllPkGcvuOaNe8wEdFo8C04r81xAxelFXLUutCqtXnJhZ5+7M79bZr5FCKiMRHact750oejlebeLpBXtZL8YklWRer57J5DrLtEvUSHCGk1G5+oSY45abLzB0p5Fjwmlyu0F2B7ml8TSMrkklJZVbGk/SvWY2e+9CXGiIhY4O7Hn/iRx6N70rRb0sxreTbO5soaLZvLZnHZDCamnexMFlMlV2pUGqTTVhTIoV0c1EkQ1N3zZWdG1ENExIg2QYI2T7K+Rdk1T6YuVtfItAoZJSPH/j3mFjoGky0Q8vhCtrOXE4f7r6p5REQccfQwc/RAtKJhEblmDC0y4WFXAmsOk2Xyw8ZoRcPh1NKGU/LIhHMK2SkSWyfTPq+AbjQsooM7z3THo
colapErz8Ka1DpMiUYjomtbs/O/NGuuT9w4tSO/y4Dm5lEJmNDU9Zrv/iFOvyVp39vOxpHLYuOe+q6RaapKlZcOFg+c7OjgQceJjkyaF1w4POuu9Na5ysKsGhYb66LaSsSpKld5Bgk6D7CBblxEMDVeIGItCjnWffM6LTITkO5KE6a5IhIIlEKalgQsICISsICISMACIiIBC4iIBCwgIhKw4P8BAAD//2v4e7oAAAAGSURBVAMA1x7mMDWkAPIAAAAASUVORK5CYII=",
642
+ "text/plain": [
643
+ "<IPython.core.display.Image object>"
644
+ ]
645
+ },
646
+ "metadata": {},
647
+ "output_type": "display_data"
648
+ }
649
+ ],
650
+ "source": [
651
+ "from IPython.display import Image, display\n",
652
+ "\n",
653
+ "display(Image(graph.get_graph(xray=True).draw_mermaid_png()))"
654
+ ]
655
+ },
656
+ {
657
+ "cell_type": "code",
658
+ "execution_count": null,
659
+ "id": "5987d58c",
660
+ "metadata": {},
661
+ "outputs": [],
662
+ "source": [
663
+ "question = \"\"\n",
664
+ "messages = [HumanMessage(content=question)]\n",
665
+ "messages = graph.invoke({\"messages\": messages})"
666
+ ]
667
+ },
668
+ {
669
+ "cell_type": "code",
670
+ "execution_count": null,
671
+ "id": "330cbf17",
672
+ "metadata": {},
673
+ "outputs": [],
674
+ "source": [
675
+ "for m in messages['messages']:\n",
676
+ " m.pretty_print()"
677
+ ]
678
+ }
679
+ ],
680
+ "metadata": {
681
+ "kernelspec": {
682
+ "display_name": "Python 3 (ipykernel)",
683
+ "language": "python",
684
+ "name": "python3"
685
+ },
686
+ "language_info": {
687
+ "codemirror_mode": {
688
+ "name": "ipython",
689
+ "version": 3
690
+ },
691
+ "file_extension": ".py",
692
+ "mimetype": "text/x-python",
693
+ "name": "python",
694
+ "nbconvert_exporter": "python",
695
+ "pygments_lexer": "ipython3",
696
+ "version": "3.12.7"
697
+ }
698
+ },
699
+ "nbformat": 4,
700
+ "nbformat_minor": 5
701
+ }