# feat(tools): add more tools to extend the functionality of jarvis
# (commit 751d628, author: onisj)
import asyncio
import logging
import os
from typing import List, Optional

from langchain_core.tools import StructuredTool
from pydantic import BaseModel, Field
from serpapi import GoogleSearch
# Module-level logger; handlers/levels are inherited from the host application's
# logging configuration.
logger = logging.getLogger(__name__)
class SearchInput(BaseModel):
    """Argument schema for ``search_tool``: a single free-text query."""

    # Forwarded verbatim as SerpAPI's "q" parameter.
    query: str = Field(description="Search query")
async def search_func(query: str) -> List[str]:
    """
    Perform a web search using SerpAPI and return relevant snippets.

    The SerpAPI client is synchronous; the blocking HTTP call is delegated
    to a worker thread via ``asyncio.to_thread`` so the event loop stays
    responsive while the request is in flight.

    Args:
        query (str): The search query to execute.

    Returns:
        List[str]: A list of search result snippets; empty on failure or
        when no API key is configured (this tool is best-effort and never
        raises).
    """
    try:
        logger.info("Executing SerpAPI search for query: %s", query)
        api_key = os.getenv("SERPAPI_API_KEY")
        if not api_key:
            # Fail fast with a clear log message instead of an opaque
            # authentication error from the remote API.
            logger.error("SERPAPI_API_KEY is not set; skipping search")
            return []
        params = {
            "q": query,
            "api_key": api_key,
            "num": 10,
        }
        # GoogleSearch.get_dict() performs a blocking network request; run
        # it off the event loop so other coroutines are not starved.
        results = await asyncio.to_thread(
            lambda: GoogleSearch(params).get_dict().get("organic_results", [])
        )
        return [result["snippet"] for result in results if "snippet" in result]
    except Exception as e:
        # Best-effort: log and return an empty result set so the calling
        # agent can continue rather than crash.
        logger.error("SerpAPI search failed for query '%s': %s", query, e)
        return []
# search_func is a coroutine function, so it must be registered only via
# ``coroutine=``. The original code also passed it as ``func=``, which made
# synchronous invocation (``.run()``/sync ``.invoke()``) return an unawaited
# coroutine object instead of results. With ``func=None`` LangChain raises a
# clear error on sync use and awaits the coroutine on async use.
search_tool = StructuredTool.from_function(
    func=None,
    coroutine=search_func,
    name="search_tool",
    description="Perform a web search using SerpAPI and return relevant snippets.",
    args_schema=SearchInput,
)
class MultiHopSearchInput(BaseModel):
    """Argument schema for ``multi_hop_search_tool``."""

    # Initial query; subsequent hops may use an LLM-rewritten query.
    query: str = Field(description="Multi-hop search query")
    # Number of sequential search hops (pydantic enforces the 1-3 bound).
    steps: int = Field(description="Number of search steps", ge=1, le=3)
    # Optional chat-completions client used to refine the query between hops;
    # with no client, every hop reuses the original query.
    llm_client: Optional[object] = Field(description="LLM client", default=None)
    # Only "together" is currently handled by multi_hop_search_func.
    llm_type: Optional[str] = Field(description="LLM type", default="together")
    llm_model: Optional[str] = Field(description="LLM model", default="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free")
async def _refine_query(
    current_query: str,
    step_results: List[str],
    llm_client: object,
    llm_type: Optional[str],
    llm_model: Optional[str],
) -> Optional[str]:
    """Ask the LLM for a follow-up query; return None when refinement is unavailable."""
    prompt = f"Given the query '{current_query}' and results: {step_results[:3]}, generate a follow-up search query to refine or expand the search."
    messages = [
        {"role": "system", "content": "Generate a single search query as a string."},
        {"role": "user", "content": prompt}
    ]
    if llm_type != "together":
        logger.warning("LLM not configured for multi-hop refinement")
        return None
    # The Together client is synchronous; run the completion in a worker
    # thread so the event loop is not blocked for the request's duration.
    response = await asyncio.to_thread(
        llm_client.chat.completions.create,
        model=llm_model,
        messages=messages,
        max_tokens=50,
        temperature=0.7,
    )
    return response.choices[0].message.content.strip()


async def multi_hop_search_func(query: str, steps: int, llm_client: Optional[object] = None, llm_type: Optional[str] = "together", llm_model: Optional[str] = "meta-llama/Llama-3.3-70B-Instruct-Turbo-Free") -> List[str]:
    """
    Perform a multi-hop web search using SerpAPI with iterative query refinement.

    Each hop runs ``search_func``; between hops (when an LLM client is
    provided) the query is rewritten by the LLM based on the latest results.

    Args:
        query (str): The initial multi-hop search query.
        steps (int): Number of search steps to perform (1 to 3).
        llm_client (Optional[object]): LLM client for query refinement.
        llm_type (Optional[str]): Type of LLM (e.g., 'together').
        llm_model (Optional[str]): LLM model name.

    Returns:
        List[str]: Up to 5 search result snippets accumulated across all
        steps, or a single-element message/error list (never raises).
    """
    try:
        logger.info("Executing multi-hop search for query: %s, steps: %s", query, steps)
        results: List[str] = []
        current_query = query
        for step in range(steps):
            logger.info("Multi-hop step %d: %s", step + 1, current_query)
            step_results = await search_func(current_query)
            results.extend(step_results)
            # Refine the query for the next hop; skip after the final hop.
            if step < steps - 1 and llm_client:
                refined = await _refine_query(
                    current_query, step_results, llm_client, llm_type, llm_model
                )
                if not refined:
                    # No refinement available (unsupported LLM type or empty
                    # completion) — stop rather than search a blank query.
                    break
                current_query = refined
        return results[:5] if results else ["No results found"]
    except Exception as e:
        logger.error("Multi-hop search failed for query '%s': %s", query, e)
        return [f"Error: {str(e)}"]
# multi_hop_search_func is a coroutine function; register it only via
# ``coroutine=``. Passing it as ``func=`` (as before) made synchronous
# invocation return an unawaited coroutine object instead of snippets.
multi_hop_search_tool = StructuredTool.from_function(
    func=None,
    coroutine=multi_hop_search_func,
    name="multi_hop_search_tool",
    description="Perform a multi-hop web search with iterative LLM query refinement.",
    args_schema=MultiHopSearchInput,
)