import json
import logging
import os
import ssl
import urllib.parse
from typing import Optional

import aiohttp
from langchain_core.tools import tool
from tenacity import retry, stop_after_attempt, wait_exponential
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=1, max=10),
    # Re-raise the original exception so callers still catch
    # aiohttp.ClientError rather than tenacity.RetryError.
    reraise=True,
)
async def fetch_hf_models(author: str) -> Optional[list]:
    """
    Query the Hugging Face Hub API for an author's most downloaded model.

    Transient failures are retried up to 3 times with exponential backoff.

    Args:
        author: Hugging Face author username (URL-quoted before use so
            user input cannot inject extra query parameters).

    Returns:
        The parsed JSON response — a list of model dicts sorted by
        downloads (at most one entry, due to ``limit=1``).

    Raises:
        aiohttp.ClientError: If the request still fails after all retries.
    """
    quoted = urllib.parse.quote(author, safe="")
    url = (
        "https://huggingface.co/api/models"
        f"?author={quoted}&sort=downloads&direction=-1&limit=1"
    )
    ssl_context = ssl.create_default_context()
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, ssl=ssl_context) as response:
                response.raise_for_status()
                return await response.json()
    except aiohttp.ClientError as e:
        logger.error("Failed to fetch models for %s: %s", author, e)
        raise
async def hub_stats_tool(author: str) -> str:
    """
    Fetch the most downloaded model from a specific author on Hugging Face Hub.

    Results are cached on disk under ``temp/`` so repeated queries for the
    same author skip the network round-trip.
    NOTE(review): the cache never expires — stale download counts persist
    until the file is deleted manually.

    Args:
        author (str): Hugging Face author username.

    Returns:
        str: Model information or error message.
    """
    log = logging.getLogger(__name__)
    try:
        # Quote the untrusted author string so it cannot escape the cache
        # directory (e.g. via "../") or collide with other cache entries.
        safe_author = urllib.parse.quote(author, safe="")
        cache_file = os.path.join("temp", f"hf_cache_{safe_author}.json")
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                models = json.load(f)
            log.debug("Loaded cached models for %s", author)
        else:
            models = await fetch_hf_models(author)
            os.makedirs("temp", exist_ok=True)
            with open(cache_file, "w") as f:
                json.dump(models, f)
        # The API returns a JSON list sorted by downloads; the first
        # entry is therefore the most downloaded model.
        if isinstance(models, list) and models:
            model = models[0]
            return (
                f"The most downloaded model by {author} is {model['id']} "
                f"with {model.get('downloads', 0):,} downloads."
            )
        return f"No models found for author {author}."
    except Exception as e:
        # Tool contract: report failures as a string rather than raising.
        log.exception("Error fetching models for %s", author)
        return f"Error: {str(e)}"