"""LLM service for generating cover letters and Q&A responses."""
from typing import Dict, Any, Optional

import requests
from anthropic import Anthropic
from openai import OpenAI
from ..config import get_settings
from .profile_service import UserProfile
class LLMService:
"""Service for LLM-based text generation."""
def __init__(self):
self.settings = get_settings()
self.openai_client = None
self.anthropic_client = None
self.hf_client = None
self._initialize_clients()
def _initialize_clients(self):
"""Initialize LLM clients based on available API keys."""
        if self.settings.openai_api_key:
            self.openai_client = OpenAI(api_key=self.settings.openai_api_key)
if self.settings.anthropic_api_key:
self.anthropic_client = Anthropic(api_key=self.settings.anthropic_api_key)
if self.settings.hf_access_token:
            # Use the Inference Providers router. Note that the
            # "/v3/openai/chat/completions" path suffix is provider-specific;
            # some providers on the router expose "/v1/chat/completions" instead.
self.hf_client = {
"api_url": f"https://router.huggingface.co/{self.settings.hf_inference_provider}/v3/openai/chat/completions",
"headers": {
"Authorization": f"Bearer {self.settings.hf_access_token}",
"Content-Type": "application/json",
},
}
print(
f"πŸš€ HuggingFace Inference Providers initialized with {self.settings.hf_inference_provider} provider"
)
def _get_client(self):
"""Get the appropriate LLM client based on provider setting."""
if self.settings.llm_provider == "huggingface" and self.hf_client:
return self.hf_client, "huggingface"
elif self.settings.llm_provider == "openai" and self.openai_client:
return self.openai_client, "openai"
elif self.settings.llm_provider == "anthropic" and self.anthropic_client:
return self.anthropic_client, "anthropic"
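        # No match for the configured provider: fall back to any available client.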
elif self.hf_client:
return self.hf_client, "huggingface"
elif self.openai_client:
return self.openai_client, "openai"
elif self.anthropic_client:
return self.anthropic_client, "anthropic"
else:
return None, None
    def _call_openai(
        self, messages: list, max_tokens: Optional[int] = None
    ) -> Optional[str]:
        """Call the OpenAI Chat Completions API."""
        try:
            response = self.openai_client.chat.completions.create(
                model=self.settings.llm_model,
                messages=messages,
                max_tokens=max_tokens or self.settings.max_tokens,
                temperature=self.settings.temperature,
            )
            return response.choices[0].message.content.strip()
except Exception as e:
print(f"OpenAI API error: {e}")
return None
    def _call_anthropic(
        self, messages: list, max_tokens: Optional[int] = None
    ) -> Optional[str]:
"""Call Anthropic API."""
try:
# Convert messages format for Anthropic
system_message = ""
user_messages = []
for msg in messages:
if msg["role"] == "system":
system_message = msg["content"]
else:
user_messages.append(msg)
# Combine user messages
combined_content = "\n\n".join([msg["content"] for msg in user_messages])
response = self.anthropic_client.messages.create(
model="claude-3-haiku-20240307", # Using a lighter model for cost efficiency
system=system_message,
messages=[{"role": "user", "content": combined_content}],
max_tokens=max_tokens or self.settings.max_tokens,
temperature=self.settings.temperature,
)
return response.content[0].text.strip()
except Exception as e:
print(f"Anthropic API error: {e}")
return None
def _call_huggingface(
        self, messages: list, max_tokens: Optional[int] = None
) -> Optional[str]:
"""Call HuggingFace Inference Providers API."""
try:
# Use OpenAI-compatible format for Inference Providers
payload = {
"model": self.settings.llm_model,
"messages": messages,
"max_tokens": max_tokens or self.settings.max_tokens,
"temperature": self.settings.temperature,
"stream": False,
}
print(
f"πŸ”„ Calling HF Inference Providers API with {self.settings.hf_inference_provider} provider..."
)
print(f"πŸ“‘ URL: {self.hf_client['api_url']}")
print(f"πŸ€– Model: {self.settings.llm_model}")
response = requests.post(
self.hf_client["api_url"],
headers=self.hf_client["headers"],
json=payload,
timeout=60,
)
print(f"πŸ“Š Response status: {response.status_code}")
if response.status_code == 200:
result = response.json()
print(f"βœ… Success: {result}")
# Handle OpenAI-compatible response format
if "choices" in result and len(result["choices"]) > 0:
content = result["choices"][0]["message"]["content"]
return content.strip()
else:
print(f"❌ Unexpected response format: {result}")
return None
else:
error_detail = response.text
print(
f"❌ HuggingFace Inference Providers API error: {response.status_code} - {error_detail}"
)
# Try to parse error details
try:
error_json = response.json()
if "error" in error_json:
print(f"πŸ” Error details: {error_json['error']}")
                except ValueError:
pass
return None
except requests.exceptions.Timeout:
print(f"⏱️ HuggingFace API timeout error")
return None
except Exception as e:
print(f"❌ HuggingFace API error: {e}")
return None
    def generate_text(
        self, messages: list, max_tokens: Optional[int] = None
    ) -> Optional[str]:
"""Generate text using the configured LLM."""
client, provider = self._get_client()
if not client:
print("❌ No LLM client available")
return None
print(f"🎯 Using {provider} provider for text generation")
if provider == "huggingface":
return self._call_huggingface(messages, max_tokens)
elif provider == "openai":
return self._call_openai(messages, max_tokens)
elif provider == "anthropic":
return self._call_anthropic(messages, max_tokens)
return None
def generate_cover_letter(
self, profile: UserProfile, job_description: str, tone: str = "professional"
) -> Dict[str, Any]:
"""
Generate a personalized cover letter.
Args:
profile: User profile containing skills, experience, etc.
job_description: The job posting description
tone: Desired tone (professional, casual, enthusiastic, formal)
Returns:
Dict with generated cover letter and metadata
"""
try:
# Prepare context
skills_text = ", ".join(profile.skills[:10]) # Limit skills for brevity
# Create prompt messages
system_prompt = f"""You are an expert cover letter writer. Generate a compelling, personalized cover letter that:
1. Is concise (under 300 words)
2. Matches the {tone} tone
3. Highlights relevant skills and experience
4. Shows enthusiasm for the specific role
5. Includes a strong opening and closing
6. Avoids generic templates
Format as a professional cover letter without date/address headers."""
user_prompt = f"""Generate a cover letter for this job:
JOB DESCRIPTION:
{job_description[:2000]} # Limit to avoid token limits
CANDIDATE PROFILE:
- Skills: {skills_text}
- Experience: {profile.experience_level or "Not specified"}
- Career Goals: {profile.career_goals}
- Location: {profile.location or "Not specified"}
- Education: {profile.education or "Not specified"}
RESUME SUMMARY:
{profile.resume[:1000]} # Limit resume text
Tone: {tone}
Write a cover letter that specifically connects the candidate's background to this job opportunity."""
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
]
# Generate cover letter
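            # max_tokens=400 comfortably covers the sub-300-word target set in the system prompt.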
cover_letter = self.generate_text(messages, max_tokens=400)
if cover_letter:
return {
"success": True,
"cover_letter": cover_letter,
"tone_used": tone,
"word_count": len(cover_letter.split()),
"character_count": len(cover_letter),
}
else:
return {"success": False, "message": "Failed to generate cover letter"}
except Exception as e:
return {
"success": False,
"message": f"Error generating cover letter: {str(e)}",
}
def generate_qa_response(
self, profile: UserProfile, question: str, context: str = ""
) -> Dict[str, Any]:
"""
Generate a response to an interview question or client inquiry.
Args:
profile: User profile for personalization
question: The question to answer
context: Additional context about the conversation
Returns:
Dict with generated response and confidence score
"""
try:
# Create prompt
system_prompt = """You are a professional career advisor helping someone answer interview questions and client inquiries.
Generate responses that are:
1. Concise (under 150 words)
2. Professional and confident
3. Specific to the person's background
4. Include relevant examples when possible
5. End with enthusiasm or next steps
Avoid generic answers and be authentic."""
user_prompt = f"""Answer this question professionally:
QUESTION: {question}
CONTEXT: {context}
CANDIDATE BACKGROUND:
- Skills: {", ".join(profile.skills[:8])}
- Experience Level: {profile.experience_level or "Not specified"}
- Career Goals: {profile.career_goals}
- Key Background: {profile.resume[:800]}
Provide a specific, personalized answer that showcases relevant experience and skills."""
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
]
# Generate response
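            # max_tokens=200 lines up with the 150-word cap in the system prompt.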
response = self.generate_text(messages, max_tokens=200)
if response:
# Simple confidence scoring based on response characteristics
word_count = len(response.split())
has_examples = any(
keyword in response.lower()
for keyword in [
"experience",
"project",
"worked",
"developed",
"managed",
]
)
has_specific_skills = any(
skill.lower() in response.lower() for skill in profile.skills[:5]
)
confidence_score = 0.6 # Base score
if has_examples:
confidence_score += 0.2
if has_specific_skills:
confidence_score += 0.15
if 50 <= word_count <= 150:
confidence_score += 0.05
confidence_score = min(confidence_score, 1.0)
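                # Example: a 100-word answer that mentions "project" and one of the
                # top-5 listed skills scores 0.6 + 0.2 + 0.15 + 0.05 = 1.0.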
return {
"success": True,
"response": response,
"confidence_score": round(confidence_score, 2),
"word_count": word_count,
}
else:
return {"success": False, "message": "Failed to generate response"}
except Exception as e:
return {"success": False, "message": f"Error generating response: {str(e)}"}