Invalid Task Type #2
opened by nawajish10
Change C:\Users\XXX\.cache\huggingface\hub\models--LlamaFactoryAI--cv-job-description-matching\snapshots\b5ca72308137610a605d45e4bcd887550363b783\adapter_config.json so that it contains:

"task_type": "QUESTION_ANS",
Then try this:
import os

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_name = "akjindal53244/Llama-3.1-Storm-8B"

# Folder for layers that get offloaded out of GPU/CPU memory.
os.makedirs("./offload", exist_ok=True)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base model in bf16 and let accelerate place the layers.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    load_in_8bit=False,
    load_in_4bit=False,
    offload_folder="./offload",
)

tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Llama tokenizers ship without a pad token; add one so generation
# has a well-defined padding id.
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    base_model.resize_token_embeddings(len(tokenizer))
# Load the LoRA adapter on top of the base model. This step is what
# fails with "Invalid task type" while adapter_config.json holds a bad value.
peft_model_id = "LlamaFactoryAI/cv-job-description-matching"
config = PeftConfig.from_pretrained(peft_model_id)
model = PeftModel.from_pretrained(
    base_model,
    peft_model_id,
    torch_dtype=torch.bfloat16,
    offload_folder="./offload",
)
# No .to(device) here: device_map="auto" has already placed the weights,
# and moving a model with offloaded layers raises an error.
# Use the model
messages = [
    {
        "role": "system",
        "content": """You are an advanced AI model designed to analyze the compatibility between a CV and a job description. You will receive a CV and a job description. Your task is to output a structured JSON format that includes the following:
1. matching_analysis: Analyze the CV against the job description to identify key strengths and gaps.
2. description: Summarize the relevance of the CV to the job description in a few concise sentences.
3. score: Provide a numerical compatibility score (0-100) based on qualifications, skills, and experience.
4. recommendation: Suggest actions for the candidate to improve their match or readiness for the role.
Your output must be in JSON format as follows:
{
"matching_analysis": "Your detailed analysis here.",
"description": "A brief summary here.",
"score": 85,
"recommendation": "Your suggestions here."
}
""",
    },
    {
        "role": "user",
        "content": "<CV> {cv} </CV>\n<job_description> {job_description} </job_description>",
    },
]
cv = """
???
""" # Replace with actual CV
job_description = """
???
"""
# Replace with actual job description
messages[1]["content"] = messages[1]["content"].format(
cv=cv, job_description=job_description
)
# Build the chat-formatted prompt and move it to the model's device.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
).to(device)

outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=128,  # may truncate the JSON; raise if answers get cut off
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

# Note: outputs[0] contains the prompt followed by the completion.
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)
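To get just the model's answer, decode only the tokens generated after the prompt and parse them. A minimal sketch, assuming the model actually emits well-formed JSON (not guaranteed with sampling):

import json

# Keep only the tokens produced after the input prompt.
prompt_len = inputs["input_ids"].shape[-1]
completion = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

try:
    result = json.loads(completion)
    print(result["score"], result["recommendation"])
except json.JSONDecodeError:
    # The model drifted from the requested format; fall back to raw text.
    print(completion)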
Okay, I will try this. Thank you.