Update model.py
model.py CHANGED
@@ -1,4 +1,5 @@
 from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 def modelFeedback(ats_score, resume_data, job_description):
     """
@@ -29,12 +30,26 @@ def modelFeedback(ats_score, resume_data, job_description):
     #### Job Description: {job_description}
     """
 
-    …
-    # …
+
+    # Load the tokenizer and model
+    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
+    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
     try:
-        …
-        …
+        # Tokenize the input
+        input_ids = tokenizer.encode(input_prompt, return_tensors="pt")
+
+        # Generate the output
+        output = model.generate(
+            input_ids,
+            max_length=1500,
+            temperature=0.01,
+            top_p=0.7,
+            pad_token_id=tokenizer.eos_token_id  # Ensure padding works properly
+        )
+
+        # Decode the output
+        response_text = tokenizer.decode(output[0], skip_special_tokens=True)
         return response_text
     except Exception as e:
-        print(f"…
+        print(f"Error during generation: {e}")
         return "Error: Unable to generate feedback."
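
A couple of caveats on the new generation call, as a reviewer's sketch rather than part of the commit: in transformers, temperature and top_p only take effect when do_sample=True (greedy decoding ignores them), max_length counts the prompt tokens as well, so max_new_tokens is usually the safer bound, and passing the attention mask avoids the pad-token warning. Note also that meta-llama/Llama-3.2-1B is a gated repo, so loading it requires accepting the license and authenticating. A minimal variant under those assumptions, reusing the same input_prompt the function builds above:

# Sketch only, not the committed code. Same model and parameters as the
# diff, with the flags transformers expects for sampling to apply.
inputs = tokenizer(input_prompt, return_tensors="pt")
output = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],  # avoids the pad/attention warning
    do_sample=True,           # required for temperature/top_p to take effect
    temperature=0.01,
    top_p=0.7,
    max_new_tokens=1024,      # bounds generated tokens, not prompt + output
    pad_token_id=tokenizer.eos_token_id,
)
response_text = tokenizer.decode(output[0], skip_special_tokens=True)

With temperature this low the output is nearly deterministic anyway, so an alternative design choice would be to drop temperature/top_p entirely and keep plain greedy decoding.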