bainskarman committed
Commit 5560955 · verified · 1 Parent(s): b2a231b

Update model.py

Files changed (1)
  1. model.py  +20 -5
model.py CHANGED
@@ -1,4 +1,5 @@
 from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 def modelFeedback(ats_score, resume_data, job_description):
     """
@@ -29,12 +30,26 @@ def modelFeedback(ats_score, resume_data, job_description):
     #### Job Description: {job_description}
     """
 
-    pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
-    # Generate the response using the pipeline
+
+    # Load the tokenizer and model
+    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
+    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
     try:
-        output = pipe(input_prompt, max_length=1500, temperature=0.01, top_p=0.7, return_full_text=True)
-        response_text = output[0]['generated_text']  # Extract generated text from the pipeline's output
+        # Tokenize the input
+        input_ids = tokenizer.encode(input_prompt, return_tensors="pt")
+
+        # Generate the output
+        output = model.generate(
+            input_ids,
+            max_length=1500,
+            temperature=0.01,
+            top_p=0.7,
+            pad_token_id=tokenizer.eos_token_id  # Ensure padding works properly
+        )
+
+        # Decode the output
+        response_text = tokenizer.decode(output[0], skip_special_tokens=True)
         return response_text
     except Exception as e:
-        print(f"Pipeline generation error: {e}")
+        print(f"Error during generation: {e}")
         return "Error: Unable to generate feedback."