Jon Hall committed on
Commit
94a6fdd
·
1 Parent(s): 605e092

Update model files

Browse files
Files changed (1) hide show
  1. run_gpt2.py +31 -0
run_gpt2.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Smoke-test generation against a locally stored GPT-2-style model.

Loads a tokenizer and causal-LM from the directory this script lives in,
greedily generates 20 tokens from a fixed prompt on CPU, and prints the
decoded result.
"""

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Path to the local model — assumes run_gpt2.py sits inside the model folder.
MODEL_PATH = "./"


def main() -> None:
    """Load the local model and print a short greedy completion."""
    # Load tokenizer and model from the local directory.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
    model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)

    # Inference only: eval() disables dropout and other train-time behavior.
    model.eval()

    # Example input text.
    input_text = "Hello, my name is"

    # Encode the prompt as PyTorch tensors.
    inputs = tokenizer(input_text, return_tensors="pt")

    # no_grad avoids building the autograd graph; greedy (do_sample=False)
    # decoding is deterministic and a safe default on CPU.
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=20,                    # generate 20 new tokens
            do_sample=False,                      # deterministic, avoids NaNs
            pad_token_id=tokenizer.eos_token_id,  # avoids attention mask issues
        )

    # Decode and print the full sequence (prompt + continuation).
    generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    print("Generated text:\n", generated_text)


# Guard the entry point so importing this module has no side effects
# (the original ran the model load and generation at import time).
if __name__ == "__main__":
    main()