kaiiddo committed on
Commit
8789475
·
verified ·
1 Parent(s): 49c453e

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +17 -4
README.md CHANGED
@@ -68,13 +68,26 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
68
  tokenizer = AutoTokenizer.from_pretrained("kaiiddo/A3ON-1B")
69
  model = AutoModelForCausalLM.from_pretrained("kaiiddo/A3ON-1B")
70
 
71
- # Generate text
 
 
 
72
  inputs = tokenizer("Hello, how can I help you today?", return_tensors="pt")
73
- outputs = model.generate(**inputs, max_length=500)
 
 
 
 
 
 
 
 
74
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
75
 
76
- # Print the response
77
- print(response)
 
78
  ```
79
 
80
  ### Model Parameter Count
 
68
  tokenizer = AutoTokenizer.from_pretrained("kaiiddo/A3ON-1B")
69
  model = AutoModelForCausalLM.from_pretrained("kaiiddo/A3ON-1B")
70
 
71
+ # Set pad_token_id to eos_token_id to avoid warnings
72
+ model.config.pad_token_id = model.config.eos_token_id
73
+
74
+ # Generate text with adjusted parameters
75
  inputs = tokenizer("Hello, how can I help you today?", return_tensors="pt")
76
+ outputs = model.generate(
77
+ **inputs,
78
+ max_length=500,
79
+ do_sample=True,
80
+ temperature=0.7,
81
+ top_k=50
82
+ )
83
+
84
+ # Decode the output and split into lines
85
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
86
+ response_lines = response.split('\n')
87
 
88
+ # Print each line of the response
89
+ for line in response_lines:
90
+ print(line)
91
  ```
92
 
93
  ### Model Parameter Count