goku committed
Commit 1a164ab · verified · 1 Parent(s): 829a27b

Update README.md

Files changed (1):
  1. README.md +3 -3
README.md CHANGED
@@ -53,8 +53,8 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("premai-io/prem-1B-chat")
-model = AutoModelForCausalLM.from_pretrained('premai-io/prem-1B-chat', torch_dtype=torch.bfloat16)
+tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
+model = AutoModelForCausalLM.from_pretrained('prem-research/prem-1B-chat', torch_dtype=torch.bfloat16)
 model = model.to('cuda')
 
 # Setup terminators
@@ -89,7 +89,7 @@ import torch
 from transformers import pipeline
 
 # Load the pipeline
-pipe = pipeline("text-generation", model="premai-io/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
+pipe = pipeline("text-generation", model="prem-research/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
 
 # Prepare prompt
 messages = [
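
For reference, a minimal sketch of the first updated snippet (AutoTokenizer/AutoModelForCausalLM) using the new repository ID. Only the repo ID, dtype, and loading calls come from this diff; the prompt content, terminator handling, and generation parameters below are illustrative assumptions, not part of the README.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from the renamed repo
tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
model = AutoModelForCausalLM.from_pretrained("prem-research/prem-1B-chat", torch_dtype=torch.bfloat16)
model = model.to("cuda")

# Illustrative chat prompt; the README's actual messages are outside this diff
messages = [
    {"role": "user", "content": "What is a large language model?"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to("cuda")

# Generation settings here are assumptions, not taken from the README
outputs = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```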
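Likewise, a sketch of the second updated snippet (the text-generation pipeline) with the new repo ID. The example messages and max_new_tokens value are assumptions for illustration; the diff only confirms the pipeline call itself.

```python
import torch
from transformers import pipeline

# Load the pipeline from the renamed repo
pipe = pipeline("text-generation", model="prem-research/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)

# Illustrative chat messages; the README's actual prompt is not shown in this diff
messages = [
    {"role": "user", "content": "Explain what this model does in one sentence."},
]

# Build the prompt string with the model's chat template, then generate
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
out = pipe(prompt, max_new_tokens=128)  # max_new_tokens is an assumed value
print(out[0]["generated_text"])
```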