goku committed on
Commit
6e67914
·
verified ·
1 Parent(s): 89b22fa

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -52,8 +52,8 @@ Using `AutoModelForCausalLM` and `AutoTokenizer`
52
  from transformers import AutoTokenizer, AutoModelForCausalLM
53
 
54
  # Load the model and tokenizer
55
- tokenizer = AutoTokenizer.from_pretrained("premai-io/prem-1B-chat")
56
- model = AutoModelForCausalLM.from_pretrained('premai-io/prem-1B-chat', torch_dtype=torch.bfloat16)
57
  model = model.to('cuda')
58
 
59
  # Setup terminators
@@ -88,7 +88,7 @@ import torch
88
  from transformers import pipeline
89
 
90
  # Load the pipeline
91
- pipe = pipeline("text-generation", model="premai-io/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
92
 
93
  # Prepare prompt
94
  messages = [
 
52
  from transformers import AutoTokenizer, AutoModelForCausalLM
53
 
54
  # Load the model and tokenizer
55
+ tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
56
+ model = AutoModelForCausalLM.from_pretrained('prem-research/prem-1B-chat', torch_dtype=torch.bfloat16)
57
  model = model.to('cuda')
58
 
59
  # Setup terminators
 
88
  from transformers import pipeline
89
 
90
  # Load the pipeline
91
+ pipe = pipeline("text-generation", model="prem-research/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
92
 
93
  # Prepare prompt
94
  messages = [