Update README.md
README.md
CHANGED
@@ -52,8 +52,8 @@ Using `AutoModelForCausalLM` and `AutoTokenizer`
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModelForCausalLM.from_pretrained('
+tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
+model = AutoModelForCausalLM.from_pretrained('prem-research/prem-1B-chat', torch_dtype=torch.bfloat16)
 model = model.to('cuda')
 
 # Setup terminators
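For context, here is a minimal runnable sketch of how the completed loading lines fit into the README's surrounding snippet. The model id and `torch_dtype` come from the diff; the terminator value, the example message, and the generation parameters are assumptions, since the diff only shows the adjacent comments.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer (the two lines completed by this commit)
tokenizer = AutoTokenizer.from_pretrained("prem-research/prem-1B-chat")
model = AutoModelForCausalLM.from_pretrained("prem-research/prem-1B-chat", torch_dtype=torch.bfloat16)
model = model.to("cuda")

# Setup terminators -- the diff shows only this comment; stopping at the
# tokenizer's EOS token is an assumption
terminators = [tokenizer.eos_token_id]

# Prepare a prompt with the chat template (the message content is illustrative)
messages = [{"role": "user", "content": "What is a large language model?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# Generate and decode only the newly produced tokens
outputs = model.generate(input_ids, max_new_tokens=256, eos_token_id=terminators)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```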
@@ -88,7 +88,7 @@ import torch
 from transformers import pipeline
 
 # Load the pipeline
-pipe = pipeline("text-generation", model="
+pipe = pipeline("text-generation", model="prem-research/prem-1B-chat", torch_dtype=torch.bfloat16, device=0)
 
 # Prepare prompt
 messages = [
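Similarly, a minimal sketch of the pipeline path after this change. The `pipeline(...)` call matches the new line; the prompt construction and the generation arguments are assumptions filled in around the diff's `# Prepare prompt` context.

```python
import torch
from transformers import pipeline

# Load the pipeline (the line completed by this commit)
pipe = pipeline(
    "text-generation",
    model="prem-research/prem-1B-chat",
    torch_dtype=torch.bfloat16,
    device=0,
)

# Prepare prompt -- the message content is illustrative, not from the README
messages = [{"role": "user", "content": "What is a large language model?"}]
prompt = pipe.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Generate; max_new_tokens and return_full_text are assumed settings
result = pipe(prompt, max_new_tokens=256, return_full_text=False)
print(result[0]["generated_text"])
```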