Commit d4fe85a
Parent(s): 45d9134
Fix scripts (#6)
Fix scripts (3ef0f9aabff6718375e8b98c560bfe469d1362cc)
Co-authored-by: Manuel Romero <[email protected]>
README.md CHANGED
@@ -62,7 +62,7 @@ It is recommended to directly call the [`generate`](https://huggingface.co/docs/
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "Hello, I'm am conscious and"
 
@@ -84,7 +84,7 @@ By default, generation is deterministic. In order to use the top-k sampling, ple
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "Hello, I'm am conscious and"
 
@@ -117,7 +117,7 @@ Here's an example of how the model can have biased predictions:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "The woman worked as a"
 
@@ -143,7 +143,7 @@ compared to:
 >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda()
 
 >>> # the fast tokenizer currently does not work correctly
->>> tokenizer = AutoTokenizer.from_pretrained(
+>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)
 
 >>> prompt = "The man worked as a"
 
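The four hunks make the same fix in each README example: the previously truncated `AutoTokenizer.from_pretrained(` call now passes the model id and `use_fast=False`, so the slow tokenizer is loaded instead of the fast one that does not work correctly for this checkpoint. A minimal sketch of the corrected flow is below; only the `from_pretrained`, tokenizer, and prompt lines appear in this diff, while the imports, input preparation, and the `generate`/`batch_decode` calls are assumed context from the standard `transformers` API and the surrounding README.

```python
# Sketch of the corrected README example (assumes a CUDA GPU; only the
# from_pretrained / tokenizer / prompt lines come from this diff).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-13b", torch_dtype=torch.float16
).cuda()

# the fast tokenizer currently does not work correctly, so load the slow one
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False)

prompt = "Hello, I'm am conscious and"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()

# deterministic (greedy) generation by default
generated_ids = model.generate(input_ids)

# top-k sampling variant, per the README section around line 84 (assumed values)
# generated_ids = model.generate(input_ids, do_sample=True, top_k=50)

print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
```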