Update README.md
Remove GaudiConfig from example as it is now optional
README.md
CHANGED
|
@@ -23,24 +23,23 @@ This enables to specify:
|
|
| 23 |
## Usage
|
| 24 |
|
| 25 |
The model is instantiated the same way as in the Transformers library.
|
| 26 |
-
The only difference is that a Gaudi configuration must be instantiated and passed to the trainer:
|
| 27 |
|
| 28 |
```
|
| 29 |
-
from optimum.habana import GaudiConfig, GaudiTrainer, GaudiTrainingArguments
|
| 30 |
from transformers import GPT2Tokenizer, GPT2Model
|
| 31 |
|
| 32 |
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
|
| 33 |
model = GPT2Model.from_pretrained('gpt2')
|
| 34 |
-
gaudi_config = GaudiConfig.from_pretrained("Habana/gpt2")
|
| 35 |
args = GaudiTrainingArguments(
|
| 36 |
output_dir="/tmp/output_dir",
|
| 37 |
use_habana=True,
|
| 38 |
use_lazy_mode=True,
|
|
|
|
| 39 |
)
|
| 40 |
|
| 41 |
trainer = GaudiTrainer(
|
| 42 |
model=model,
|
| 43 |
-
gaudi_config=gaudi_config,
|
| 44 |
args=args,
|
| 45 |
tokenizer=tokenizer,
|
| 46 |
)
|
|
|
|
| 23 |
## Usage
|
| 24 |
|
| 25 |
The model is instantiated the same way as in the Transformers library.
|
| 26 |
+
The only difference is that there are a few new training arguments specific to HPUs:
|
| 27 |
|
| 28 |
```
|
| 29 |
+
from optimum.habana import GaudiTrainer, GaudiTrainingArguments
|
| 30 |
from transformers import GPT2Tokenizer, GPT2Model
|
| 31 |
|
| 32 |
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
|
| 33 |
model = GPT2Model.from_pretrained('gpt2')
|
|
|
|
| 34 |
args = GaudiTrainingArguments(
|
| 35 |
output_dir="/tmp/output_dir",
|
| 36 |
use_habana=True,
|
| 37 |
use_lazy_mode=True,
|
| 38 |
+
gaudi_config_name="Habana/gpt2",
|
| 39 |
)
|
| 40 |
|
| 41 |
trainer = GaudiTrainer(
|
| 42 |
model=model,
|
|
|
|
| 43 |
args=args,
|
| 44 |
tokenizer=tokenizer,
|
| 45 |
)
|