Update README.md
Remove GaudiConfig from the usage example because it is not mandatory anymore
README.md CHANGED
````diff
@@ -23,24 +23,23 @@ This enables to specify:
 ## Usage
 
 The model is instantiated the same way as in the Transformers library.
-The only difference is that
+The only difference is that there are a few new training arguments specific to HPUs:
 
 ```
-from optimum.habana import GaudiConfig, GaudiTrainer, GaudiTrainingArguments
+from optimum.habana import GaudiTrainer, GaudiTrainingArguments
 from transformers import AlbertTokenizer, AlbertModel
 
 tokenizer = AlbertTokenizer.from_pretrained("albert-large-v2")
 model = AlbertModel.from_pretrained("albert-large-v2")
-gaudi_config = GaudiConfig.from_pretrained("Habana/albert-large-v2")
 args = GaudiTrainingArguments(
     output_dir="/tmp/output_dir",
     use_habana=True,
     use_lazy_mode=True,
+    gaudi_config_name="Habana/albert-large-v2",
 )
 
 trainer = GaudiTrainer(
     model=model,
-    gaudi_config=gaudi_config,
     args=args,
     tokenizer=tokenizer,
 )
```
````
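For reference, here is the usage example as it reads after this change, assembled from the new side of the hunk above into one self-contained snippet (the comment explaining the new argument is ours, based on the commit message):

```python
from optimum.habana import GaudiTrainer, GaudiTrainingArguments
from transformers import AlbertTokenizer, AlbertModel

tokenizer = AlbertTokenizer.from_pretrained("albert-large-v2")
model = AlbertModel.from_pretrained("albert-large-v2")

# Per this commit, an explicit GaudiConfig object is no longer mandatory:
# passing the configuration's name as a training argument is enough.
args = GaudiTrainingArguments(
    output_dir="/tmp/output_dir",
    use_habana=True,
    use_lazy_mode=True,
    gaudi_config_name="Habana/albert-large-v2",
)

trainer = GaudiTrainer(
    model=model,
    args=args,
    tokenizer=tokenizer,
)
```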