Update README.md
Browse files
README.md
CHANGED
@@ -71,10 +71,10 @@ Find below some example scripts on how to use the model in `transformers`:
|
|
71 |
|
72 |
```python
|
73 |
|
74 |
-
from transformers import AutoTokenizer,
|
75 |
|
76 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
77 |
-
model =
|
78 |
|
79 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
80 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
@@ -93,10 +93,10 @@ print(tokenizer.decode(outputs[0]))
|
|
93 |
|
94 |
```python
|
95 |
# pip install accelerate
|
96 |
-
from transformers import AutoTokenizer,
|
97 |
|
98 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
99 |
-
model =
|
100 |
|
101 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
102 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|
@@ -117,10 +117,10 @@ print(tokenizer.decode(outputs[0]))
|
|
117 |
|
118 |
```python
|
119 |
# pip install accelerate
|
120 |
-
from transformers import AutoTokenizer,
|
121 |
|
122 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
123 |
-
model =
|
124 |
|
125 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
126 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|
@@ -139,10 +139,10 @@ print(tokenizer.decode(outputs[0]))
|
|
139 |
|
140 |
```python
|
141 |
# pip install bitsandbytes accelerate
|
142 |
-
from transformers import AutoTokenizer,
|
143 |
|
144 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
145 |
-
model =
|
146 |
|
147 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
148 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|
|
|
71 |
|
72 |
```python
|
73 |
|
74 |
+
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
|
75 |
|
76 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
77 |
+
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-32")
|
78 |
|
79 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
80 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
|
|
93 |
|
94 |
```python
|
95 |
# pip install accelerate
|
96 |
+
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
|
97 |
|
98 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
99 |
+
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-32", device_map="auto")
|
100 |
|
101 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
102 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|
|
|
117 |
|
118 |
```python
|
119 |
# pip install accelerate
|
120 |
+
import torch
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
|
121 |
|
122 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
123 |
+
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-32", device_map="auto", torch_dtype=torch.float16)
|
124 |
|
125 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
126 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|
|
|
139 |
|
140 |
```python
|
141 |
# pip install bitsandbytes accelerate
|
142 |
+
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration
|
143 |
|
144 |
tokenizer = AutoTokenizer.from_pretrained("google/switch-base-32")
|
145 |
+
model = SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-32", device_map="auto")
|
146 |
|
147 |
input_text = "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
|
148 |
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(0)
|