Update code examples so they work out of the box
README.md
CHANGED
@@ -35,6 +35,7 @@ This allows the model to better adapt to human preferences in the conversations.
 This requires a GPU with 48GB memory.
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 # init
 tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B")
 model = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B", torch_dtype=torch.float16)
@@ -66,6 +67,7 @@ print(output_str)
 
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 # init
 tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B")
 model = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B", torch_dtype=torch.bfloat16)
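For reference, here is a minimal sketch of what the first fixed snippet looks like end to end once `import torch` is in place. The diff only shows the top of each example; the prompt format (`<human>:` / `<bot>:`) and the sampling settings below are illustrative assumptions, with only `print(output_str)` visible in the hunk context above.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# init
tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B")
model = AutoModelForCausalLM.from_pretrained(
    "togethercomputer/GPT-NeoXT-Chat-Base-20B", torch_dtype=torch.float16
)
model = model.to("cuda:0")  # fp16 weights; the README notes this needs a GPU with 48GB memory

# infer -- prompt format and generation parameters are assumptions, not part of this diff
inputs = tokenizer("<human>: Hello!\n<bot>:", return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=64,
    do_sample=True,
    temperature=0.8,
    pad_token_id=tokenizer.eos_token_id,
)
output_str = tokenizer.decode(outputs[0])
print(output_str)
```

The bfloat16 variant is identical apart from `torch_dtype=torch.bfloat16`. The added `import torch` line is what makes both snippets self-contained: without it, the `torch.float16` / `torch.bfloat16` arguments raise a `NameError` when the snippet is pasted as-is, which is the bug this commit fixes.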