Update README.md
README.md CHANGED
@@ -36,12 +36,18 @@ You can use the models through Huggingface's Transformers library. Check our Git
 ## CLI demo
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
+import torch
 from string import Template
 prompt_template = Template("Human: ${inst} </s> Assistant: ")
 
+if torch.cuda.is_available():
+    device_map = "cuda"
+else:
+    device_map = "cpu"
+
 tokenizer = AutoTokenizer.from_pretrained("m-a-p/ChatMusician", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained("m-a-p/ChatMusician",
-
+model = AutoModelForCausalLM.from_pretrained("m-a-p/ChatMusician", torch_dtype=torch.float16, device_map=device_map, resume_download=True).eval()
+
 generation_config = GenerationConfig(
     temperature=0.2,
     top_k=40,
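A minimal sketch (not part of this commit) of how the updated snippet might be driven end to end, assuming the README continues with a standard `generate()` call. The example instruction, `do_sample`, and `max_new_tokens` values below are placeholders and are not taken from the hunk above, which cuts off after `top_k=40`.

```python
from string import Template

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

# Pick the GPU when one is available, otherwise fall back to CPU.
device_map = "cuda" if torch.cuda.is_available() else "cpu"

prompt_template = Template("Human: ${inst} </s> Assistant: ")

tokenizer = AutoTokenizer.from_pretrained("m-a-p/ChatMusician", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "m-a-p/ChatMusician", torch_dtype=torch.float16, device_map=device_map
).eval()

generation_config = GenerationConfig(
    temperature=0.2,
    top_k=40,
    do_sample=True,       # assumed; needed for temperature/top_k to take effect
    max_new_tokens=512,   # assumed; the hunk above ends before this point
)

# Hypothetical example instruction; any ChatMusician prompt works here.
instruction = "Write a short folk melody in ABC notation."
prompt = prompt_template.substitute(inst=instruction)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, generation_config=generation_config)

# Decode only the newly generated tokens, skipping the echoed prompt.
response = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(response)
```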