nextai-team committed
Commit: a7c3b2a
1 Parent(s): 782e8df
Update README.md
README.md CHANGED
````diff
@@ -24,12 +24,13 @@ from transformers import AutoTokenizer
 import transformers
 import torch
 
-model = "nextai-team/Moe-2x7b-QA-Code"
+model = "nextai-team/Moe-2x7b-QA-Code" #If you want to test your own model, replace this value with the model directory path
 
 tokenizer = AutoTokenizer.from_pretrained(model)
 pipeline = transformers.pipeline(
     "text-generation",
     model=model,
+    device_map="auto",
     model_kwargs={"torch_dtype": torch.float16},
 )
 
@@ -39,7 +40,7 @@ def generate_resposne(query):
     outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
     return outputs[0]['generated_text']
 
-response = generate_resposne("How to
+response = generate_resposne("How to learn coding .Please provide a step by step procedure")
 print(response)
 
 ```
````
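
For reference, here is a minimal, self-contained sketch of the README usage snippet as it stands after this commit. The body of `generate_resposne` (identifier spelling as in the README) is only partially visible in the hunks, so the prompt construction from `query` below is an assumption; everything else follows the diff.

```python
# Sketch of the README snippet after this commit, assembled from the diff hunks.
import transformers
import torch
from transformers import AutoTokenizer

model = "nextai-team/Moe-2x7b-QA-Code"  # replace with a local model directory to test your own model

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    device_map="auto",  # added in this commit: spread the model across available devices
    model_kwargs={"torch_dtype": torch.float16},
)

def generate_resposne(query):  # identifier spelling follows the README
    # Assumption: the diff does not show how the prompt is built from the query,
    # so it is passed through unchanged here.
    prompt = query
    outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    return outputs[0]['generated_text']

response = generate_resposne("How to learn coding .Please provide a step by step procedure")
print(response)
```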