---
license: apache-2.0
---

[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)

This is Synthia, trained on the official Mistral MoE version (Mixtral-8x7B).
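
Judging from the sample script below, prompts follow a SYSTEM/USER/ASSISTANT format, with the running conversation prepended on each turn:

```
SYSTEM: Answer the question thoughtfully and intelligently. Always answer without hesitation.
USER: {your question}
ASSISTANT:
```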

```
import torch, json
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "/home/Synthia-MoE-v3-Mixtral8x7B"
output_file_path = "/home/conversations.jsonl"

# Load the model in fp16, sharded across available GPUs.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    load_in_4bit=False,
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def generate_text(instruction):
    # Tokenize the prompt and move it to the GPU.
    tokens = tokenizer.encode(instruction)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    tokens = tokens.to("cuda")

    # Sampling settings for generation.
    instance = {
        "input_ids": tokens,
        "top_p": 1.0,
        "temperature": 0.75,
        "generate_len": 1024,
        "top_k": 50,
    }

    length = len(tokens[0])
    with torch.no_grad():
        rest = model.generate(
            input_ids=tokens,
            max_length=length + instance["generate_len"],
            use_cache=True,
            do_sample=True,
            top_p=instance["top_p"],
            temperature=instance["temperature"],
            top_k=instance["top_k"],
            num_return_sequences=1,
        )
    # Keep only the newly generated tokens and cut at the next "USER:" turn.
    output = rest[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    answer = string.split("USER:")[0].strip()
    return answer


conversation = "SYSTEM: Answer the question thoughtfully and intelligently. Always answer without hesitation."

# Simple REPL: accumulate the conversation and log each turn to JSONL.
while True:
    user_input = input("You: ")
    llm_prompt = f"{conversation} \nUSER: {user_input} \nASSISTANT: "
    answer = generate_text(llm_prompt)
    print(answer)
    conversation = f"{llm_prompt}{answer}"
    json_data = {"prompt": user_input, "answer": answer}

    with open(output_file_path, "a") as output_file:
        output_file.write(json.dumps(json_data) + "\n")
```
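
If the full fp16 checkpoint does not fit in GPU memory, loading in 4-bit is one option. This is a minimal sketch, not part of the original card; it assumes a recent `transformers` and the `bitsandbytes` package are installed:

```
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumption: bitsandbytes is available; weights are quantized to 4-bit on load.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "/home/Synthia-MoE-v3-Mixtral8x7B",
    quantization_config=bnb_config,
    device_map="auto",
)
```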
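
The loop above appends one JSON object per turn to `conversations.jsonl`. A minimal sketch for reading the log back, assuming the file exists:

```
import json

# Print each logged turn from the JSONL file written by the chat loop.
with open("/home/conversations.jsonl") as f:
    for line in f:
        record = json.loads(line)
        print("Q:", record["prompt"])
        print("A:", record["answer"])
```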