TimeMobius committed on
Commit
b7efe13
1 Parent(s): 6a5fb6f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +25 -0
README.md CHANGED
@@ -1,3 +1,28 @@
1
  ---
2
  license: apache-2.0
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: apache-2.0
3
  ---
+ # Huggingface format for Mobius Chat 12B 128k v4
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ def generate_prompt(instruction, input=""):
+     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+     input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
+     if input:
+         return f"""Instruction: {instruction}
+ Input: {input}
+ Response:"""
+     else:
+         return f"""User: {instruction}
+
+ Assistant:"""
+
+ #model = AutoModelForCausalLM.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True, torch_dtype=torch.bfloat16).to(0)
+ model = AutoModelForCausalLM.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True, torch_dtype=torch.float16).to(0)
+ tokenizer = AutoTokenizer.from_pretrained("TimeMobius/Mobius-Chat-12B-128k-HF", trust_remote_code=True)
+
+ text = "Write a beginning of sci-fi novel"
+ prompt = generate_prompt(text)
+
+ inputs = tokenizer(prompt, return_tensors="pt").to(0)
+ output = model.generate(inputs["input_ids"], max_new_tokens=128, do_sample=True, temperature=1.0, top_p=0.3, top_k=0, )
+ print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True))
+ ```