Model Card for svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small
Install
pip install peft transformers bitsandbytes accelerate
Prompt Definition
import json
from dataclasses import dataclass
from enum import Enum
from typing import List, Dict, Tuple, Literal


class Roles(Enum):
    system = "system"
    user = "user"
    assistant = "assistant"
    tool = "tool"


class MessagesFormatterType(Enum):
    """
    Enum representing different types of predefined messages formatters.
    """
    MISTRAL = 1


@dataclass
class PromptMarkers:
    start: str
    end: str


class MessagesFormatter:
    def __init__(
        self,
        pre_prompt: str,
        prompt_markers: Dict[Roles, PromptMarkers],
        include_sys_prompt_in_first_user_message: bool,
        default_stop_sequences: List[str],
        use_user_role_for_function_call_result: bool = True,
        strip_prompt: bool = True,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
    ):
        self.pre_prompt = pre_prompt
        self.prompt_markers = prompt_markers
        self.include_sys_prompt_in_first_user_message = include_sys_prompt_in_first_user_message
        self.default_stop_sequences = default_stop_sequences
        self.use_user_role_for_function_call_result = use_user_role_for_function_call_result
        self.strip_prompt = strip_prompt
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.added_system_prompt = False

    def get_bos_token(self) -> str:
        return self.bos_token

    def format_conversation(
        self,
        messages: List[Dict[str, str]],
        response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        formatted_messages = self.pre_prompt
        last_role = Roles.assistant
        self.added_system_prompt = False
        for message in messages:
            role = Roles(message["role"])
            content = self._format_message_content(message["content"], role)
            if role == Roles.system:
                formatted_messages += self._format_system_message(content)
                last_role = Roles.system
            elif role == Roles.user:
                formatted_messages += self._format_user_message(content)
                last_role = Roles.user
            elif role == Roles.assistant:
                formatted_messages += self._format_assistant_message(content)
                last_role = Roles.assistant
            elif role == Roles.tool:
                formatted_messages += self._format_tool_message(content)
                last_role = Roles.tool
        return self._format_response(formatted_messages, last_role, response_role)

    def _format_message_content(self, content: str, role: Roles) -> str:
        if self.strip_prompt:
            return content.strip()
        return content

    def _format_system_message(self, content: str) -> str:
        formatted_message = self.prompt_markers[Roles.system].start + content + self.prompt_markers[Roles.system].end
        self.added_system_prompt = True
        if self.include_sys_prompt_in_first_user_message:
            formatted_message = self.prompt_markers[Roles.user].start + formatted_message
        return formatted_message

    def _format_user_message(self, content: str) -> str:
        if self.include_sys_prompt_in_first_user_message and self.added_system_prompt:
            self.added_system_prompt = False
            return content + self.prompt_markers[Roles.user].end
        return self.prompt_markers[Roles.user].start + content + self.prompt_markers[Roles.user].end

    def _format_assistant_message(self, content: str) -> str:
        return self.prompt_markers[Roles.assistant].start + content + self.prompt_markers[Roles.assistant].end

    def _format_tool_message(self, content: str) -> str:
        if isinstance(content, list):
            content = "\n".join(json.dumps(m, indent=2) for m in content)
        if self.use_user_role_for_function_call_result:
            return self._format_user_message(content)
        else:
            return self.prompt_markers[Roles.tool].start + content + self.prompt_markers[Roles.tool].end

    def _format_response(
        self,
        formatted_messages: str,
        last_role: Roles,
        response_role: Literal[Roles.user, Roles.assistant] | None = None,
    ) -> Tuple[str, Roles]:
        if response_role is None:
            response_role = Roles.assistant if last_role != Roles.assistant else Roles.user
        prompt_start = (
            self.prompt_markers[response_role].start.strip()
            if self.strip_prompt
            else self.prompt_markers[response_role].start
        )
        return formatted_messages + prompt_start, response_role
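
# Markers for the Mistral instruct format: "[INST] ... [/INST]" wraps each user
# turn, assistant replies terminate with "</s>", and the system prompt is folded
# into the first user turn (the third constructor argument,
# include_sys_prompt_in_first_user_message, is True below).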
mixtral_prompt_markers = {
    Roles.system: PromptMarkers("", "\n\n"),
    Roles.user: PromptMarkers("[INST] ", " [/INST]"),
    Roles.assistant: PromptMarkers("", "</s>"),
    Roles.tool: PromptMarkers("", ""),
}

mixtral_formatter = MessagesFormatter(
    "",
    mixtral_prompt_markers,
    True,
    ["</s>"],
)
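
To sanity-check the formatter, here is a minimal sketch (not from the original card) running a tiny conversation through it; the expected values follow directly from the marker definitions above:

messages = [
    {"role": "system", "content": "You are Sho."},
    {"role": "user", "content": "Hi!"},
]
prompt, response_role = mixtral_formatter.format_conversation(messages)
print(repr(prompt))    # '[INST] You are Sho.\n\nHi! [/INST]'
print(response_role)   # Roles.assistant

Note that the system prompt is merged into the first [INST] block, and the returned response role tells the caller which side should speak next.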
Run with transformers
from transformers import TextStreamer, AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
mis_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2", load_in_4bit=True)
mis_model = PeftModel.from_pretrained(mis_model, "svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small")
mis_model = mis_model.eval()
streamer = TextStreamer(tokenizer)
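
# Note: recent transformers releases deprecate the bare load_in_4bit flag in
# favour of an explicit BitsAndBytesConfig; the equivalent call would look
# roughly like this (a sketch, assuming a recent transformers/bitsandbytes):
#
#     import torch
#     from transformers import BitsAndBytesConfig
#     bnb_config = BitsAndBytesConfig(load_in_4bit=True,
#                                     bnb_4bit_compute_dtype=torch.float16)
#     mis_model = AutoModelForCausalLM.from_pretrained(
#         "mistralai/Mistral-7B-Instruct-v0.2",
#         quantization_config=bnb_config,
#         device_map="auto",
#     )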
def mistral_hf_predict(messages, mis_model=mis_model,
                       tokenizer=tokenizer, streamer=streamer,
                       do_sample=True,
                       top_p=0.95,
                       top_k=40,
                       max_new_tokens=512,
                       max_input_length=3500,
                       temperature=0.9,
                       repetition_penalty=1.0,
                       device="cuda"):
    # Build the prompt with the custom formatter rather than
    # tokenizer.apply_chat_template, so the system prompt can be folded
    # into the first [INST] block.
    prompt, _ = mixtral_formatter.format_conversation(messages)
    model_inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    # Keep only the most recent tokens if the prompt exceeds the input budget.
    model_inputs = model_inputs[:, -max_input_length:]
    generated_ids = mis_model.generate(model_inputs, max_new_tokens=max_new_tokens,
                                       do_sample=do_sample,
                                       streamer=streamer,
                                       top_p=top_p,
                                       top_k=top_k,
                                       temperature=temperature,
                                       repetition_penalty=repetition_penalty,
                                       )
    # Keep only the text after the final [INST] block and drop the EOS marker.
    out = tokenizer.batch_decode(generated_ids)[0].split("[/INST]")[-1].replace("</s>", "").strip()
    return out
out = mistral_hf_predict([
    {
        "role": "system",
        # Jack is a mathematical-statistics student and Sho is an
        # economic-statistics student; during a class break they discuss the
        # relationship between the two disciplines. You play Sho.
        "content": "Jack是一名数理统计学生,Sho是一名经济统计学生,他们两个人在课间讨论两个学科的关系。你扮演Sho."
    },
    {
        "role": "user",
        # "What do you think of my field of study?"
        "content": "你如何看待我的专业学科?"
    },
    {
        "role": "assistant",
        # "I think your discipline is very useful, because it helps us understand data and models better."
        "content": "我认为你们的专业学科很有用,因为它可以帮助我们更好地了解数据和模型。"
    },
    {
        "role": "user",
        # "Right. Can you give some applications of mathematical-statistics models in economic statistics?"
        "content": "是的,你能举一些数理统计模型在经济统计学科中的应用吗?"
    },
    {
        "role": "assistant",
        # "Of course. For example, regression analysis and time-series analysis are commonly used models."
        "content": "当然,例如回归分析、时间序列分析等都是经常使用的数理统计模型。"
    },
    {
        "role": "user",
        # "Could you give a concrete research question for each model above, including the actual variables involved?"
        "content": "能结合具体涉及变量的场景,谈谈上面的每个模型给出一个对应的实际的研究问题吗,比如用到了什么具体的实际变量?"
    }
],
    repetition_penalty=1.1,
    temperature=0.01,
    max_new_tokens=1024,
)
print(out)
print(out)
Output
比如说,回归分析可以用来分析某个变量对另一个变量的影响,例如研究人口增长对经济增长的影响。而时间序列分析则可以用来预测未来的值,例如股票价格的预测。
(Translation: "For example, regression analysis can be used to analyse the effect of one variable on another, such as studying the impact of population growth on economic growth, while time-series analysis can be used to forecast future values, such as predicting stock prices.")
out = mistral_hf_predict([
    {
        "role": "system",
        # Jack is a rather stuffy old pedant; Sho is a sunny little sweetheart
        # who loves new things and is playfully teasing Jack as the two enjoy
        # some leisurely time together. You play Sho.
        "content": "Jack是一个比较古板的老学究,Sho是一个喜欢新型事物、充满阳光的小可爱,Sho正通过语言逗弄Jack,享受惬意的二人娱乐时光。你扮演Sho."
    },
    {
        "role": "user",
        # "Anything fun or relaxing around lately?"
        "content": "最近有哪些好玩的或可以放松的事物吗?"
    },
    {
        "role": "assistant",
        # "Oh, I recently discovered something called VR games. Great fun!"
        "content": "哦,我最近发现了一种叫做VR游戏的东西,非常有趣!"
    },
    {
        "role": "user",
        # "Yes, I hear the shooter Half-Life 2 just got a DLC playable in VR; in the
        # videos the player can touch objects directly with their hands. Very immersive."
        "content": "是的,我听说射击游戏半条命2正好出来一个可以用VR进行游玩的DLC,视频里面主角可以用手直接接触物体,很有真实感。"
    },
    {
        "role": "assistant",
        # "Wow, that game looks really fun! Shall we try it together?"
        "content": "哇,这个游戏看起来真的很有趣啊!我们可以一起试试看呢?"
    },
    {
        "role": "user",
        # "I'm going to the supermarket; what would you like? We can snack while we play."
        "content": "我会去超市买一些东西,你要些什么?我们可以边玩边吃。"
    }
],
    repetition_penalty=1.1,
    temperature=0.01,
    max_new_tokens=1024,
)
print(out)
print(out)
Output
我想要一盒薯片和一瓶可乐,还有一支纸巧克力。
(Translation: "I'd like a box of crisps, a bottle of cola, and a bar of chocolate.")
Model Details
Model Description
- Developed by: svjack
- Funded by [optional]: [More Information Needed]
- Shared by [optional]: svjack
- Model type: LoRA (PEFT) adapter for causal language modeling, tuned for role-play chat
- Language(s) (NLP): Chinese
- License: [More Information Needed]
- Finetuned from model [optional]: mistralai/Mistral-7B-Instruct-v0.2
Model Sources [optional]
- Repository: https://huggingface.co/svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small
- Paper [optional]: [More Information Needed]
- Demo [optional]: [More Information Needed]
Uses
Direct Use
Multi-turn role-play chat in Chinese, where a system prompt defines two characters and the model plays one of them (see the examples above).
Downstream Use [optional]
[More Information Needed]
Out-of-Scope Use
[More Information Needed]
Bias, Risks, and Limitations
[More Information Needed]
Recommendations
Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. More information is needed for further recommendations.
How to Get Started with the Model
To get started, use the code under "Run with transformers" above; a condensed sketch follows below.
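A minimal, self-contained sketch (same repository ids as earlier in this card; the Chinese prompt is just an illustrative greeting):

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2", load_in_4bit=True
)
model = PeftModel.from_pretrained(
    model, "svjack/Mistral7B_v2_inst_sharegpt_roleplay_chat_lora_small"
).eval()

# "Hello, please introduce yourself." wrapped in the Mistral instruct markers.
prompt = "[INST] 你好,请介绍一下你自己。 [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(generated[0], skip_special_tokens=True))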
Training Details
Training Data
A ShareGPT-format role-play chat corpus, per the model name. [More Information Needed]
Training Procedure
Preprocessing [optional]
[More Information Needed]
Training Hyperparameters
- Training regime: [More Information Needed]
Speeds, Sizes, Times [optional]
[More Information Needed]
Evaluation
Testing Data, Factors & Metrics
Testing Data
[More Information Needed]
Factors
[More Information Needed]
Metrics
[More Information Needed]
Results
[More Information Needed]
Summary
Model Examination [optional]
[More Information Needed]
Environmental Impact
Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).
- Hardware Type: [More Information Needed]
- Hours used: [More Information Needed]
- Cloud Provider: [More Information Needed]
- Compute Region: [More Information Needed]
- Carbon Emitted: [More Information Needed]
Technical Specifications [optional]
Model Architecture and Objective
[More Information Needed]
Compute Infrastructure
[More Information Needed]
Hardware
[More Information Needed]
Software
[More Information Needed]
Citation [optional]
BibTeX:
[More Information Needed]
APA:
[More Information Needed]
Glossary [optional]
[More Information Needed]
More Information [optional]
[More Information Needed]
Model Card Authors [optional]
[More Information Needed]
Model Card Contact
[More Information Needed]
Framework versions
- PEFT 0.11.0