bldng committed
Commit e6b43a5
1 Parent(s): 88bce96

Upload folder using huggingface_hub

Files changed (3):
  1. chatmodel.py +2 -2
  2. interactive_test.py +8 -10
  3. models.py +5 -2
chatmodel.py CHANGED
@@ -41,9 +41,9 @@ class SwapChatModel(ChatModel):
         super().__init__(model,sysprompt)
         self.conversation=[]
     def __call__(self, msg:str):
+        self.conversation.append(chatmsg(msg,"assistant"))
         if "End of conversation." in [i["content"] for i in self.conversation]:
             return
-        self.conversation.append(chatmsg(msg,"assistant"))
         prompt="".join([
             self.model.start(),
             self.model.conv([chatmsg(self.sysprompt,"system")]),
@@ -80,7 +80,7 @@ class InquiryChatModel(SwapChatModel):
         ret=self.model(prompt, stop=[".","\n \n","?\n",".\n","tile|>","\n"],max_tokens=10)
         print("system prompt:",ret["choices"][0]["text"])
         if "true" in ret["choices"][0]["text"].lower():
-            self.conversation.append(chatmsg(msg,"user"))
+            self.conversation.append(chatmsg(msg,"assistant"))
             self.conversation.append(chatmsg("End of conversation.","user"))
     def __call__(self, msg:str):
         self.inquire(msg)
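In chatmodel.py the commit does two related things: `SwapChatModel.__call__` now appends the incoming message *before* the end-of-conversation check, so the closing message is still recorded, and `InquiryChatModel` stores that message with role `"assistant"` rather than `"user"`. Both fit the role-swapped design in which the local model plays the user. A minimal sketch of the resulting control flow, assuming `chatmsg` is a small helper that builds an OpenAI-style message dict (its definition lies outside this diff):

```python
from typing import Dict, List

def chatmsg(content: str, role: str) -> Dict[str, str]:
    # Assumed helper (defined elsewhere in the repo): OpenAI-style message dict.
    return {"role": role, "content": content}

class SwapChatSketch:
    """Illustrative stand-in for SwapChatModel, showing only the reordered logic."""

    def __init__(self) -> None:
        self.conversation: List[Dict[str, str]] = []

    def __call__(self, msg: str) -> None:
        # After this commit: record the incoming message first, tagged as an
        # "assistant" turn (the local model plays the user in this setup)...
        self.conversation.append(chatmsg(msg, "assistant"))
        # ...then bail out if the conversation has already been closed.
        if "End of conversation." in [m["content"] for m in self.conversation]:
            return
        # Prompt assembly and the model call are unchanged by the commit.
```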
interactive_test.py CHANGED
@@ -7,20 +7,18 @@ from models import Phi35,models
 
 sysprompt=r"""
 {{! This comment will not show up in the output}}
-The User will make an inquiry to the assistant.
-Fullfill the users inquiry.
+You are a user of an artificial assistant. You ask the artificial assistant one inquiry.
+
+Rules:
 {{#if (eq model "SwapChat")}}
-The User will write a message with his closing thoughts and the keyword "<|endtile|>" if his inquiry is fulfilled.
+- The User will write a message with his closing thoughts and the keyword "<|endtile|>" if his inquiry is fulfilled.
 {{/if}}
-The User will never have more than one inquiry in one conversation.
-The User will never complete his own inquiry.
-The User will never be a assistant.
-The User keep his message short in one sentence.
 {{#if (eq model "SwapChat")}}
-All conversations will end with "<|endtile|>".
+- All conversations will end with "<|endtile|>".
 {{/if}}
-After each User message is one assistant response.
-There can never be more than one assistant response in succession.
+- The User will never have more than one inquiry in one conversation.
+- The User will never complete his inquiry.
+
 {{#if (eq model "SwapChat")}}
 Example:
 User: What is the capital?
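The rewritten system prompt is a Handlebars-style template: `{{! … }}` is a comment, and `{{#if (eq model "SwapChat")}} … {{/if}}` keeps a block only when the SwapChat variant is selected. The renderer the project uses is not shown in this commit; purely as an illustration, a hypothetical `render_sysprompt` helper that handles exactly these two constructs could look like this:

```python
import re

def render_sysprompt(template: str, model: str) -> str:
    # Illustrative stand-in only; the real project presumably uses a proper
    # Handlebars engine. Handles exactly the two constructs in the template
    # above: {{! comments }} and {{#if (eq model "NAME")}} ... {{/if}} blocks.
    out = re.sub(r"\{\{!.*?\}\}", "", template)

    def keep_or_drop(m: re.Match) -> str:
        # Keep the block body when the name matches, otherwise drop it.
        return m.group(2) if m.group(1) == model else ""

    out = re.sub(
        r'\{\{#if \(eq model "([^"]+)"\)\}\}(.*?)\{\{/if\}\}',
        keep_or_drop, out, flags=re.S,
    )
    return out.strip()
```

With this stand-in, `render_sysprompt(sysprompt, "SwapChat")` keeps the `"<|endtile|>"` rules, while any other model name drops those blocks.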
models.py CHANGED
@@ -1,7 +1,7 @@
 from typing import Dict, List
 
 from llama_cpp import Llama
-llama_args={"n_gpu_layers":100,"main_gpu":0,"verbose":True}
+llama_args={"n_gpu_layers":100,"main_gpu":0,"verbose":False}
 
 class Model:
     def __init__(self):
@@ -23,10 +23,10 @@ class Phi35RPMax(Model):
             repo_id="ArliAI/Phi-3.5-mini-3.8B-ArliAI-RPMax-v1.1-GGUF",
             filename="ArliAI-RPMax-3.8B-v1.1-fp16.gguf",
             **llama_args,
-
         )
 
     def __call__(self, msg:str, stop:List[str], max_tokens:int):
+        print("Autocomplete: ",msg)
         ret=self.llm(msg, stop=stop, max_tokens=max_tokens)
         return ret
 
@@ -45,6 +45,7 @@ class Phi35(Model):
             **llama_args,
         )
     def __call__(self, msg:str, stop:List[str], max_tokens:int):
+        print("Autocomplete: ",msg)
         return self.llm(msg, stop=stop, max_tokens=max_tokens)
 
     def conv(self,msgs:List[Dict[str, str]]):
@@ -90,6 +91,7 @@ class Llama31uncensored(Model):
             **llama_args,
         )
     def __call__(self, msg:str, stop:List[str], max_tokens:int):
+        print("Autocomplete: ",msg)
         return self.llm(msg, stop=stop, max_tokens=max_tokens)
 
     def start(self):
@@ -111,6 +113,7 @@ class Llama31(Model):
             **llama_args,
        )
     def __call__(self, msg:str, stop:List[str], max_tokens:int):
+        print("Autocomplete: ",msg)
         return self.llm(msg, stop=stop, max_tokens=max_tokens)
 
     def conv(self,msgs:List[Dict[str, str]]):
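Two things change in models.py: the shared `llama_args` dict now sets `"verbose":False`, which quiets llama.cpp's load-time logging, and the same `print("Autocomplete: ",msg)` debug line is added to all four `__call__` methods. Since that line is now duplicated in every subclass, one possible follow-up refactor (a sketch under that assumption, not the committed code) would hoist it into the shared `Model` base class behind a hypothetical `_complete` hook:

```python
from typing import List

class Model:
    # Sketch only: the commit adds the same debug print to four subclasses;
    # a shared __call__ with a per-model _complete() hook would centralize it.
    def __call__(self, msg: str, stop: List[str], max_tokens: int):
        print("Autocomplete: ", msg)  # the debug line added by this commit
        return self._complete(msg, stop=stop, max_tokens=max_tokens)

    def _complete(self, msg: str, stop: List[str], max_tokens: int):
        # Hypothetical hook; each subclass would wrap its self.llm(...) call here.
        raise NotImplementedError
```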