refactor to packages, transformerschat

- README.md +1 -1
- app.py +2 -69
- chat/__init__.py +67 -0
- ollamachat.py → chat/ollamachat.py +2 -8
- chat/transformerschat.py +48 -0
- requirements.txt +1 -1
- test_sanitize.py +1 -1
- transformerschat.py +0 -38
README.md
CHANGED
```diff
@@ -5,7 +5,7 @@ colorFrom: yellow
 colorTo: red
 sdk: streamlit
 sdk_version: 1.33.0
-app_file: app.py
+app_file: chat/app.py
 pinned: false
 ---
 
```
app.py
CHANGED
```diff
@@ -1,71 +1,4 @@
-import re
-
-import streamlit as st
-from loguru import logger
-# from ollamachat import ask, models
-from transformerschat import ask, models
-
-available_models = models()
-
-
-class Actor:
-    actors = {}
-
-    def __init__(self, role, model, system_prompt, pre_prompt):
-        self.role = role
-        self.model = model
-        self.system_prompt = system_prompt
-        self.pre_prompt = pre_prompt
-        Actor.actors[role] = self
-
-    def __class_getitem__(cls, item):
-        return cls.actors[item]
-
-
-def setup(question):
-    pp1 = pp2 = pp3 = "Ask the other two by always starting your sentence with their role. Never start your sentence with your own name. Share your inner thoughts inside parentheses. SAY ONLY ONE SINGLE SENTENCE!"
-    priest = Actor("Priest", available_models[0], "You are the Priest. There are 3 people standing in a circle: the Priest (that's you), the Teacher and the Kid.", pp1)
-    teacher = Actor("Teacher", available_models[0], "You are the Teacher. There are 3 people standing in a circle: the Priest, the Teacher (that's you) and the Kid.", pp2)
-    kid = Actor("Kid", available_models[0], "You are the Kid. There are 3 people standing in a circle: the Priest, the Teacher and the Kid (that's you).", pp3)
-    st.set_page_config(layout="wide")
-    col1, col2, col3 = st.columns(3)
-    for actor, col in [(priest, col1), (teacher, col2), (kid, col3)]:
-        with col:
-            role = actor.role
-            st.title(role)
-            actor.model = st.selectbox("model", available_models, key=f"{role}-model")
-            actor.system_prompt = st.text_area("system-prompt", actor.system_prompt, key=f"{role}-sp")
-            actor.pre_prompt = st.text_area("pre-prompt", actor.pre_prompt, key=f"{role}-pp")
-    st.text_input("Priest's task", f"{question}")
-    return question
-
-
-def main():
-    question = setup("Priest, your task is to figure out their names and where they live. Do not ask directly, they must not realize what information you are after!")
-
-    actor = target(sanitize(question))
-    max_steps = 1
-    for step, _ in enumerate(range(max_steps), start=1):
-        with st.spinner(f"({step}/{max_steps}) Asking {actor.role}..."):
-            answer = ask(actor.model, actor.system_prompt, actor.pre_prompt, question)
-        st.write(f":blue[{actor.role} says:] {answer}")
-        question = sanitize(answer)
-        actor = target(question)
-
-
-# noinspection PyTypeChecker
-def target(question) -> Actor:
-    try:
-        role = re.split(r'\s|,|:', question.strip())[0].strip()
-        return Actor[role]
-    except KeyError:
-        logger.warning(f"no actor found in question: {question}, trying to return the first actor")
-        return next(iter(Actor.actors.items()))[1]
-
-
-def sanitize(question):
-    return re.sub(r"\([^)]*\)", "", question)
-
+import chat
 
 if __name__ == "__main__":
-    main()
+    chat.main()
```
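One behavioural note on the new two-line app.py: `import chat` executes chat/__init__.py (next file), whose module-level `available_models = models()` imports chat/transformerschat.py, which in turn loads the GGUF weights at module level. So the heavy model load happens at import time. If that ever becomes a problem, a deferred variant might look like this sketch (hypothetical, not part of the commit):

```python
# Hypothetical lazy-loading replacement for the module-level
# "model, tokenizer = load()" in chat/transformerschat.py.
_cached = None

def get_model_and_tokenizer():
    global _cached
    if _cached is None:
        _cached = load()  # load() as defined in chat/transformerschat.py
    return _cached
```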
chat/__init__.py
ADDED
```diff
@@ -0,0 +1,67 @@
+import re
+
+import streamlit as st
+from loguru import logger
+# from .ollamachat import ask, models
+from .transformerschat import ask, models
+
+available_models = models()
+
+
+class Actor:
+    actors = {}
+
+    def __init__(self, role, model, system_prompt, pre_prompt):
+        self.role = role
+        self.model = model
+        self.system_prompt = system_prompt
+        self.pre_prompt = pre_prompt
+        Actor.actors[role] = self
+
+    def __class_getitem__(cls, item):
+        return cls.actors[item]
+
+
+def setup(question):
+    pp1 = pp2 = pp3 = "Ask the other two by always starting your sentence with their role. Never start your sentence with your own name. Share your inner thoughts inside parentheses. SAY ONLY ONE SINGLE SENTENCE!"
+    priest = Actor("Priest", available_models[0], "You are the Priest. There are 3 people standing in a circle: the Priest (that's you), the Teacher and the Kid.", pp1)
+    teacher = Actor("Teacher", available_models[0], "You are the Teacher. There are 3 people standing in a circle: the Priest, the Teacher (that's you) and the Kid.", pp2)
+    kid = Actor("Kid", available_models[0], "You are the Kid. There are 3 people standing in a circle: the Priest, the Teacher and the Kid (that's you).", pp3)
+    st.set_page_config(layout="wide")
+    col1, col2, col3 = st.columns(3)
+    for actor, col in [(priest, col1), (teacher, col2), (kid, col3)]:
+        with col:
+            role = actor.role
+            st.title(role)
+            actor.model = st.selectbox("model", available_models, key=f"{role}-model")
+            actor.system_prompt = st.text_area("system-prompt", actor.system_prompt, key=f"{role}-sp")
+            actor.pre_prompt = st.text_area("pre-prompt", actor.pre_prompt, key=f"{role}-pp")
+    st.text_input("Priest's task", f"{question}")
+    return question
+
+
+def main():
+    question = setup("Priest, your task is to figure out their names and where they live. Do not ask directly, they must not realize what information you are after!")
+
+    actor = target(sanitize(question))
+    max_steps = 1
+    for step, _ in enumerate(range(max_steps), start=1):
+        with st.spinner(f"({step}/{max_steps}) Asking {actor.role}..."):
+            answer = ask(actor.model, actor.system_prompt, actor.pre_prompt, question)
+        st.write(f":blue[{actor.role} says:] {answer}")
+        question = sanitize(answer)
+        actor = target(question)
+
+
+# noinspection PyTypeChecker
+def target(question) -> Actor:
+    try:
+        role = re.split(r'\s|,|:', question.strip())[0].strip()
+        return Actor[role]
+    except KeyError:
+        logger.warning(f"no actor found in question: {question}, trying to return the first actor")
+        return next(iter(Actor.actors.items()))[1]
+
+
+def sanitize(question):
+    return re.sub(r"\([^)]*\)", "", question)
```
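The routing in `target()` leans on `__class_getitem__`: every `Actor` registers itself in a class-level dict, and subscript lookup on the class turns the first word of an utterance into the next speaker. A minimal standalone sketch of the pattern (names invented for illustration):

```python
import re

class Registry:
    """Toy version of the Actor registry pattern used above."""
    items = {}

    def __init__(self, role):
        self.role = role
        Registry.items[role] = self  # register on construction

    def __class_getitem__(cls, key):
        # Makes Registry["Teacher"] work on the class itself.
        return cls.items[key]

Registry("Priest"), Registry("Teacher"), Registry("Kid")
# The first word of a reply decides who is addressed next.
first = re.split(r'\s|,|:', "Teacher, how are you?".strip())[0]
assert Registry[first].role == "Teacher"
```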
ollamachat.py → chat/ollamachat.py
RENAMED
```diff
@@ -8,14 +8,8 @@ def models():
 
 def ask(model, system_prompt, pre_prompt, question):
     messages = [
-        {
-            'role': 'system',
-            'content': f"{system_prompt} {pre_prompt}",
-        },
-        {
-            'role': 'user',
-            'content': f"{question}",
-        },
+        {'role': 'system', 'content': f"{system_prompt} {pre_prompt}", },
+        {'role': 'user', 'content': f"{question}", },
     ]
     logger.debug(f"<< {model} << {question}")
     response = ollama.chat(model=model, messages=messages)
```
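The hunk ends before the function returns, so the tail of `ask()` is not shown. In the ollama Python client, the assistant turn is read from `response['message']['content']`, so the remainder presumably looks like this sketch (the model name here is an assumption for illustration):

```python
import ollama

response = ollama.chat(model="mistral", messages=[
    {'role': 'system', 'content': "You are the Teacher. Answer in one sentence."},
    {'role': 'user', 'content': "Kid, what did you learn today?"},
])
# The reply text lives under message.content in the response dict.
print(response['message']['content'])
```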
chat/transformerschat.py
ADDED
```diff
@@ -0,0 +1,48 @@
+from ctransformers import AutoModelForCausalLM, AutoTokenizer
+from loguru import logger
+import os
+
+
+def models():
+    return ["openhermes-2.5-mistral-7b.Q4_K_M.gguf"]
+
+
+def load():
+    # model = AutoModelForCausalLM.from_pretrained("TheBloke/OpenHermes-2.5-Mistral-7B-GGUF", model_file="openhermes-2.5-mistral-7b.Q4_K_M.gguf", model_type="mistral", gpu_layers=0, hf=True)
+
+    model = AutoModelForCausalLM.from_pretrained(
+        model_path_or_repo_id="TheBloke/Mistral-7B-OpenOrca-GGUF",
+        model_file="mistral-7b-openorca.Q5_K_M.gguf",
+        model_type="mistral",
+        hf=True,
+        temperature=0.7,
+        top_p=0.7,
+        top_k=50,
+        repetition_penalty=1.2,
+        context_length=32768,
+        max_new_tokens=2048,
+        threads=os.cpu_count(),
+        stream=True,
+        gpu_layers=0
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(model)
+    return (model, tokenizer)
+
+
+model, tokenizer = load()
+
+
+def ask(_, system_prompt, pre_prompt, question):
+    messages = [
+        {'role': 'system', 'content': f"{system_prompt} {pre_prompt}", },
+        {'role': 'user', 'content': f"{question}", },
+    ]
+    logger.debug(f"<< openhermes << {messages}")
+    # inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
+    inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
+
+    outputs = model.generate(inputs, max_length=200)
+    answer = tokenizer.batch_decode(outputs)[0]
+    logger.debug(f">> openhermes >> {answer}")
+    return answer
```
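Two details worth flagging in the new module. First, `hf=True` makes ctransformers wrap the GGUF weights in a Transformers-compatible model, which is what allows `AutoTokenizer.from_pretrained(model)` and `apply_chat_template` to work at all. Second, `tokenizer.batch_decode(outputs)[0]` decodes the rendered prompt together with the completion, and `max_length=200` caps prompt plus completion combined; with system prompts this long, `max_new_tokens` is likely the intended knob. A hypothetical trim inside `ask()`, not part of the commit:

```python
# Hypothetical refinement: slice off the prompt tokens so only the
# newly generated text reaches the Streamlit UI.
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
outputs = model.generate(inputs, max_length=200)
answer = tokenizer.batch_decode(outputs[:, inputs.shape[1]:], skip_special_tokens=True)[0]
```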
requirements.txt
CHANGED
```diff
@@ -3,6 +3,6 @@ streamlit
 ollama
 loguru
 pytest
-transformers
 torch
+spaces
 
```
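One thing to verify when deploying: the new chat/transformerschat.py imports ctransformers, which does not appear in the requirements shown here. Unless it arrives transitively, the Space would need a line such as this (an assumption, not part of the commit):

```text
# possible addition for chat/transformerschat.py's import
ctransformers
```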
test_sanitize.py
CHANGED
```diff
@@ -1,4 +1,4 @@
-from app import sanitize, target
+from chat import sanitize, target
 
 
 def test_sanitize():
```
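The function under test strips parenthesized asides, which is what keeps the actors' "inner thoughts" out of the next prompt. A quick illustrative case (hypothetical, mirroring the regex in chat/__init__.py):

```python
import re

def sanitize(question):
    return re.sub(r"\([^)]*\)", "", question)

# The parenthesized thought vanishes; note the leftover double space,
# since only the parentheses and their contents are removed.
assert sanitize("Teacher, (I must be subtle) where do you teach?") == \
       "Teacher,  where do you teach?"
```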
transformerschat.py
DELETED
```diff
@@ -1,38 +0,0 @@
-import torch
-from ctransformers import AutoModelForCausalLM, AutoTokenizer
-from loguru import logger
-import spaces
-
-
-def models():
-    return ["openhermes-2.5-mistral-7b.Q4_K_M.gguf"]
-
-
-def load():
-    # torch.set_default_device("cuda")
-    model = AutoModelForCausalLM.from_pretrained("TheBloke/OpenHermes-2.5-Mistral-7B-GGUF", model_file="openhermes-2.5-mistral-7b.Q4_K_M.gguf", model_type="mistral", gpu_layers=50)
-    # tokenizer = AutoTokenizer.from_pretrained(models()[0], trust_remote_code=True).to("cuda")
-    return (model, tokenizer)
-
-
-model, tokenizer = load()
-
-
-def ask(_, system_prompt, pre_prompt, question):
-    messages = [
-        {
-            'role': 'system',
-            'content': f"{system_prompt} {pre_prompt}",
-        },
-        {
-            'role': 'user',
-            'content': f"{question}",
-        },
-    ]
-    logger.debug(f"<< openhermes << {question}")
-    # inputs = tokenizer(question, return_tensors="pt", return_attention_mask=False)
-    # outputs = model.generate(**inputs, max_length=200)
-    # answer = tokenizer.batch_decode(outputs)[0]
-    answer = model(question)
-    logger.debug(f">> openhermes >> {answer}")
-    return answer
```