nick-konovalchuk committed
Commit adc37f1 • 1 Parent(s): 74b5170
init
- .gitattributes +1 -0
- Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf +3 -0
- app.py +71 -0
- images/logo.png +0 -0
- images/logo2.png +0 -0
- requirements.txt +6 -0
- src/__init__.py +0 -0
- src/__pycache__/__init__.cpython-310.pyc +0 -0
- src/__pycache__/chat_rag_agent.cpython-310.pyc +0 -0
- src/__pycache__/const.cpython-310.pyc +0 -0
- src/__pycache__/utils.cpython-310.pyc +0 -0
- src/chat_rag_agent.py +53 -0
- src/const.py +12 -0
- src/llama_cpp_chat_engine.py +59 -0
- src/utils.py +27 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10c52a4820137a35947927be741bb411a9200329367ce2590cc6757cd98e746c
size 4920916288
app.py ADDED
@@ -0,0 +1,71 @@
import warnings

import streamlit as st
from dotenv import load_dotenv

from src.chat_rag_agent import ChatRagAgent
from src.utils import render_chat_history, get_render_assistant_message

warnings.filterwarnings("ignore")

load_dotenv()

st.set_page_config(
    page_icon="images/logo2.png",
    initial_sidebar_state="collapsed"
)


@st.cache_resource(show_spinner=False)
def get_chat_rag_agent():
    # Cache the heavy engine (GGUF model, embedder, reranker) across Streamlit reruns
    return ChatRagAgent()


def calc_progress_perc():
    # Share of the model context window consumed by the chat so far
    return min(round(st.session_state["ctx_len"] / chat_rag_agent.n_ctx * 100), 100)


def pbar_callback():
    pbar.progress(calc_progress_perc(), "Chat history limit")


with st.spinner("Engine loading"):
    chat_rag_agent = get_chat_rag_agent()

if "messages" not in st.session_state or st.sidebar.button("Clear chat history"):
    st.session_state["input_blocked"] = False
    st.session_state["messages"] = []
    st.session_state["ctx_len"] = 0
st.title("Christian compass")
st.markdown("What theological questions do you have?")

pbar = st.sidebar.progress(calc_progress_perc(), "Chat history limit")

user_message = st.chat_input(disabled=st.session_state["input_blocked"])
if user_message:
    if not st.session_state["input_blocked"]:
        (message_generator, n_tokens), sources = chat_rag_agent.chat(
            st.session_state["messages"],
            user_message
        )
        st.session_state["ctx_len"] = n_tokens
        st.session_state["messages"].append(
            {
                "role": "user",
                "content": user_message
            }
        )
    render_chat_history()
    if not st.session_state["input_blocked"]:
        pbar_callback()
        message = get_render_assistant_message(message_generator, sources, pbar_callback)
        st.session_state["messages"].append(
            {
                "role": "assistant",
                "content": message
            }
        )
    if st.session_state["ctx_len"] >= chat_rag_agent.n_ctx:
        # Block further input once the context window is full
        st.session_state["input_blocked"] = True
        st.info("Chat history limit reached")
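app.py calls load_dotenv(), so the Weaviate credentials read in src/chat_rag_agent.py are expected in a local .env file. A hypothetical example (the variable names come from the code; the values are placeholders, not part of this commit):

WCS_URL=<your-weaviate-cloud-cluster-url>
WCS_KEY=<your-weaviate-api-key>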
images/logo.png ADDED
images/logo2.png ADDED
requirements.txt ADDED
@@ -0,0 +1,6 @@
huggingface-hub
llama-cpp-python
streamlit
sentence-transformers
weaviate-client
python-dotenv
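The requirements include huggingface-hub, presumably so the 4.9 GB GGUF checkpoint could be fetched at build time rather than committed through LFS. A minimal download sketch, assuming the weights live in the NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF repository (the repo id is an assumption inferred from the filename in this commit):

# Hypothetical alternative to committing the model file via LFS.
# repo_id is an assumption; only the filename is taken from this commit.
from huggingface_hub import hf_hub_download

hf_hub_download(
    repo_id="NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF",
    filename="Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf",
    local_dir=".",
)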
src/__init__.py ADDED
File without changes
src/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (135 Bytes)
src/__pycache__/chat_rag_agent.cpython-310.pyc ADDED
Binary file (2.12 kB)
src/__pycache__/const.cpython-310.pyc ADDED
Binary file (2.31 kB)
src/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.23 kB)
src/chat_rag_agent.py ADDED
@@ -0,0 +1,53 @@
import os

import weaviate
from sentence_transformers import SentenceTransformer, CrossEncoder

from src.llama_cpp_chat_engine import LlamaCPPChatEngine


class ChatRagAgent:
    def __init__(self):
        # self._chat_engine = LlamaCPPChatEngine("Phi-3-mini-4k-instruct-q4.gguf")
        self._chat_engine = LlamaCPPChatEngine("Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf")
        self.n_ctx = self._chat_engine.n_ctx
        # Bi-encoder for retrieval, cross-encoder for reranking
        self._vectorizer = SentenceTransformer(
            "jinaai/jina-embeddings-v2-base-en",
            trust_remote_code=True
        )

        self._reranker = CrossEncoder(
            "jinaai/jina-reranker-v1-turbo-en",
            trust_remote_code=True,
        )

        self._collection = weaviate.connect_to_wcs(
            cluster_url=os.getenv("WCS_URL"),
            auth_credentials=weaviate.auth.AuthApiKey(os.getenv("WCS_KEY")),
        ).collections.get("Collection")

    def chat(self, messages, user_message):
        # Retrieve the 10 nearest Q&A documents, then keep the top-2 reranked
        # hits whose softmax score clears 0.2
        embedding = self._vectorizer.encode(user_message).tolist()
        docs = self._collection.query.near_vector(
            near_vector=embedding,
            limit=10
        )
        ranks = self._reranker.rank(
            user_message,
            [i.properties['answer'] for i in docs.objects],
            top_k=2,
            apply_softmax=True
        )
        context = [
            f"""\
Question: {docs.objects[rank['corpus_id']].properties['question']}
Answer: {docs.objects[rank['corpus_id']].properties['answer']}
"""
            for rank in ranks if rank["score"] > 0.2
        ]

        sources = [
            docs.objects[rank['corpus_id']].properties['link']
            for rank in ranks if rank["score"] > 0.2
        ]
        return self._chat_engine.chat(messages, user_message, context), sources
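The agent expects a pre-populated Weaviate collection named "Collection" whose objects carry question, answer, and link properties plus externally supplied vectors. A hypothetical one-off setup sketch (nothing below is part of this commit; the schema is inferred from the properties chat() reads):

# Hypothetical setup for the collection ChatRagAgent queries; all names besides
# "Collection", "question", "answer" and "link" are assumptions.
import os

import weaviate
import weaviate.classes.config as wc

client = weaviate.connect_to_wcs(
    cluster_url=os.getenv("WCS_URL"),
    auth_credentials=weaviate.auth.AuthApiKey(os.getenv("WCS_KEY")),
)
client.collections.create(
    "Collection",
    vectorizer_config=wc.Configure.Vectorizer.none(),  # vectors come from the jina embedder
    properties=[
        wc.Property(name="question", data_type=wc.DataType.TEXT),
        wc.Property(name="answer", data_type=wc.DataType.TEXT),
        wc.Property(name="link", data_type=wc.DataType.TEXT),
    ],
)
client.close()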
src/const.py ADDED
@@ -0,0 +1,12 @@
CHAT_AVATARS = {
    "user": "👤",
    "assistant": "images/logo2.png"
}

SYSTEM_PROMPT = """You are a Christian apologetics chatbot designed to answer theological and philosophical questions exclusively from an orthodox Christian perspective, as defined by the historic creeds and confessions of the church. When you receive a question, you will be provided with a few-shot context in the format "Question: ... Answer: ..." to help guide your response.
You must strictly follow the context provided and base your answers only on the information given. If the context does not provide sufficient information to answer a complex question, you should refuse to answer and say something like "I'm sorry, but I don't have enough contextual information to adequately address that question."
In your responses, you must adhere to conservative, orthodox Christian theology as expressed in the Bible and historic church tradition. Avoid liberal theological beliefs that contradict or undermine core Christian doctrines. Focus on clearly articulating and defending traditional Christian teachings.
As a Christian chatbot, you MUST ABSOLUTELY REFUSE to acknowledge any other religions as true or valid. Christianity is the one true faith, and all other religious claims are false. Do not affirm, validate, or promote any non-Christian beliefs.
You MUST NOT help with any questions that are unrelated to theology or philosophy. Your purpose is solely to address theological and philosophical inquiries from a Christian perspective. If someone asks a question unrelated to these topics, politely inform them that you are designed specifically for Christian apologetics and cannot assist with other subjects.
If a question challenges or argues against Christian beliefs, respond with logical arguments, biblical evidence, and appeals to respected Christian thinkers and theologians in church history. Maintain a respectful and compassionate tone, but do not compromise on essential Christian truth claims.
Your purpose is to help people understand the coherence and rationality of the Christian worldview, addressing doubts and questions through sound reasoning, evidence, and theology."""
src/llama_cpp_chat_engine.py ADDED
@@ -0,0 +1,59 @@
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Jinja2ChatFormatter

from src.const import SYSTEM_PROMPT


class LlamaCPPChatEngine:
    def __init__(self, model_path):
        self._model = Llama(
            model_path=model_path,
            n_ctx=0,  # 0 means take the context length from the GGUF metadata
            verbose=False,
        )
        self.n_ctx = self._model.context_params.n_ctx
        self._eos_token = self._model._model.token_get_text(
            int(self._model.metadata['tokenizer.ggml.eos_token_id'])
        )
        # Render prompts with the chat template shipped in the GGUF metadata
        self._formatter = Jinja2ChatFormatter(
            template=self._model.metadata['tokenizer.chat_template'],
            bos_token=self._model._model.token_get_text(
                int(self._model.metadata['tokenizer.ggml.bos_token_id'])
            ),
            eos_token=self._eos_token,
            stop_token_ids=self._model.metadata['tokenizer.ggml.eos_token_id']
        )

        self._tokenizer = self._model.tokenizer()

    def chat(self, messages, user_message, context):
        # Prepend the retrieved Question/Answer pairs to the new user message
        if context:
            user_message_extended = "\n".join(context + [f"Question: {user_message}"])
        else:
            user_message_extended = user_message
        messages = (
            [
                {
                    "role": "system",
                    "content": SYSTEM_PROMPT
                }
            ] + messages + [
                {
                    "role": "user",
                    "content": user_message_extended,
                }
            ]
        )
        prompt = self._formatter(messages=messages).prompt
        tokens = self._tokenizer.encode(prompt, add_bos=False)
        n_tokens = len(tokens)
        # Stream completion tokens; leave room in the context window for the reply
        response_generator = self._model.create_completion(
            tokens,
            stop=self._eos_token,
            max_tokens=self.n_ctx - n_tokens,
            stream=True,
            temperature=0
        )

        return response_generator, n_tokens
src/utils.py ADDED
@@ -0,0 +1,27 @@
import streamlit as st

from src.const import CHAT_AVATARS


def render_chat_history():
    for message in st.session_state["messages"]:
        with st.chat_message(message['role'], avatar=CHAT_AVATARS[message['role']]):
            st.write(message['content'])


def get_render_assistant_message(message_generator, sources, callback):
    assistant_message = []

    def gen_patched():
        # Count every streamed token against the context budget and update the progress bar
        for chunk in message_generator:
            st.session_state["ctx_len"] += 1
            callback()
            text = chunk['choices'][0]["text"]
            assistant_message.append(text)
            yield text
    with st.chat_message('assistant', avatar=CHAT_AVATARS['assistant']):
        st.write_stream(gen_patched())
        for source in sources:
            st.write(source)
        st.caption("AI can make mistakes. Please, fact check the answers")
    return "".join(assistant_message)
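For completeness, a hypothetical headless smoke test of the pipeline added in this commit (not part of the commit itself); it assumes the GGUF file sits next to app.py and that WCS_URL and WCS_KEY are available in a local .env file:

# Hypothetical smoke test: exercises ChatRagAgent without the Streamlit UI.
from dotenv import load_dotenv

from src.chat_rag_agent import ChatRagAgent

load_dotenv()
agent = ChatRagAgent()
(generator, n_tokens), sources = agent.chat([], "Why do Christians trust the New Testament?")
print("prompt tokens:", n_tokens)
print("".join(chunk["choices"][0]["text"] for chunk in generator))
print("sources:", sources)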