Update app.py
app.py CHANGED
@@ -1,108 +1,98 @@
  import os
- import csv
  import streamlit as st
  from langchain_community.vectorstores import FAISS
  from langchain_community.embeddings import HuggingFaceEmbeddings
  from langchain_huggingface import HuggingFaceEndpoint
  from langchain.prompts import PromptTemplate
- from langchain.chains import LLMChain
  from huggingface_hub import login

- #
  login(token=st.secrets["HF_TOKEN"])

- # Load FAISS index
-
-
-
-
-     allow_dangerous_deserialization=True
- )
-
- # Use session state for retriever
- retriever = st.session_state.db.as_retriever(
-     search_type="mmr",
-     search_kwargs={'k': 1}
- )

- #
  prompt_template = """
  ### [INST]
- Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided without using prior knowledge. You answer in FRENCH
- Analyse carefully the context and provide a direct answer based on the context. If the user said Bonjour or Hello your only answer will be Hi! comment puis-je vous aider?
- Answer in

  {context}
  Vous devez répondre aux questions en français.
  ### QUESTION:
  {question}
  [/INST]
- Answer in
  Vous devez répondre aux questions en français.
  """

  repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

-
-
-
-     repo_id=repo_id,
-     max_length=2048,
-     temperature=0.05,
-     huggingfacehub_api_token=st.secrets["HF_TOKEN"]
- )

- # Create prompt
  prompt = PromptTemplate(
      input_variables=["question"],
      template=prompt_template,
  )
- llm_chain = LLMChain(llm=st.session_state.mistral_llm, prompt=prompt)

- # Create
  qa = RetrievalQA.from_chain_type(
-     llm=
      chain_type="stuff",
      retriever=retriever,
      chain_type_kwargs={"prompt": prompt},
  )

-
- import streamlit as st
- from datetime import datetime
- import os
-
- # PostgreSQL connection setup using secrets from Hugging Face Spaces
- def create_connection():
-     conn = psycopg2.connect(
-         host=os.getenv("DB_HOST"),
-         database=os.getenv("DB_NAME"),
-         user=os.getenv("DB_USER"),
-         password=os.getenv("DB_PASSWORD"),
-         port=os.getenv("DB_PORT")
-     )
-     return conn
-
- def create_table(conn):
-     with conn.cursor() as cur:
-         cur.execute('''
-             CREATE TABLE IF NOT EXISTS feedback (
-                 id SERIAL PRIMARY KEY,
-                 timestamp TIMESTAMP NOT NULL,
-                 rating INTEGER NOT NULL,
-                 comment TEXT NOT NULL
-             );
-         ''')
-         conn.commit()
-
- # Streamlit interface with improved aesthetics
  st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")

- #
  def chatbot_response(user_input):
      response = qa.run(user_input)
      return response

  # Create columns for logos
  col1, col2, col3 = st.columns([2, 3, 2])

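Note on the removed code above: it calls psycopg2.connect and RetrievalQA.from_chain_type, but the visible import block never imports psycopg2 or RetrievalQA (streamlit, datetime, and os are instead re-imported mid-file). A minimal sketch of the imports that code path appears to rely on, assuming the psycopg2-binary package was listed in requirements.txt:

# Imports the removed code path appears to need but does not show in this diff
# (psycopg2 assumes psycopg2-binary in requirements.txt; RetrievalQA is only imported in the new version)
import psycopg2
from langchain.chains import RetrievalQA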
@@ -112,40 +102,13 @@ with col1:
  with col3:
      st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)

- #
  st.markdown("""
  <style>
- .centered-text {
-
- }
- .centered-orange-text {
-     text-align: center;
-     color: darkorange;
- }
- .star-rating {
-     display: flex;
-     flex-direction: row-reverse;
-     justify-content: center;
-     cursor: pointer;
- }
- .star-rating input[type="radio"] {
-     display: none;
- }
- .star-rating label {
-     font-size: 2em;
-     color: #ddd;
-     padding: 0 5px;
-     transition: color 0.3s;
- }
- .star-rating input[type="radio"]:checked ~ label {
-     color: gold;
- }
- .star-rating input[type="radio"]:hover ~ label {
-     color: gold;
- }
  </style>
-
-
  st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
  st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)

@@ -153,54 +116,32 @@ st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Mé
  user_input = st.text_input("You:", "")
  submit_button = st.button("Ask 📨")

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-     <input type="radio" id="1-star" name="rating" value="1"><label for="1-star">★</label>
- </div>
- """
- st.markdown(rating_html, unsafe_allow_html=True)
-
- # Get the selected rating via JavaScript
- rating = st.text_input("Selected Rating:", value="3", key="rating_input", label_visibility="hidden")
-
- comment = st.text_area("Your Comment:")
-
- # Submit feedback
- feedback_button = st.button("Submit Feedback")
-
- if feedback_button:
-     if comment.strip() == "":
-         st.warning("⚠ Please provide a comment.")
-     else:
-         st.success("Thank you for your feedback!")
-
-         # Store feedback in PostgreSQL
-         conn = create_connection()
-         with conn.cursor() as cur:
-             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-             cur.execute('INSERT INTO feedback (timestamp, rating, comment) VALUES (%s, %s, %s)',
-                         (timestamp, int(rating), comment))
-             conn.commit()
-         conn.close()
-
- else:
-     st.warning("⚠ Please enter a message.")

  # Motivational quote at the bottom
  st.markdown("---")
- st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")

  import os
  import streamlit as st
+ import pandas as pd
  from langchain_community.vectorstores import FAISS
  from langchain_community.embeddings import HuggingFaceEmbeddings
  from langchain_huggingface import HuggingFaceEndpoint
  from langchain.prompts import PromptTemplate
+ from langchain.chains import LLMChain
  from huggingface_hub import login
+ from langchain_community.document_loaders import TextLoader
+ from langchain_text_splitters import CharacterTextSplitter
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.chains import RetrievalQA

+ # Authenticate with Hugging Face
  login(token=st.secrets["HF_TOKEN"])

+ # Load FAISS index
+ db = FAISS.load_local("faiss_index", HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'), allow_dangerous_deserialization=True)
+
+ # Set up retriever
+ retriever = db.as_retriever(search_type="mmr", search_kwargs={'k': 1})

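The new version imports TextLoader, PyPDFLoader, and CharacterTextSplitter but never uses them; app.py only loads a prebuilt "faiss_index" directory. For reference, a minimal sketch of how such an index could have been built offline with those same components, assuming a hypothetical source file docs/methodologie.pdf and the same MiniLM embedding model (this build step is not part of the committed code):

# build_index.py - hypothetical offline indexing script (not in this commit)
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Load and split the source document (the path is an assumption)
docs = PyPDFLoader("docs/methodologie.pdf").load()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)

# Embed with the same model app.py loads, then persist the index directory
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2')
FAISS.from_documents(chunks, embeddings).save_local("faiss_index")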
+ # Prompt template for the LLM
  prompt_template = """
  ### [INST]
+ Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided without using prior knowledge. You answer in FRENCH.
+ Analyse carefully the context and provide a direct answer based on the context. If the user said Bonjour or Hello, your only answer will be Hi! comment puis-je vous aider?
+ Answer in French only.

  {context}
  Vous devez répondre aux questions en français.
+
  ### QUESTION:
  {question}
  [/INST]
+ Answer in French only.
  Vous devez répondre aux questions en français.
  """

+ # Set up the LLM from Hugging Face
  repo_id = "mistralai/Mistral-7B-Instruct-v0.3"

+ mistral_llm = HuggingFaceEndpoint(
+     repo_id=repo_id, max_length=2048, temperature=0.05, huggingfacehub_api_token=st.secrets["HF_TOKEN"]
+ )

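Depending on the pinned langchain_huggingface version, HuggingFaceEndpoint may not accept max_length as a first-class argument (recent releases expose max_new_tokens for generation length and may warn about, or pass through, unknown keyword arguments). A sketch of the same endpoint under that assumption; verify against the version actually installed in the Space:

# Sketch: endpoint configured with max_new_tokens instead of max_length
# (assumes a recent langchain_huggingface release)
mistral_llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=2048,
    temperature=0.05,
    huggingfacehub_api_token=st.secrets["HF_TOKEN"],
)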
+ # Create prompt from prompt template
  prompt = PromptTemplate(
      input_variables=["question"],
      template=prompt_template,
  )

+ # Create LLM chain
+ llm_chain = LLMChain(llm=mistral_llm, prompt=prompt)
+
+ # Set up RetrievalQA chain
+ retriever.search_kwargs = {'k': 1}
  qa = RetrievalQA.from_chain_type(
+     llm=mistral_llm,
      chain_type="stuff",
      retriever=retriever,
      chain_type_kwargs={"prompt": prompt},
  )

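Two details worth noting in the added block: llm_chain is never used later in the script, and retriever.search_kwargs = {'k': 1} repeats what as_retriever already set; only the RetrievalQA chain drives the app. The prompt also declares input_variables=["question"] although the template uses {context} as well, which the "stuff" chain fills with the retrieved passages. A sketch of the equivalent setup without the redundant pieces, reusing the objects defined above:

# Sketch: equivalent setup without the unused LLMChain or the duplicate search_kwargs line
prompt = PromptTemplate(
    input_variables=["context", "question"],  # declare both variables the template uses
    template=prompt_template,
)
qa = RetrievalQA.from_chain_type(
    llm=mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)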
+ # Streamlit interface setup
  st.set_page_config(page_title="Alter-IA Chat", page_icon="🤖")

+ # Function to handle user input and display chatbot response
  def chatbot_response(user_input):
      response = qa.run(user_input)
      return response

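In LangChain 0.1 and later, Chain.run is deprecated in favour of invoke; if the Space pins a newer LangChain, the handler would likely need the dict-based call instead. A sketch, assuming RetrievalQA's default "query" input key and "result" output key:

# Sketch: invoke-based variant of chatbot_response for newer LangChain releases
def chatbot_response(user_input):
    result = qa.invoke({"query": user_input})
    return result["result"]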
+ # Function to save user feedback
+ def save_feedback(user_input, bot_response, rating, comment):
+     feedback = {
+         "user_input": user_input,
+         "bot_response": bot_response,
+         "rating": rating,
+         "comment": comment
+     }
+
+     # Check if the feedback file exists
+     feedback_file = "feedback.csv"
+     if os.path.exists(feedback_file):
+         # Load existing feedback and append new feedback
+         feedback_df = pd.read_csv(feedback_file)
+         feedback_df = feedback_df.append(feedback, ignore_index=True)
+     else:
+         # Create a new dataframe for the feedback
+         feedback_df = pd.DataFrame([feedback])
+
+     # Save feedback to CSV
+     feedback_df.to_csv(feedback_file, index=False)
+
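DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0, so the branch that appends to an existing feedback.csv will raise AttributeError on a current pandas install. A sketch of the same logic with pd.concat, using the same file name and columns as above:

# Sketch: pandas 2.x-compatible append using pd.concat
feedback_file = "feedback.csv"
new_row = pd.DataFrame([feedback])
if os.path.exists(feedback_file):
    feedback_df = pd.concat([pd.read_csv(feedback_file), new_row], ignore_index=True)
else:
    feedback_df = new_row
feedback_df.to_csv(feedback_file, index=False)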
  # Create columns for logos
  col1, col2, col3 = st.columns([2, 3, 2])

  with col3:
      st.image("Altereo logo 2023 original - eau et territoires durables.png", width=150, use_column_width=True)

+ # Adding centered header and subtitle
  st.markdown("""
  <style>
+     .centered-text { text-align: center; }
+     .centered-orange-text { text-align: center; color: darkorange; }
  </style>
+ """, unsafe_allow_html=True)
  st.markdown('<h3 class="centered-text">🤖 AlteriaChat 🤖 </h3>', unsafe_allow_html=True)
  st.markdown('<p class="centered-orange-text">"Votre Réponse à Chaque Défi Méthodologique "</p>', unsafe_allow_html=True)

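On newer Streamlit releases use_column_width is deprecated, and when set it overrides the explicit width=150. If the Space tracks current Streamlit (an assumption about the pinned version), the logo call would likely move to use_container_width:

# Sketch: logo rendering without the deprecated use_column_width flag
with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", use_container_width=True)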
  user_input = st.text_input("You:", "")
  submit_button = st.button("Ask 📨")

+ # Handle user input and display response
+ if submit_button and user_input.strip():
+     bot_response = chatbot_response(user_input)
+     st.markdown("### Bot:")
+     st.text_area("", value=bot_response, height=300)
+
+     # Star rating system
+     st.markdown("### How would you rate the response?")
+     rating = st.slider("Rate from 1 star to 5 stars", min_value=1, max_value=5, value=3)
+
+     # Comment section
+     comment = st.text_area("Any comments or suggestions for improvement?", "")
+
+     # Save feedback when the user submits a rating and comment
+     if st.button("Submit Feedback"):
+         save_feedback(user_input, bot_response, rating, comment)
+         st.success("Thank you for your feedback!")

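Nesting "Submit Feedback" inside the if submit_button: block is fragile in Streamlit's execution model: clicking the second button reruns the script, the "Ask" button reads as False on that rerun, so the whole block is skipped and save_feedback may never execute. A sketch of one common workaround, keeping the response in st.session_state so the feedback widgets survive the rerun (the key name last_response is illustrative, not from the commit):

# Sketch: persist the bot response across reruns so feedback can actually be submitted
if submit_button and user_input.strip():
    st.session_state["last_response"] = chatbot_response(user_input)

if "last_response" in st.session_state:
    st.text_area("Bot:", value=st.session_state["last_response"], height=300)
    rating = st.slider("Rate from 1 star to 5 stars", 1, 5, 3)
    comment = st.text_area("Any comments or suggestions for improvement?", "")
    if st.button("Submit Feedback"):
        save_feedback(user_input, st.session_state["last_response"], rating, comment)
        st.success("Thank you for your feedback!")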
  # Motivational quote at the bottom
  st.markdown("---")
+ st.markdown("La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.")
+
+ # Section for the developer to review feedback
+ if st.checkbox("Show Feedback (Developer Only)"):
+     if os.path.exists("feedback.csv"):
+         feedback_df = pd.read_csv("feedback.csv")
+         st.dataframe(feedback_df)
+     else:
+         st.warning("No feedback available yet.")
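One caveat about feedback.csv on Hugging Face Spaces: the container filesystem is ephemeral, so the file disappears whenever the Space restarts or rebuilds. If feedback needs to outlive restarts, one option is to push the CSV to a dataset repo with huggingface_hub after each save; a sketch, where the dataset name your-org/alteria-feedback is a placeholder and not part of this commit:

# Sketch: persist feedback.csv to a Hugging Face dataset repo (placeholder repo_id)
from huggingface_hub import HfApi

api = HfApi(token=st.secrets["HF_TOKEN"])
api.upload_file(
    path_or_fileobj="feedback.csv",
    path_in_repo="feedback.csv",
    repo_id="your-org/alteria-feedback",
    repo_type="dataset",
)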