Pecximenes committed on
Commit
7288353
1 Parent(s): 853a071

Adding v2 of rag-agent

Browse files
Dockerfile CHANGED
@@ -11,4 +11,4 @@ RUN pip install --no-cache-dir -r requirements.txt
11
 
12
  EXPOSE 7860
13
 
14
- CMD ["streamlit", "run", "interface/chatbot.py", "--server.port=7860"]
 
11
 
12
  EXPOSE 7860
13
 
14
+ CMD ["streamlit", "run", "interface/app.py", "--server.port=7860"]
agente/interface/app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import sys

import streamlit as st

# Make the package root importable so `interface.*` resolves when this file
# is executed directly by Streamlit.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from interface.chatbot import Chatbot  # noqa: E402


def main():
    """Render the sidebar and chat widgets, then flush the chat history.

    A sidebar topic click is surfaced by `Chatbot` through `app.response`;
    when present it is treated exactly like a typed user message.
    """
    app = Chatbot()

    with st.sidebar:
        app.create_sidebar()

    app.mount_chatbot()

    if app.response:
        app.add_to_history(app.response, role="user")
        app.send_message_for_ai(app.response)

        app.response = ""  # clear the pending topic once it has been handled

    # Replay the whole conversation stored in the session.
    for entry in st.session_state.chat_history:
        st.chat_message(entry["role"]).write(entry["content"])


if __name__ == "__main__":
    main()
agente/interface/chatbot.py CHANGED
@@ -16,7 +16,6 @@ st.set_page_config(page_title="Carlos AI Agent")
16
 
17
  load_dotenv()
18
 
19
-
20
  @st.cache_resource
21
  def connect_to_services():
22
  oa_client = OpenAI(
@@ -32,119 +31,122 @@ def connect_to_services():
32
 
33
  return oa_client, qdrant_client, edgedb_client
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
- def send_message_for_ai(prompt, oa_client, qdrant_client, edgedb_client):
37
- embedding = oa_client.embeddings.create(
38
- input=[prompt],
39
- model=os.environ.get("OPENAI_MODEL_EMBEDDING")
40
- ).data[0].embedding
41
-
42
- child_texts = qdrant_client.search(
43
- collection_name=os.environ.get("COLLECTION_NAME"),
44
- query_vector=embedding,
45
- limit=3
46
- )
47
 
48
- contexts = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- for child_text in child_texts:
51
- parent_text = edgedb_client.query('''
52
- SELECT Pattern {
53
- content,
54
- url,
55
- parent_id
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  }
57
- FILTER .id = <uuid>$parent_id
58
- ''', parent_id=child_text.payload["parent_id"])[0]
59
- context = {
60
- "content": parent_text.content,
61
- "url": parent_text.url,
62
- "parent_id": parent_text.parent_id
63
- }
64
- contexts.append(context)
65
-
66
- # system_msg.write(
67
- # f"""Contexto: {contexts}""")
68
 
69
- stream_response = send_message(
70
- oa_client,
71
- contexts,
72
- prompt,
73
- []
74
- )
75
-
76
- return stream_response
77
-
78
-
79
- oa_client, qdrant_client, edgedb_client = connect_to_services()
80
-
81
-
82
- def display_chat():
83
- for msg in st.session_state.messages:
84
- st.chat_message(msg["role"]).write(msg["content"])
85
-
86
-
87
- def sidebar_content():
88
- st.image('https://www.gov.br/++theme++padrao_govbr/img/govbr-logo-large.png', width=200)
89
- st.header("Tópicos frequentes")
90
-
91
- # Botões de exemplo
92
- topics = [
93
- "Niveis da conta govbr.",
94
- "Dúvidas no reconhecimento facial.",
95
- "Dúvidas na autenticação dos bancos.",
96
- "Dúvidas para aumentar o nível com a cin."
97
- ]
98
-
99
- for topic in topics:
100
- st.button(topic)
101
-
102
- # Espaços em branco para organização
103
- for _ in range(5):
104
- st.write("")
105
-
106
- # Botão centralizado
107
- col1, col2, col3 = st.columns([1, 1, 1])
108
- with col2:
109
- st.button("VOLTAR")
110
-
111
-
112
- # Função principal de processamento de input do chat
113
- def process_user_input():
114
- if prompt := st.chat_input(placeholder="Digite sua mensagem"):
115
- # Armazenar mensagem do usuário
116
- st.session_state.messages.append({"role": "user", "content": prompt})
117
- st.chat_message("user").write(prompt)
118
-
119
- # Simulação de resposta do assistente
120
- response = send_message_for_ai(
121
- prompt, oa_client, qdrant_client, edgedb_client)
122
-
123
- # Armazenar e exibir resposta do assistente
124
- st.session_state.messages.append({"role": "assistant", "content": response})
125
- st.chat_message("assistant").write(response)
126
-
127
 
128
- #system_msg = st.chat_message("system")
129
-
130
-
131
- # Configuração inicial
132
- if "messages" not in st.session_state:
133
- st.session_state["messages"] = [{"role": "assistant", "content": "Como eu posso ajudar?"}]
134
-
135
- # Exibição da barra lateral
136
- with st.sidebar:
137
- sidebar_content()
 
 
 
 
 
138
 
139
- # Exibição do título e subtítulo
140
- st.title("Bem-vindo à ajuda do gov.br")
141
- st.caption("💬 Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
142
 
143
- # Exibição do chat
144
- display_chat()
145
 
146
- # Processamento da entrada do usuário no chat
147
- process_user_input()
148
 
149
 
 
 
 
 
 
150
 
 
16
 
17
  load_dotenv()
18
 
 
19
  @st.cache_resource
20
  def connect_to_services():
21
  oa_client = OpenAI(
 
31
 
32
  return oa_client, qdrant_client, edgedb_client
33
 
34
class Chatbot():
    """Streamlit chat UI backed by a RAG pipeline.

    Uses OpenAI for embeddings/completions, Qdrant for vector search and
    EdgeDB for parent-document lookup. Conversation state lives in
    ``st.session_state.chat_history``; a sidebar topic click is exposed to
    the caller through ``self.response``.
    """

    def __init__(self):
        # Seed the chat history once per browser session.
        if "chat_history" not in st.session_state:
            st.session_state.chat_history = [{
                "role": "assistant",
                "content": "Como eu posso ajudar?"
            }]

        # Sidebar quick-topic buttons (created once per session).
        if "topics" not in st.session_state:
            st.session_state["topics"] = [
                "Niveis da conta govbr.",
                "Dúvidas no reconhecimento facial.",
                "Como recuperar minha conta gov.br",
                "Dúvidas para aumentar o nível com a cin."
            ]

        # Pending user question, set when a sidebar topic is clicked.
        self.response = ""

        self.oa_client, self.qdrant_client, self.edgedb_client = connect_to_services()

    def mount_chatbot(self):
        """Render the title/caption and forward any typed message to the AI."""
        st.title("Bem-vindo à ajuda do gov.br")
        st.caption("💬 Qual a sua dificuldade hoje? Estou aqui para ajudar!")

        # Chat input box; the assistant reply is appended to the history
        # inside send_message_for_ai.
        if user_query := st.chat_input(placeholder="Digite sua mensagem"):
            self.add_to_history(user_query, role="user")
            self.send_message_for_ai(user_query)

    def create_sidebar(self):
        """Render the sidebar: logo, frequent topics and a clear-history button."""
        st.image('https://www.gov.br/++theme++padrao_govbr/img/govbr-logo-large.png', width=200)
        st.header("Tópicos frequentes")

        for topic in st.session_state.topics:
            if st.button(topic, key=topic):
                # Defer handling to the caller (app.py) via self.response.
                self.response = topic

        # Blank lines for vertical spacing before the action button.
        for _ in range(5):
            st.write("")

        # Centered clear-history button; side columns are only for layout.
        _, center_col, _ = st.columns([1, 2, 1])
        with center_col:
            if st.button("LIMPAR HISTÓRICO"):
                st.session_state.chat_history = [{
                    "role": "assistant",
                    "content": "Como eu posso ajudar?",
                }]

    def send_message_for_ai(self, prompt):
        """Answer ``prompt`` via RAG and append the reply to the history.

        Embeds the prompt, retrieves the 3 nearest chunks from Qdrant, loads
        each chunk's parent document from EdgeDB, streams the completion and
        stores the joined text as an assistant message.
        """
        embedding = self.oa_client.embeddings.create(
            input=[prompt],
            model=os.environ.get("OPENAI_MODEL_EMBEDDING")
        ).data[0].embedding

        child_texts = self.qdrant_client.search(
            collection_name=os.environ.get("COLLECTION_NAME"),
            query_vector=embedding,
            limit=3
        )

        contexts = []

        for child_text in child_texts:
            # Fetch the parent document referenced by this chunk.
            results = self.edgedb_client.query('''
                SELECT Pattern {
                    content,
                    url,
                    parent_id
                }
                FILTER .id = <uuid>$parent_id
            ''', parent_id=child_text.payload["parent_id"])

            if not results:
                # Orphaned chunk (parent missing in EdgeDB): skip it instead
                # of raising IndexError on [0].
                continue

            parent_text = results[0]
            contexts.append({
                "content": parent_text.content,
                "url": parent_text.url,
                "parent_id": parent_text.parent_id
            })

        stream_response = send_message(
            self.oa_client,
            contexts,
            prompt,
            []
        )

        # Accumulate the streamed chunks into a single string.
        response_content = []
        for chunk in stream_response:
            content = chunk.choices[0].delta.content or ""
            response_content.append(content)

        full_response = ''.join(response_content)

        self.add_to_history(full_response, role="assistant")

    def add_to_history(self, message, role="user"):
        """Append a message dict to the session chat history."""
        st.session_state.chat_history.append({
            "role": role,
            "content": message
        })
152
 
agente/interface/main.py DELETED
@@ -1,91 +0,0 @@
1
import os
import sys
from dotenv import load_dotenv
from openai import OpenAI
from qdrant_client import QdrantClient
import redis
import streamlit as st

# Add the parent directory to the Python path so `pipelines.*` resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from pipelines.message import send_message  # noqa

st.set_page_config(page_title="Carlos AI Agent")


load_dotenv()


@st.cache_resource
def connect_to_services():
    """Create the OpenAI, Qdrant and Redis clients (cached per process)."""
    oa_client = OpenAI(
        api_key=os.environ.get("OPENAI_API_KEY")
    )

    qdrant_client = QdrantClient(
        host=os.environ.get("QDRANT_HOST"),
        port=os.environ.get("QDRANT_PORT")
    )

    redis_client = redis.Redis(
        host=os.environ.get("REDIS_HOST"),
        port=os.environ.get("REDIS_PORT"),
        decode_responses=True
    )

    return oa_client, qdrant_client, redis_client


def send_message_for_ai(prompt, oa_client, qdrant_client, redis_client):
    """Run the RAG pipeline for `prompt` and return a streaming response.

    Embeds the prompt, fetches the 3 nearest chunks from Qdrant, loads each
    chunk's parent document from Redis, echoes the contexts to the system
    message, then calls `send_message` for the completion stream.
    """
    embedding = oa_client.embeddings.create(
        input=[prompt],
        model=os.environ.get("OPENAI_MODEL_EMBEDDING")
    ).data[0].embedding

    child_texts = qdrant_client.search(
        collection_name=os.environ.get("COLLECTION_NAME"),
        query_vector=embedding,
        limit=3
    )

    contexts = []

    for child_text in child_texts:
        # Parent documents are stored as Redis hashes keyed by parent_id.
        parent_text = redis_client.hgetall(
            child_text.payload["parent_id"]
        )
        context = {
            "content": parent_text["content"],
            "url": parent_text["url"],
            "parent_id": parent_text["parent_id"]
        }
        contexts.append(context)

    # NOTE(review): `system_msg` is a module-level global defined further
    # down; this works only because the call happens after that definition.
    system_msg.write(
        f"""Contexto: {contexts}""")

    stream_response = send_message(
        oa_client,
        contexts,
        prompt,
        []
    )

    return stream_response


oa_client, qdrant_client, redis_client = connect_to_services()

st.write("# Carlos AI Agent")

prompt = st.chat_input("Digite sua pergunta:")
user_msg = st.chat_message("user")
assistant_msg = st.chat_message("assistant")
system_msg = st.chat_message("system")

if prompt:
    user_msg.write(prompt)
    response = send_message_for_ai(
        prompt, oa_client, qdrant_client, redis_client)
    assistant_msg.write_stream(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
agente/prompts/system.md CHANGED
@@ -17,6 +17,6 @@ Since you will receive a list with these contexts, you will answer based on them
17
 
18
  The format of your response should be in markdown. You can include images in your response by interpreting them based on the description provided for each image and responding in markdown format.
19
 
20
- If you do not find the answer in the given context, simply respond to the user with "Não encontrei sua resposta."
21
 
22
  Do not use information that is not in the given context.
 
17
 
18
  The format of your response should be in markdown. You can include images in your response by interpreting them based on the description provided for each image and responding in markdown format.
19
 
20
+ If you do not find the answer in the given context, simply respond to the user with "Esta informação não está em meu alcance."
21
 
22
  Do not use information that is not in the given context.