adowu committed on
Commit
cdd85c7
1 Parent(s): 2fac1aa

Upload 5 files

Browse files
Files changed (5) hide show
  1. app.py +67 -0
  2. chatbot.py +45 -0
  3. config.py +22 -0
  4. database.py +131 -0
  5. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from database import KodeksProcessor
3
+ from chatbot import Chatbot
4
+ import os
5
+
6
def initialize_session_state():
    """Create the session-scoped chatbot and chat history if absent.

    Streamlit re-executes the script on every interaction, so anything
    that must persist between runs is kept in ``st.session_state``.
    """
    state = st.session_state
    if 'chatbot' not in state:
        state.chatbot = Chatbot()
    if 'messages' not in state:
        state.messages = []
11
+
12
def main():
    """Streamlit entry point: render the legal-assistant chat UI.

    Per user prompt: store the question in the history, retrieve the most
    relevant statute fragments from the vector database, stream the model's
    answer, and append the finished answer to the history.
    """
    st.title("Asystent Prawny")

    initialize_session_state()

    # Build the vector-database processor ONCE per session. The original
    # code constructed a fresh KodeksProcessor (and thus a fresh Chroma
    # persistent client) on every user prompt in addition to this init path.
    if 'processor' not in st.session_state:
        with st.spinner("Inicjalizacja bazy danych..."):
            st.session_state.processor = KodeksProcessor()
            # Only ingest the source documents when no persisted DB exists.
            if not os.path.exists("chroma_db"):
                st.session_state.processor.process_all_files("data/kodeksy")

    # Sidebar button: wipe both the model-side and the UI-side history.
    if st.sidebar.button("Wyczyść historię"):
        st.session_state.chatbot.clear_history()
        st.session_state.messages = []
        st.rerun()

    # Replay the stored chat history so it survives Streamlit re-runs.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Handle a new user prompt.
    if prompt := st.chat_input("Zadaj pytanie dotyczące prawa..."):
        # Record the user's question in the history.
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.chat_message("user"):
            st.markdown(prompt)

        # Retrieve statute fragments relevant to the question (reusing the
        # session-cached processor instead of re-opening the database).
        relevant_chunks = st.session_state.processor.search(prompt)

        # Generate and stream the answer.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            context = st.session_state.chatbot.generate_context(
                [{"text": doc} for doc in relevant_chunks['documents'][0]]
            )

            # Stream token chunks, showing a cursor marker while generating.
            for response_chunk in st.session_state.chatbot.get_response(prompt, context):
                full_response += response_chunk
                message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)

        # Record the assistant's answer in the history.
        st.session_state.messages.append({"role": "assistant", "content": full_response})
65
+
66
# Run the app only when executed directly (e.g. `streamlit run app.py`),
# not when imported as a module.
if __name__ == "__main__":
    main()
chatbot.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Dict, Generator, List

from huggingface_hub import InferenceClient

from config import HF_TOKEN, MODEL_NAME, SYSTEM_PROMPT
4
+
5
class Chatbot:
    """Conversational wrapper around the Hugging Face Inference API.

    Keeps the running conversation (starting with the system prompt) so the
    model sees prior turns, and streams answers chunk by chunk.
    """

    def __init__(self):
        self.client = InferenceClient(api_key=HF_TOKEN)
        # History always begins with the system prompt; completed
        # user/assistant turns are appended after each exchange.
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]

    def generate_context(self, relevant_chunks: List[Dict]) -> str:
        """Format retrieved statute fragments into one context string.

        Each chunk is a dict with a 'text' key; fragments are separated by
        blank lines under a fixed Polish header.
        """
        context = "Kontekst z przepisów prawnych:\n\n"
        for chunk in relevant_chunks:
            context += f"{chunk['text']}\n\n"
        return context

    def get_response(self, user_input: str, context: str) -> Generator[str, None, None]:
        """Stream the model's answer to *user_input* given *context*.

        This is a generator (the original ``-> str`` annotation was wrong):
        it yields text chunks as they arrive. The user/assistant turn is
        appended to the history only after the stream is fully consumed.
        """
        messages = self.conversation_history + [
            {"role": "user", "content": f"Kontekst: {context}\n\nPytanie: {user_input}"}
        ]

        response = ""
        stream = self.client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.5,
            max_tokens=8192,
            top_p=0.7,
            stream=True
        )

        for chunk in stream:
            content = chunk.choices[0].delta.content
            # Delta chunks may carry no content (e.g. role-only frames).
            if content:
                response += content
                yield content

        # Record only the raw question (without the retrieved context) so
        # subsequent prompts stay small.
        self.conversation_history.append({"role": "user", "content": user_input})
        self.conversation_history.append({"role": "assistant", "content": response})

    def clear_history(self):
        """Reset the conversation back to just the system prompt."""
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]
config.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from dotenv import load_dotenv

# Read variables from a local .env file into the process environment.
load_dotenv()

# API configuration
HF_TOKEN = os.getenv('HF_TOKEN')  # Hugging Face access token; None when unset
MODEL_NAME = "Qwen/Qwen2.5-72B-Instruct"

# Database configuration
# NOTE(review): CHUNK_SIZE/CHUNK_OVERLAP are imported by database.py but do
# not appear to be used in its chunking logic — confirm intended behavior.
CHUNK_SIZE = 1000
CHUNK_OVERLAP = 200
DATABASE_DIR = "chroma_db"

# Embeddings model configuration
EMBEDDING_MODEL = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"

# System prompt (Polish, user-facing model instruction — left untranslated)
SYSTEM_PROMPT = """Jesteś asystentem prawniczym specjalizującym się w polskim prawie.
Twoje odpowiedzi opierają się na aktualnych przepisach prawnych.
Zawsze cytuj konkretne artykuły i paragrafy z odpowiednich ustaw.
"""
database.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ from typing import List, Dict, Tuple
4
+ import chromadb
5
+ from chromadb.utils import embedding_functions
6
+ from config import CHUNK_SIZE, CHUNK_OVERLAP, DATABASE_DIR, EMBEDDING_MODEL
7
+
8
class KodeksProcessor:
    """Ingests Polish statute text files into a Chroma vector database
    and serves similarity queries over the stored fragments.
    """

    def __init__(self):
        self.client = chromadb.PersistentClient(path=DATABASE_DIR)
        # get_or_create replaces the original bare `except:` and guarantees
        # the custom embedding function is attached whether the collection
        # is new or already persisted (the original get-path attached none).
        self.collection = self.client.get_or_create_collection(
            name="kodeksy",
            embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
                model_name=EMBEDDING_MODEL
            )
        )

    def extract_metadata(self, text: str) -> Dict:
        """Pull the journal-of-laws reference and act title out of *text*.

        Returns a dict with any of: 'dz_u', 'rok', 'data_ustawy', 'nazwa'.
        Fixed escaping: the original patterns used r'\\d' (a literal
        backslash + 'd' in a raw string) and could never match.
        """
        metadata = {}
        dz_u_match = re.search(r'Dz\.U\.(\d{4})\.(\d+)\.(\d+)', text)
        if dz_u_match:
            metadata['dz_u'] = f"Dz.U.{dz_u_match.group(1)}.{dz_u_match.group(2)}.{dz_u_match.group(3)}"
            metadata['rok'] = dz_u_match.group(1)

        nazwa_match = re.search(r'USTAWA\s+z dnia(.*?)\n(.*?)\n', text)
        if nazwa_match:
            metadata['data_ustawy'] = nazwa_match.group(1).strip()
            metadata['nazwa'] = nazwa_match.group(2).strip()

        return metadata

    def split_header_and_content(self, text: str) -> Tuple[str, str]:
        """Split a file into (preamble, act text) at the first 'USTAWA'."""
        parts = text.split("USTAWA", 1)
        if len(parts) > 1:
            return parts[0], "USTAWA" + parts[1]
        return "", text

    def process_article(self, article_text: str) -> Dict:
        """Parse one article into its number and optional paragraph list.

        Returns either {'article_num', 'content', 'has_paragraphs': False}
        or {'article_num', 'paragraphs': [(num, text), ...],
        'has_paragraphs': True}.
        """
        art_num_match = re.match(r'Art\.\s*(\d+)', article_text)
        article_num = art_num_match.group(1) if art_num_match else ""

        # Capture '§ N. text' runs up to the next paragraph marker or EOF.
        paragraphs = re.findall(r'§\s*(\d+)[.\s]+(.*?)(?=§\s*\d+|$)', article_text, re.DOTALL)

        if not paragraphs:
            return {
                "article_num": article_num,
                "content": article_text.strip(),
                "has_paragraphs": False
            }

        return {
            "article_num": article_num,
            "paragraphs": paragraphs,
            "has_paragraphs": True
        }

    def split_into_chunks(self, text: str, metadata: Dict) -> List[Dict]:
        """Break the act into per-article (or per-paragraph) chunks,
        each carrying *metadata* plus its chapter/article/paragraph.
        """
        chunks = []
        # Capturing split keeps the 'Rozdział N\n\nTitle' headers in the
        # result so the current chapter can be tracked.
        chapters = re.split(r'(Rozdział \d+\n\n[^\n]+)\n', text)
        current_chapter = ""

        for section in chapters:
            if section.startswith('Rozdział'):
                current_chapter = section.strip()
                continue

            articles = re.split(r'(Art\.\s*\d+.*?)(?=Art\.\s*\d+|$)', section)

            for article in articles:
                if not article.strip():
                    continue

                if article.startswith('Art.'):
                    processed_article = self.process_article(article)

                    chunk_metadata = {
                        **metadata,
                        "chapter": current_chapter,
                        "article": processed_article["article_num"]
                    }

                    if processed_article["has_paragraphs"]:
                        for par_num, par_content in processed_article["paragraphs"]:
                            chunks.append({
                                "text": f"Art. {processed_article['article_num']} § {par_num}. {par_content}",
                                "metadata": {**chunk_metadata, "paragraph": par_num}
                            })
                    else:
                        chunks.append({
                            "text": processed_article["content"],
                            "metadata": chunk_metadata
                        })

        return chunks

    def process_file(self, filepath: str) -> None:
        """Read one statute file, chunk it, and add it to the collection."""
        print(f"Przetwarzanie pliku: {filepath}")

        with open(filepath, 'r', encoding='utf-8') as file:
            content = file.read()

        header, main_content = self.split_header_and_content(content)
        metadata = self.extract_metadata(main_content)
        metadata['filename'] = os.path.basename(filepath)

        chunks = self.split_into_chunks(main_content, metadata)

        # IDs combine filename, article number, and position so repeated
        # article numbers across chunks stay unique.
        for i, chunk in enumerate(chunks):
            self.collection.add(
                documents=[chunk["text"]],
                metadatas=[chunk["metadata"]],
                ids=[f"{metadata['filename']}_{chunk['metadata']['article']}_{i}"]
            )

        print(f"Dodano {len(chunks)} chunków z pliku {metadata['filename']}")

    def process_all_files(self, directory: str) -> None:
        """Ingest every .txt file found directly in *directory*."""
        for filename in os.listdir(directory):
            if filename.endswith('.txt'):
                filepath = os.path.join(directory, filename)
                self.process_file(filepath)

    def search(self, query: str, n_results: int = 3) -> Dict:
        """Return the top *n_results* fragments most similar to *query*."""
        results = self.collection.query(
            query_texts=[query],
            n_results=n_results
        )
        return results
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ streamlit
2
+ chromadb
3
+ sentence-transformers
4
+ huggingface-hub
5
+ python-dotenv