ChanYeon committed on
Commit de9afe2 • 1 Parent(s): b4e2cf5

Update app.py

Files changed (1)
  1. app.py +33 -41
app.py CHANGED
@@ -8,53 +8,51 @@ from langchain.chains import ConversationalRetrievalChain
 from htmlTemplates import css, bot_template, user_template
 from langchain.llms import LlamaCpp  # For loading transformer models.
 from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
-import tempfile  # Library for creating temporary files.
+import tempfile
 import os
-from huggingface_hub import hf_hub_download  # Function for downloading models from the Hugging Face Hub.
+from huggingface_hub import hf_hub_download
 
-# Function that extracts text from PDF documents.
+# Function that extracts text from a PDF document
 def get_pdf_text(pdf_docs):
-    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
-    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
-    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
-        f.write(pdf_docs.getvalue())  # Write the PDF document's contents to the temporary file.
-    pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
-    pdf_doc = pdf_loader.load()  # Extract the text.
-    return pdf_doc  # Return the extracted text.
-
-# Assignment:
-# implement the text-extraction functions below.
+    temp_dir = tempfile.TemporaryDirectory()
+    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)
+    with open(temp_filepath, "wb") as f:
+        f.write(pdf_docs.getvalue())
+    pdf_loader = PyPDFLoader(temp_filepath)
+    pdf_doc = pdf_loader.load()
+    return pdf_doc
+
+# Assignment section
 def get_text_file(docs):
-    pass
-
+    text_loader = TextLoader(docs.name)
+    return text_loader.load()
+
 def get_csv_file(docs):
-    pass
+    csv_loader = CSVLoader(docs.name)
+    return csv_loader.load()
 
 def get_json_file(docs):
-    pass
+    json_loader = JSONLoader(docs.name)
+    return json_loader.load()
 
-
-# Function that processes documents and splits them into text chunks.
+# Function that splits documents into text chunks
 def get_text_chunks(documents):
     text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=1000,  # Set the chunk size.
-        chunk_overlap=200,  # Set the overlap between chunks.
-        length_function=len  # Specify the function used to measure text length.
+        chunk_size=1000,
+        chunk_overlap=200,
+        length_function=len
     )
-
-    documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
-    return documents  # Return the resulting chunks.
-
-
-# Function that builds a vector store from the text chunks.
+    documents = text_splitter.split_documents(documents)
+    return documents
 
+# Function that builds a vector store from text chunks
 def get_vectorstore(text_chunks):
-    # Load the desired embedding model.
     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
-                                       model_kwargs={'device': 'cpu'})  # Configure the embedding model.
-    vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create a FAISS vector store.
-    return vectorstore  # Return the created vector store.
-
+                                       model_kwargs={'device': 'cpu'})
+    vectorstore = FAISS.from_documents(text_chunks, embeddings)
+    return vectorstore
 
+# Conversation chain setup function
 def get_conversation_chain(vectorstore):
     model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
     model_basename = 'llama-2-7b-chat.Q2_K.gguf'
@@ -64,23 +62,19 @@ def get_conversation_chain(vectorstore):
         n_ctx=4086,
         input={"temperature": 0.75, "max_length": 2000, "top_p": 1},
         verbose=True, )
-    # Create memory for storing the conversation history.
     memory = ConversationBufferMemory(
         memory_key='chat_history', return_messages=True)
-    # Create the conversational retrieval chain.
     conversation_chain = ConversationalRetrievalChain.from_llm(
         llm=llm,
         retriever=vectorstore.as_retriever(),
         memory=memory
     )
-    return conversation_chain  # Return the created conversation chain.
+    return conversation_chain
 
-# Function that handles user input.
+# User input handling function
 def handle_userinput(user_question):
     print('user_question => ', user_question)
-    # Generate a response to the user's question using the conversation chain.
     response = st.session_state.conversation({'question': user_question})
-    # Store the chat history.
     st.session_state.chat_history = response['chat_history']
 
     for i, message in enumerate(st.session_state.chat_history):
@@ -91,7 +85,6 @@ def handle_userinput(user_question):
         st.write(bot_template.replace(
             "{{MSG}}", message.content), unsafe_allow_html=True)
 
-
 def main():
     load_dotenv()
     st.set_page_config(page_title="Chat with multiple Files",
@@ -142,6 +135,5 @@ def main():
         st.session_state.conversation = get_conversation_chain(
             vectorstore)
 
-
 if __name__ == '__main__':
-    main()
+    main()
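
A caveat on the new loader functions: unlike get_pdf_text(), they pass docs.name straight to the loaders, but for a Streamlit UploadedFile the .name attribute is only the original file name, not a path on disk, so TextLoader, CSVLoader, and JSONLoader may not find the file at runtime. Below is a minimal sketch, not part of the commit, of how the three loaders could reuse the tempfile pattern the PDF branch already applies; the _save_upload helper and the jq_schema='.' choice are illustrative assumptions.

    import os
    import tempfile

    from langchain.document_loaders import TextLoader, CSVLoader, JSONLoader


    def _save_upload(docs):
        # Hypothetical helper: persist the uploaded file's bytes to a
        # temporary path so the path-based loaders can open it.
        temp_dir = tempfile.mkdtemp()
        temp_filepath = os.path.join(temp_dir, docs.name)
        with open(temp_filepath, "wb") as f:
            f.write(docs.getvalue())
        return temp_filepath


    def get_text_file(docs):
        return TextLoader(_save_upload(docs)).load()


    def get_csv_file(docs):
        return CSVLoader(_save_upload(docs)).load()


    def get_json_file(docs):
        # JSONLoader also expects a jq_schema; '.' selects the whole document,
        # and text_content=False permits non-string JSON values (assumption:
        # arbitrary top-level JSON is acceptable here).
        return JSONLoader(_save_upload(docs), jq_schema=".",
                          text_content=False).load()

Each loader then receives a real path, matching the behavior of the committed PDF branch; cleanup of the temporary directory is left out for brevity, as in the original.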
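The hunk in get_conversation_chain() elides the lines that actually fetch and load the model. For context only, a typical wiring of the two names shown in the diff, hedged as an assumption since the commit does not display these lines:

    from huggingface_hub import hf_hub_download
    from langchain.llms import LlamaCpp

    model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
    model_basename = 'llama-2-7b-chat.Q2_K.gguf'

    # Download the GGUF weights from the Hub (cached after the first call).
    model_path = hf_hub_download(repo_id=model_name_or_path,
                                 filename=model_basename)

    # Load the quantized model with llama.cpp; the kwargs mirror those
    # visible in the diff.
    llm = LlamaCpp(
        model_path=model_path,
        n_ctx=4086,  # as in the diff; 4096 is the more common context size
        input={"temperature": 0.75, "max_length": 2000, "top_p": 1},
        verbose=True,
    )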