import os
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models.gigachat import GigaChat
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub, LlamaCpp
from huggingface_hub import snapshot_download, hf_hub_download
# from prompts import CONDENSE_QUESTION_PROMPT
repo_name = "IlyaGusev/saiga_mistral_7b_gguf"
model_name = "model-q4_K.gguf"
#snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
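
# Sketch of an alternative download (not used at runtime, since the chain below runs on
# GigaChat): hf_hub_download can fetch just the single GGUF file instead of a full repo
# snapshot; the name local_model_path is illustrative only.
# local_model_path = hf_hub_download(repo_id=repo_name, filename=model_name, local_dir=".")
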
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page of the uploaded PDFs."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            text += page.extract_text()
    return text

def get_text_chunks(text):
    """Split the raw text into overlapping chunks for embedding."""
    text_splitter = CharacterTextSplitter(separator="\n",
                                          chunk_size=1000,
                                          chunk_overlap=200,
                                          length_function=len)
    chunks = text_splitter.split_text(text)
    return chunks

def get_vectorstore(text_chunks):
    """Embed the chunks and index them in an in-memory FAISS vector store."""
    # embeddings = OpenAIEmbeddings()
    # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    embeddings = HuggingFaceEmbeddings(model_name="intfloat/multilingual-e5-large")
    # embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore
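
# Optional sketch (assuming the stock LangChain FAISS wrapper): the index built above
# could be persisted to disk and reloaded instead of being rebuilt on every "Process" click.
# vectorstore.save_local("faiss_index")
# vectorstore = FAISS.load_local("faiss_index", embeddings)
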
def get_conversation_chain(vectorstore, model_name):
    """Build a conversational retrieval chain over the vector store with buffer memory."""
    # Local llama.cpp alternative (uses the GGUF model referenced by model_name):
    # llm = LlamaCpp(model_path=model_name,
    #                temperature=0.1,
    #                top_k=30,
    #                top_p=0.9,
    #                streaming=True,
    #                n_ctx=2048,
    #                n_parts=1,
    #                echo=True)
    # llm = ChatOpenAI()
    llm = GigaChat(credentials=os.getenv("GIGACHAT_CREDENTIALS"),
                   verify_ssl_certs=False)
    memory = ConversationBufferMemory(memory_key='chat_history',
                                      input_key='question',
                                      output_key='answer',
                                      return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm,
                                                               retriever=vectorstore.as_retriever(),
                                                               memory=memory,
                                                               return_source_documents=True)
    return conversation_chain
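
# Sketch only: if the CONDENSE_QUESTION_PROMPT import at the top is restored (and the
# local prompts module actually defines it), a custom follow-up-question rephrasing
# prompt could be passed to the chain via the condense_question_prompt argument:
# conversation_chain = ConversationalRetrievalChain.from_llm(
#     llm=llm,
#     retriever=vectorstore.as_retriever(),
#     memory=memory,
#     return_source_documents=True,
#     condense_question_prompt=CONDENSE_QUESTION_PROMPT)
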
def handle_userinput(user_question):
    """Send the question to the conversation chain and render the chat history with sources."""
    if user_question is None:
        user_question = "привет"  # fall back to a greeting ("hello") if no question was given
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']
    st.session_state.retrieved_text = response['source_documents']
    for i, (message, text) in enumerate(zip(st.session_state.chat_history,
                                            st.session_state.retrieved_text)):
        if i % 3 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
            print(text)
            st.write(bot_template.replace(
                "{{MSG}}", str(text.page_content)), unsafe_allow_html=True)
    # for text in enumerate(st.session_state.retrieved_text):
    #     st.write(text[1].page_content, '\n')
    # print(response['source_documents'][0])

# main code
load_dotenv()
st.set_page_config(page_title="Chat with multiple PDFs",
                   page_icon=":books:")
st.write(css, unsafe_allow_html=True)

if "conversation" not in st.session_state:
    st.session_state.conversation = None
if "chat_history" not in st.session_state:
    st.session_state.chat_history = None

st.header("Chat with multiple PDFs :books:")
user_question = st.text_input("Ask a question about your documents: ")
if user_question:
    handle_userinput(user_question)

with st.sidebar:
    st.subheader("Your documents")
    pdf_docs = st.file_uploader(
        "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
    if st.button("Process"):
        with st.spinner("Processing"):
            # get pdf text
            raw_text = get_pdf_text(pdf_docs)
            # get the text chunks
            text_chunks = get_text_chunks(raw_text)
            # create vector store
            vectorstore = get_vectorstore(text_chunks)
            # create conversation chain
            st.session_state.conversation = get_conversation_chain(vectorstore, model_name)
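
# To run the app locally (assuming this file is saved as app.py):
#   streamlit run app.py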