|
import streamlit as st |
|
from langchain.embeddings import HuggingFaceInstructEmbeddings |
|
from langchain.vectorstores import FAISS |
|
from langchain.text_splitter import CharacterTextSplitter |
|
from langchain.document_loaders import DirectoryLoader, PyPDFLoader |
|
import os |
|
from PyPDF2 import PdfReader |
|
from transformers import pipeline |
|
from transformers import AutoModel |
|
|
|
|
|
from langchain.prompts import ChatPromptTemplate |
|
from langchain.schema import StrOutputParser |
|
from langchain.schema.runnable import RunnablePassthrough |
|
from langchain.chains import ConversationalRetrievalChain |
|
from langchain.llms import HuggingFaceHub |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_pdf_text(folder_path):
    """Concatenate the text of every PDF found directly in *folder_path*.

    Args:
        folder_path: Directory scanned (non-recursively) for ``.pdf`` files.

    Returns:
        A single string with the extracted text of all pages of all PDFs,
        in ``os.listdir`` order; empty string if no PDFs are found.
    """
    text = ""

    for filename in os.listdir(folder_path):
        filepath = os.path.join(folder_path, filename)

        # Only consider regular files with a .pdf extension (case-insensitive).
        if os.path.isfile(filepath) and filename.lower().endswith(".pdf"):
            pdf_reader = PdfReader(filepath)
            for page in pdf_reader.pages:
                # extract_text() may return None for pages without a text
                # layer (e.g. scanned images) — guard against `str += None`
                # raising a TypeError.
                text += page.extract_text() or ""

    return text
|
|
|
|
|
def get_text_chunks(text):
    """Split *text* into overlapping chunks suitable for embedding.

    Splits on newlines into pieces of at most 1000 characters with a
    200-character overlap between neighbouring chunks.

    Returns:
        A list of text chunks.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
|
|
|
|
|
def create_vectorstore_and_store(folder_path='./files', save_directory="Store"):
    """Build a FAISS vector store from the PDFs in *folder_path* and persist it.

    Args:
        folder_path: Directory containing the source PDFs (default './files').
        save_directory: Local directory the FAISS index is written to
            (default "Store") — must match what get_vectorstore() loads.

    Returns:
        None — the store is written to disk as a side effect.
    """
    pdf_text = get_pdf_text(folder_path)
    text_chunks = get_text_chunks(pdf_text)

    # NOTE(review): this checkpoint is a QA model, not an instruction-tuned
    # embedding model — it runs, but embedding quality may suffer; confirm
    # the model choice is intentional.
    embeddings = HuggingFaceInstructEmbeddings(
        model_name="deutsche-telekom/bert-multi-english-german-squad2"
    )

    vectorstoreDB = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    vectorstoreDB.save_local(save_directory)
    return None
|
|
|
|
|
|
|
def get_vectorstore(save_directory="Store"):
    """Load the persisted FAISS vector store from disk.

    Args:
        save_directory: Directory the index was saved to by
            create_vectorstore_and_store() (default "Store").

    Returns:
        The loaded FAISS vector store.
    """
    # The same embedding model used at index time must be used at load time,
    # otherwise query vectors would not match the stored vectors.
    embeddings = HuggingFaceInstructEmbeddings(
        model_name="deutsche-telekom/bert-multi-english-german-squad2"
    )
    return FAISS.load_local(save_directory, embeddings)
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Streamlit entry point: answer a user question from the stored PDFs.

    Shows the retrieved context, an extractive QA answer, and a
    reformulated full-sentence answer.
    """
    user_question = st.text_area("Stell mir eine Frage: ")

    if user_question:
        question = user_question
        st.text(user_question)

        # Only hit the retriever once there actually is a question —
        # the original invoked it with the (possibly empty) text on
        # every Streamlit rerun.
        retriever = get_vectorstore().as_retriever()
        retrieved_docs = retriever.invoke(user_question)

        # Join every retrieved document instead of hard-coding indices
        # (the original read docs 0, 1 and 3 — skipping 2, apparently a
        # typo — and raised IndexError when fewer than four documents
        # were returned).
        context = "".join(doc.page_content for doc in retrieved_docs)
        context = context.replace("\n", " ")
        # Strip hyphenation artifacts from PDF line breaks.
        context = context.replace("- ", "")
        st.text("Das ist der Textausschnitt der durch den Retriever herausgesucht wird:")
        st.text(context)

        # Extractive QA over the retrieved context.
        model_name = "deutsche-telekom/bert-multi-english-german-squad2"
        qa_pipeline = pipeline(
            "question-answering", model=model_name, tokenizer=model_name
        )
        answer = qa_pipeline(question=question, context=context)

        st.text("Basisantwort:")
        st.text(answer["answer"])

        # Rephrase the extracted span into a full sentence.
        text2text_generator = pipeline("text2text-generation", model="google/flan-t5-xl")
        newText = text2text_generator(
            "Formuliere einen neuen Satz. Frage: " + question
            + " Antwort: " + answer["answer"]
        )
        st.text(newText)
|
|
|
# Run the Streamlit app when executed as a script.
if __name__ == '__main__':

    main()