Spaces:
Runtime error
Runtime error
mohamedalcafory
committed on
Commit
•
bfecf10
1
Parent(s):
cd66f46
Update app.py
Browse files
app.py
CHANGED
@@ -8,27 +8,32 @@ embeddings = SentenceTransformerEmbeddings(
|
|
8 |
model_kwargs={"trust_remote_code": True}
|
9 |
)
|
10 |
|
|
|
|
|
11 |
from langchain_community.vectorstores import FAISS
|
12 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
13 |
from langchain.document_loaders import TextLoader, PyPDFLoader
|
14 |
|
15 |
loader = PyPDFLoader("https://www.versusarthritis.org/media/24901/fibromyalgia-information-booklet-july2021.pdf")
|
16 |
documents = loader.load()
|
17 |
-
|
18 |
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
|
19 |
docs = text_splitter.split_documents(documents)
|
20 |
vector_store = FAISS.from_documents(docs, embeddings)
|
21 |
retriever = vector_store.as_retriever()
|
22 |
|
|
|
|
|
23 |
from langchain import hub
|
24 |
from langchain_core.output_parsers import StrOutputParser
|
25 |
from langchain_core.runnables import RunnablePassthrough
|
26 |
|
27 |
prompt = hub.pull("rlm/rag-prompt")
|
|
|
28 |
|
29 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
30 |
tokenizer = AutoTokenizer.from_pretrained("mohamedalcafory/PubMed_Llama3.1_Based_model")
|
31 |
model = AutoModelForCausalLM.from_pretrained("mohamedalcafory/PubMed_Llama3.1_Based_model")
|
|
|
32 |
|
33 |
from transformers import pipeline
|
34 |
from langchain_huggingface import HuggingFacePipeline
|
|
|
8 |
model_kwargs={"trust_remote_code": True}
|
9 |
)
|
10 |
|
11 |
+
print('Embeddings loaded successfully')
|
12 |
+
|
13 |
from langchain_community.vectorstores import FAISS
|
14 |
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
15 |
from langchain.document_loaders import TextLoader, PyPDFLoader
|
16 |
|
17 |
loader = PyPDFLoader("https://www.versusarthritis.org/media/24901/fibromyalgia-information-booklet-july2021.pdf")
|
18 |
documents = loader.load()
|
|
|
19 |
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
|
20 |
docs = text_splitter.split_documents(documents)
|
21 |
vector_store = FAISS.from_documents(docs, embeddings)
|
22 |
retriever = vector_store.as_retriever()
|
23 |
|
24 |
+
print('Retriever loaded successfully')
|
25 |
+
|
26 |
from langchain import hub
|
27 |
from langchain_core.output_parsers import StrOutputParser
|
28 |
from langchain_core.runnables import RunnablePassthrough
|
29 |
|
30 |
prompt = hub.pull("rlm/rag-prompt")
|
31 |
+
print(prompt)
|
32 |
|
33 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
34 |
tokenizer = AutoTokenizer.from_pretrained("mohamedalcafory/PubMed_Llama3.1_Based_model")
|
35 |
model = AutoModelForCausalLM.from_pretrained("mohamedalcafory/PubMed_Llama3.1_Based_model")
|
36 |
+
print('Model loaded successfully')
|
37 |
|
38 |
from transformers import pipeline
|
39 |
from langchain_huggingface import HuggingFacePipeline
|