"""
"""
# -*- coding: utf-8 -*-
import requests
import streamlit as st
import openai
# from openai import embeddings
import os
from dotenv import load_dotenv
import numpy as np
import pandas as pd
import csv
import tempfile
from tempfile import NamedTemporaryFile
import pathlib
from pathlib import Path
import re
from re import sub
import matplotlib.pyplot as plt
from itertools import product
from tqdm import tqdm_notebook, tqdm, trange
import time
from time import sleep
# import pretty_errors
import seaborn as sns
from matplotlib.pyplot import style
from rich import print
import warnings
import PyPDF2
from openai import OpenAI
warnings.filterwarnings('ignore')
''' Core logic for loading the local knowledge base starts below. '''
##! Install the package first: !pip install "unstructured[all-docs]" -- required, otherwise the loader imports below will fail.
# from langchain.document_loaders import UnstructuredFileLoader ## older version.
from langchain_community.document_loaders.unstructured import UnstructuredFileLoader ## new version.
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings ## community paths, consistent with the loaders above.
from langchain_community.vectorstores import FAISS
load_dotenv()
### Set the OpenAI API key (the `user_token` secret/environment variable is expected to hold it).
os.environ["OPENAI_API_KEY"] = os.environ['user_token']
openai.api_key = os.environ['user_token']
client = OpenAI()  ## create the client only after the key has been placed in the environment.
# filepath = "/Users/yunshi/Downloads/txt_dir/Sparks_of_AGI.pdf"
def langchain_localKB_construct(filepath, username):
    print('Building the LangChain knowledge base...')
    ## Load the file. `filepath` is expected to expose a `.name` attribute pointing to a readable
    ## PDF path (e.g. a NamedTemporaryFile or an upload wrapper), hence `filepath.name` below.
    # filepath = "/Users/yunshi/Downloads/txt_dir/Sparks_of_AGI.pdf" ## a sample reference of a local PDF file.
    print('now filepath:', filepath.name)
    # loader = UnstructuredFileLoader(filepath.name) ### original loader; requires `unstructured[all-docs]`.
    loader = PyPDFLoader(filepath.name)  ## NOTE: only PyPDFLoader keeps per-page `page` metadata for PDFs.
    docs = loader.load()

    ## Split the text into overlapping chunks.
    # text_splitter = CharacterTextSplitter(chunk_size=5000, chunk_overlap=200)
    docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)

    ## Build the vector store.
    embedding_model_name = 'BAAI/bge-large-zh-v1.5'
    embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)  ## pulled from Hugging Face, so network access is needed.
    # embeddings = OpenAIEmbeddings(disallowed_special=())  ## OpenAI alternative; may need updating.
    vector_store = FAISS.from_documents(docs, embeddings)
    vector_store.save_local(f'./{username}/faiss_index')
    return vector_store
# vector_store = langchain_localKB_construct(filepath, username)  ## e.g. a wrapper around '/Users/yunshi/Downloads/txt_dir/Sparks_of_AGI.pdf'
# print(vector_store)
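## Illustration only: `filepath` must expose a `.name` attribute. Assuming a plain local path, a
## minimal stand-in wrapper (hypothetical, not part of the original app flow) could look like:
# from types import SimpleNamespace
# local_pdf = SimpleNamespace(name='/Users/yunshi/Downloads/txt_dir/Sparks_of_AGI.pdf')
# vector_store = langchain_localKB_construct(filepath=local_pdf, username='demo_user')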
### Retrieve relevant context from the local knowledge base for a prompt and assemble the augmented prompt.
def langchain_RAG(prompt, username):
    ### Query the local knowledge base through the LangChain stack.
    ## NOTE: the embeddings used here must match the model the index was built with
    ## (BAAI/bge-large-zh-v1.5 above); OpenAI embeddings would cause a FAISS dimension mismatch.
    embeddings = HuggingFaceEmbeddings(model_name='BAAI/bge-large-zh-v1.5')
    vector_store = FAISS.load_local(f'./{username}/faiss_index', embeddings, allow_dangerous_deserialization=True)
    docs = vector_store.similarity_search(prompt, k=5)
    context = "\n".join(doc.page_content for doc in docs)
    ## Chinese prompt template, roughly: "Known information: {context}\nAnswer the question based on this known information: {prompt}"
    total_prompt = f"已知信息:\n{context}\n 根据这些已知信息来回答问题:\n{prompt}"
    # print('total prompt in local KB version:', total_prompt)
    return total_prompt, docs
# langchain_RAG('what are main challenges of AGI?')
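
## End-to-end usage sketch (illustrative only; the model name, username, and path wrapper below are
## placeholders/assumptions, not part of the original app flow).
if __name__ == '__main__':
    from types import SimpleNamespace

    demo_user = 'demo_user'  ## placeholder username; the index is stored under ./{username}/faiss_index.
    ## Wrap a plain PDF path so it exposes the `.name` attribute the builder expects.
    demo_pdf = SimpleNamespace(name='/Users/yunshi/Downloads/txt_dir/Sparks_of_AGI.pdf')
    langchain_localKB_construct(filepath=demo_pdf, username=demo_user)

    ## Retrieve local-KB context and let the chat model answer with it.
    total_prompt, docs = langchain_RAG('what are main challenges of AGI?', username=demo_user)
    response = client.chat.completions.create(
        model='gpt-4o-mini',  ## placeholder model name; swap in whatever the app actually uses.
        messages=[{'role': 'user', 'content': total_prompt}],
    )
    print(response.choices[0].message.content)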