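# Hugging Face Space app: a Gradio chatbot for elementary school students that
# answers questions in Korean, grounded in docs.pdf through a LangChain
# RetrievalQAWithSourcesChain backed by a Chroma vector store.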
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from langchain.vectorstores import Chroma
from PyPDF2 import PdfWriter
import gradio as gr
import os
from dotenv import load_dotenv
import openai

load_dotenv()
# The OpenAI API key is read from the Space secret "my_secret".
os.environ["OPENAI_API_KEY"] = os.environ['my_secret']
llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613", temperature=0)

loader = PyPDFLoader("/home/user/app/docs.pdf")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0)
texts = text_splitter.split_documents(documents)

# Vector embeddings
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(texts, embeddings)
retriever = vector_store.as_retriever(search_kwargs={"k": 3})
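# The PDF is split into 800-character chunks, embedded with OpenAI embeddings,
# and indexed in an in-memory Chroma store; the retriever returns the 3 chunks
# most similar to each question.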
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
system_template="""Your name is conanssam as a homeroom teacher for elementary school. You act like a nice teacher. | |
Never reveal that it is an AI. Generate answers for children. | |
If you don't know the answer, just say that "I don't know", don't try to make up an answer. | |
---------------- | |
{summaries} | |
You MUST answer in Korean and in Markdown format:""" | |
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
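# When the chain formats this prompt, {summaries} is filled with the retrieved
# document chunks and {question} with the user's question.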
from langchain.chains import RetrievalQAWithSourcesChain
chain_type_kwargs = {"prompt": prompt}
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs=chain_type_kwargs,
    reduce_k_below_max_tokens=True,
    verbose=True,
)
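# chain_type="stuff" puts all retrieved chunks into a single prompt;
# reduce_k_below_max_tokens drops chunks that would exceed the model's token limit,
# and return_source_documents exposes the chunks used to produce the answer.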
# Quick sanity check: run one query and print the retrieved source chunks.
query = "행복한 인생이란?"  # "What is a happy life?"
result = chain(query)

for doc in result['source_documents']:
    print('Content: ' + doc.page_content[0:100].replace('\n', ' '))
    print('File: ' + doc.metadata['source'])
    print('Page: ' + str(doc.metadata['page']))
def respond(message, chat_history):  # Defines the function that handles the chatbot's response.
    result = chain(message)
    bot_message = result['answer']
    # Optionally append the source file and page of each chunk to the answer:
    # for i, doc in enumerate(result['source_documents']):
    #     bot_message += '[' + str(i+1) + '] ' + doc.metadata['source'] + '(' + str(doc.metadata['page']) + ') '
    chat_history.append((message, bot_message))  # Add the user's message and the bot's response to the chat history.
    return "", chat_history  # Clear the textbox and return the updated chat history.
with gr.Blocks(theme='gstaff/sketch') as demo:  # Build the interface with gr.Blocks().
    gr.Markdown("# 안녕하세요. 초등학생을 위한 챗GPT입니다. \n 답변 생성에 조금 시간이 소요될 수 있습니다.")  # "Hello, this is ChatGPT for elementary school students. Generating an answer may take a moment."
    chatbot = gr.Chatbot(label="채팅창")  # Chatbot component labeled "chat window".
    msg = gr.Textbox(label="입력")  # Textbox labeled "input".
    clear = gr.Button("초기화")  # Button labeled "reset".

    msg.submit(respond, [msg, chatbot], [msg, chatbot])  # Submitting the textbox calls respond().
    clear.click(lambda: None, None, chatbot, queue=False)  # The reset button clears the chat history.

demo.launch(debug=True)  # Launch the app: type a question in the textbox and submit; use the reset button to clear the history.