JUNGU committed • Commit d525c48
Parent(s): Duplicate from JUNGU/talktosayno
Files changed:
- .gitattributes +35 -0
- README.md +14 -0
- app.py +98 -0
- docs.pdf +3 -0
- requirements.txt +6 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+docs.pdf filter=lfs diff=lfs merge=lfs -text
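The final rule above is why docs.pdf appears later in this commit as a three-line pointer stub instead of raw PDF bytes. As a hypothetical illustration (not part of the commit), a quick Python check, assuming a clone where the LFS object may not have been pulled yet:

# Sketch: detect whether docs.pdf is still a Git LFS pointer stub.
# Pointer files begin with the LFS spec line; the smudged file is the real PDF.
LFS_MAGIC = b"version https://git-lfs.github.com/spec/v1"

with open("docs.pdf", "rb") as f:
    head = f.read(len(LFS_MAGIC))

if head == LFS_MAGIC:
    print("docs.pdf is an LFS pointer; run `git lfs pull` to fetch the object")
else:
    print("docs.pdf is the actual file")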
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: Talktosayno
+emoji: π
+colorFrom: green
+colorTo: pink
+sdk: gradio
+sdk_version: 3.34.0
+app_file: app.py
+pinned: false
+license: openrail
+duplicated_from: JUNGU/talktosayno
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
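The YAML front matter above is what the Hub reads to configure the Space (SDK, entry point, metadata). As a hypothetical sketch, not part of the Space, a tool could extract it like this (assumes PyYAML, which is not in requirements.txt):

# Sketch: parse the README front matter between the first two "---" markers.
import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

_, front, _ = text.split("---", 2)  # file starts with "---", so part 0 is empty
config = yaml.safe_load(front)
print(config["sdk"], config["sdk_version"])  # gradio 3.34.0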
app.py
ADDED
@@ -0,0 +1,98 @@
+from langchain.chat_models import ChatOpenAI
+from langchain.document_loaders import PyPDFLoader
+from langchain.embeddings.openai import OpenAIEmbeddings
+from langchain.embeddings.cohere import CohereEmbeddings  # imported but unused
+from langchain.text_splitter import CharacterTextSplitter
+from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch  # imported but unused
+from langchain.vectorstores import Chroma
+from PyPDF2 import PdfWriter  # imported but unused
+import gradio as gr
+import os
+from dotenv import load_dotenv
+import openai
+
+load_dotenv()
+
+os.environ["OPENAI_API_KEY"] = os.environ['my_secret']  # Space secret holding the OpenAI key
+llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613", temperature=0)
+loader = PyPDFLoader("/home/user/app/docs.pdf")
+documents = loader.load()
+
+text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=0)
+texts = text_splitter.split_documents(documents)
+
+# vector embedding
+embeddings = OpenAIEmbeddings()
+vector_store = Chroma.from_documents(texts, embeddings)
+retriever = vector_store.as_retriever(search_kwargs={"k": 3})
+
+
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+
+system_template = """Your name is Sayno. You act like a successful rich man.
+Never reveal that you are an AI. Use the following pieces of context to answer the user's question concisely.
+Given the following summaries of a long document and a question, create a final answer with references.
+If you don't know the answer, just say "I don't know"; don't try to make up an answer.
+----------------
+{summaries}
+
+You MUST answer in Korean and in Markdown format:"""
+
+messages = [
+    SystemMessagePromptTemplate.from_template(system_template),
+    HumanMessagePromptTemplate.from_template("{question}"),
+]
+
+prompt = ChatPromptTemplate.from_messages(messages)
+
+from langchain.chat_models import ChatOpenAI  # duplicate import kept from the original
+from langchain.chains import RetrievalQAWithSourcesChain
+
+chain_type_kwargs = {"prompt": prompt}
+
+chain = RetrievalQAWithSourcesChain.from_chain_type(
+    llm=llm,
+    chain_type="stuff",
+    retriever=retriever,
+    return_source_documents=True,
+    chain_type_kwargs=chain_type_kwargs,
+    reduce_k_below_max_tokens=True,
+    verbose=True,
+)
+
+query = "행복한 인생이란?"  # "What is a happy life?"
+result = chain(query)
+
+
+for doc in result['source_documents']:
+    print('내용 : ' + doc.page_content[0:100].replace('\n', ' '))  # content
+    print('파일 : ' + doc.metadata['source'])  # file
+    print('페이지 : ' + str(doc.metadata['page']))  # page
+
+
+def respond(message, chat_history):  # handles one chatbot turn
+
+    result = chain(message)
+
+    bot_message = result['answer']
+
+    for i, doc in enumerate(result['source_documents']):
+        bot_message += '[' + str(i + 1) + '] ' + doc.metadata['source'] + '(' + str(doc.metadata['page']) + ') '
+
+    chat_history.append((message, bot_message))  # add the user message and bot answer to the history
+
+    return "", chat_history  # clear the textbox and return the updated history
+
+with gr.Blocks(theme='gstaff/sketch') as demo:  # build the interface
+    gr.Markdown("# 안녕하세요. 세이노와 대화해보세요. \n 답변 생성에 조금 시간이 소요될 수 있습니다.")  # "Hello. Talk to Sayno. Answers may take a moment to generate."
+    chatbot = gr.Chatbot(label="채팅창")  # "chat window"
+    msg = gr.Textbox(label="입력")  # "input"
+    clear = gr.Button("초기화")  # "reset"
+
+    msg.submit(respond, [msg, chatbot], [msg, chatbot])  # submitting the textbox calls respond
+    clear.click(lambda: None, None, chatbot, queue=False)  # the reset button clears the chat history
+demo.launch(debug=True)  # start the app
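app.py therefore builds its whole index at import time and serves queries through `respond`. A minimal sketch of exercising `respond` outside the Gradio UI, assuming the module-level setup above succeeded (secret present, docs.pdf readable):

# Sketch: drive respond() directly, as msg.submit would.
history = []
_, history = respond("행복한 인생이란?", history)  # "What is a happy life?"
user_msg, bot_message = history[-1]
print(bot_message)  # Korean Markdown answer followed by "[n] source(page)" references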
docs.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dab840d01bd8582e930da5ccb74c032279e832ed02f7f938953e7f77730d1ad2
+size 4232031
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+openai
+langchain
+pypdf
+chromadb
+tiktoken
+PyPDF2
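None of the six dependencies above is version-pinned, so installs resolve to whatever PyPI currently serves. A hypothetical smoke test, assuming `pip install -r requirements.txt` has already run:

# Sketch: verify each requirement is importable (the pypdf package imports as "pypdf").
for mod in ("openai", "langchain", "pypdf", "chromadb", "tiktoken", "PyPDF2"):
    __import__(mod)
print("all requirements importable")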