Spaces:
Runtime error
Duplicate from hwchase17/langchain-demo
Co-authored-by: Harrison Chase <[email protected]>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +93 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: LangChain Demo
+colorFrom: purple
+colorTo: gray
+sdk: gradio
+sdk_version: 3.15.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: hwchase17/langchain-demo
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,93 @@
+import os
+from typing import Optional, Tuple
+
+import gradio as gr
+from langchain.chains import ConversationChain
+from langchain.llms import OpenAI
+
+
+def load_chain():
+    """Logic for loading the chain you want to use should go here."""
+    llm = OpenAI(temperature=0)
+    chain = ConversationChain(llm=llm)
+    return chain
+
+
+def set_openai_api_key(api_key: str):
+    """Set the api key and return chain.
+
+    If no api_key, then None is returned.
+    """
+    if api_key:
+        os.environ["OPENAI_API_KEY"] = api_key
+        chain = load_chain()
+        os.environ["OPENAI_API_KEY"] = ""
+        return chain
+
+
+def chat(
+    inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
+):
+    """Execute the chat functionality."""
+    history = history or []
+    # If chain is None, that is because no API key was provided.
+    if chain is None:
+        history.append((inp, "Please paste your OpenAI key to use"))
+        return history, history
+    # Run chain and append input.
+    output = chain.run(input=inp)
+    history.append((inp, output))
+    return history, history
+
+
+block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
+
+with block:
+    with gr.Row():
+        gr.Markdown("<h3><center>LangChain Demo</center></h3>")
+
+        openai_api_key_textbox = gr.Textbox(
+            placeholder="Paste your OpenAI API key (sk-...)",
+            show_label=False,
+            lines=1,
+            type="password",
+        )
+
+    chatbot = gr.Chatbot()
+
+    with gr.Row():
+        message = gr.Textbox(
+            label="What's your question?",
+            placeholder="What's the answer to life, the universe, and everything?",
+            lines=1,
+        )
+        submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
+
+    gr.Examples(
+        examples=[
+            "Hi! How's it going?",
+            "What should I do tonight?",
+            "Whats 2 + 2?",
+        ],
+        inputs=message,
+    )
+
+    gr.HTML("Demo application of a LangChain chain.")
+
+    gr.HTML(
+        "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a></center>"
+    )
+
+    state = gr.State()
+    agent_state = gr.State()
+
+    submit.click(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
+    message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state])
+
+    openai_api_key_textbox.change(
+        set_openai_api_key,
+        inputs=[openai_api_key_textbox],
+        outputs=[agent_state],
+    )
+
+block.launch(debug=True)
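For context on how the pieces above fit together, here is a minimal, hypothetical smoke test of the same chain outside the Gradio UI. It only mirrors the calls app.py already makes (OpenAI, ConversationChain, chain.run); the placeholder key and the script itself are illustrative and not part of this Space:

# Hypothetical smoke test mirroring app.py's chain usage; not part of the Space.
import os

from langchain.chains import ConversationChain
from langchain.llms import OpenAI

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; supply a real key

chain = ConversationChain(llm=OpenAI(temperature=0))  # same setup as load_chain()

history = []
for question in ["Hi! How's it going?", "Whats 2 + 2?"]:
    answer = chain.run(input=question)  # same call chat() makes per turn
    history.append((question, answer))  # same (input, output) tuples Gradio renders

print(history)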
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+openai
+gradio
+langchain
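The three unpinned requirements correspond one-to-one to the packages app.py imports. A quick, hypothetical post-install check (not part of the Space) could be:

# Hypothetical check that the packages listed in requirements.txt are installed.
from importlib.metadata import version

for pkg in ("openai", "gradio", "langchain"):
    print(pkg, version(pkg))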