thoristhor committed on
Commit
268a2a1
1 Parent(s): ee83059

Update app.py

Browse files

this is a test for embeddings tab

Files changed (1) hide show
  1. app.py +73 -7
app.py CHANGED
@@ -6,6 +6,13 @@ from langchain.chains import ConversationChain
6
  from langchain.llms import OpenAI
7
  from threading import Lock
8
 
 
 
 
 
 
 
 
9
 
10
  def load_chain():
11
  """Logic for loading the chain you want to use should go here."""
@@ -14,22 +21,36 @@ def load_chain():
14
  return chain
15
 
16
 
17
- def set_openai_api_key(api_key: str):
18
  """Set the api key and return chain.
19
  If no api_key, then None is returned.
20
  """
21
- if api_key:
22
  os.environ["OPENAI_API_KEY"] = api_key
23
- chain = load_chain()
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  os.environ["OPENAI_API_KEY"] = ""
25
- return chain
 
26
 
27
  class ChatWrapper:
28
 
29
  def __init__(self):
30
  self.lock = Lock()
31
  def __call__(
32
- self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
33
  ):
34
  """Execute the chat functionality."""
35
  self.lock.acquire()
@@ -42,6 +63,20 @@ class ChatWrapper:
42
  # Set OpenAI key
43
  import openai
44
  openai.api_key = api_key
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  # Run chain and append input.
46
  output = chain.run(input=inp)
47
  history.append((inp, output))
@@ -51,11 +86,29 @@ class ChatWrapper:
51
  self.lock.release()
52
  return history, history
53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  chat = ChatWrapper()
55
 
56
  block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
57
 
58
- with block:
 
59
  with gr.Row():
60
  gr.Markdown("<h3><center>LangChain Demo</center></h3>")
61
 
@@ -85,7 +138,20 @@ with block:
85
  inputs=message,
86
  )
87
 
88
-
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
  state = gr.State()
91
  agent_state = gr.State()
 
6
  from langchain.llms import OpenAI
7
  from threading import Lock
8
 
9
+ from langchain.embeddings.openai import OpenAIEmbeddings
10
+ from langchain.text_splitter import CharacterTextSplitter
11
+ from langchain.vectorstores.faiss import FAISS
12
+ from langchain.docstore.document import Document
13
+ from langchain.chains.question_answering import load_qa_chain
14
+
15
+
16
 
17
  def load_chain():
18
  """Logic for loading the chain you want to use should go here."""
 
21
  return chain
22
 
23
 
24
+ def set_openai_api_key(api_key):
25
  """Set the api key and return chain.
26
  If no api_key, then None is returned.
27
  """
28
+ if api_key and api_key.startswith("sk-") and len(api_key) > 50:
29
  os.environ["OPENAI_API_KEY"] = api_key
30
+ print("\n\n ++++++++++++++ Setting OpenAI API key ++++++++++++++ \n\n")
31
+ print(str(datetime.datetime.now()) + ": Before OpenAI, OPENAI_API_KEY length: " + str(
32
+ len(os.environ["OPENAI_API_KEY"])))
33
+ llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
34
+ print(str(datetime.datetime.now()) + ": After OpenAI, OPENAI_API_KEY length: " + str(
35
+ len(os.environ["OPENAI_API_KEY"])))
36
+ chain, express_chain, memory = load_chain(TOOLS_DEFAULT_LIST, llm)
37
+
38
+ # Pertains to question answering functionality
39
+ embeddings = OpenAIEmbeddings()
40
+ qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
41
+
42
+ print(str(datetime.datetime.now()) + ": After load_chain, OPENAI_API_KEY length: " + str(
43
+ len(os.environ["OPENAI_API_KEY"])))
44
  os.environ["OPENAI_API_KEY"] = ""
45
+ return chain, express_chain, llm, embeddings, qa_chain, memory
46
+ return None, None, None, None, None, None
47
 
48
  class ChatWrapper:
49
 
50
  def __init__(self):
51
  self.lock = Lock()
52
  def __call__(
53
+ self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain], use_embeddings, monologue:bool
54
  ):
55
  """Execute the chat functionality."""
56
  self.lock.acquire()
 
63
  # Set OpenAI key
64
  import openai
65
  openai.api_key = api_key
66
+ if not monologue:
67
+ if use_embeddings:
68
+ if inp and inp.strip() != "":
69
+ if docsearch:
70
+ docs = docsearch.similarity_search(inp)
71
+ output = str(qa_chain.run(input_documents=docs, question=inp))
72
+ else:
73
+ output, hidden_text = "Please supply some text in the Embeddings tab.", None
74
+ else:
75
+ output, hidden_text = "What's on your mind?", None
76
+ else:
77
+ output, hidden_text = run_chain(chain, inp, capture_hidden_text=trace_chain)
78
+ else:
79
+ output, hidden_text = inp, None
80
  # Run chain and append input.
81
  output = chain.run(input=inp)
82
  history.append((inp, output))
 
86
  self.lock.release()
87
  return history, history
88
 
89
+ # Pertains to question answering functionality
90
+ def update_embeddings(embeddings_text, embeddings, qa_chain):
91
+ if embeddings_text:
92
+ text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
93
+ texts = text_splitter.split_text(embeddings_text)
94
+
95
+ docsearch = FAISS.from_texts(texts, embeddings)
96
+ print("Embeddings updated")
97
+ return docsearch
98
+
99
+
100
+ # Pertains to question answering functionality
101
+ def update_use_embeddings(widget, state):
102
+ if widget:
103
+ state = widget
104
+ return state
105
+
106
  chat = ChatWrapper()
107
 
108
  block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
109
 
110
+
111
+ with gr.Tab("Chat"):
112
  with gr.Row():
113
  gr.Markdown("<h3><center>LangChain Demo</center></h3>")
114
 
 
138
  inputs=message,
139
  )
140
 
141
+
142
+ with gr.Tab("Embeddings"):
143
+ embeddings_text_box = gr.Textbox(label="Enter text for embeddings and hit Create:",
144
+ lines=20)
145
+
146
+ with gr.Row():
147
+ use_embeddings_cb = gr.Checkbox(label="Use embeddings", value=False)
148
+ use_embeddings_cb.change(update_use_embeddings, inputs=[use_embeddings_cb, use_embeddings_state],
149
+ outputs=[use_embeddings_state])
150
+
151
+ embeddings_text_submit = gr.Button(value="Create", variant="secondary").style(full_width=False)
152
+ embeddings_text_submit.click(update_embeddings,
153
+ inputs=[embeddings_text_box, embeddings_state, qa_chain_state],
154
+ outputs=[docsearch_state])
155
 
156
  state = gr.State()
157
  agent_state = gr.State()