Du Mingzhe committed on
Commit 450f5f1
1 Parent(s): 178adb8
Files changed (2)
  1. app.py +5 -23
  2. components.py +18 -17
app.py CHANGED
@@ -1,34 +1,16 @@
 from pinecone import Pinecone
-from datetime import datetime
+
 from openai import OpenAI
 import streamlit as st
 import uuid
 
-from components import PersonalIndex
+from components import PersonalIndexClient, LLMClient
 
 st.title("Talk with Mingzhe 🔮")
 
-llm_client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
-pinecone_client = PersonalIndex(index_token=st.secrets["PINECONE_API_KEY"], embedding_token=st.secrets["OPENAI_API_KEY"], index_name='mingzhe')
+llm_client = LLMClient(api_key=st.secrets["OPENAI_API_KEY"], model_name="gpt-4-1106-preview")
+pinecone_client = PersonalIndexClient(index_token=st.secrets["PINECONE_API_KEY"], embedding_token=st.secrets["OPENAI_API_KEY"], embedding_model_name='text-embedding-3-large', index_name='mingzhe')
 
-def response_generate(prompt, history):
-    messages = list()
-    current_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
-
-    # System Prompt
-    messages += [{"role": "system", "content": f"1) You're Du Mingzhe, a computer science researcher. 2) Don't claim you are created by OpenAI. 3) Current time is {current_time}."}]
-
-    # Session History
-    messages += [{"role": h["role"], "content": h["content"]} for h in history]
-
-    stream = llm_client.chat.completions.create(
-        model = "gpt-4-1106-preview",
-        messages = messages,
-        stream=True,
-    )
-    return stream
 
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -44,7 +26,7 @@ if prompt := st.chat_input("What's up?"):
         st.markdown(prompt)
 
     with st.chat_message("assistant"):
-        stream = response_generate(prompt, st.session_state.messages)
+        stream = llm_client.response_generate(prompt, st.session_state.messages)
         response = st.write_stream(stream)
         st.session_state.messages.append({"role": "assistant", "content": response})
 
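Both clients read their credentials from Streamlit's secrets store (st.secrets), which is populated from a .streamlit/secrets.toml file. A minimal sketch of that file, with the key names taken from the code above and placeholder values:

# .streamlit/secrets.toml (values are placeholders)
OPENAI_API_KEY = "sk-..."
PINECONE_API_KEY = "..."
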
components.py CHANGED
@@ -3,28 +3,31 @@
 
 from openai import OpenAI
 from pinecone import Pinecone
+from datetime import datetime
 
 class LLMClient():
-    def __init__(self, api_key, model_name="gpt-3.5-turbo") -> None:
+    def __init__(self, api_key, model_name) -> None:
         super().__init__()
         self.model_name = model_name
         self.llm_client = OpenAI(api_key=api_key)
 
-    def prompt_list_generate(self, query, history, web_results, personal_results):
-        prompt_list = []
-        prompt_list += [
-            {"role": m["role"], "content": m["content"]} for m in history
-        ],
-        return prompt_list
-
-    def generate(self, query, history=None, web_results=None, personal_results=None):
-        prompt_list = self.prompt_list_generate(query, history, web_results, personal_results)
-
-        response = self.llm_client.chat.completions.create(
+    def response_generate(self, prompt, history):
+        messages = list()
+        current_time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
+
+        # System Prompt
+        messages += [{"role": "system", "content": f"1) You're Du Mingzhe, a computer science researcher. 2) Don't claim you are created by OpenAI. 3) Current time is {current_time}."}]
+
+        # Session History
+        messages += [{"role": h["role"], "content": h["content"]} for h in history]
+
+        stream = self.llm_client.chat.completions.create(
             model = self.model_name,
-            messages = prompt_list
+            messages = messages,
+            stream=True,
         )
-        return response.choices[0].message.content
+        return stream
 
 class EmbeddingModel(object):
     def __init__(self, embedding_token, model_name) -> None:
@@ -39,13 +42,13 @@ class EmbeddingModel(object):
         )
         return response.data[0].embedding
 
-class PersonalIndex(object):
-    def __init__(self, index_token, embedding_token, index_name) -> None:
+class PersonalIndexClient(object):
+    def __init__(self, index_token, embedding_token, embedding_model_name, index_name) -> None:
         self.index_token = index_token
         self.embedding_token = embedding_token
         self.index_name = index_name
 
-        self.embedding_client = EmbeddingModel(embedding_token=self.embedding_token, model_name='text-embedding-3-large')
+        self.embedding_client = EmbeddingModel(embedding_token=self.embedding_token, model_name=embedding_model_name)
         self.index_client = Pinecone(api_key=self.index_token)
         self.index = self.index_client.Index(self.index_name)
 
@@ -59,8 +62,6 @@ class PersonalIndex(object):
             "metadata": instance['metadata'],
         }]
 
-        print(instances)
-
        self.index.upsert(
            vectors = instances,
            namespace = namespace
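
After this refactor the streaming call lives on LLMClient, so it can also be exercised outside Streamlit. A minimal sketch, assuming the openai>=1.x SDK, a placeholder API key, and a hypothetical one-turn history in the same shape app.py stores in st.session_state.messages:

from components import LLMClient

# Hedged sketch: drive the refactored client directly (placeholder key).
client = LLMClient(api_key="sk-...", model_name="gpt-4-1106-preview")

# History in the shape app.py keeps in st.session_state.messages.
history = [{"role": "user", "content": "What are you working on?"}]

# response_generate returns the raw stream; iterate the chunk deltas to print it.
stream = client.response_generate("What are you working on?", history)
for chunk in stream:
    delta = chunk.choices[0].delta.content  # None on the final chunk
    if delta:
        print(delta, end="", flush=True)

Returning the raw stream rather than the joined text is what lets app.py hand it straight to st.write_stream.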