Leonardo Parente committed
Commit 3b7279c • 1 Parent(s): c774d4b

more requirements

Files changed (3)
  1. .gitignore +1 -0
  2. app.py +44 -4
  3. requirements.txt +8 -2
.gitignore ADDED
@@ -0,0 +1 @@
+.streamlit/
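
(The ignored .streamlit/ directory is presumably where the app keeps its secrets.toml, the standard Streamlit location for credentials such as the SUPABASE_URL and SUPABASE_KEY that st-supabase-connection reads, so it belongs out of version control.)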
app.py CHANGED
@@ -1,17 +1,57 @@
 import streamlit as st
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from langchain.memory import ConversationBufferMemory
 from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
+from langchain.chains import LLMChain
+from langchain.prompts import PromptTemplate
+from langchain.embeddings import VoyageEmbeddings
+from langchain.vectorstores import SupabaseVectorStore
 from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+from st_supabase_connection import SupabaseConnection
 
 msgs = StreamlitChatMessageHistory()
 memory = ConversationBufferMemory(memory_key="history", chat_memory=msgs)
 
-hf = HuggingFacePipeline.from_model_id(
-    model_id="gpt2",
-    task="text-generation",
-    pipeline_kwargs={"max_new_tokens": 10},
+supabase_client = st.connection(
+    name="orbgpt",
+    type=SupabaseConnection,
+    ttl=None,
 )
 
+embeddings = VoyageEmbeddings(model="voyage-01")
+vector_store = SupabaseVectorStore(
+    embedding=embeddings,
+    client=supabase_client,
+    table_name="documents",
+    query_name="match_documents",
+)
+
+
+model_path = "01-ai/Yi-6B-Chat-8bits"
+tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
+model = AutoModelForCausalLM.from_pretrained(
+    model_path, device_map="auto", torch_dtype="auto"
+).eval()
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    max_new_tokens=10,
+    use_fast=False,
+)
+hf = HuggingFacePipeline(pipeline=pipe)
+
+template = """Question: {question}
+
+Answer: Let's think step by step."""
+prompt = PromptTemplate.from_template(template)
+
+chain = prompt | hf
+
+question = "What is electroencephalography?"
+
+st.text(chain.invoke({"question": question}))
+
 st.title("🪩🤖")
 
 if len(msgs.messages) == 0:
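
Note that this commit builds the vector store but never queries it (LLMChain is likewise imported unused); the chain still answers from the bare prompt. A minimal sketch of how retrieval could be wired in, continuing from the file above, with the k value, variable names, and context join invented for illustration (similarity_search is the standard LangChain SupabaseVectorStore method):

# Sketch only, not part of the commit: pull context from the Supabase
# vector store and feed it to the same Yi pipeline through the prompt.
docs = vector_store.similarity_search(question, k=2)
context = "\n\n".join(doc.page_content for doc in docs)

rag_template = """Context: {context}

Question: {question}

Answer: Let's think step by step."""
rag_prompt = PromptTemplate.from_template(rag_template)

# Same LCEL composition as above: prompt piped into the HF pipeline.
rag_chain = rag_prompt | hf

st.text(rag_chain.invoke({"context": context, "question": question}))

Either way, with max_new_tokens=10 the pipeline stops after ten generated tokens, so any "step by step" answer will be clipped.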
requirements.txt CHANGED
@@ -1,4 +1,10 @@
 streamlit
-torch
 transformers
-langchain
+torch
+sentencepiece
+accelerate
+auto-gptq
+optimum
+langchain
+supabase
+st-supabase-connection
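
For reference, the new pins track the code changes: sentencepiece backs the slow tokenizer requested with use_fast=False, accelerate enables device_map="auto", and auto-gptq plus optimum are presumably needed because 01-ai/Yi-6B-Chat-8bits is a GPTQ-quantized checkpoint; supabase and st-supabase-connection support the new st.connection and vector store.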