Spaces:
Sleeping
Sleeping
pranavnair2
committed on
Commit
•
f204295
1
Parent(s):
370f045
Update app.py
Browse files — LLM changed to OpenAI
app.py
CHANGED
@@ -1,7 +1,8 @@
|
|
1 |
from dotenv import load_dotenv
|
2 |
import streamlit as st
|
3 |
from langchain.chains import RetrievalQA
|
4 |
-
from langchain.llms import HuggingFaceHub
|
|
|
5 |
from langchain.vectorstores import Qdrant
|
6 |
import qdrant_client
|
7 |
import os
|
@@ -29,8 +30,8 @@ def get_vector_store():
|
|
29 |
return vector_store
|
30 |
|
31 |
def get_conversation_chain(vectorstore):
|
32 |
-
|
33 |
-
llm = HuggingFaceHub( repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
|
34 |
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
|
35 |
conversation_chain = ConversationalRetrievalChain.from_llm(
|
36 |
llm =llm,
|
|
|
1 |
from dotenv import load_dotenv
|
2 |
import streamlit as st
|
3 |
from langchain.chains import RetrievalQA
|
4 |
+
#from langchain.llms import HuggingFaceHub
|
5 |
+
from langchain.llms import OpenAI
|
6 |
from langchain.vectorstores import Qdrant
|
7 |
import qdrant_client
|
8 |
import os
|
|
|
30 |
return vector_store
|
31 |
|
32 |
def get_conversation_chain(vectorstore):
|
33 |
+
llm = ChatOpenAI()
|
34 |
+
#llm = HuggingFaceHub( repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
|
35 |
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
|
36 |
conversation_chain = ConversationalRetrievalChain.from_llm(
|
37 |
llm =llm,
|