Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ from langchain.chains import GraphQAChain
 from langchain_community.document_loaders import TextLoader
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.vectorstores import Pinecone
-from
+from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceHub  # Updated imports
 from langchain.schema.runnable import RunnablePassthrough
 from langchain.schema.output_parser import StrOutputParser
 from langchain.prompts import PromptTemplate
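The replacement import pulls HuggingFaceEmbeddings from the langchain-huggingface package. As a hedged illustration of how that class is typically exercised (the model name below is an assumption for the example; the commit itself keeps the default HuggingFaceEmbeddings() further down):

from langchain_huggingface import HuggingFaceEmbeddings

# Assumed model name, for illustration only.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
vector = embeddings.embed_query("example query")  # returns a list of floats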
@@ -20,6 +20,7 @@ from oauth2client.service_account import ServiceAccountCredentials
 
 os.system("pip install sentence-transformers")
 os.system("pip install gspread oauth2client")
+os.system("pip install -U langchain-huggingface langchain-community")
 
 # Google Sheets Setup
 scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
@@ -42,7 +43,7 @@ def add_review(question, rag_response, graphrag_response, feedback):
     store_feedback_in_sheet(feedback, question, rag_response, graphrag_response)
     return load_data()
 
-# RAG
+# RAG Setup
 text_path = r"./text_chunks.txt"
 loader = TextLoader(text_path, encoding='utf-8')
 documents = loader.load()
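For context, a minimal sketch of the load-and-split step this block sets up; the separator, chunk size, and overlap are assumptions, since the commit does not show the splitter configuration:

from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter

loader = TextLoader("./text_chunks.txt", encoding="utf-8")
documents = loader.load()

# Assumed parameters, for illustration only.
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=100)
docs = text_splitter.split_documents(documents)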
@@ -51,8 +52,6 @@ docs = text_splitter.split_documents(documents)
 
 embeddings = HuggingFaceEmbeddings()
 
-from langchain.llms import HuggingFaceHub
-
 # Define the repo ID and connect to Mixtral model on Huggingface
 repo_id = "meta-llama/Meta-Llama-3-8B"
 llm = HuggingFaceHub(
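The chain-related imports kept in the first hunk (PromptTemplate, RunnablePassthrough, StrOutputParser) suggest an LCEL-style RAG chain. A hedged sketch of how those pieces usually compose; the prompt text, the retriever, and the chain below are assumptions, not code from this commit:

from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser

prompt = PromptTemplate.from_template(
    "Answer the question using only this context:\n{context}\n\nQuestion: {question}"
)

# Assumes a vector store (e.g. Pinecone.from_documents(docs, embeddings, index_name=...))
# is built elsewhere in app.py.
retriever = vectorstore.as_retriever()

rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
answer = rag_chain.invoke("Example question about the indexed text")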
@@ -132,8 +131,8 @@ def fetch_relationships(tx):
 def populate_networkx_graph():
     G = nx.Graph()
     with driver.session() as session:
-        nodes = session.
-        relationships = session.
+        nodes = session.execute_read(fetch_nodes)  # Updated to use execute_read
+        relationships = session.execute_read(fetch_relationships)  # Updated to use execute_read
         for node in nodes:
             G.add_node(node['id'], labels=node['labels'])
         for relationship in relationships:
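The new execute_read calls pass transaction functions that the Neo4j driver invokes with a managed read transaction. A hedged sketch of what fetch_nodes and fetch_relationships might look like; the Cypher queries and the returned keys are assumptions, inferred only from how the results are consumed above (node['id'], node['labels']):

def fetch_nodes(tx):
    # Consume the result inside the transaction and return plain dicts.
    result = tx.run("MATCH (n) RETURN elementId(n) AS id, labels(n) AS labels")
    return [record.data() for record in result]

def fetch_relationships(tx):
    result = tx.run(
        "MATCH (a)-[r]->(b) "
        "RETURN elementId(a) AS source, elementId(b) AS target, type(r) AS type"
    )
    return [record.data() for record in result]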
@@ -178,13 +177,18 @@ with gr.Blocks() as demo:
     rag_output = gr.Textbox(label="Model A", interactive=False)
     graphrag_output = gr.Textbox(label="Model B", interactive=False)
     feedback = gr.Radio(label="Which response is better?", choices=["A ดีกว่า", "B ดีกว่า", "เท่ากัน", "แย่ทั้งคู่"])
+    comments = gr.Textbox(label="Comments", lines=10, placeholder="Any additional comments?")
     submit_feedback = gr.Button(value="Submit Feedback")
 
+    with gr.Column():
+        data = gr.Dataframe(label="Most recently created 10 rows")
+        count = gr.Number(label="Total number of reviews")
+
     # Function to handle question submission and display responses
-    submit_btn.click(fn=compare_models, inputs=[question_input]
+    submit_btn.click(fn=compare_models, inputs=[question_input])
 
     # Function to handle feedback submission and update data display
-    submit_feedback.click(fn=add_review, inputs=[question_input, rag_output, graphrag_output, feedback]
+    submit_feedback.click(fn=add_review, inputs=[question_input, rag_output, graphrag_output, feedback])
 
     # Load initial data display
     demo.load(fn=load_data, inputs=None, outputs=[data, count])
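Gradio's Button.click also accepts an outputs argument; without it the handlers' return values are not rendered anywhere. A hedged sketch of how the two handlers could be wired, assuming compare_models returns the two model responses and add_review returns the refreshed dataframe and count (as demo.load implies for load_data):

submit_btn.click(
    fn=compare_models,
    inputs=[question_input],
    outputs=[rag_output, graphrag_output],  # assumption: compare_models returns two strings
)
submit_feedback.click(
    fn=add_review,
    inputs=[question_input, rag_output, graphrag_output, feedback],
    outputs=[data, count],  # add_review returns load_data(), mirroring demo.load above
)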