Update app.py
app.py
CHANGED
@@ -5,6 +5,7 @@ import markdown
 import matplotlib.pyplot as plt
 import io
 import base64
+from fpdf import FPDF
 
 # Preload models
 models = {
@@ -35,9 +36,53 @@ def generate_score_chart(score):
     buf.seek(0)
     return base64.b64encode(buf.getvalue()).decode()
 
-def
-
-
+def highlight_relevant_text(context, start, end):
+    highlighted_text = (
+        context[:start] +
+        '<mark style="background-color: yellow;">' +
+        context[start:end] +
+        '</mark>' +
+        context[end:]
+    )
+    return highlighted_text
+
+def generate_pdf_report(question, answer, score, score_explanation, score_chart, highlighted_context):
+    pdf = FPDF()
+    pdf.add_page()
+
+    pdf.set_font("Arial", size=12)
+    pdf.multi_cell(0, 10, f"Question: {question}")
+    pdf.ln()
+
+    pdf.set_font("Arial", size=12)
+    pdf.multi_cell(0, 10, f"Answer: {answer}")
+    pdf.ln()
+
+    pdf.set_font("Arial", size=12)
+    pdf.multi_cell(0, 10, f"Confidence Score: {score}")
+    pdf.ln()
+
+    pdf.set_font("Arial", size=12)
+    pdf.multi_cell(0, 10, f"Score Explanation: {score_explanation}")
+    pdf.ln()
+
+    pdf.set_font("Arial", size=12)
+    pdf.multi_cell(0, 10, "Highlighted Context:")
+    pdf.ln()
+    pdf.set_font("Arial", size=10)
+    pdf.multi_cell(0, 10, highlighted_context)
+    pdf.ln()
+
+    # Add score chart image to PDF
+    score_chart_image = io.BytesIO(base64.b64decode(score_chart))
+    pdf.image(score_chart_image, x=10, y=pdf.get_y(), w=100)
+
+    # Save PDF to memory
+    pdf_output = io.BytesIO()
+    pdf.output(pdf_output)
+    pdf_output.seek(0)
+
+    return pdf_output
 
 def answer_question(model_name, file, question, status):
     status = "Loading model..."
@@ -61,6 +106,11 @@ def answer_question(model_name, file, question, status):
     result = model(question=question, context=context)
     answer = result['answer']
     score = result['score']
+    start = result['start']
+    end = result['end']
+
+    # Highlight relevant text
+    highlighted_context = highlight_relevant_text(context, start, end)
 
     # Generate the score chart
     score_chart = generate_score_chart(score)
@@ -68,11 +118,11 @@ def answer_question(model_name, file, question, status):
     # Explain score
     score_explanation = f"The confidence score ranges from 0 to 1, where a higher score indicates higher confidence in the answer's correctness. In this case, the score is {score:.2f}. A score closer to 1 implies the model is very confident about the answer."
 
-    # Generate the report
-
+    # Generate the PDF report
+    pdf_report = generate_pdf_report(question, answer, f"{score:.2f}", score_explanation, score_chart, highlighted_context)
 
     status = "Model loaded"
-    return
+    return highlighted_context, f"{score:.2f}", score_explanation, score_chart, pdf_report, status
 
 # Define the Gradio interface
 with gr.Blocks() as interface:
@@ -96,11 +146,11 @@ with gr.Blocks() as interface:
     question_input = gr.Textbox(lines=2, placeholder="Enter your question here...", label="Question")
 
     with gr.Row():
-        answer_output = gr.
+        answer_output = gr.HTML(label="Highlighted Answer")
         score_output = gr.Textbox(label="Confidence Score")
         explanation_output = gr.Textbox(label="Score Explanation")
         chart_output = gr.Image(label="Score Chart")
-
+        pdf_output = gr.File(label="Download PDF Report")
 
     with gr.Row():
         submit_button = gr.Button("Submit")
@@ -113,7 +163,7 @@ with gr.Blocks() as interface:
     submit_button.click(
         on_submit,
         inputs=[model_dropdown, file_input, question_input],
-        outputs=[answer_output, score_output, explanation_output, chart_output,
+        outputs=[answer_output, score_output, explanation_output, chart_output, pdf_output, status_output]
     )
 
 if __name__ == "__main__":
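The highlighting added here leans on the character offsets that a transformers question-answering pipeline returns alongside the answer text: result['start'] and result['end'] index into the context string, so highlight_relevant_text can wrap exactly the answer span in a <mark> tag for the new gr.HTML output. A small self-contained sketch of that step, using an illustrative checkpoint rather than the Space's own models dict:

    from transformers import pipeline

    def highlight_relevant_text(context, start, end):
        # Wrap the answer span in <mark> so an HTML component renders it highlighted
        return (
            context[:start]
            + '<mark style="background-color: yellow;">'
            + context[start:end]
            + '</mark>'
            + context[end:]
        )

    # Illustrative checkpoint; any extractive QA model behaves the same way
    qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

    context = "Gradio apps run as simple Python scripts and can be hosted on Hugging Face Spaces."
    result = qa(question="Where can Gradio apps be hosted?", context=context)

    # result has 'answer', 'score', 'start', 'end'; start/end are character offsets into context
    print(highlight_relevant_text(context, result["start"], result["end"]))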
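How the in-memory PDF step behaves depends on which fpdf package the Space installs. generate_pdf_report passes io.BytesIO objects to pdf.image() and pdf.output(); with the maintained fpdf2 fork (still imported as fpdf), image() accepts a BytesIO, and output() called without a filename returns the whole document as a bytearray, while the legacy PyFPDF API expects file paths for images and a dest='S' flag to get the document back as a string. A hedged sketch of an in-memory variant that also hands gr.File a real file path to serve; the helper name and the temp-file handling are illustrative, not code from the Space:

    import base64
    import io
    import tempfile

    from fpdf import FPDF  # assumes the fpdf2 package is installed

    def build_pdf_report(question, answer, score_chart_b64):
        # Trimmed-down, illustrative version of generate_pdf_report
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("Arial", size=12)
        pdf.multi_cell(0, 10, f"Question: {question}")
        pdf.multi_cell(0, 10, f"Answer: {answer}")

        # fpdf2 can place an image straight from an in-memory buffer
        chart = io.BytesIO(base64.b64decode(score_chart_b64))
        pdf.image(chart, x=10, y=pdf.get_y(), w=100)

        # fpdf2: output() without a filename returns the PDF bytes
        pdf_bytes = bytes(pdf.output())

        # Write to a temporary file so a gr.File output can serve it
        tmp = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
        tmp.write(pdf_bytes)
        tmp.close()
        return tmp.name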
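The on_submit wrapper wired into submit_button.click() sits outside this diff, but with the new six-element outputs list it has to return values in the same order as [answer_output, score_output, explanation_output, chart_output, pdf_output, status_output], which is exactly what answer_question now returns. A hypothetical wrapper consistent with that signature; the initial status argument is an assumption:

    def on_submit(model_name, file, question):
        # answer_question now returns:
        # (highlighted_context, score, score_explanation, score_chart, pdf_report, status)
        return answer_question(model_name, file, question, "Loading model...")

Because the first returned value is raw HTML from highlight_relevant_text, it is routed to the new gr.HTML component so the <mark> highlighting actually renders.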