import gradio as gr
from transformers import pipeline
from transformers_interpret import SequenceClassificationExplainer
from bs4 import BeautifulSoup
# Setup model: a HF text-classification pipeline, plus a transformers-interpret
# explainer that reuses the pipeline's model and tokenizer for word attributions.
classifier = pipeline("text-classification", model="simonschoe/TransformationTransformer")
explainer = SequenceClassificationExplainer(classifier.model, classifier.tokenizer)
# Legend text appended below each word-importance explanation in the app output.
legend = """
Legend: Generic Transformation
"""
def classify(_input):
    """
    Compute the LABEL_1 (transformation) probability and an HTML
    word-importance explanation for the given input sentence.

    :param _input: input sentence (str) to classify
    :return: tuple of (probability of LABEL_1, explanation HTML string)
    """
    result = classifier(_input)[0]
    score = result['score']
    # The pipeline reports the probability of the *predicted* label;
    # normalize it to the probability of LABEL_1.
    if result['label'] == 'LABEL_0':
        score = 1 - score
    # Run the explainer; it stores the attributions internally, which
    # visualize() then renders (the return value itself is not needed).
    explainer(_input)
    html = explainer.visualize().__html__()
    soup = BeautifulSoup(html, 'html.parser')
    # Take the last <td>, which holds the word-importance visualization,
    # and retag it as a <div> so it renders standalone. Retagging via the
    # parse tree (instead of a string replace of 'td' -> 'div') avoids
    # corrupting any "td" substring inside token text or attributes.
    cell = soup.find_all('td')[-1]
    cell.name = 'div'
    explanation = str(cell)
    # Append the color-code legend below the explanation.
    result_html = explanation + legend
    return score, result_html
# Gradio UI: input column (textbox + classify button + outputs) on the left,
# project description and usage notes on the right, cached examples below.
app = gr.Blocks()
with app:
    gr.Markdown("# Transformation Intensity Classifier")
    gr.Markdown("## Detect Transformation Sentences in Quarterly Earnings Conference Calls")
    with gr.Row():
        with gr.Column():
            text_in = gr.Textbox(lines=1, placeholder="Insert text", label="Input Sentence")
            with gr.Row():
                compute_bt = gr.Button("Classify")
            # Outputs are display-only; the score is filled in by classify().
            score_out = gr.Number(label="Score", interactive=False)
            html_out = gr.HTML(label="Explanation")
        with gr.Column():
            gr.Markdown(
                """
                #### Project Description
                Placeholder
                """
            )
            gr.Markdown(
                """
                #### App usage
                The model is intended to be used for **sequence classification**: It encodes the input sentence (entered in the textbox on the left) in a dense vector space and runs it through a deep neural network classifier (*DistilRoBERTa*).
                It returns a confidence score that indicates the probability of the sentence containing a discussion on transformation activities. A value of 1 (0) signals a high confidence of the sentence being transformation-related (generic). A score in the range of [0.25; 0.75] implies that the model is rather undecided about the correct label.
                In addition, the app returns the tokenized version of the sentence, alongside word importances that are indicated by color codes. Those visuals illustrate the ability of the context-aware classifier to simultaneously pay attention to various parts in the input sentence to derive a final label.
                """
            )
    gr.Examples(
        examples=[
            ["If we look at the plans for 2018, it is to introduce 650 new products, which is an absolute all- time high."],
            ["We have been doing kind of an integrated campaign, so it's TV, online, we do the Google Ad Words - all those different elements together."],
            ["So that turned out to be beneficial for us, and I think, we'll just see how the market and interest rates move over the course of the year,"]
        ],
        inputs=[text_in],
        outputs=[score_out, html_out],
        fn=classify,
        # Precompute example outputs so clicking an example is instant.
        cache_examples=True
    )
    gr.Markdown(
        """
        TIClassifier by X and Y
        """
    )
    # Wire the button to the classifier function.
    compute_bt.click(classify, inputs=[text_in], outputs=[score_out, html_out])
app.launch()