Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ from evaluate.utils import launch_gradio_widget
 import gradio as gr
 import torch
 import classify
+import replace_explitives
 from whisper.model import Whisper
 from whisper.tokenizer import get_tokenizer
 from speechbrain.pretrained.interfaces import foreign_class
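The added `replace_explitives` module is local to this Space and is not shown in the diff. A minimal sketch of what its `sub_explitives` helper might look like, assuming it maps the radio choices defined further down ("N-Word", "B-Word", "All Explitives") to case-insensitive regex substitutions; the word lists and mask token below are placeholders, not the Space's actual implementation:

import re

# Placeholder word groups; the real module would carry the actual term lists.
WORD_GROUPS = {
    "N-Word": [r"<n-word pattern>"],
    "B-Word": [r"<b-word pattern>"],
}

def sub_explitives(text, selection, mask="[redacted]"):
    # "All Explitives" applies every group; a single choice applies only its own patterns.
    if selection == "All Explitives":
        patterns = [p for group in WORD_GROUPS.values() for p in group]
    else:
        patterns = WORD_GROUPS.get(selection, [])
    for pattern in patterns:
        text = re.sub(pattern, mask, text, flags=re.IGNORECASE)
    return text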
@@ -48,7 +49,7 @@ def classify_emotion(audio):
     return emo_dict[text_lab[0]], emostr
 
 # Create a Gradio interface with audio file and text inputs
-def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class):
+def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, explitive_selection):
     # Transcribe the audio file using Whisper ASR
     if audio_file != None:
         transcribed_text = pipe(audio_file)["text"]
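`pipe` is defined elsewhere in app.py and does not appear in this diff; given the `pipe(audio_file)["text"]` call, it is presumably a transformers automatic-speech-recognition pipeline. A sketch under that assumption (the checkpoint name is a guess, not necessarily what the Space loads):

from transformers import pipeline

# Assumed definition of `pipe`; the actual checkpoint used by the Space may differ.
pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base")

result = pipe("sample.wav")       # the ASR pipeline returns a dict with a "text" key
transcribed_text = result["text"]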
@@ -56,6 +57,10 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class):
     else:
         transcribed_text = text_input
     if classify_anxiety != "misophonia":
+        # explitive call
+        if replace_explitives != None and emo_class == None:
+            transcribed_text = replace_explitives.sub_explitives(transcribed_text, explitive_selection)
+
         #### Toxicity Classifier ####
 
         toxicity_module = evaluate.load("toxicity", "facebook/roberta-hate-speech-dynabench-r4-target")
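The call that consumes `toxicity_module` falls outside this hunk. A sketch of the typical usage, assuming the `evaluate` toxicity measurement's documented compute() interface (the 0.5 cutoff is illustrative, not the Space's actual logic):

# Score the transcribed text; compute() returns a per-prediction score list.
results = toxicity_module.compute(predictions=[transcribed_text])
toxicity_score = results["toxicity"][0]
is_toxic = toxicity_score > 0.5  # illustrative threshold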
@@ -68,6 +73,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class):
         # emo call
         if emo_class != None:
             classify_emotion(audio_file)
+
         #### Text classification #####
 
         device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
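`classify_emotion` (defined above this diff) unpacks `text_lab`, which matches the output shape of a SpeechBrain `foreign_class` classifier loaded via the import at the top of the file. A sketch of that common pattern, following the wav2vec2 IEMOCAP emotion-recognition example from the SpeechBrain docs; the actual source used by this Space may differ:

# Assumed classifier behind classify_emotion(); source/classname follow the SpeechBrain
# emotion-recognition example and may not match this Space exactly.
classifier = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier",
)
out_prob, score, index, text_lab = classifier.classify_file(audio_file)
# text_lab is a list of predicted labels, e.g. ["ang"]; text_lab[0] indexes emo_dict above.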
@@ -121,13 +127,14 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class):
 with gr.Blocks() as iface:
     with gr.Column():
         anxiety_class = gr.Radio(["racism", "LGBTQ+ hate", "sexually explicit", "misophonia"])
-
+        explit_preference = gr.Radio(choices=["N-Word", "B-Word", "All Explitives"], label="Words to omit from general anxiety classes", info="Certain words may be acceptable within certain contexts for given groups of people, and some people may be unbothered by expletives broadly speaking.")
+        emo_class = gr.Radio(choices=["negative emotionality"], label="label", info="Select if you would like expletives to be considered anxiety-inducing in the case of anger/negative emotionality.")
     with gr.Column():
         aud_input = gr.Audio(source="upload", type="filepath", label="Upload Audio File")
         text = gr.Textbox(label="Enter Text", placeholder="Enter text here...")
         submit_btn = gr.Button(label="Run")
     with gr.Column():
         out_text = gr.Textbox()
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class], outputs=out_text)
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference], outputs=out_text)
 
 iface.launch()
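Both new checks in `classify_toxicity` (`emo_class == None` in the expletive branch and the existing `emo_class != None` before the emotion call) rely on an unselected `gr.Radio` passing `None` to the callback. A minimal standalone illustration of that behavior (a sketch, not this Space's code):

import gradio as gr

def show(choice):
    # Returns "received: None" until the user picks an option.
    return f"received: {choice!r}"

with gr.Blocks() as demo:
    radio = gr.Radio(choices=["negative emotionality"], label="emo_class")
    out = gr.Textbox()
    radio.change(fn=show, inputs=radio, outputs=out)

demo.launch()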