mskov committed
Commit eabbe21
Parent: a5a144e

Update app.py

Files changed (1): app.py (+3 -5)
app.py CHANGED
@@ -66,7 +66,7 @@ def slider_logic(slider):
     return threshold
 
 # Create a Gradio interface with audio file and text inputs
-def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, explitive_selection, slider, intervention):
+def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, explitive_selection, slider):
     # Transcribe the audio file using Whisper ASR
     if audio_file != None:
         transcribed_text = pipe(audio_file)["text"]
@@ -119,8 +119,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
             affirm = positive_affirmations()
         else:
             affirm = ""
-
-        print("output column: ", holder)
+
         return toxicity_score, classification_output, transcribed_text, affirm
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
@@ -175,7 +174,6 @@ with gr.Blocks() as iface:
         anxiety_class = gr.Radio(["racism", "LGBTQ+ hate", "sexually explicit", "misophonia"])
         explit_preference = gr.Radio(choices=["N-Word", "B-Word", "All Explitives"], label="Words to omit from general anxiety classes", info="certain words may be acceptible within certain contects for given groups of people, and some people may be unbothered by explitives broadly speaking.")
         emo_class = gr.Radio(choices=["negaitve emotionality"], label="label", info="Select if you would like explitives to be considered anxiety-indiucing in the case of anger/ negative emotionality.")
-        intervention_type = gr.Dropdown(choices=["Therapy App", "Audio File", "Text Message"])
         sense_slider = gr.Slider(minimum=1, maximum=5, step=1.0, label="How readily do you want the tool to intervene? 1 = in extreme cases and 5 = at every opportunity")
         with gr.Column():
             aud_input = gr.Audio(source="upload", type="filepath", label="Upload Audio File")
@@ -186,6 +184,6 @@ with gr.Blocks() as iface:
         out_class = gr.Textbox()
         out_text = gr.Textbox()
         out_affirm = gr.Textbox()
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, out_affirm])
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider], outputs=[out_val, out_class, out_text, out_affirm])
 
 iface.launch()
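
In short: this commit drops the unused intervention parameter from classify_toxicity, the matching intervention_type dropdown, and a leftover debug print. Because Gradio binds the inputs list to the handler's parameters positionally, the function signature and the submit_btn.click(...) wiring have to change together. Below is a minimal sketch of the wiring after this commit, assuming Gradio 3.x; the handler body is a placeholder, and text, submit_btn, and out_val are defined here only for completeness since they do not appear in the diff hunks.

import gradio as gr

# Placeholder handler with the post-commit six-parameter signature;
# the real app transcribes audio with Whisper and scores toxicity.
def classify_toxicity(audio_file, text_input, classify_anxiety,
                      emo_class, explitive_selection, slider):
    return 0.0, str(classify_anxiety), str(text_input), ""

with gr.Blocks() as iface:
    anxiety_class = gr.Radio(["racism", "LGBTQ+ hate", "sexually explicit", "misophonia"])
    explit_preference = gr.Radio(choices=["N-Word", "B-Word", "All Explitives"])
    emo_class = gr.Radio(choices=["negaitve emotionality"])
    sense_slider = gr.Slider(minimum=1, maximum=5, step=1.0)
    aud_input = gr.Audio(source="upload", type="filepath")
    text = gr.Textbox()               # assumed: not shown in the diff hunks
    submit_btn = gr.Button("Submit")  # assumed: not shown in the diff hunks
    out_val = gr.Textbox()            # assumed: not shown in the diff hunks
    out_class = gr.Textbox()
    out_text = gr.Textbox()
    out_affirm = gr.Textbox()
    # Six inputs now line up positionally with the six handler parameters.
    submit_btn.click(fn=classify_toxicity,
                     inputs=[aud_input, text, anxiety_class, emo_class,
                             explit_preference, sense_slider],
                     outputs=[out_val, out_class, out_text, out_affirm])

iface.launch()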