mskov committed
Commit
59bfc5c
1 Parent(s): e7cf2e7

Update app.py

Files changed (1)
  1. app.py +6 -1
app.py CHANGED
@@ -109,11 +109,16 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
     print("class output ", type(classification_output))
     # classification_df = pd.DataFrame.from_dict(classification_output)
     print("keys ", classification_output.keys())
+
+    formatted_classification_output = ""
+    for label, score in classification_output.items():
+        formatted_classification_output += f"{label}: {score:.4f}\n"
 
     # plot.update(x=classification_df["labels"], y=classification_df["scores"])
     if toxicity_score > threshold:
         print("threshold exceeded!!")
-        return toxicity_score, classification_output, transcribed_text
+
+        return toxicity_score, formatted_classification_output, transcribed_text
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         threshold = slider_logic(slider)
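
For context, the change swaps the raw classification_output dict in the return tuple for a pre-formatted string. A minimal sketch of the new loop in isolation, assuming classification_output is a flat dict mapping label strings to float scores (the sample values below are hypothetical, not taken from the app):

# Hypothetical sample scores; the real dict in app.py may have a different shape.
classification_output = {"toxic": 0.8312, "neutral": 0.1688}

# Same formatting loop as in the commit: one "label: score" line per entry,
# with scores rounded to four decimal places.
formatted_classification_output = ""
for label, score in classification_output.items():
    formatted_classification_output += f"{label}: {score:.4f}\n"

print(formatted_classification_output)
# toxic: 0.8312
# neutral: 0.1688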