FpOliveira commited on
Commit
fdd8b27
1 Parent(s): e0551bc

feature:add app

Browse files
Files changed (3) hide show
  1. README.md +6 -6
  2. app.py +127 -0
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
  title: Portuguese Hate Speech Classifier
3
- emoji: 📚
4
- colorFrom: purple
5
- colorTo: blue
6
  sdk: gradio
7
- sdk_version: 4.26.0
8
  app_file: app.py
9
- pinned: false
10
- license: cc-by-4.0
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Portuguese Hate Speech Classifier
3
+ emoji: 🤗🤬
4
+ colorFrom: green
5
+ colorTo: red
6
  sdk: gradio
7
+ sdk_version: 4.7.1
8
  app_file: app.py
9
+ pinned: true
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
3
+ import torch
4
+ from collections import Counter
5
+ from scipy.special import softmax
6
+
7
# Footer credit rendered (as HTML) below the app.
article_string = 'Author: <a href="https://huggingface.co/FpOliveira">Felipe Ramos de Oliveira</a>. Read more about our <a href="https://github.com/Silly-Machine/TuPi-Portuguese-Hate-Speech-Dataset">The Portuguese hate speech dataset (TuPI) </a>.'

# Title shown at the top of the Gradio interface (bilingual EN/PT).
app_title = "Portuguese hate speech classifier (Binary) - Classificador de discurso de ódio em português (Binário)"

# Bilingual description under the title.
# NOTE: fixed typos in the user-facing text ("Esta aplicativo" -> "Este
# aplicativo", "discuros de odio" -> "discursos de ódio", "portuguese" ->
# "Portuguese").
app_description = """
EN: This application employs multiple natural language models to identify hate speech in Portuguese. You have the option to enter your own phrases by filling in the "Text" field or choosing one of the examples provided below.
\nPT: Este aplicativo emprega múltiplos modelos de linguagem natural para identificar discursos de ódio em português. Você tem a opção de inserir suas próprias frases preenchendo o campo "Text" ou escolhendo um dos exemplos abaixo.
"""

# Clickable example inputs for the interface. These are verbatim dataset-style
# samples (including slang and typos) and must NOT be "corrected".
app_examples = [
    ["bom dia flor do dia!!!"],
    ["o ódio é muito grande no coração da ex-deputada federal joise hasselmann contra a família bolsonaro"],
    ["mano deus me livre q nojo da porra!🤮🤮🤮🤮🤮"],
    ["obrigada princesa, porra, tô muito feliz snrsss 🤩🤩🤩❤️"],
    ["mds mas o viado vir responder meus status falando q a taylor foi racista foi o auge 😂😂"],
    ["Pra ser minha inimiga no mínimo tem que ter um rostinho bonito e delicado, não se considere minha rival com essa sua cara de cavalo não, feia, cara de traveco, cabeçuda, queixo quadrado 🤣🤣"],
]

# Help text for the score output box ("classicação" typo fixed).
output_textbox_component_description = """
EN: This box will display hate speech results based on the average score of multiple models.
PT: Esta caixa exibirá resultados da classificação de discurso de ódio com base na pontuação média de vários modelos.
"""

# Help text for the per-model JSON breakdown output.
output_json_component_description = {
    "breakdown": """
This box presents a detailed breakdown of the evaluation for each model.
""",
    "detalhamento": """
(Esta caixa apresenta um detalhamento da avaliação para cada modelo.)
""",
}

# Short class labels, keyed by model output index (0 = negative class).
short_score_descriptions = {
    0: "Not hate",
    1: "Hate",
}

# Long-form English verdicts, keyed by predicted class index.
score_descriptions = {
    0: "This text is not a hate speech.",
    1: "This text is a hate speech.",
}

# Long-form Portuguese verdicts ("contem" -> "contém" accents fixed).
score_descriptions_pt = {
    1: "Este texto contém discurso de ódio",
    0: "Este texto não contém discurso de ódio",
}
51
+
52
# Single source of truth: (model id, dropdown label) pairs, in display order.
_MODEL_LABELS = [
    ("FpOliveira/tupi-bert-large-portuguese-cased", "BERTimbau large (TuPi)"),
    ("FpOliveira/tupi-bert-base-portuguese-cased", "BERTimbau base (TuPi)"),
    ("FpOliveira/tupi-gpt2-small", "GPT2 small (TuPi)"),
]

# Hugging Face checkpoint ids loaded by the app.
model_list = [model_id for model_id, _ in _MODEL_LABELS]

# checkpoint id -> human-readable dropdown label.
user_friendly_name = dict(_MODEL_LABELS)

# dropdown label -> checkpoint id (inverse of the above).
reverse_user_friendly_name = {label: model_id for model_id, label in _MODEL_LABELS}

# Dropdown choices, in declaration order.
user_friendly_name_list = [label for _, label in _MODEL_LABELS]
67
+
68
# Eagerly download/load every checkpoint and its tokenizer once at startup;
# `predict` later selects an entry from this list by its "name" key.
model_array = [
    {
        "name": checkpoint,
        "tokenizer": AutoTokenizer.from_pretrained(checkpoint),
        "model": AutoModelForSequenceClassification.from_pretrained(checkpoint),
    }
    for checkpoint in model_list
]
76
+
77
def most_frequent(array):
    """Return the most common element of *array* (first-seen wins ties)."""
    ((winner, _count),) = Counter(array).most_common(1)
    return winner
80
+
81
+
82
def predict(s1, chosen_model):
    """Classify *s1* as hate / not-hate speech using the selected model.

    Parameters
    ----------
    s1 : str
        Text to classify.
    chosen_model : str
        User-friendly model label from the dropdown; falls back to the
        first available model when empty/None (Gradio may pass either).

    Returns
    -------
    tuple[dict, str]
        ({class label: probability}, bilingual markdown verdict).

    Raises
    ------
    KeyError
        If *chosen_model* is not a known dropdown label.
    ValueError
        If the mapped checkpoint was not preloaded into ``model_array``
        (the original code hit a NameError in this case).
    """
    if not chosen_model:
        chosen_model = user_friendly_name_list[0]
    full_chosen_model_name = reverse_user_friendly_name[chosen_model]

    # Find the preloaded tokenizer/model pair for the chosen checkpoint.
    entry = next(
        (row for row in model_array if row["name"] == full_chosen_model_name),
        None,
    )
    if entry is None:
        raise ValueError(f"Model not loaded: {full_chosen_model_name}")

    tokenizer = entry["tokenizer"]
    model = entry["model"]
    # Single-sentence batch; padding kept for parity with the original call.
    model_input = tokenizer([s1], padding=True, return_tensors="pt")
    with torch.no_grad():
        output = model(**model_input)
    # First (only) row of logits, converted to a probability distribution.
    logits = softmax(output[0][0].detach().numpy()).tolist()

    max_pos = logits.index(max(logits))
    markdown_description = (
        score_descriptions[max_pos] + "\n \n" + score_descriptions_pt[max_pos]
    )
    scores = {short_score_descriptions[idx]: prob for idx, prob in enumerate(logits)}

    return scores, markdown_description
111
+
112
+
113
# --- Gradio UI wiring ------------------------------------------------------

# Input widgets: free text plus a model selector (defaults to first model).
inputs = [
    gr.Textbox(label="Text", value=app_examples[0][0]),
    gr.Dropdown(
        label="Model",
        choices=user_friendly_name_list,
        value=user_friendly_name_list[0],
    ),
]

# Output widgets: probability label bar plus a markdown verdict.
outputs = [
    gr.Label(label="Result"),
    gr.Markdown(),
]

# Build the interface and start the app (blocks until the server stops).
gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title=app_title,
    description=app_description,
    examples=app_examples,
    article=article_string,
).launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ transformers
4
+ scipy