Rzhishchev committed on
Commit e8c2dc9
1 Parent(s): 5d4b507

Update toxic.py

Files changed (1)
  1. toxic.py +27 -26
toxic.py CHANGED
@@ -6,29 +6,30 @@ from transformers import AutoTokenizer, AutoModelForSequenceClassification
 def app():
     st.title('Toxic Comment Classifier')
     st.write('This is the toxic comment classifier page.')
-    model_checkpoint = 'cointegrated/rubert-tiny-toxicity'
-    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
-    model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
-
-    if torch.cuda.is_available():
-        model.cuda()
-
-    def text2toxicity(text, aggregate=True):
-        """ Calculate toxicity of a text (if aggregate=True) or a vector of toxicity aspects (if aggregate=False)"""
-        with torch.no_grad():
-            inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True).to(model.device)
-            proba = torch.sigmoid(model(**inputs).logits).cpu().numpy()
-        if isinstance(text, str):
-            proba = proba[0]
-        if aggregate:
-            return 1 - proba.T[0] * (1 - proba.T[-1])
-        return proba
-
-    st.title("Toxicity Detector")
-
-    user_input = st.text_area("Enter text to check for toxicity:", "Капец ты гнида")
-    if st.button("Analyze"):
-        toxicity_score = text2toxicity(user_input, True)
-        st.write(f"Toxicity Score: {toxicity_score:.4f}")
-        if toxicity_score > 0.5:
-            st.write("Warning: The text seems to be toxic!")
+
+    model_checkpoint = 'cointegrated/rubert-tiny-toxicity'
+    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+    model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
+
+    if torch.cuda.is_available():
+        model.cuda()
+
+    def text2toxicity(text, aggregate=True):
+        """ Calculate toxicity of a text (if aggregate=True) or a vector of toxicity aspects (if aggregate=False)"""
+        with torch.no_grad():
+            inputs = tokenizer(text, return_tensors='pt', truncation=True, padding=True).to(model.device)
+            proba = torch.sigmoid(model(**inputs).logits).cpu().numpy()
+        if isinstance(text, str):
+            proba = proba[0]
+        if aggregate:
+            return 1 - proba.T[0] * (1 - proba.T[-1])
+        return proba
+
+    st.title("Toxicity Detector")
+
+    user_input = st.text_area("Enter text to check for toxicity:", "Капец ты гнида")
+    if st.button("Analyze"):
+        toxicity_score = text2toxicity(user_input, True)
+        st.write(f"Toxicity Score: {toxicity_score:.4f}")
+        if toxicity_score > 0.5:
+            st.write("Warning: The text seems to be toxic!")