"""Bias checker: classify a statement with a generic bias model and, if it
looks biased, follow up with political- and gender-bias models."""
import load_model_pt
import interpret_model_pt


def sub_pipeline(raw_input, pretrained_model):
    """Classify `raw_input` with `pretrained_model` and explain the prediction.

    Returns the classifier output (a dict with "label" and "score") and the
    per-word attribution weights from the explainer.
    """
    tokenizer, model = load_model_pt.load_models_from_pretrained(pretrained_model)
    output_ = load_model_pt.load_pipeline(raw_input, pretrained_model)
    words_weightages = interpret_model_pt.explainer(raw_input, model, tokenizer)
    return output_, words_weightages
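
# The helper modules above are not included in this file. A minimal sketch of
# what they are assumed to provide, inferred from how they are called (names
# and exact signatures are assumptions, built on Hugging Face `transformers`
# and `transformers-interpret`):
#
#     # load_model_pt.py (assumed)
#     from transformers import (AutoModelForSequenceClassification,
#                               AutoTokenizer, pipeline)
#
#     def load_models_from_pretrained(pretrained_model):
#         tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
#         model = AutoModelForSequenceClassification.from_pretrained(pretrained_model)
#         return tokenizer, model
#
#     def load_pipeline(raw_input, pretrained_model):
#         clf = pipeline("text-classification", model=pretrained_model)
#         return clf(raw_input)[0]  # e.g. {"label": "BIASED", "score": 0.93}
#
#     # interpret_model_pt.py (assumed)
#     from transformers_interpret import SequenceClassificationExplainer
#
#     def explainer(raw_input, model, tokenizer):
#         cls_explainer = SequenceClassificationExplainer(model, tokenizer)
#         return cls_explainer(raw_input)  # list of (word, attribution) pairs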

def bias_checker(input_statement):
    """Check a statement for generic, political, and gender bias."""
    pretrained_model_basic_check = "valurank/distilroberta-bias"
    pretrained_model_political = "valurank/distilroberta-mbfc-bias"
    pretrained_model_gender = "monologg/koelectra-base-v3-gender-bias"

    raw_input = input_statement
    # First pass: does the input show any primary (generic) bias?
    output_stmt_zero, words_interpreted = sub_pipeline(raw_input, pretrained_model_basic_check)
    print(output_stmt_zero)
    return_var = " "
    interpret_var = " "

    # Investigate further if the generic model is confident the statement is
    # biased, or is not confident that it is neutral.
    if (output_stmt_zero["label"] == "BIASED" and output_stmt_zero["score"] >= 0.7) or (
        output_stmt_zero["label"] == "NEUTRAL" and output_stmt_zero["score"] < 0.6
    ):
        # Second pass: check for political propaganda.
        output_stmt_political, words_interpreted_political = sub_pipeline(raw_input, pretrained_model_political)
        # Third pass: check for gender bias.
        output_stmt_gender, words_interpreted_gender = sub_pipeline(raw_input, pretrained_model_gender)
        return_var = ("Generic:", output_stmt_zero, "\n", "Gender:", output_stmt_gender, "\n", "Political:", output_stmt_political)
        interpret_var = ("Generic:", words_interpreted, "\n", "Gender:", words_interpreted_gender, "\n", "Political:", words_interpreted_political)
    else:
        return_var = "The statement seems ok as of now, please input another statement!"
        interpret_var = " "

    return return_var, interpret_var
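
# Illustrative (hypothetical) return shapes, assuming the helper-module
# behavior sketched above:
#   biased input:
#     return_var    -> ("Generic:", {"label": "BIASED", "score": 0.93}, "\n",
#                       "Gender:", {...}, "\n", "Political:", {...})
#     interpret_var -> ("Generic:", [("Trump", 0.41), ...], "\n", ...)
#   non-biased input:
#     return_var    -> "The statement seems ok as of now, please input another statement!"
#     interpret_var -> " "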


if __name__ == "__main__":
    input_stmt = "Nevertheless, Trump and other Republicans have tarred the protests as havens for terrorists intent on destroying property."
    results, interpretations = bias_checker(input_stmt)
    print(results)
    print(interpretations)