tybrs committed
Commit c430c1e
1 Parent(s): 209034b

Update bias_auc.py


updated _KWARGS_DESCRIPTION

Files changed (1)
  1. bias_auc.py +37 -4
bias_auc.py CHANGED
@@ -13,7 +13,7 @@ classifier’s score distribution can vary across designated groups.
 The following are computed:
 
 - BNSP (Background Negative, Subgroup Positive); and
-- BPSN (Background Positive, Subgroup Negative) AUC metrics
+- BPSN (Background Positive, Subgroup Negative) AUC
 
 """
 
@@ -28,9 +28,42 @@ _CITATION = """\
 """
 
 _KWARGS_DESCRIPTION = """\
- target list[list[str]]: list containing list of group targeted for each item
- label list[int]: list containing label index for each item
- output list[list[float]]: list of model output values for each
+
+Args:
+    target list[list[str]]: list containing the list of groups targeted for each item
+    label list[int]: list containing the label index for each item
+    output list[list[float]]: list of model output values for each item
+
+Returns (for each subgroup in target):
+    'Subgroup': Subgroup AUC score
+    'BPSN': BPSN (Background Positive, Subgroup Negative) AUC score
+    'BNSP': BNSP (Background Negative, Subgroup Positive) AUC score
+
+
+Example:
+    >>> from evaluate import load
+
+    >>> target = [['Islam'],
+    ...           ['Sexuality'],
+    ...           ['Sexuality'],
+    ...           ['Islam']]
+
+    >>> label = [0, 0, 1, 1]
+
+    >>> output = [[0.44452348351478577, 0.5554765462875366],
+    ...           [0.4341845214366913, 0.5658154487609863],
+    ...           [0.400595098733902, 0.5994048714637756],
+    ...           [0.3840397894382477, 0.6159601807594299]]
+
+    >>> metric = load('Intel/bias_auc')
+    >>> metric.add_batch(target=target,
+    ...                  label=label,
+    ...                  output=output)
+    >>> metric.compute(target=target,
+    ...                label=label,
+    ...                output=output,
+    ...                subgroups=None)
+
 """
 
 class BiasAUC(evaluate.Metric):
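
For readers unfamiliar with the three slices named in the docstring, the sketch below illustrates how Subgroup, BPSN, and BNSP AUC can be computed with `sklearn.metrics.roc_auc_score`. It is a minimal illustration of the definitions only, not this repository's implementation; the helper name `subgroup_aucs` and the assumption that `output[i][1]` is the positive-class score are this example's own.

```python
# Minimal sketch (not this repo's implementation) of the three per-subgroup AUCs.
# Assumes binary labels (0/1), that output[i][1] is the positive-class score,
# and that every slice contains both classes (otherwise roc_auc_score raises).
from sklearn.metrics import roc_auc_score


def subgroup_aucs(target, label, output, subgroup):
    scores = [probs[1] for probs in output]              # positive-class score per item
    in_group = [subgroup in groups for groups in target]  # does the item mention the subgroup?

    def auc(keep):
        y = [l for l, k in zip(label, keep) if k]
        s = [p for p, k in zip(scores, keep) if k]
        return roc_auc_score(y, s)

    return {
        # Subgroup AUC: only items that mention the subgroup
        "Subgroup": auc(in_group),
        # BPSN AUC: background positives + subgroup negatives
        "BPSN": auc([(g and y == 0) or (not g and y == 1)
                     for g, y in zip(in_group, label)]),
        # BNSP AUC: background negatives + subgroup positives
        "BNSP": auc([(g and y == 1) or (not g and y == 0)
                     for g, y in zip(in_group, label)]),
    }
```

With the toy `target`, `label`, and `output` from the docstring, `subgroup_aucs(target, label, output, 'Islam')` scores the 'Islam' slice; low BPSN or BNSP values suggest the classifier tends to confuse that subgroup's items with opposite-label background items.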