Update tool.py #1
by ethomas-tw - opened

tool.py CHANGED
@@ -27,7 +27,7 @@ title_container = st.container()
 title_container.image(image, width = 300)
 title_container.title("Responsible AI Institute Corporate AI Policy Assessment Tool")
 title_container.write(
-    "
+    "##### Evaluate your Corporate AI policies with NIST AI RMF and ISO/IEC 42001."
 )
 
 file_upload = st.file_uploader(
@@ -44,100 +44,102 @@ download_report = top_container.empty()
 scores_tab, details_tab, history_tab = st.tabs(["Scores", "Details", "Version History"])
 
 with scores_tab:
-    st.
-
-        "
-
-
-
-    govern_col, map_col, measure_col, manage_col = st.columns(4)
-
-    govern_df, map_df, measure_df, manage_df = load_results()
-    n_govern = len(govern_df)
-    n_map = len(measure_df)
-    n_measure = len(measure_df)
-    n_manage = len(manage_df)
-
-    govern_metric = govern_col.metric(
-        "Govern Score",
-        "0 %",
-        "0 %",
-        "off",
-        help="GOVERN is a cross-cutting function that is infused throughout AI risk management and enables the other functions of the process. Aspects of GOVERN, especially those related to compliance or evaluation, should be integrated into each of the other functions.",
-    )
-    map_metric = map_col.metric(
-        "Map Score",
-        "0 %",
-        "0 %",
-        "off",
-        help="The MAP function establishes the context to frame risks related to an AI system.",
-    )
-    measure_metric = measure_col.metric(
-        "Measure Score",
-        "0 %",
-        "0 %",
-        "off",
-        help="The MEASURE function employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor AI risk and related impacts.",
-    )
-    manage_metric = manage_col.metric(
-        "Manage Score",
-        "0 %",
-        "0 %",
-        "off",
-        help="The MANAGE function entails allocating risk resources to mapped and measured risks on a regular basis and as defined by the GOVERN function.",
-    )
-
-    st.write("## 7 NIST Dimensions")
-
-    VaR_col, Saf_col, SaR_col, AaT_col = st.columns(4)
-    EaI_col, PE_col, Fai_col, Sco_col = st.columns(4)
-    with VaR_col:
-        VaR_metric = VaR_col.metric(
-            "Valid and Reliable",
-            "☆☆☆☆☆",
-            help="Validation is the “confirmation, through the provision of objective evidence, that the requirements for a specific intended use or application have been fulfilled” (Source: ISO 9000:2015). Reliability is defined in the same standard as the “ability of an item to perform as required, without failure, for a given time interval, under given conditions” (Source: ISO/IEC TS 5723:2022)",
-        )
-    with Saf_col:
-        Saf_metric = Saf_col.metric(
-            "Safe",
-            "☆☆☆☆☆",
-            help="AI systems should “not under defined conditions, lead to a state in which human life, health, property, or the environment is endangered” (Source: ISO/IEC TS 5723:2022)",
+    scores_container = st.container()
+    with scores_container:
+        st.write("# Scores")
+        st.write(
+            "NIST AI RMF Documentation: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.100-1.pdf"
         )
-
-
-
-
-
-        )
-
-
-
-
-
-
-
-
-            "
-            "
-            help="Explainability refers to a representation of the mechanisms underlying AI systems’ operation, whereas interpretability refers to the meaning of AI systems’ output in the context of their designed functional purposes.",
+        st.write("## AI RMF Core Categories")
+
+        govern_col, map_col, measure_col, manage_col = st.columns(4)
+
+        govern_df, map_df, measure_df, manage_df = load_results()
+        n_govern = len(govern_df)
+        n_map = len(measure_df)
+        n_measure = len(measure_df)
+        n_manage = len(manage_df)
+
+        govern_metric = govern_col.metric(
+            "Govern Score",
+            "0 %",
+            "0 %",
+            "off",
+            help="GOVERN is a cross-cutting function that is infused throughout AI risk management and enables the other functions of the process. Aspects of GOVERN, especially those related to compliance or evaluation, should be integrated into each of the other functions.",
         )
-
-
-            "
-            "
-
+        map_metric = map_col.metric(
+            "Map Score",
+            "0 %",
+            "0 %",
+            "off",
+            help="The MAP function establishes the context to frame risks related to an AI system.",
         )
-
-
-            "
-            "
-
+        measure_metric = measure_col.metric(
+            "Measure Score",
+            "0 %",
+            "0 %",
+            "off",
+            help="The MEASURE function employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor AI risk and related impacts.",
         )
-
-
-            "
+        manage_metric = manage_col.metric(
+            "Manage Score",
+            "0 %",
+            "0 %",
+            "off",
+            help="The MANAGE function entails allocating risk resources to mapped and measured risks on a regular basis and as defined by the GOVERN function.",
         )
-
+
+        st.write("## 7 NIST Dimensions")
+
+        VaR_col, Saf_col, SaR_col, AaT_col = st.columns(4)
+        EaI_col, PE_col, Fai_col, Sco_col = st.columns(4)
+        with VaR_col:
+            VaR_metric = VaR_col.metric(
+                "Valid and Reliable",
+                "☆☆☆☆☆",
+                help="Validation is the “confirmation, through the provision of objective evidence, that the requirements for a specific intended use or application have been fulfilled” (Source: ISO 9000:2015). Reliability is defined in the same standard as the “ability of an item to perform as required, without failure, for a given time interval, under given conditions” (Source: ISO/IEC TS 5723:2022)",
+            )
+        with Saf_col:
+            Saf_metric = Saf_col.metric(
+                "Safe",
+                "☆☆☆☆☆",
+                help="AI systems should “not under defined conditions, lead to a state in which human life, health, property, or the environment is endangered” (Source: ISO/IEC TS 5723:2022)",
+            )
+        with SaR_col:
+            SaR_metric = SaR_col.metric(
+                "Secure and Resilient",
+                "☆☆☆☆☆",
+                help="AI systems, as well as the ecosystems in which they are deployed, may be said to be resilient if they can withstand unexpected adverse events or unexpected changes in their environment or use – or if they can maintain their functions and structure in the face of internal and external change and degrade safely and gracefully when this is necessary (Adapted from: ISO/IEC TS 5723:2022)",
+            )
+        with AaT_col:
+            AaT_metric = AaT_col.metric(
+                "Accountable and Transparent",
+                "☆☆☆☆☆",
+                help="Trustworthy AI depends upon accountability. Accountability presupposes transparency. Transparency reflects the extent to which information about an AI system and its outputs is available to individuals interacting with such a system – regardless of whether they are even aware that they are doing so.",
+            )
+        with EaI_col:
+            EaI_metric = EaI_col.metric(
+                "Explainable and Interpretable",
+                "☆☆☆☆☆",
+                help="Explainability refers to a representation of the mechanisms underlying AI systems’ operation, whereas interpretability refers to the meaning of AI systems’ output in the context of their designed functional purposes.",
+            )
+        with PE_col:
+            PE_metric = PE_col.metric(
+                "Privacy-Enhanced",
+                "☆☆☆☆☆",
+                help="Privacy refers generally to the norms and practices that help to safeguard human autonomy, identity, and dignity.",
+            )
+        with Fai_col:
+            Fai_metric = Fai_col.metric(
+                "Fair",
+                "☆☆☆☆☆",
+                help="Fairness in AI includes concerns for equality and equity by addressing issues such as harmful bias and discrimination.",
+            )
+        with Sco_col:
+            Sco_metric = Sco_col.metric(
+                "Total Score", "0/35", help="Sum of all 7 NIST dimension scores"
+            )
+        # st.metric("Rating","🥇",help="")
 
 
 with details_tab:
@@ -200,7 +202,8 @@ def fill_data(sleep_time=0):
     govern_score = 0
     for i, row in govern_df.iterrows():
         govern_expander.write(row["Statement"])
-        govern_expander.
+        govern_expander.markdown(f'<h1 style="color:#09ab3b;font-size:24px;">{row["Score"]}</h1>', unsafe_allow_html=True)
+        govern_expander.write("")
         govern_score += row["Score"]
         metric = int(govern_score / n_govern * 100)
         delta = int(row["Score"] / n_govern * 100)
@@ -215,7 +218,8 @@ def fill_data(sleep_time=0):
     map_score = 0
     for i, row in map_df.iterrows():
         map_expander.write(row["Statement"])
-        map_expander.
+        map_expander.markdown(f'<h1 style="color:#09ab3b;font-size:24px;">{row["Score"]}</h1>', unsafe_allow_html=True)
+        map_expander.write("")
         map_score += row["Score"]
         metric = int(map_score / n_map * 100)
         delta = int(row["Score"] / n_map * 100)
@@ -230,7 +234,8 @@ def fill_data(sleep_time=0):
     measure_score = 0
     for i, row in measure_df.iterrows():
         measure_expander.write(row["Statement"])
-        measure_expander.
+        measure_expander.markdown(f'<h1 style="color:#09ab3b;font-size:24px;">{row["Score"]}</h1>', unsafe_allow_html=True)
+        measure_expander.write("")
        measure_score += row["Score"]
         metric = int(measure_score / n_measure * 100)
         delta = int(row["Score"] / n_measure * 100)
@@ -245,7 +250,8 @@ def fill_data(sleep_time=0):
     manage_score = 0
     for i, row in manage_df.iterrows():
         manage_expander.write(row["Statement"])
-        manage_expander.
+        manage_expander.markdown(f'<h1 style="color:#09ab3b;font-size:24px;">{row["Score"]}</h1>', unsafe_allow_html=True)
+        manage_expander.write("")
         manage_score += row["Score"]
         metric = int(manage_score / n_manage * 100)
         delta = int(row["Score"] / n_manage * 100)
@@ -304,7 +310,25 @@ def fill_data(sleep_time=0):
         help="Fairness in AI includes concerns for equality and equity by addressing issues such as harmful bias and discrimination.",
     )
     Sco_metric.metric("Total Score", "25/35", help="Sum of all 7 NIST dimension scores")
-
+    df = pd.DataFrame(
+        {
+            "Valid and Reliable": ["🔵🔵🔵⚪⚪"],
+            "Safe": ["🔵🔵🔵🔵⚪"],
+            "Secure and Resilient": ["🔵🔵🔵⚪⚪"],
+            "Accountable and Transparent": ["🔵🔵🔵🔵⚪"],
+            "Explainable and Interpretable": ["🔵🔵🔵🔵⚪"],
+            "Privacy-Enhanced": ["🔵🔵🔵🔵⚪"],
+            "Fair": ["🔵🔵🔵⚪⚪"],
+            "Total Score (/35)": [25],
+            "Govern ": ["85%"],
+            "Map ": ["96%"],
+            "Measure": ["75%"],
+            "Manage ": ["71%"],
+            "Rating": ["🥇"]
+        }
+    ).set_index("Rating").sort_index(ascending=False)
+    scores_container.header("Summary")
+    scores_container.dataframe(df, column_config={"widgets": st.column_config.Column(width="medium")})
 
 def process():
     fill_data(sleep_time=SLEEP_TIME)