import time

import pandas as pd
import streamlit as st
from PIL import Image

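# Total number of NIST AI RMF statements scored (rows in Results/Similarity_Scores.csv)
# and the per-statement delay, in seconds, used to animate the first scoring pass.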
N_STATEMENTS = 72
SLEEP_TIME = 0.2

st.set_page_config(layout="wide")

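# Load the per-statement similarity scores and split them by AI RMF core function.
# Assumes the CSV has "Statement" and "Score" columns and that its 72 rows are
# ordered Govern (19), Map (13), Measure (18), Manage (22).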
@st.cache_data
def load_results():
    df = pd.read_csv("Results/Similarity_Scores.csv")
    govern_df = df[0:19]
    map_df = df[19:32]
    measure_df = df[32:50]
    manage_df = df[50:72]
    return govern_df, map_df, measure_df, manage_df


image = Image.open("rai_raise_main_badge_pos_102.png")

title_container = st.container()
title_container.image(image)
title_container.title("Responsible AI Institute Corporate AI Policy Assessment Tool")
title_container.write(
    "This service uses LLMs to automatically assess how well a company's Generative AI policy aligns with the NIST AI RMF."
)

file_upload = st.file_uploader(
    "Please upload a PDF of your company policy:", type=["pdf"]
)

top_container = st.container()

bar = top_container.empty()
process_report = top_container.empty()
download_report = top_container.empty()

scores_tab, details_tab, history_tab = st.tabs(["Scores", "Details", "Version History"])
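
# Scores tab: headline metrics for the four AI RMF core functions and the seven NIST
# trustworthiness dimensions. Values are placeholders until a policy is processed.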
with scores_tab:
    st.write("# Scores")
    st.write(
        "NIST AI RMF Documentation: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.100-1.pdf"
    )
    st.write("## AI RMF Core Categories")

    govern_col, map_col, measure_col, manage_col = st.columns(4)

    govern_df, map_df, measure_df, manage_df = load_results()
    n_govern = len(govern_df)
    n_map = len(map_df)
    n_measure = len(measure_df)
    n_manage = len(manage_df)

    govern_metric = govern_col.metric(
        "Govern Score",
        "0 %",
        "0 %",
        delta_color="off",
        help="GOVERN is a cross-cutting function that is infused throughout AI risk management and enables the other functions of the process. Aspects of GOVERN, especially those related to compliance or evaluation, should be integrated into each of the other functions.",
    )
    map_metric = map_col.metric(
        "Map Score",
        "0 %",
        "0 %",
        delta_color="off",
        help="The MAP function establishes the context to frame risks related to an AI system.",
    )
    measure_metric = measure_col.metric(
        "Measure Score",
        "0 %",
        "0 %",
        delta_color="off",
        help="The MEASURE function employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor AI risk and related impacts.",
    )
    manage_metric = manage_col.metric(
        "Manage Score",
        "0 %",
        "0 %",
        delta_color="off",
        help="The MANAGE function entails allocating risk resources to mapped and measured risks on a regular basis and as defined by the GOVERN function.",
    )

    st.write("## 7 NIST Dimensions")

    VaR_col, Saf_col, SaR_col, AaT_col = st.columns(4)
    EaI_col, PE_col, Fai_col, Sco_col = st.columns(4)

    VaR_metric = VaR_col.metric(
        "Valid and Reliable",
        "☆☆☆☆☆",
        help="Validation is the “confirmation, through the provision of objective evidence, that the requirements for a specific intended use or application have been fulfilled” (Source: ISO 9000:2015). Reliability is defined in the same standard as the “ability of an item to perform as required, without failure, for a given time interval, under given conditions” (Source: ISO/IEC TS 5723:2022)",
    )
    Saf_metric = Saf_col.metric(
        "Safe",
        "☆☆☆☆☆",
        help="AI systems should “not under defined conditions, lead to a state in which human life, health, property, or the environment is endangered” (Source: ISO/IEC TS 5723:2022)",
    )
    SaR_metric = SaR_col.metric(
        "Secure and Resilient",
        "☆☆☆☆☆",
        help="AI systems, as well as the ecosystems in which they are deployed, may be said to be resilient if they can withstand unexpected adverse events or unexpected changes in their environment or use – or if they can maintain their functions and structure in the face of internal and external change and degrade safely and gracefully when this is necessary (Adapted from: ISO/IEC TS 5723:2022)",
    )
    AaT_metric = AaT_col.metric(
        "Accountable and Transparent",
        "☆☆☆☆☆",
        help="Trustworthy AI depends upon accountability. Accountability presupposes transparency. Transparency reflects the extent to which information about an AI system and its outputs is available to individuals interacting with such a system – regardless of whether they are even aware that they are doing so.",
    )
    EaI_metric = EaI_col.metric(
        "Explainable and Interpretable",
        "☆☆☆☆☆",
        help="Explainability refers to a representation of the mechanisms underlying AI systems' operation, whereas interpretability refers to the meaning of AI systems' output in the context of their designed functional purposes.",
    )
    PE_metric = PE_col.metric(
        "Privacy-Enhanced",
        "☆☆☆☆☆",
        help="Privacy refers generally to the norms and practices that help to safeguard human autonomy, identity, and dignity.",
    )
    Fai_metric = Fai_col.metric(
        "Fair",
        "☆☆☆☆☆",
        help="Fairness in AI includes concerns for equality and equity by addressing issues such as harmful bias and discrimination.",
    )
    Sco_metric = Sco_col.metric(
        "Total Score", "0/35", help="Sum of all 7 NIST dimension scores"
    )
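
# Details tab: individual statements and their scores are written into these
# expanders while a report is processed.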
with details_tab:
    st.write("# Details")
    st.write(
        "NIST AI RMF Documentation: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.100-1.pdf"
    )
    st.write("## AI RMF Core Categories")
    govern_expander = st.expander("Govern", expanded=True)
    map_expander = st.expander("Map", expanded=True)
    measure_expander = st.expander("Measure", expanded=True)
    manage_expander = st.expander("Manage", expanded=True)

    govern_expander.write(
        "GOVERN is a cross-cutting function that is infused throughout AI risk management and enables the other functions of the process. Aspects of GOVERN, especially those related to compliance or evaluation, should be integrated into each of the other functions."
    )
    map_expander.write(
        "The MAP function establishes the context to frame risks related to an AI system."
    )
    measure_expander.write(
        "The MEASURE function employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor AI risk and related impacts."
    )
    manage_expander.write(
        "The MANAGE function entails allocating risk resources to mapped and measured risks on a regular basis and as defined by the GOVERN function."
    )
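
# Version History tab: hard-coded sample history of previous assessment runs,
# shown for illustration.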
with history_tab:
    st.write("# Version History")
    df = pd.DataFrame(
        {
            "Version": [1, 2, 3, 4],
            "Date (mm/dd/yyyy)": [
                "10/23/2023",
                "11/01/2023",
                "11/27/2023",
                "12/06/2023",
            ],
            "Time (24h)": ["03:14:15", "09:26:53", "05:08:09", "07:09:50"],
            "Valid and Reliable": ["🔵🔵⚪⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵🔵⚪⚪", "🔵🔵🔵⚪⚪"],
            "Safe": ["🔵🔵🔵⚪⚪", "🔵🔵🔵🔵⚪", "🔵🔵🔵🔵⚪", "🔵🔵🔵🔵⚪"],
            "Secure and Resilient": ["🔵🔵⚪⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵🔵⚪⚪"],
            "Accountable and Transparent": ["🔵🔵🔵⚪⚪", "🔵🔵🔵⚪⚪", "🔵🔵🔵🔵⚪", "🔵🔵🔵🔵⚪"],
            "Explainable and Interpretable": ["🔵🔵🔵⚪⚪", "🔵🔵🔵⚪⚪", "🔵🔵🔵⚪⚪", "🔵🔵🔵🔵⚪"],
            "Privacy-Enhanced": ["🔵🔵🔵⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵🔵⚪⚪", "🔵🔵🔵🔵⚪"],
            "Fair": ["🔵🔵⚪⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵⚪⚪⚪", "🔵🔵🔵⚪⚪"],
            "Total Score (/35)": [18, 18, 21, 25],
            "Govern": ["72%", "78%", "80%", "85%"],
            "Map": ["89%", "90%", "93%", "96%"],
            "Measure": ["63%", "66%", "68%", "75%"],
            "Manage": ["60%", "64%", "67%", "71%"],
            "Rating": ["🥇", "🥇", "🥇", "🥇"],
        }
    ).set_index("Version").sort_index(ascending=False)

    st.dataframe(df, column_config={"widgets": st.column_config.Column(width="medium")})
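

# fill_data() streams each statement and its score into the Details expanders and
# updates the headline metrics as it goes; sleep_time animates the first pass and
# defaults to 0 so reruns render the completed results immediately. The dimension
# star ratings and the running total shown below are hard-coded example values.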
def fill_data(sleep_time=0):
    govern_score = 0
    for i, row in govern_df.iterrows():
        govern_expander.write(row["Statement"])
        govern_expander.write(row["Score"])
        govern_score += row["Score"]
        metric = int(govern_score / n_govern * 100)
        delta = int(row["Score"] / n_govern * 100)
        govern_metric.metric(
            "Govern Score",
            f"{metric} %",
            f"{delta} %",
            help="GOVERN is a cross-cutting function that is infused throughout AI risk management and enables the other functions of the process. Aspects of GOVERN, especially those related to compliance or evaluation, should be integrated into each of the other functions.",
        )
        time.sleep(sleep_time)

    map_score = 0
    for i, row in map_df.iterrows():
        map_expander.write(row["Statement"])
        map_expander.write(row["Score"])
        map_score += row["Score"]
        metric = int(map_score / n_map * 100)
        delta = int(row["Score"] / n_map * 100)
        map_metric.metric(
            "Map Score",
            f"{metric} %",
            f"{delta} %",
            help="The MAP function establishes the context to frame risks related to an AI system.",
        )
        time.sleep(sleep_time)

    measure_score = 0
    for i, row in measure_df.iterrows():
        measure_expander.write(row["Statement"])
        measure_expander.write(row["Score"])
        measure_score += row["Score"]
        metric = int(measure_score / n_measure * 100)
        delta = int(row["Score"] / n_measure * 100)
        measure_metric.metric(
            "Measure Score",
            f"{metric} %",
            f"{delta} %",
            help="The MEASURE function employs quantitative, qualitative, or mixed-method tools, techniques, and methodologies to analyze, assess, benchmark, and monitor AI risk and related impacts.",
        )
        time.sleep(sleep_time)

    manage_score = 0
    for i, row in manage_df.iterrows():
        manage_expander.write(row["Statement"])
        manage_expander.write(row["Score"])
        manage_score += row["Score"]
        metric = int(manage_score / n_manage * 100)
        delta = int(row["Score"] / n_manage * 100)
        manage_metric.metric(
            "Manage Score",
            f"{metric} %",
            f"{delta} %",
            help="The MANAGE function entails allocating risk resources to mapped and measured risks on a regular basis and as defined by the GOVERN function.",
        )
        time.sleep(sleep_time)

    VaR_metric.metric(
        "Valid and Reliable",
        "★★★☆☆",
        help="Validation is the “confirmation, through the provision of objective evidence, that the requirements for a specific intended use or application have been fulfilled” (Source: ISO 9000:2015). Reliability is defined in the same standard as the “ability of an item to perform as required, without failure, for a given time interval, under given conditions” (Source: ISO/IEC TS 5723:2022)",
    )
    Sco_metric.metric("Total Score", "3/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    Saf_metric.metric(
        "Safe",
        "★★★★☆",
        help="AI systems should “not under defined conditions, lead to a state in which human life, health, property, or the environment is endangered” (Source: ISO/IEC TS 5723:2022)",
    )
    Sco_metric.metric("Total Score", "7/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    SaR_metric.metric(
        "Secure and Resilient",
        "★★★☆☆",
        help="AI systems, as well as the ecosystems in which they are deployed, may be said to be resilient if they can withstand unexpected adverse events or unexpected changes in their environment or use – or if they can maintain their functions and structure in the face of internal and external change and degrade safely and gracefully when this is necessary (Adapted from: ISO/IEC TS 5723:2022)",
    )
    Sco_metric.metric("Total Score", "10/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    AaT_metric.metric(
        "Accountable and Transparent",
        "★★★★☆",
        help="Trustworthy AI depends upon accountability. Accountability presupposes transparency. Transparency reflects the extent to which information about an AI system and its outputs is available to individuals interacting with such a system – regardless of whether they are even aware that they are doing so.",
    )
    Sco_metric.metric("Total Score", "14/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    EaI_metric.metric(
        "Explainable and Interpretable",
        "★★★★☆",
        help="Explainability refers to a representation of the mechanisms underlying AI systems' operation, whereas interpretability refers to the meaning of AI systems' output in the context of their designed functional purposes.",
    )
    Sco_metric.metric("Total Score", "18/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    PE_metric.metric(
        "Privacy-Enhanced",
        "★★★★☆",
        help="Privacy refers generally to the norms and practices that help to safeguard human autonomy, identity, and dignity.",
    )
    Sco_metric.metric("Total Score", "22/35", help="Sum of all 7 NIST dimension scores")
    time.sleep(sleep_time * 3)
    Fai_metric.metric(
        "Fair",
        "★★★☆☆",
        help="Fairness in AI includes concerns for equality and equity by addressing issues such as harmful bias and discrimination.",
    )
    Sco_metric.metric("Total Score", "25/35", help="Sum of all 7 NIST dimension scores")


def process():
    fill_data(sleep_time=SLEEP_TIME)
    st.session_state["complete"] = True
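

# "Process Report" is enabled only after a PDF has been uploaded; clicking it runs
# the scoring pass and marks the session as complete.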
if file_upload is not None:
    process_report = top_container.button("Process Report", on_click=process)
else:
    process_report = top_container.button("Process Report", disabled=True)
    st.session_state["complete"] = False
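
# "Download Report" stays disabled until processing completes; after the rerun,
# fill_data() repopulates the scores and Report.pdf (expected to exist alongside
# the app) is offered for download.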
if not st.session_state.get("complete", False):
    download_report = top_container.button("Download Report", disabled=True)
else:
    fill_data()
    with open("Report.pdf", "rb") as f:
        download_report = top_container.download_button(
            "Download Report", f, file_name="Report.pdf"
        )