<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Manage</title>
<link rel="stylesheet"
      type="text/css"
      href="/dist/custom_fontawesome.ac60244596e1.css">
<link rel="stylesheet"
      type="text/css"
      href="/dist/application.ca391f6cb6ce.css">
<link rel="stylesheet" type="text/css" href="/dist/content.124706b6abde.css">
<script nonce="Bt+wUFLLXpM971wHzwK4Pw=="
        type="text/javascript"
        src="/dist/uswds-init.min.0c5600cc9db1.js"></script>
<meta name="description" content="" />
<link rel="canonical" href="/airmf-resources/playbook/manage/" />
<meta property="og:url" content="/airmf-resources/playbook/manage/" />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="Manage" />
<meta name="twitter:description" content="">
<meta property="og:type" content="website" />
<meta property="og:title" content="Manage" />
<meta property="og:description" content="" />
<meta property="og:site_name" content="" />
</head>
<body class="layout-styleguide">
<a class="usa-skipnav" href="#main-content">Skip to main content</a>
<div class="app-content">
<section class="usa-banner site-banner"
         aria-label="Official website of the United States government">
  <div class="usa-accordion">
    <header class="usa-banner__header">
      <div class="usa-banner__inner">
        <div class="grid-col-auto">
          <img aria-hidden="true"
               class="usa-banner__header-flag"
               src="/img/us_flag_small.png"
               alt="Small US Flag" />
        </div>
        <div class="grid-col-fill tablet:grid-col-auto" aria-hidden="true">
          <p class="usa-banner__header-text">An official website of the United States government</p>
          <p class="usa-banner__header-action">Here’s how you know</p>
        </div>
        <button type="button"
                class="usa-accordion__button usa-banner__button"
                aria-expanded="false"
                aria-controls="gov-banner-default-default">
          <span class="usa-banner__button-text">Here’s how you know</span>
        </button>
      </div>
    </header>
    <div class="usa-banner__content usa-accordion__content"
         id="gov-banner-default-default">
      <div class="grid-row grid-gap-lg">
        <div class="usa-banner__guidance tablet:grid-col-6">
          <img class="usa-banner__icon usa-media-block__img"
               src="/img/icon-dot-gov.svg"
               role="img"
               alt="Small Dot Gov Icon"
               aria-hidden="true" />
          <div class="usa-media-block__body">
            <p>
              <strong>Official websites use .gov</strong>
              <br />
              A <strong>.gov</strong> website belongs to an official government
              organization in the United States.
            </p>
          </div>
        </div>
        <div class="usa-banner__guidance tablet:grid-col-6">
          <img class="usa-banner__icon usa-media-block__img"
               src="/img/icon-https.svg"
               role="img"
               alt="Small HTTPS Lock Icon"
               aria-hidden="true" />
          <div class="usa-media-block__body">
            <p>
              <strong>Secure .gov websites use HTTPS</strong>
              <br />
              A <strong>lock</strong> (
              <span class="icon-lock">
                <svg xmlns="http://www.w3.org/2000/svg"
                     width="52"
                     height="64"
                     viewBox="0 0 52 64"
                     class="usa-banner__lock-image"
                     role="img"
                     aria-labelledby="banner-lock-description-default"
                     focusable="false">
                  <title id="banner-lock-title-default">Lock</title>
                  <desc id="banner-lock-description-default">Locked padlock icon</desc>
                  <path fill="#000000" fill-rule="evenodd" d="M26 0c10.493 0 19 8.507 19 19v9h3a4 4 0 0 1 4 4v28a4 4 0 0 1-4 4H4a4 4 0 0 1-4-4V32a4 4 0 0 1 4-4h3v-9C7 8.507 15.507 0 26 0zm0 8c-5.979 0-10.843 4.77-10.996 10.712L15 19v9h22v-9c0-6.075-4.925-11-11-11z" />
                </svg>
              </span>) or <strong>https://</strong> means you’ve safely connected to
              the .gov website. Share sensitive information only on official,
              secure websites.
            </p>
          </div>
        </div>
      </div>
    </div>
  </div>
</section>
<header class="usa-header usa-header--extended site-header site-header--dark"
        role="banner">
  <div class="usa-navbar site-header__navbar">
    <a href="https://www.nist.gov"
       title="National Institute of Standards and Technology"
       aria-label="Home">
      <img src="/img/nist_logo_brand_white.svg"
           role="img"
           class="nist-logo"
           alt="National Institute of Standards and Technology logo" />
    </a>
    <button type="button" class="usa-menu-btn">Menu</button>
  </div>
</header>
<div class="usa-overlay"></div>
<nav aria-label="Main Site navigation" class="usa-nav site-nav">
  <div class="usa-nav__inner site-nav__inner nav-header">
    <button type="button" class="usa-nav__close">
      <img src="/img/usa-icons/close.svg" role="img" alt="Close" />
    </button>
    <ul class="usa-nav__primary usa-accordion airc-nav-list">
      <li class="usa-nav__primary-item">
        <div class="usa-logo site-logo" id="-logo">
          <em class="usa-logo__text site-logo__text site-nav-text">
            <a href="/" title="Trustworthy &amp; Responsible AI Resource Center">
              <span aria-hidden="true" class="site-title--short">AIRMF</span>
              <span class="site-title--long">Trustworthy &amp; Responsible AI Resource Center</span>
            </a>
          </em>
        </div>
      </li>
      <!-- Mobile-nav -->
<ul class="usa-nav__primary usa-nav__primary--mobile usa-accordion emp-mobilenav"
|
||
id="airc-mobile-bar">
|
||
|
||
|
||
<li class="usa-nav__primary-item is-current ">
|
||
<a href="/"
|
||
|
||
class="usa-current"
|
||
>
|
||
|
||
<span>Home</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
<li class="usa-nav__primary-item is-current ">
|
||
<a href="/airmf-resources/"
|
||
class="usa-current
|
||
sb-menu
|
||
|
||
"
|
||
id="mentgl_4"
|
||
>
|
||
|
||
<span>AI RMF Resources</span>
|
||
<span class="caret"></span>
|
||
</a>
|
||
|
||
|
||
|
||
|
||
<ul class="usa-sidenav__sublist" id="mentgl_4">
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/airmf/"
|
||
class="
|
||
sb-menu
|
||
"
|
||
|
||
id="mentgl_5"
|
||
|
||
>
|
||
<span>AI RMF</span>
|
||
<span class="caret"></span>
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item is-current ">
|
||
|
||
<a href="/airmf-resources/playbook/"
|
||
class="usa-current
|
||
sb-menu
|
||
"
|
||
|
||
id="mentgl_18"
|
||
|
||
>
|
||
<span>Playbook</span>
|
||
<span class="caret"></span>
|
||
</a>
|
||
|
||
|
||
|
||
|
||
<ul class="usa-sidenav__sublist" id="mentgl_18">
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/playbook/govern/"
|
||
|
||
class=""
|
||
>
|
||
<span>Govern</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/playbook/map/"
|
||
|
||
class=""
|
||
>
|
||
<span>Map</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/playbook/measure/"
|
||
|
||
class=""
|
||
>
|
||
<span>Measure</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item is-current ">
|
||
|
||
<a href="/airmf-resources/playbook/manage/"
|
||
|
||
class="usa-current"
|
||
>
|
||
<span>Manage</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/playbook/audit-log/"
|
||
|
||
class=""
|
||
>
|
||
<span>Audit Log</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/playbook/faq/"
|
||
|
||
class=""
|
||
>
|
||
<span>FAQ</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
</ul>
|
||
|
||
<style nonce="Bt+wUFLLXpM971wHzwK4Pw==">
|
||
ul.usa-sidenav__sublist::marker,
|
||
li.usa-nav__submenu-item::marker {
|
||
content: ' ';
|
||
font-size: 1.2em;
|
||
}
|
||
</style>
|
||
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/roadmap/"
|
||
|
||
class=""
|
||
>
|
||
<span>Roadmap</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/usecases/"
|
||
|
||
class=""
|
||
>
|
||
<span>Example of Use Cases</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
|
||
<li class="usa-nav__submenu-item ">
|
||
|
||
<a href="/airmf-resources/crosswalks/"
|
||
|
||
class=""
|
||
>
|
||
<span>Crosswalk Documents</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
</ul>
|
||
|
||
<style nonce="Bt+wUFLLXpM971wHzwK4Pw==">
|
||
ul.usa-sidenav__sublist::marker,
|
||
li.usa-nav__submenu-item::marker {
|
||
content: ' ';
|
||
font-size: 1.2em;
|
||
}
|
||
</style>
|
||
|
||
|
||
|
||
</li>
|
||
|
||
|
||
<li class="usa-nav__primary-item ">
|
||
<a href="/glossary/"
|
||
|
||
class=""
|
||
>
|
||
|
||
<span>Glossary</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
|
||
<li class="usa-nav__primary-item ">
|
||
<a href="/technical-reports/"
|
||
|
||
class=""
|
||
>
|
||
|
||
<span>Technical Reports</span>
|
||
|
||
</a>
|
||
|
||
|
||
</li>
|
||
|
||
</ul>
|
||
|
||
<!-- end Mobile nav -->
|
||
</ul>
    <div class="usa-nav__secondary">
      <form id="search_form-mobile"
            class="site-search usa-search usa-search--small flex-fill"
            action="/search"
            accept-charset="UTF-8"
            method="get">
        <!-- input name="utf8" type="hidden" value="✓" /-->
        <input type="hidden" name="affiliate" id="affiliate-mobile" value="uswds" />
        <div role="search">
          <label class="usa-sr-only" for="query-mobile">Search the AIRC Website</label>
          <input id="query-mobile"
                 class="usa-input usagov-search-autocomplete"
                 name="query"
                 type="search"
                 placeholder="Search AIRC Website"
                 autocomplete="off" />
          <button class="site-search__button usa-button margin-top-0"
                  type="submit"
                  name="commit">
            <img src="/img/usa-icons-bg/search--white.svg"
                 class="usa-search__submit-icon"
                 alt="Search">
          </button>
        </div>
      </form>
    </div>
  </div>
</nav>
<div class="default-container">
  <nav class="usa-breadcrumb site-breadcrumbs" aria-label="Breadcrumbs">
    <ol class="usa-breadcrumb__list">
      <li class="usa-breadcrumb__list-item">
        <span class="usa-breadcrumb__link">
          <a href="/">Home</a>
        </span>
      </li>
      <li class="usa-breadcrumb__list-item">
        <span class="usa-breadcrumb__link">
          <a href="/airmf-resources/">AI RMF Resources</a>
        </span>
      </li>
      <li class="usa-breadcrumb__list-item">
        <span class="usa-breadcrumb__link">
          <a href="/airmf-resources/playbook/">Playbook</a>
        </span>
      </li>
      <li class="usa-breadcrumb__list-item usa-current" aria-current="page">Manage</li>
    </ol>
  </nav>
  <div id="user-content-root">
    <aside class="sidenav emp-sidenav padding-top-1"
           id="page-side-navigation"
           aria-label="Side Navigation">
      <ul class="site-sidenav usa-sidenav usa-accordion" id="airc-sidebar">
        <li class="usa-sidenav__item is-current">
          <a href="/" class="usa-current">
            <span>Home</span>
          </a>
        </li>
        <li class="usa-sidenav__item is-current">
          <a href="/airmf-resources/" class="usa-current sb-menu" id="ddtoggle_4">
            <span>AI RMF Resources</span>
            <span class="caret"></span>
          </a>
          <ul class="usa-sidenav__sublist" id="ddtoggle_4">
            <li class="usa-sidenav__item">
              <a href="/airmf-resources/airmf/" class="sb-menu" id="ddtoggle_5">
                <span>AI RMF</span>
                <span class="caret"></span>
              </a>
            </li>
            <li class="usa-sidenav__item is-current">
              <a href="/airmf-resources/playbook/" class="usa-current sb-menu" id="ddtoggle_18">
                <span>Playbook</span>
                <span class="caret"></span>
              </a>
              <ul class="usa-sidenav__sublist" id="ddtoggle_18">
                <li class="usa-sidenav__item">
                  <a href="/airmf-resources/playbook/govern/" class="">
                    <span>Govern</span>
                  </a>
                </li>
                <li class="usa-sidenav__item">
                  <a href="/airmf-resources/playbook/map/" class="">
                    <span>Map</span>
                  </a>
                </li>
                <li class="usa-sidenav__item">
                  <a href="/airmf-resources/playbook/measure/" class="">
                    <span>Measure</span>
                  </a>
                </li>
                <li class="usa-sidenav__item is-current">
                  <a href="/airmf-resources/playbook/manage/" class="usa-current">
                    <span>Manage</span>
                  </a>
                </li>
                <li class="usa-sidenav__item">
                  <a href="/airmf-resources/playbook/audit-log/" class="">
                    <span>Audit Log</span>
                  </a>
                </li>
                <li class="usa-sidenav__item">
                  <a href="/airmf-resources/playbook/faq/" class="">
                    <span>FAQ</span>
                  </a>
                </li>
              </ul>
            </li>
            <li class="usa-sidenav__item">
              <a href="/airmf-resources/roadmap/" class="">
                <span>Roadmap</span>
              </a>
            </li>
            <li class="usa-sidenav__item">
              <a href="/airmf-resources/usecases/" class="">
                <span>Example of Use Cases</span>
              </a>
            </li>
            <li class="usa-sidenav__item">
              <a href="/airmf-resources/crosswalks/" class="">
                <span>Crosswalk Documents</span>
              </a>
            </li>
          </ul>
        </li>
        <li class="usa-sidenav__item">
          <a href="/glossary/" class="">
            <span>Glossary</span>
          </a>
        </li>
        <li class="usa-sidenav__item">
          <a href="/technical-reports/" class="">
            <span>Technical Reports</span>
          </a>
        </li>
      </ul>
    </aside>
    <div class="usa-in-page-nav-container site-in-page-nav-container">
      <aside class="usa-in-page-nav">
      </aside>
      <main id="main-content" class="main-content">
        <div class="grid-container">
          <style>
            aside.usa-in-page-nav li.usa-in-page-nav__item--sub-item {
              margin-left: .25rem;
            }
          </style>
          <div class="pbindex grid-container" id="pbindex-Manage">
            <ul class="usa-button-group flex-justify-end">
              <li class="usa-button-group__item">
                <button id="pbindex-button-expand"
                        class="usa-button usa-button--outline pbindex-event">Expand All</button>
              </li>
              <li class="usa-button-group__item">
                <button id="pbindex-button-collapse"
                        class="usa-button usa-button--outline pbindex-event">Collapse All</button>
              </li>
            </ul>
            <h1>Manage</h1>
            <ul id="Manage" class="pbindex__top-ul">
              <li>
                <h2 class="pbindex__top__heading">Manage 1</h2>
                <p class="usa-intro pbindex__top__title">AI risks based on assessments and other analytical output from the Map and Measure functions are prioritized, responded to, and managed.</p>
                <ul class="pbindex__subcat-ul">
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%201.1"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 1.1"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 1.1
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">A determination is made as to whether the AI system achieves its intended purpose and stated objectives and whether its development or deployment should proceed.</p>
                      <div id="button-MANAGE 1.1"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>AI systems are not necessarily the right solution for a given business task or problem. A standard risk management practice is to formally weigh an AI system’s negative risks against its benefits, and to determine whether the AI system is an appropriate solution. Tradeoffs among trustworthiness characteristics, such as deciding to deploy a system based on system performance versus system transparency, may require regular assessment throughout the AI lifecycle.</p>
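                              <p>As a minimal, hypothetical sketch of this weighing practice (the risk names, scores, and decision margin below are illustrative assumptions, not Playbook values), the comparison can be recorded in a simple, auditable form:</p>
                              <pre><code class="language-python">
# Hypothetical sketch: formally weighing a candidate AI system's documented
# negative risks against its expected benefits before deciding whether
# development or deployment should proceed. All values are illustrative.

RISKS = [
    # (description, probability in [0, 1], impact on a 0-10 scale)
    ("Harmful output reaches end users", 0.10, 8.0),
    ("System underperforms a non-AI baseline", 0.30, 4.0),
]
BENEFITS = [
    # (description, expected value on a 0-10 scale)
    ("Faster case triage", 6.0),
    ("Reduced manual review burden", 3.0),
]

def weighted_risk(risks):
    """Composite of probability and impact for each documented risk."""
    return sum(p * impact for _, p, impact in risks)

def proceed(risks, benefits, margin=1.0):
    """Proceed only when documented benefits exceed weighted risk by a margin."""
    return sum(v for _, v in benefits) > weighted_risk(risks) + margin

print("Weighted risk:", weighted_risk(RISKS))
print("Proceed:", proceed(RISKS, BENEFITS))
</code></pre>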
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Consider trustworthiness characteristics when evaluating AI systems’ negative risks and benefits.</li>
                                <li>Utilize TEVV outputs from the Map and Measure functions when considering risk treatment.</li>
                                <li>Regularly track and monitor negative risks and benefits throughout the AI system lifecycle, including in post-deployment monitoring.</li>
                                <li>Regularly assess and document system performance relative to trustworthiness characteristics and tradeoffs between negative risks and opportunities.</li>
                                <li>Evaluate tradeoffs in connection with real-world use cases and impacts, as enumerated in Map function outcomes.</li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Transparency and Documentation</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <h5>Organizations can document the following</h5>
                              <ul>
                                <li>How do the technical specifications and requirements align with the AI system’s goals and objectives?</li>
                                <li>To what extent are the metrics consistent with system goals, objectives, and constraints, including ethical and compliance considerations?</li>
                                <li>What goals and objectives does the entity expect to achieve by designing, developing, and/or deploying the AI system?</li>
                              </ul>
                              <h5>AI Transparency Resources</h5>
                              <ul>
                                <li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies &amp; Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
                                <li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
                                <li>WEF Companion to the Model AI Governance Framework – Implementation and Self-Assessment Guide for Organizations. <a href="https://www.pdpc.gov.sg/-/media/files/pdpc/pdf-files/resource-for-organisation/ai/sgisago.ashx">URL</a></li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">References</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Arvind Narayanan. How to recognize AI snake oil. Retrieved October 15, 2022. <a href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf">URL</a></p>
                              <p>Board of Governors of the Federal Reserve System. SR 11-7: Guidance on Model Risk Management. (April 4, 2011). <a href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm">URL</a></p>
                              <p>Emanuel Moss, Elizabeth Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf. 2021. Assembling Accountability: Algorithmic Impact Assessment for the Public Interest. (June 29, 2021). <a href="https://ssrn.com/abstract=3877437">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3877437">URL</a></p>
                              <p>Henry L. Fraser and Jose-Miguel Bello y Villarino. 2021. Where Residual Risks Reside: A Comparative Approach to Art 9(4) of the European Union's Proposed AI Regulation. (September 30, 2021). <a href="https://ssrn.com/abstract=3960461">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3960461">URL</a></p>
                              <p>Microsoft. 2022. Microsoft Responsible AI Impact Assessment Template. (June 2022). <a href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Template.pdf">URL</a></p>
                              <p>Office of the Comptroller of the Currency. 2021. Comptroller's Handbook: Model Risk Management, Version 1.0, August 2021. <a href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html">URL</a></p>
                              <p>Solon Barocas, Asia J. Biega, Benjamin Fish, et al. 2020. When not to design, build, or deploy. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT* '20). Association for Computing Machinery, New York, NY, USA, 695. <a href="https://doi.org/10.1145/3351095.3375691">URL</a></p>
                            </div>
                          </li>
                        </ul>
                      </div>
                    </div>
                  </li>
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%201.2"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 1.2"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 1.2
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">Treatment of documented AI risks is prioritized based on impact, likelihood, or available resources or methods.</p>
                      <div id="button-MANAGE 1.2"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Risk refers to the composite measure of an event’s probability of occurring and the magnitude (or degree) of the consequences of the corresponding event. The impacts, or consequences, of AI systems can be positive, negative, or both, and can result in opportunities or risks.</p>
                              <p>Organizational risk tolerances are often informed by several internal and external factors, including existing industry practices, organizational values, and legal or regulatory requirements. Since risk management resources are often limited, organizations usually assign them based on risk tolerance: AI risks that are deemed more serious receive more oversight attention and risk management resources.</p>
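                              <p>A minimal, hypothetical sketch of this prioritization follows; the risk entries, scores, and number of oversight slots are illustrative assumptions. Documented risks are scored by the composite measure above, and limited oversight resources go to the most serious risks first:</p>
                              <pre><code class="language-python">
# Hypothetical sketch: prioritize documented AI risks by the composite
# measure of probability and magnitude, then assign limited oversight
# resources to the most serious risks. All entries are illustrative.

RISK_REGISTER = [
    {"risk": "privacy leakage",        "probability": 0.2, "magnitude": 9},
    {"risk": "biased recommendations", "probability": 0.4, "magnitude": 7},
    {"risk": "UI latency regression",  "probability": 0.6, "magnitude": 2},
]

OVERSIGHT_SLOTS = 2  # limited risk management resources

def composite_score(entry):
    """Probability of occurrence times magnitude of consequence."""
    return entry["probability"] * entry["magnitude"]

prioritized = sorted(RISK_REGISTER, key=composite_score, reverse=True)
for rank, entry in enumerate(prioritized, start=1):
    oversight = "standard" if rank > OVERSIGHT_SLOTS else "enhanced"
    print(rank, entry["risk"], round(composite_score(entry), 2), oversight)
</code></pre>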
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Assign risk management resources relative to established risk tolerance. AI systems with lower risk tolerances receive greater oversight, mitigation, and management resources.</li>
                                <li>Document AI risk tolerance determination practices and resource decisions.</li>
                                <li>Regularly review risk tolerances and re-calibrate, as needed, in accordance with information from AI system monitoring and assessment.</li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Transparency and Documentation</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <h5>Organizations can document the following</h5>
                              <ul>
                                <li>Did your organization implement a risk management system to address risks involved in deploying the identified AI solution (e.g., personnel risk or changes to commercial objectives)?</li>
                                <li>What assessments has the entity conducted on data security and privacy impacts associated with the AI system?</li>
                                <li>Does your organization have an existing governance structure that can be leveraged to oversee the organization’s use of AI?</li>
                              </ul>
                              <h5>AI Transparency Resources</h5>
                              <ul>
                                <li>WEF Companion to the Model AI Governance Framework – Implementation and Self-Assessment Guide for Organizations. <a href="https://www.pdpc.gov.sg/-/media/files/pdpc/pdf-files/resource-for-organisation/ai/sgisago.ashx">URL</a></li>
                                <li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies &amp; Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">References</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Arvind Narayanan. How to recognize AI snake oil. Retrieved October 15, 2022. <a href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf">URL</a></p>
                              <p>Board of Governors of the Federal Reserve System. SR 11-7: Guidance on Model Risk Management. (April 4, 2011). <a href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm">URL</a></p>
                              <p>Emanuel Moss, Elizabeth Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf. 2021. Assembling Accountability: Algorithmic Impact Assessment for the Public Interest. (June 29, 2021). <a href="https://ssrn.com/abstract=3877437">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3877437">URL</a></p>
                              <p>Henry L. Fraser and Jose-Miguel Bello y Villarino. 2021. Where Residual Risks Reside: A Comparative Approach to Art 9(4) of the European Union's Proposed AI Regulation. (September 30, 2021). <a href="https://ssrn.com/abstract=3960461">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3960461">URL</a></p>
                              <p>Microsoft. 2022. Microsoft Responsible AI Impact Assessment Template. (June 2022). <a href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Template.pdf">URL</a></p>
                              <p>Office of the Comptroller of the Currency. 2021. Comptroller's Handbook: Model Risk Management, Version 1.0, August 2021. <a href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html">URL</a></p>
                              <p>Solon Barocas, Asia J. Biega, Benjamin Fish, et al. 2020. When not to design, build, or deploy. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT* '20). Association for Computing Machinery, New York, NY, USA, 695. <a href="https://doi.org/10.1145/3351095.3375691">URL</a></p>
                            </div>
                          </li>
                        </ul>
                      </div>
                    </div>
                  </li>
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%201.3"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 1.3"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 1.3
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">Responses to the AI risks deemed high priority, as identified by the Map function, are developed, planned, and documented. Risk response options can include mitigating, transferring, avoiding, or accepting.</p>
                      <div id="button-MANAGE 1.3"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Outcomes from GOVERN-1, MAP-5, and MEASURE-2 can be used to address and document identified risks based on established risk tolerances. Organizations can follow existing regulations and guidelines for risk criteria, tolerances, and responses established by organizational, domain, discipline, sector, or professional requirements. In lieu of such guidance, organizations can develop risk response plans based on strategies such as accepted model risk management, enterprise risk management, and information sharing and disclosure practices.</p>
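                              <p>The sketch below is a hypothetical illustration of such a risk response plan: each high-priority risk is mapped to one of the response options named in this subcategory (mitigate, transfer, avoid, accept), with a rationale and an owner, so the decision is recorded rather than implicit. All names and entries are assumptions.</p>
                              <pre><code class="language-python">
# Hypothetical sketch of a documented risk response plan. Illustrative only.

from dataclasses import dataclass

RESPONSE_OPTIONS = {"mitigate", "transfer", "avoid", "accept"}

@dataclass
class RiskResponse:
    risk: str
    response: str   # one of RESPONSE_OPTIONS
    rationale: str
    owner: str      # team responsible for carrying out the response

    def __post_init__(self):
        # Reject entries that do not use a recognized response option.
        if self.response not in RESPONSE_OPTIONS:
            raise ValueError("unknown response: " + self.response)

plan = [
    RiskResponse("privacy leakage", "mitigate",
                 "add output filtering and access controls", "ML platform team"),
    RiskResponse("third-party model outage", "transfer",
                 "covered by vendor SLA and contract terms", "procurement"),
]
for item in plan:
    print(item)
</code></pre>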
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Observe regulatory and established organizational, sector, discipline, or professional standards and requirements for applying risk tolerances within the organization.</li>
                                <li>Document procedures for acting on AI system risks related to trustworthiness characteristics.</li>
                                <li>Prioritize risks involving physical safety, legal liabilities, regulatory compliance, and negative impacts on individuals, groups, or society.</li>
                                <li>Identify risk response plans, resources, and organizational teams for carrying out response functions.</li>
                                <li>Store risk management and system documentation in an organized, secure repository that is accessible by relevant AI actors and appropriate personnel.</li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Transparency and Documentation</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <h5>Organizations can document the following</h5>
                              <ul>
                                <li>Has the system been reviewed to ensure the AI system complies with relevant laws, regulations, standards, and guidance?</li>
                                <li>To what extent has the entity defined and documented the regulatory environment, including minimum requirements in laws and regulations?</li>
                                <li>Did your organization implement a risk management system to address risks involved in deploying the identified AI solution (e.g., personnel risk or changes to commercial objectives)?</li>
                              </ul>
                              <h5>AI Transparency Resources</h5>
                              <ul>
                                <li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies &amp; Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
                                <li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">References</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Arvind Narayanan. How to recognize AI snake oil. Retrieved October 15, 2022. <a href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf">URL</a></p>
                              <p>Board of Governors of the Federal Reserve System. SR 11-7: Guidance on Model Risk Management. (April 4, 2011). <a href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm">URL</a></p>
                              <p>Emanuel Moss, Elizabeth Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf. 2021. Assembling Accountability: Algorithmic Impact Assessment for the Public Interest. (June 29, 2021). <a href="https://ssrn.com/abstract=3877437">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3877437">URL</a></p>
                              <p>Henry L. Fraser and Jose-Miguel Bello y Villarino. 2021. Where Residual Risks Reside: A Comparative Approach to Art 9(4) of the European Union's Proposed AI Regulation. (September 30, 2021). <a href="https://ssrn.com/abstract=3960461">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3960461">URL</a></p>
                              <p>Microsoft. 2022. Microsoft Responsible AI Impact Assessment Template. (June 2022). <a href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Template.pdf">URL</a></p>
                              <p>Office of the Comptroller of the Currency. 2021. Comptroller's Handbook: Model Risk Management, Version 1.0, August 2021. <a href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html">URL</a></p>
                              <p>Solon Barocas, Asia J. Biega, Benjamin Fish, et al. 2020. When not to design, build, or deploy. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT* '20). Association for Computing Machinery, New York, NY, USA, 695. <a href="https://doi.org/10.1145/3351095.3375691">URL</a></p>
                            </div>
                          </li>
                        </ul>
                      </div>
                    </div>
                  </li>
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%201.4"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 1.4"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 1.4
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">Negative residual risks (defined as the sum of all unmitigated risks) to both downstream acquirers of AI systems and end users are documented.</p>
                      <div id="button-MANAGE 1.4"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Organizations may choose to accept or transfer some of the documented risks from MAP and MANAGE 1.3 and 2.1. Such risks, known as residual risk, may affect downstream AI actors such as those engaged in system procurement or use. Transparently monitoring and managing residual risks enables cost-benefit analysis and examination of an AI system’s potential value versus its potential negative impacts.</p>
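                              <p>As a worked, hypothetical illustration of the definition above (negative residual risk as the sum of all unmitigated risks), the fragment below tallies the risks left after treatment so they can be disclosed to downstream acquirers and end users. All data are illustrative assumptions.</p>
                              <pre><code class="language-python">
# Hypothetical sketch: compute and document negative residual risk as the
# sum of all risks left unmitigated (accepted or transferred). Illustrative.

RISKS = [
    # (description, risk score, chosen response)
    ("privacy leakage",          1.8, "mitigate"),
    ("biased recommendations",   2.8, "accept"),
    ("third-party model outage", 1.0, "transfer"),
]

TREATED = {"mitigate", "avoid"}  # responses that remove the risk

residual = [r for r in RISKS if r[2] not in TREATED]
print("Residual risks disclosed to downstream AI actors:")
for name, score, response in residual:
    print(" ", name, "(score", score, "- response:", response + ")")
print("Total residual risk:", sum(score for _, score, _ in residual))
</code></pre>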
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Document residual risks within risk response plans, denoting risks that have been accepted, transferred, or subject to minimal mitigation.</li>
                                <li>Establish procedures for disclosing residual risks to relevant downstream AI actors.</li>
                                <li>Inform relevant downstream AI actors of requirements for safe operation, known limitations, and suggested warning labels as identified in MAP 3.4.</li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Transparency and Documentation</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <h5>Organizations can document the following</h5>
                              <ul>
                                <li>What are the roles, responsibilities, and delegation of authorities of personnel involved in the design, development, deployment, assessment, and monitoring of the AI system?</li>
                                <li>Who will be responsible for maintaining, re-verifying, monitoring, and updating this AI once deployed?</li>
                                <li>How will updates/revisions be documented and communicated? How often and by whom?</li>
                                <li>How easily accessible and current is the information available to external stakeholders?</li>
                              </ul>
                              <h5>AI Transparency Resources</h5>
                              <ul>
                                <li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies &amp; Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
                                <li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
                                <li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">References</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Arvind Narayanan. How to recognize AI snake oil. Retrieved October 15, 2022. <a href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf">URL</a></p>
                              <p>Board of Governors of the Federal Reserve System. SR 11-7: Guidance on Model Risk Management. (April 4, 2011). <a href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm">URL</a></p>
                              <p>Emanuel Moss, Elizabeth Watkins, Ranjit Singh, Madeleine Clare Elish, and Jacob Metcalf. 2021. Assembling Accountability: Algorithmic Impact Assessment for the Public Interest. (June 29, 2021). <a href="https://ssrn.com/abstract=3877437">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3877437">URL</a></p>
                              <p>Henry L. Fraser and Jose-Miguel Bello y Villarino. 2021. Where Residual Risks Reside: A Comparative Approach to Art 9(4) of the European Union's Proposed AI Regulation. (September 30, 2021). <a href="https://ssrn.com/abstract=3960461">LINK</a>, <a href="http://dx.doi.org/10.2139/ssrn.3960461">URL</a></p>
                              <p>Microsoft. 2022. Microsoft Responsible AI Impact Assessment Template. (June 2022). <a href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Template.pdf">URL</a></p>
                              <p>Office of the Comptroller of the Currency. 2021. Comptroller's Handbook: Model Risk Management, Version 1.0, August 2021. <a href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html">URL</a></p>
                              <p>Solon Barocas, Asia J. Biega, Benjamin Fish, et al. 2020. When not to design, build, or deploy. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency (FAT* '20). Association for Computing Machinery, New York, NY, USA, 695. <a href="https://doi.org/10.1145/3351095.3375691">URL</a></p>
                            </div>
                          </li>
                        </ul>
                      </div>
                    </div>
                  </li>
                </ul>
              </li>
              <li>
                <h2 class="pbindex__top__heading">Manage 2</h2>
                <p class="usa-intro pbindex__top__title">Strategies to maximize AI benefits and minimize negative impacts are planned, prepared, implemented, and documented, and informed by input from relevant AI actors.</p>
                <ul class="pbindex__subcat-ul">
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%202.1"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 2.1"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 2.1
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">Resources required to manage AI risks are taken into account, along with viable non-AI alternative systems, approaches, or methods, to reduce the magnitude or likelihood of potential impacts.</p>
                      <div id="button-MANAGE 2.1"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Organizational risk response may entail identifying and analyzing alternative approaches, methods, processes, or systems, and balancing tradeoffs between trustworthiness characteristics and how they relate to organizational principles and societal values. Analysis of these tradeoffs is informed by consulting with interdisciplinary organizational teams and independent domain experts, and by engaging with individuals or community groups. These processes require sufficient resource allocation.</p>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Plan and implement risk management practices in accordance with established organizational risk tolerances.</li>
                                <li>Verify risk management teams are resourced to carry out functions, including:<ul>
                                  <li>Establishing processes for considering methods that are not automated or are semi-automated, and other procedural alternatives for AI functions.</li>
                                  <li>Enhancing AI system transparency mechanisms for AI teams.</li>
                                  <li>Enabling exploration of AI system limitations by AI teams.</li>
                                  <li>Identifying, assessing, and cataloging past failed designs and negative impacts or outcomes to avoid known failure modes.</li>
                                </ul>
                                </li>
                                <li>Identify resource allocation approaches for managing risks in systems (a sketch follows this list):<ul>
                                  <li>deemed high-risk,</li>
                                  <li>that self-update (adaptive, online, reinforcement, self-supervised learning, or similar),</li>
                                  <li>trained without access to ground truth (unsupervised, semi-supervised learning, or similar),</li>
                                  <li>with high uncertainty or where risk management is insufficient.</li>
                                </ul>
                                </li>
                                <li>Regularly seek and integrate external expertise and perspectives to supplement organizational diversity (e.g., demographic, disciplinary), equity, inclusion, and accessibility where internal capacity is lacking.</li>
                                <li>Enable and encourage regular, open communication and feedback among AI actors and internal or external stakeholders related to system design or deployment decisions.</li>
                                <li>Prepare and document plans for continuous monitoring and feedback mechanisms.</li>
                              </ul>
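                              <p>A hypothetical sketch of the resource-allocation triage in the grouped item above: systems are flagged for the listed properties (high risk, self-updating, no ground truth, high uncertainty), and more raised flags map to a more intensive resource tier. The system names, flags, and tiers are illustrative assumptions, not a prescribed scheme.</p>
                              <pre><code class="language-python">
# Hypothetical sketch: route extra risk management resources to systems
# with properties that warrant them. All systems and flags are illustrative.

SYSTEMS = [
    {"name": "loan screening",  "high_risk": True,  "self_updating": False,
     "no_ground_truth": False, "high_uncertainty": False},
    {"name": "content ranking", "high_risk": False, "self_updating": True,
     "no_ground_truth": True,  "high_uncertainty": True},
]

FLAGS = ("high_risk", "self_updating", "no_ground_truth", "high_uncertainty")

def resource_tier(system):
    """More flags raised means more monitoring and review resources."""
    raised = sum(1 for flag in FLAGS if system[flag])
    return {0: "baseline", 1: "elevated"}.get(raised, "intensive")

for system in SYSTEMS:
    print(system["name"], "->", resource_tier(system))
</code></pre>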
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Transparency and Documentation</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <h5>Organizations can document the following</h5>
                              <ul>
                                <li>Are mechanisms in place to evaluate whether internal teams are empowered and resourced to effectively carry out risk management functions?</li>
                                <li>How will user and other forms of stakeholder engagement be integrated into risk management processes?</li>
                              </ul>
                              <h5>AI Transparency Resources</h5>
                              <ul>
                                <li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
                                <li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
                                <li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies &amp; Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
                              </ul>
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">References</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>Board of Governors of the Federal Reserve System. SR 11-7: Guidance on Model Risk Management. (April 4, 2011). <a href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm">URL</a></p>
                              <p>David Wright. 2013. Making Privacy Impact Assessments More Effective. The Information Society, 29 (Oct 2013), 307-315. <a href="https://doi.org/10.1080/01972243.2013.825687">URL</a></p>
                              <p>Margaret Mitchell, Simone Wu, Andrew Zaldivar, et al. 2019. Model Cards for Model Reporting. In Proceedings of the Conference on Fairness, Accountability, and Transparency (FAT* '19). Association for Computing Machinery, New York, NY, USA, 220–229. <a href="https://doi.org/10.1145/3287560.3287596">URL</a></p>
                              <p>Office of the Comptroller of the Currency. 2021. Comptroller's Handbook: Model Risk Management, Version 1.0, August 2021. <a href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html">URL</a></p>
                              <p>Timnit Gebru, Jamie Morgenstern, Briana Vecchione, et al. 2021. Datasheets for Datasets. arXiv:1803.09010. <a href="https://arxiv.org/abs/1803.09010">URL</a></p>
                            </div>
                          </li>
                        </ul>
                      </div>
                    </div>
                  </li>
                  <li>
                    <div data-allow-multiple="data-allow-multiple"
                         class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
                      <h3 id="Manage%202.2"
                          class="usa-accordion__heading pbindex__outer-accordion__heading">
                        <button type="button"
                                aria-expanded="false"
                                aria-controls="button-MANAGE 2.2"
                                class="usa-accordion__button pbindex__outer-accordion__button">
                          MANAGE 2.2
                        </button>
                      </h3>
                      <p class="pbindex__outer-accordion__description">Mechanisms are in place and applied to sustain the value of deployed AI systems.</p>
                      <div id="button-MANAGE 2.2"
                           class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
                        <ul class="pbindex__collection__ul usa-collection">
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">About</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <p>System performance and trustworthiness may evolve and shift over time once an AI system is deployed and put into operation. This phenomenon, generally known as drift, can degrade the value of the AI system to the organization and increase the likelihood of negative impacts. Regular monitoring of AI systems’ performance and trustworthiness enhances organizations’ ability to detect and respond to drift, and thus sustain an AI system’s value once deployed. Processes and mechanisms for regular monitoring address system functionality and behavior, as well as impacts and alignment with the values and norms within the specific context of use. For example, considerations regarding impacts on personal or public safety or privacy may include limiting high speeds when operating autonomous vehicles or restricting illicit content recommendations for minors.</p>
                              <p>Regular monitoring activities can enable organizations to systematically and proactively identify emergent risks and respond according to established protocols and metrics. Options for organizational responses include 1) avoiding the risk, 2) accepting the risk, 3) mitigating the risk, or 4) transferring the risk. Each of these actions requires planning and resources. Organizations are encouraged to establish risk management protocols with consideration of the trustworthiness characteristics, the deployment context, and real-world impacts.</p>
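                              <p>The following is a minimal, hypothetical sketch of such monitoring: it compares the distribution of a model score at deployment with a recent production window using the population stability index (PSI), one common drift statistic. The synthetic data, window sizes, and the 0.2 alert threshold (a widespread rule of thumb) are illustrative assumptions.</p>
                              <pre><code class="language-python">
# Hypothetical sketch: detect distribution drift in a bounded model score
# with the population stability index (PSI). Standard library only.

import math
import random

def histogram(sample, bins):
    """Fraction of a [0, 1] sample falling in each of `bins` equal buckets."""
    counts = [0] * bins
    for x in sample:
        counts[min(int(x * bins), bins - 1)] += 1
    return [max(c / len(sample), 1e-6) for c in counts]  # avoid log(0)

def psi(expected, actual, bins=10):
    """Population stability index between reference and recent samples."""
    e_frac = histogram(expected, bins)
    a_frac = histogram(actual, bins)
    return sum((a - e) * math.log(a / e) for e, a in zip(e_frac, a_frac))

random.seed(0)
reference = [random.betavariate(2, 5) for _ in range(5000)]   # at deployment
production = [random.betavariate(3, 4) for _ in range(5000)]  # recent window

value = psi(reference, production)
status = "investigate drift" if value > 0.2 else "stable"
print("PSI =", round(value, 3), "-", status)
</code></pre>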
                            </div>
                          </li>
                          <li class="usa-collection__item">
                            <div class="pbindex__content_section_heading_container">
                              <h4 class="usa-collection__heading">Suggested Actions</h4>
                            </div>
                            <div class="usa-collection__body pbindex__content_section_contentp_container">
                              <ul>
                                <li>Establish risk controls considering trustworthiness characteristics, including (a sketch of the business-rules item follows this list):<ul>
                                  <li>Data management, quality, and privacy (e.g., minimization, rectification, or deletion requests) controls as part of organizational data governance policies.</li>
                                  <li>Machine learning and end-point security countermeasures (e.g., robust models, differential privacy, authentication, throttling).</li>
                                  <li>Business rules that augment, limit, or restrict AI system outputs within certain contexts.</li>
                                  <li>Utilizing domain expertise related to deployment context for continuous improvement and TEVV across the AI lifecycle.</li>
                                  <li>Development and regular tracking of human-AI teaming configurations.</li>
                                  <li>Model assessment and test, evaluation, validation and verification (TEVV) protocols.</li>
                                  <li>Use of standardized documentation and transparency mechanisms.</li>
                                  <li>Software quality assurance practices across the AI lifecycle.</li>
                                  <li>Mechanisms to explore system limitations and avoid past failed designs or deployments.</li>
                                </ul>
                                </li>
                                <li>Establish mechanisms to capture feedback from system end users and potentially impacted groups while the system is in deployment, including feedback about how changes in system deployment (e.g., introducing new technology, decommissioning algorithms and models, or adapting the system, model, or algorithm) may create negative impacts that are not visible along the AI lifecycle.</li>
                                <li>Review insurance policies, warranties, or contracts for legal or oversight requirements for risk transfer procedures.</li>
                                <li>Document risk tolerance decisions and risk acceptance procedures.</li>
                              </ul>
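                              <p>A hypothetical sketch of the business-rules item above: a post-processing guard applied to model outputs before they reach users, echoing the context examples from the About text (age-restricted content for minors, speed limits for autonomous operation). The rule set and output format are illustrative assumptions.</p>
                              <pre><code class="language-python">
# Hypothetical sketch: business rules that limit or restrict AI system
# outputs within certain deployment contexts. Illustrative only.

def apply_business_rules(outputs, context):
    """Filter or cap model outputs according to deployment-context rules."""
    allowed = []
    for item in outputs:
        # Rule 1: drop age-restricted recommendations for minor accounts.
        if item.get("age_restricted") and context["minor"]:
            continue
        # Rule 2: cap any proposed speed at the context's legal limit.
        if "speed" in item:
            item = dict(item, speed=min(item["speed"], context["speed_limit"]))
        allowed.append(item)
    return allowed

outputs = [{"id": 1, "age_restricted": True},
           {"id": 2, "age_restricted": False, "speed": 130}]
context = {"minor": True, "speed_limit": 100}
print(apply_business_rules(outputs, context))
</code></pre>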
|
||
|
||
</div>
|
||
</li>
|
||
|
||
|
||
<li class="usa-collection__item">
|
||
<div class="pbindex__content_section_heading_container">
|
||
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
|
||
</div>
|
||
<div class="usa-collection__body pbindex__content_section_contentp_container">
|
||
|
||
<h5>Organizations can document the following</h5>
|
||
<ul>
|
||
<li>To what extent can users or parties affected by the outputs of the AI system test the AI system and provide feedback?</li>
|
||
<li>Could the AI system expose people to harm or negative impacts? What was done to mitigate or reduce the potential for harm?</li>
|
||
<li>How will the accountable human(s) address changes in accuracy and precision due to either an adversary’s attempts to disrupt the AI or unrelated changes in the operational or business environment?</li>
|
||
</ul>
|
||
<h5>AI Transparency Resources</h5>
|
||
<ul>
|
||
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
|
||
<li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
|
||
</ul>
|
||
|
||
</div>
|
||
</li>
|
||
|
||
|
||
<li class="usa-collection__item">
|
||
<div class="pbindex__content_section_heading_container">
|
||
<h4 class="usa-collection__heading">References</h4>
|
||
</div>
|
||
<div class="usa-collection__body pbindex__content_section_contentp_container">
|
||
|
||
<h5>Safety, Validity and Reliability Risk Management Approaches and Resources</h5>
<p>AI Incident Database. 2022. AI Incident Database. <a href="https://incidentdatabase.ai/">URL</a></p>
<p>AIAAIC Repository. 2022. AI, algorithmic and automation incidents collected, dissected, examined, and divulged. <a href="https://www.aiaaic.org/aiaaic-repository">URL</a></p>
<p>Alexander D'Amour, Katherine Heller, Dan Moldovan, et al. 2020. Underspecification Presents Challenges for Credibility in Modern Machine Learning. arXiv:2011.03395. <a href="https://arxiv.org/abs/2011.03395">URL</a></p>
<p>Andrew L. Beam, Arjun K. Manrai, Marzyeh Ghassemi. 2020. Challenges to the Reproducibility of Machine Learning Models in Health Care. JAMA 323, 4 (January 6, 2020), 305-306. <a href="https://doi.org/10.1001/jama.2019.20866">URL</a></p>
<p>Anthony M. Barrett, Dan Hendrycks, Jessica Newman, et al. 2022. Actionable Guidance for High-Consequence AI Risk Management: Towards Standards Addressing AI Catastrophic Risks. arXiv:2206.08966. <a href="https://doi.org/10.48550/arXiv.2206.08966">URL</a></p>
<p>Debugging Machine Learning Models. In Proceedings of ICLR 2019 Workshop, May 6, 2019, New Orleans, Louisiana. <a href="https://debug-ml-iclr2019.github.io/">URL</a></p>
<p>Jessie J. Smith, Saleema Amershi, Solon Barocas, et al. 2022. REAL ML: Recognizing, Exploring, and Articulating Limitations of Machine Learning Research. arXiv:2205.08363. <a href="https://arxiv.org/abs/2205.08363">URL</a></p>
<p>Joelle Pineau, Philippe Vincent-Lamarre, Koustuv Sinha, et al. 2020. Improving Reproducibility in Machine Learning Research (A Report from the NeurIPS 2019 Reproducibility Program). arXiv:2003.12206. <a href="https://doi.org/10.48550/arXiv.2003.12206">URL</a></p>
<p>Kirstie Whitaker. 2017. Showing your working: a how to guide to reproducible research. (August 2017). <a href="https://github.com/WhitakerLab/ReproducibleResearch/blob/master/PRESENTATIONS/Whitaker_ICON_August2017.pdf">LINK</a>, <a href="https://doi.org/10.6084/m9.figshare.4244996.v2">URL</a></p>
<p>Netflix. Chaos Monkey. <a href="https://netflix.github.io/chaosmonkey/">URL</a></p>
<p>Peter Henderson, Riashat Islam, Philip Bachman, et al. 2018. Deep reinforcement learning that matters. Proceedings of the AAAI Conference on Artificial Intelligence 32, 1 (Apr. 2018). <a href="https://doi.org/10.1609/aaai.v32i1.11694">URL</a></p>
<p>Suchi Saria, Adarsh Subbaswamy. 2019. Tutorial: Safe and Reliable Machine Learning. arXiv:1904.07204. <a href="https://doi.org/10.48550/arXiv.1904.07204">URL</a></p>
<p>Daniel Kang, Deepti Raghavan, Peter Bailis, and Matei Zaharia. 2020. Model Assertions for Monitoring and Improving ML Models. Proceedings of Machine Learning and Systems 2 (2020), 481-496. <a href="https://proceedings.mlsys.org/paper/2020/file/a2557a7b2e94197ff767970b67041697-Paper.pdf">URL</a></p>
<h5>Managing Bias Risk</h5>
<p>National Institute of Standards and Technology (NIST), Reva Schwartz, Apostol Vassilev, et al. 2022. NIST Special Publication 1270: Towards a Standard for Identifying and Managing Bias in Artificial Intelligence. <a href="https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.1270.pdf">URL</a></p>
<h5>Bias Testing and Remediation Approaches</h5>
<p>Alekh Agarwal, Alina Beygelzimer, Miroslav Dudík, et al. 2018. A Reductions Approach to Fair Classification. arXiv:1803.02453. <a href="https://doi.org/10.48550/arXiv.1803.02453">URL</a></p>
<p>Brian Hu Zhang, Blake Lemoine, Margaret Mitchell. 2018. Mitigating Unwanted Biases with Adversarial Learning. arXiv:1801.07593. <a href="https://doi.org/10.48550/arXiv.1801.07593">URL</a></p>
<p>Drago Plečko, Nicolas Bennett, Nicolai Meinshausen. 2021. Fairadapt: Causal Reasoning for Fair Data Pre-processing. arXiv:2110.10200. <a href="https://doi.org/10.48550/arXiv.2110.10200">URL</a></p>
<p>Faisal Kamiran, Toon Calders. 2012. Data Preprocessing Techniques for Classification without Discrimination. Knowledge and Information Systems 33 (2012), 1–33. <a href="https://doi.org/10.1007/s10115-011-0463-8">URL</a></p>
<p>Faisal Kamiran, Asim Karim, Xiangliang Zhang. 2012. Decision Theory for Discrimination-Aware Classification. In Proceedings of the 2012 IEEE 12th International Conference on Data Mining, December 10-13, 2012, Brussels, Belgium. IEEE, 924-929. <a href="https://doi.org/10.1109/ICDM.2012.45">URL</a></p>
<p>Flavio P. Calmon, Dennis Wei, Karthikeyan Natesan Ramamurthy, et al. 2017. Optimized Data Pre-Processing for Discrimination Prevention. arXiv:1704.03354. <a href="https://doi.org/10.48550/arXiv.1704.03354">URL</a></p>
<p>Geoff Pleiss, Manish Raghavan, Felix Wu, et al. 2017. On Fairness and Calibration. arXiv:1709.02012. <a href="https://doi.org/10.48550/arXiv.1709.02012">URL</a></p>
<p>L. Elisa Celis, Lingxiao Huang, Vijay Keswani, et al. 2020. Classification with Fairness Constraints: A Meta-Algorithm with Provable Guarantees. arXiv:1806.06055. <a href="https://doi.org/10.48550/arXiv.1806.06055">URL</a></p>
<p>Michael Feldman, Sorelle Friedler, John Moeller, et al. 2014. Certifying and Removing Disparate Impact. arXiv:1412.3756. <a href="https://doi.org/10.48550/arXiv.1412.3756">URL</a></p>
<p>Michael Kearns, Seth Neel, Aaron Roth, et al. 2017. Preventing Fairness Gerrymandering: Auditing and Learning for Subgroup Fairness. arXiv:1711.05144. <a href="https://doi.org/10.48550/arXiv.1711.05144">URL</a></p>
<p>Michael Kearns, Seth Neel, Aaron Roth, et al. 2018. An Empirical Study of Rich Subgroup Fairness for Machine Learning. arXiv:1808.08166. <a href="https://doi.org/10.48550/arXiv.1808.08166">URL</a></p>
<p>Moritz Hardt, Eric Price, and Nathan Srebro. 2016. Equality of Opportunity in Supervised Learning. In Proceedings of the 30th Conference on Neural Information Processing Systems (NIPS 2016), 2016, Barcelona, Spain. <a href="https://papers.nips.cc/paper/2016/file/9d2682367c3935defcb1f9e247a97c0d-Paper.pdf">URL</a></p>
<p>Rich Zemel, Yu Wu, Kevin Swersky, et al. 2013. Learning Fair Representations. In Proceedings of the 30th International Conference on Machine Learning 2013, PMLR 28, 3, 325-333. <a href="http://proceedings.mlr.press/v28/zemel13.html">URL</a></p>
<p>Toshihiro Kamishima, Shotaro Akaho, Hideki Asoh, and Jun Sakuma. 2012. Fairness-Aware Classifier with Prejudice Remover Regularizer. In Peter A. Flach, Tijl De Bie, Nello Cristianini (eds) Machine Learning and Knowledge Discovery in Databases. European Conference ECML PKDD 2012, Proceedings Part II, September 24-28, 2012, Bristol, UK. Lecture Notes in Computer Science 7524. Springer, Berlin, Heidelberg. <a href="https://doi.org/10.1007/978-3-642-33486-3_3">URL</a></p>
<h5>Security and Resilience Resources</h5>
<p>FTC Start With Security Guidelines. 2015. <a href="https://www.ftc.gov/system/files/documents/plain-language/pdf0205-startwithsecurity.pdf">URL</a></p>
<p>Gary McGraw et al. 2022. BIML Interactive Machine Learning Risk Framework. Berryville Institute of Machine Learning. <a href="https://berryvilleiml.com/interactive/">URL</a></p>
<p>Ilia Shumailov, Yiren Zhao, Daniel Bates, et al. 2021. Sponge Examples: Energy-Latency Attacks on Neural Networks. arXiv:2006.03463. <a href="https://doi.org/10.48550/arXiv.2006.03463">URL</a></p>
<p>Marco Barreno, Blaine Nelson, Anthony D. Joseph, et al. 2010. The Security of Machine Learning. Machine Learning 81 (2010), 121-148. <a href="https://doi.org/10.1007/s10994-010-5188-5">URL</a></p>
<p>Matt Fredrikson, Somesh Jha, Thomas Ristenpart. 2015. Model Inversion Attacks that Exploit Confidence Information and Basic Countermeasures. In Proceedings of the 22nd ACM SIGSAC Conference on Computer and Communications Security (CCS '15), October 2015. Association for Computing Machinery, New York, NY, USA, 1322–1333. <a href="https://doi.org/10.1145/2810103.2813677">URL</a></p>
<p>National Institute of Standards and Technology (NIST). 2022. Cybersecurity Framework. <a href="https://www.nist.gov/cyberframework">URL</a></p>
<p>Nicolas Papernot. 2018. A Marauder's Map of Security and Privacy in Machine Learning. arXiv:1811.01134. <a href="https://doi.org/10.48550/arXiv.1811.01134">URL</a></p>
<p>Reza Shokri, Marco Stronati, Congzheng Song, et al. 2017. Membership Inference Attacks against Machine Learning Models. arXiv:1610.05820. <a href="https://doi.org/10.48550/arXiv.1610.05820">URL</a></p>
<p>Adversarial ML Threat Matrix (MITRE). 2021. <a href="https://github.com/mitre/advmlthreatmatrix">URL</a></p>
<h5>Interpretability and Explainability Approaches</h5>
<p>Chaofan Chen, Oscar Li, Chaofan Tao, et al. 2019. This Looks Like That: Deep Learning for Interpretable Image Recognition. arXiv:1806.10574. <a href="https://doi.org/10.48550/arXiv.1806.10574">URL</a></p>
<p>Cynthia Rudin. 2019. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. arXiv:1811.10154. <a href="https://doi.org/10.48550/arXiv.1811.10154">URL</a></p>
<p>Daniel W. Apley, Jingyu Zhu. 2019. Visualizing the Effects of Predictor Variables in Black Box Supervised Learning Models. arXiv:1612.08468. <a href="https://doi.org/10.48550/arXiv.1612.08468">URL</a></p>
<p>David A. Broniatowski. 2021. Psychological Foundations of Explainability and Interpretability in Artificial Intelligence. National Institute of Standards and Technology (NIST) IR 8367. National Institute of Standards and Technology, Gaithersburg, MD. <a href="https://doi.org/10.6028/NIST.IR.8367">URL</a></p>
<p>Forough Poursabzi-Sangdeh, Daniel G. Goldstein, Jake M. Hofman, et al. 2021. Manipulating and Measuring Model Interpretability. arXiv:1802.07810. <a href="https://doi.org/10.48550/arXiv.1802.07810">URL</a></p>
<p>Hongyu Yang, Cynthia Rudin, Margo Seltzer. 2017. Scalable Bayesian Rule Lists. arXiv:1602.08610. <a href="https://doi.org/10.48550/arXiv.1602.08610">URL</a></p>
<p>P. Jonathon Phillips, Carina A. Hahn, Peter C. Fontana, et al. 2021. Four Principles of Explainable Artificial Intelligence. National Institute of Standards and Technology (NIST) IR 8312. National Institute of Standards and Technology, Gaithersburg, MD. <a href="https://doi.org/10.6028/NIST.IR.8312">URL</a></p>
<p>Scott Lundberg, Su-In Lee. 2017. A Unified Approach to Interpreting Model Predictions. arXiv:1705.07874. <a href="https://doi.org/10.48550/arXiv.1705.07874">URL</a></p>
<p>Susanne Gaube, Harini Suresh, Martina Raue, et al. 2021. Do as AI say: susceptibility in deployment of clinical decision-aids. npj Digital Medicine 4, Article 31 (2021). <a href="https://doi.org/10.1038/s41746-021-00385-9">URL</a></p>
<p>Yin Lou, Rich Caruana, Johannes Gehrke, et al. 2013. Accurate intelligible models with pairwise interactions. In Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining (KDD '13), August 2013. Association for Computing Machinery, New York, NY, USA, 623–631. <a href="https://doi.org/10.1145/2487575.2487579">URL</a></p>
<h5>Post-Decommission</h5>
<p>Upol Ehsan, Ranjit Singh, Jacob Metcalf, and Mark O. Riedl. 2022. The Algorithmic Imprint. In Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency. <a href="https://arxiv.org/pdf/2206.03275v1">URL</a></p>
<h5>Privacy Resources</h5>
<p>National Institute of Standards and Technology (NIST). 2022. Privacy Framework. <a href="https://www.nist.gov/privacy-framework">URL</a></p>
<h5>Data Governance</h5>
<p>Marijn Janssen, Paul Brous, Elsa Estevez, et al. 2020. Data Governance: Organizing Data for Trustworthy Artificial Intelligence. Government Information Quarterly 37, 3 (2020), 101493. <a href="https://doi.org/10.1016/j.giq.2020.101493">URL</a></p>
<h5>Software Resources</h5>
<ul>
<li><a href="https://github.com/SelfExplainML/PiML-Toolbox">PiML</a> (explainable models, performance assessment)</li>
<li><a href="https://github.com/interpretml/interpret">InterpretML</a> (explainable models)</li>
<li><a href="https://cran.r-project.org/web/packages/iml/index.html">iml</a> (explainable models)</li>
<li><a href="https://github.com/ModelOriented/drifter">Drifter</a> library (performance assessment)</li>
<li><a href="https://github.com/uber/manifold">Manifold</a> library (performance assessment)</li>
<li><a href="https://github.com/SALib/SALib">SALib</a> library (performance assessment)</li>
<li><a href="https://pair-code.github.io/what-if-tool/index.html#about">What-If Tool</a> (performance assessment)</li>
<li><a href="http://rasbt.github.io/mlxtend/">MLxtend</a> (performance assessment)</li>
<li>AI Fairness 360 (see the sketch after this list): <ul>
<li><a href="https://github.com/Trusted-AI/AIF360">Python</a> (bias testing and mitigation)</li>
<li><a href="https://github.com/Trusted-AI/AIF360/tree/master/aif360/aif360-r">R</a> (bias testing and mitigation)</li>
</ul>
</li>
<li><a href="https://github.com/Trusted-AI/adversarial-robustness-toolbox">Adversarial Robustness Toolbox</a> (ML security)</li>
<li><a href="https://github.com/MadryLab/robustness">Robustness</a> (ML security)</li>
<li><a href="https://github.com/tensorflow/privacy">tensorflow/privacy</a> (ML security)</li>
<li><a href="https://www.nist.gov/itl/applied-cybersecurity/privacy-engineering/collaboration-space/focus-areas/de-id/tools">NIST De-identification Tools</a> (privacy and ML security)</li>
<li><a href="https://dvc.org/">DVC</a> (MLOps, deployment)</li>
<li><a href="https://github.com/gigantum">Gigantum</a> (MLOps, deployment)</li>
<li><a href="https://mlflow.org/">MLflow</a> (MLOps, deployment)</li>
<li><a href="https://github.com/google/ml-metadata">ML Metadata (MLMD)</a> (MLOps, deployment)</li>
<li><a href="https://github.com/VertaAI/modeldb">ModelDB</a> (MLOps, deployment)</li>
</ul>
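<p>As a minimal, hedged illustration of what the bias testing entries above (for example, AI Fairness 360) compute under the hood, the following sketch calculates disparate impact for binary classifier outcomes and flags ratios outside the commonly cited four-fifths band. The toy data, group encoding, and escalation step are illustrative assumptions, not a prescribed method.</p>
<pre><code># A minimal sketch, not AIF360 itself: disparate impact for binary outcomes.
import numpy as np

def disparate_impact(y_pred, group):
    """Ratio of favorable-outcome rates: unprivileged over privileged."""
    y_pred, group = np.asarray(y_pred), np.asarray(group)
    rate_unpriv = y_pred[group == 0].mean()   # group 0 = unprivileged
    rate_priv = y_pred[group == 1].mean()     # group 1 = privileged
    return rate_unpriv / rate_priv

preds  = [1, 0, 1, 1, 0, 0, 0, 1, 0, 0]      # toy favorable-outcome flags
groups = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
di = disparate_impact(preds, groups)
# Flag ratios outside the "four-fifths" band [0.8, 1.25] for human review.
if max(di, 1.0 / di) > 1.25:
    print(f"disparate impact {di:.2f} outside tolerance; escalate for review")
</code></pre>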
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%202.3"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 2.3"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 2.3
</button>
</h3>
<p class="pbindex__outer-accordion__description">Procedures are followed to respond to and recover from a previously unknown risk when it is identified.</p>
<div id="button-MANAGE 2.3"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>AI systems, like any technology, can exhibit non-functionality, failure, or unexpected and unusual behavior. They can also be subject to attacks, incidents, or other misuse or abuse, whose sources are not always known a priori. Organizations can establish, document, communicate, and maintain treatment procedures to recognize, counter, mitigate, and manage risks that were not previously identified.</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Establish protocols, resources, and metrics for continual monitoring of AI systems’ performance, trustworthiness, and alignment with contextual norms and values.</li>
<li>Establish and regularly review treatment and response plans for incidents, negative impacts, or outcomes.</li>
<li>Establish and maintain procedures to regularly monitor AI system components for drift, decontextualization, or other behavior factors (see the drift-monitoring sketch after this list).</li>
<li>Establish and maintain procedures for capturing feedback about negative impacts.</li>
<li>Verify contingency processes to handle any negative impacts associated with mission-critical AI systems, and to deactivate systems.</li>
<li>Enable preventive and post-hoc exploration of AI system limitations by relevant AI actor groups.</li>
<li>Decommission systems that exceed risk tolerances.</li>
</ul>
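<p>As referenced in the drift-monitoring action above, the following is a minimal sketch of one common drift check, the Population Stability Index (PSI), comparing a production feature distribution against its training-time baseline. The bin count, synthetic data, and 0.2 alert level are illustrative assumptions rather than prescribed values.</p>
<pre><code># Minimal sketch of drift monitoring via the Population Stability Index (PSI).
import numpy as np

def psi(expected, observed, bins=10):
    """Compare a production feature distribution against its training baseline."""
    cuts = np.quantile(expected, np.linspace(0, 1, bins + 1))
    cuts[0], cuts[-1] = -np.inf, np.inf           # cover the full value range
    e_frac = np.histogram(expected, cuts)[0] / len(expected)
    o_frac = np.histogram(observed, cuts)[0] / len(observed)
    e_frac = np.clip(e_frac, 1e-6, None)          # avoid log(0)
    o_frac = np.clip(o_frac, 1e-6, None)
    return float(np.sum((o_frac - e_frac) * np.log(o_frac / e_frac)))

baseline = np.random.normal(0.0, 1, 5000)         # training-time snapshot
live     = np.random.normal(0.4, 1, 5000)         # shifted production data
score = psi(baseline, live)
if score > 0.2:                                   # common rule-of-thumb alert level
    print(f"PSI={score:.3f}: investigate drift before it erodes trustworthiness")
</code></pre>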
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>Who will be responsible for maintaining, re-verifying, monitoring, and updating this AI once deployed?</li>
<li>Are the responsibilities of the personnel involved in the various AI governance processes clearly defined? (Including responsibilities to decommission the AI system.)</li>
<li>What processes exist for data generation, acquisition/collection, ingestion, staging/storage, transformations, security, maintenance, and dissemination?</li>
<li>How will the appropriate performance metrics, such as accuracy, of the AI be monitored after the AI is deployed?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
<li>WEF - Companion to the Model AI Governance Framework – Implementation and Self-Assessment Guide for Organizations. <a href="https://www.pdpc.gov.sg/-/media/files/pdpc/pdf-files/resource-for-organisation/ai/sgisago.ashx">URL</a></li>
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>AI Incident Database. 2022. AI Incident Database. <a href="https://incidentdatabase.ai/">URL</a></p>
<p>AIAAIC Repository. 2022. AI, algorithmic and automation incidents collected, dissected, examined, and divulged. <a href="https://www.aiaaic.org/aiaaic-repository">URL</a></p>
<p>Andrew Burt and Patrick Hall. 2020. What to Do When AI Fails. O’Reilly Media, Inc. (May 18, 2020). Retrieved October 17, 2022. <a href="https://www.oreilly.com/radar/what-to-do-when-ai-fails/">URL</a></p>
<p>National Institute of Standards and Technology (NIST). 2022. Cybersecurity Framework. <a href="https://www.nist.gov/cyberframework">URL</a></p>
<p>SANS Institute. 2022. Security Consensus Operational Readiness Evaluation (SCORE) Security Checklist [or Advanced Persistent Threat (APT) Handling Checklist]. <a href="https://www.sans.org/media/score/checklists/APT-IncidentHandling-Checklist.pdf">URL</a></p>
<p>Suchi Saria, Adarsh Subbaswamy. 2019. Tutorial: Safe and Reliable Machine Learning. arXiv:1904.07204. <a href="https://doi.org/10.48550/arXiv.1904.07204">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%202.4"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 2.4"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 2.4
</button>
</h3>
<p class="pbindex__outer-accordion__description">Mechanisms are in place and applied, and responsibilities are assigned and understood, to supersede, disengage, or deactivate AI systems that demonstrate performance or outcomes inconsistent with intended use.</p>
<div id="button-MANAGE 2.4"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Performance inconsistent with intended use does not always increase risk or lead to negative impacts. Rigorous TEVV practices are useful for protecting against negative impacts regardless of intended use. When negative impacts do arise, superseding (bypassing), disengaging, or deactivating/decommissioning a model, AI system component(s), or the entire AI system may be necessary, such as when: </p>
<ul>
<li>a system reaches the end of its lifetime</li>
<li>detected or identified risks exceed tolerance thresholds</li>
<li>adequate system mitigation actions are beyond the organization’s capacity</li>
<li>feasible system mitigation actions do not meet regulatory or legal requirements, norms, or standards</li>
<li>impending risk is detected during continual monitoring, for which feasible mitigation cannot be identified or implemented in a timely fashion</li>
</ul>
<p>Safely removing AI systems from operation, either temporarily or permanently, under these scenarios requires standard protocols that minimize operational disruption and downstream negative impacts. Protocols can involve redundant or backup systems that are developed in alignment with established system governance policies (see GOVERN 1.7), regulatory compliance, legal frameworks, business requirements, and norms and standards within the application context of use. Decision thresholds and metrics for actions to bypass or deactivate system components are part of continual monitoring procedures. Incidents that result in a bypass/deactivate decision require documentation and review to understand root causes, impacts, and potential opportunities for mitigation and redeployment. Organizations are encouraged to develop risk and change management protocols that consider and anticipate upstream and downstream consequences of both temporary and permanent decommissioning, and to provide contingency options.</p>
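<p>The following is a minimal sketch of how the decision thresholds described above might gate a supersede/bypass action, routing requests to a redundant backup system after repeated tolerance breaches. The metric, threshold, breach count, and stand-in models are all illustrative assumptions, not a prescribed implementation.</p>
<pre><code># Minimal sketch of a monitored decision service with a bypass path.
from dataclasses import dataclass

@dataclass
class BypassPolicy:
    metric_name: str
    threshold: float              # tolerance agreed during governance review
    consecutive_breaches: int = 3

class MonitoredModel:
    def __init__(self, model, fallback, policy):
        self.model, self.fallback, self.policy = model, fallback, policy
        self.breaches = 0
        self.bypassed = False

    def record_metric(self, value):
        """Called by the monitoring job; decides whether to supersede the model."""
        self.breaches = self.breaches + 1 if value > self.policy.threshold else 0
        if self.breaches >= self.policy.consecutive_breaches:
            self.bypassed = True  # document the incident for root cause review

    def predict(self, x):
        # A redundant/backup system keeps the business function available.
        return self.fallback(x) if self.bypassed else self.model(x)

svc = MonitoredModel(model=lambda x: x * 2,        # stand-in model
                     fallback=lambda x: 0,         # conservative rule-based backup
                     policy=BypassPolicy("error_rate", threshold=0.05))
for err in (0.06, 0.07, 0.09):                     # three breaches in a row
    svc.record_metric(err)
print(svc.predict(10))                             # 0: now served by the fallback
</code></pre>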
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Regularly review established procedures for AI system bypass actions, including plans for redundant or backup systems to ensure continuity of operational and/or business functionality.</li>
<li>Regularly review system incident thresholds for activating bypass or deactivation responses.</li>
<li>Apply change management processes to understand the upstream and downstream consequences of bypassing or deactivating an AI system or AI system components.</li>
<li>Apply protocols, resources and metrics for decisions to supersede, bypass or deactivate AI systems or AI system components.</li>
<li>Preserve materials for forensic, regulatory, and legal review.</li>
<li>Conduct internal root cause analysis and process reviews of bypass or deactivation events.</li>
<li>Decommission and preserve system components that cannot be updated to meet criteria for redeployment.</li>
<li>Establish criteria for redeploying updated system components, in consideration of trustworthy characteristics.</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>What are the roles, responsibilities, and delegation of authorities of personnel involved in the design, development, deployment, assessment and monitoring of the AI system?</li>
<li>Did your organization implement a risk management system to address risks involved in deploying the identified AI solution (e.g. personnel risk or changes to commercial objectives)?</li>
<li>What testing, if any, has the entity conducted on the AI system to identify errors and limitations (e.g., adversarial or stress testing)?</li>
<li>To what extent does the entity have established procedures for retiring the AI system, if it is no longer needed?</li>
<li>How did the entity use assessments and/or evaluations to determine if the system can be scaled up, continue, or be decommissioned?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities. <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Decommissioning Template. Application Lifecycle And Supporting Docs. Cloud and Infrastructure Community of Practice. <a href="https://www.cio.gov/policies-and-priorities/application-lifecycle/">URL</a></p>
<p>Develop a Decommission Plan. M3 Playbook. Office of Shared Services and Solutions and Performance Improvement. General Services Administration. <a href="https://ussm.gsa.gov/2.8/">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Manage 3</h2>
<p class="usa-intro pbindex__top__title">AI risks and benefits from third-party entities are managed.</p>
<ul class="pbindex__subcat-ul">
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%203.1"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 3.1"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 3.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">AI risks and benefits from third-party resources are regularly monitored, and risk controls are applied and documented.</p>
<div id="button-MANAGE 3.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>AI systems may depend on external resources and associated processes, including third-party data, software, or hardware systems. Third parties supplying organizations with components and services, including tools, software, and expertise for AI system design, development, deployment, or use, can improve efficiency and scalability. This reliance can also increase complexity and opacity and, in turn, risk. Documenting third-party technologies, personnel, and resources that were employed can help manage risks. Focusing first and foremost on risks involving physical safety, legal liabilities, regulatory compliance, and negative impacts on individuals, groups, or society is recommended.</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Verify that applicable legal requirements have been addressed.</li>
<li>Apply organizational risk tolerance to third-party AI systems.</li>
<li>Apply and document organizational risk management plans and practices to third-party AI technology, personnel, or other resources.</li>
<li>Identify and maintain documentation for third-party AI systems and components (see the inventory sketch after this list).</li>
<li>Establish testing, evaluation, validation and verification processes for third-party AI systems that address the need for transparency without exposing proprietary algorithms.</li>
<li>Establish processes to identify beneficial use and risk indicators in third-party systems or components, such as an inconsistent software release schedule, sparse documentation, or incomplete software change management (e.g., lack of forward or backward compatibility).</li>
<li>Establish processes for third parties to report known and potential vulnerabilities, risks or biases in supplied resources.</li>
<li>Verify contingency processes for handling negative impacts associated with mission-critical third-party AI systems.</li>
<li>Monitor third-party AI systems for potential negative impacts and risks associated with trustworthiness characteristics.</li>
<li>Decommission third-party systems that exceed risk tolerances.</li>
</ul>
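<p>As referenced in the documentation action above, the following is a minimal sketch of a third-party component inventory record, including a content hash so a silently changed artifact is detectable. Every field name, the artifact file, and all values are illustrative assumptions.</p>
<pre><code># Minimal sketch of a third-party AI component inventory record.
import json, hashlib, pathlib

def fingerprint(path):
    """Content hash so a silently swapped third-party artifact is detectable."""
    return hashlib.sha256(pathlib.Path(path).read_bytes()).hexdigest()

artifact = pathlib.Path("vendor_model.bin")          # hypothetical artifact
artifact.write_bytes(b"placeholder weights")         # stand-in for a real download

record = {
    "component": "vendor-sentiment-model",           # hypothetical component name
    "supplier": "Example Vendor Inc.",
    "version": "2.3.1",
    "license_reviewed": True,
    "artifact_sha256": fingerprint(artifact),
    "documentation": "https://example.com/model-card",  # placeholder URL
    "risk_owner": "third-party-risk@example.com",
    "known_risks": ["sparse documentation", "no published release schedule"],
}
print(json.dumps(record, indent=2))
</code></pre>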
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>If a third party created the AI system or some of its components, how will you ensure a level of explainability or interpretability? Is there documentation?</li>
<li>If your organization obtained datasets from a third party, did your organization assess and manage the risks of using such datasets?</li>
<li>Did you establish a process for third parties (e.g. suppliers, end users, subjects, distributors/vendors or workers) to report potential vulnerabilities, risks or biases in the AI system?</li>
<li>Have legal requirements been addressed?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
<li>WEF - Companion to the Model AI Governance Framework – Implementation and Self-Assessment Guide for Organizations. <a href="https://www.pdpc.gov.sg/-/media/files/pdpc/pdf-files/resource-for-organisation/ai/sgisago.ashx">URL</a></li>
<li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Office of the Comptroller of the Currency. 2021. Proposed Interagency Guidance on Third-Party Relationships: Risk Management. July 12, 2021. <a href="https://www.occ.gov/news-issuances/news-releases/2021/nr-occ-2021-74a.pdf">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%203.2"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 3.2"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 3.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">Pre-trained models which are used for development are monitored as part of AI system regular monitoring and maintenance.</p>
<div id="button-MANAGE 3.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>A common approach in AI development is transfer learning, whereby an existing pre-trained model is adapted for use in a different, but related application. AI actors in development tasks often use pre-trained models from third-party entities for tasks such as image classification, language prediction, and entity recognition, because the resources to build such models may not be readily available to most organizations. Pre-trained models are typically trained to address various classification or prediction problems, using exceedingly large datasets and computationally intensive resources. The use of pre-trained models can make it difficult to anticipate negative system outcomes or impacts. Lack of documentation or transparency tools increases the difficulty and general complexity when deploying pre-trained models and hinders root cause analyses.</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Identify pre-trained models within AI system inventory for risk tracking (see the probe-set sketch after this list).</li>
<li>Establish processes to independently and continually monitor the performance and trustworthiness of pre-trained models as part of third-party risk tracking.</li>
<li>Monitor the performance and trustworthiness of AI system components connected to pre-trained models as part of third-party risk tracking.</li>
<li>Identify, document and remediate risks arising from AI system components and pre-trained models per organizational risk management procedures and third-party risk tracking.</li>
<li>Decommission AI system components and pre-trained models that exceed risk tolerances as part of third-party risk tracking.</li>
</ul>
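<p>As referenced in the inventory action above, the following is a minimal sketch of independently re-scoring a pre-trained model against a frozen probe set so degradation surfaces through third-party risk tracking. The registry keys, toy model, probe data, and tolerance are illustrative assumptions.</p>
<pre><code># Minimal sketch: continual probe-set checks on an inventoried pre-trained model.
import numpy as np

def probe_accuracy(model_fn, probe_inputs, probe_labels):
    """Re-run a frozen evaluation set against the (possibly updated) model."""
    preds = np.array([model_fn(x) for x in probe_inputs])
    return float((preds == np.asarray(probe_labels)).mean())

REGISTRY = {
    "upstream-language-model": {             # hypothetical inventory key
        "source": "third-party model hub",
        "baseline_probe_accuracy": 0.91,     # recorded when first adopted
        "tolerance": 0.05,                   # acceptable drop before escalation
    },
}

entry = REGISTRY["upstream-language-model"]
# Stand-in for the adapted model; a real check would call the served model.
current = probe_accuracy(lambda x: 0, range(100), [x % 2 for x in range(100)])
if entry["baseline_probe_accuracy"] - current > entry["tolerance"]:
    print("pre-trained model degraded; escalate per third-party risk procedures")
</code></pre>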
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>How has the entity documented the AI system’s data provenance, including sources, origins, transformations, augmentations, labels, dependencies, constraints, and metadata?</li>
<li>Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?</li>
<li>How does the entity ensure that the data collected are adequate, relevant, and not excessive in relation to the intended purpose?</li>
<li>If the dataset becomes obsolete, how will this be communicated?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
<li>WEF - Companion to the Model AI Governance Framework – Implementation and Self-Assessment Guide for Organizations. <a href="https://www.pdpc.gov.sg/-/media/files/pdpc/pdf-files/resource-for-organisation/ai/sgisago.ashx">URL</a></li>
<li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Larysa Visengeriyeva et al. “Awesome MLOps.” GitHub. Accessed January 9, 2023. <a href="https://github.com/visenger">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Manage 4</h2>
<p class="usa-intro pbindex__top__title">Risk treatments, including response and recovery, and communication plans for the identified and measured AI risks are documented and monitored regularly.</p>
<ul class="pbindex__subcat-ul">
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%204.1"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 4.1"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 4.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">Post-deployment AI system monitoring plans are implemented, including mechanisms for capturing and evaluating input from users and other relevant AI actors, appeal and override, decommissioning, incident response, recovery, and change management.</p>
<div id="button-MANAGE 4.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>AI system performance and trustworthiness can change due to a variety of factors. Regular AI system monitoring can help deployers identify performance degradations, adversarial attacks, unexpected and unusual behavior, near-misses, and impacts. Including pre- and post-deployment external feedback about AI system performance can enhance organizational awareness about positive and negative impacts, and reduce the time to respond to risks and harms.</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Establish and maintain procedures to monitor AI system performance for risks and negative and positive impacts associated with trustworthiness characteristics (see the assertion sketch after this list).</li>
<li>Perform post-deployment TEVV tasks to evaluate AI system validity and reliability, bias and fairness, privacy, and security and resilience.</li>
<li>Evaluate AI system trustworthiness, prior to deployment, in conditions similar to the deployment context of use.</li>
<li>Establish and implement red-teaming exercises at a prescribed cadence, and evaluate their efficacy.</li>
<li>Establish procedures for tracking dataset modifications such as data deletion or rectification requests.</li>
<li>Establish mechanisms for regular communication and feedback between relevant AI actors and internal or external stakeholders to capture information about system performance, trustworthiness and impact.</li>
<li>Share information about errors, near-misses, and attack patterns with incident databases, other organizations with similar systems, and system users and stakeholders.</li>
<li>Respond to and document detected or reported negative impacts or issues in AI system performance and trustworthiness.</li>
<li>Decommission systems that exceed established risk tolerances.</li>
</ul>
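<p>As referenced in the monitoring action above, the following is a minimal sketch of post-deployment checks in the spirit of the model assertions work cited under the MANAGE 2.2 references; the specific assertions and the hypothetical object-detection predictions they inspect are illustrative assumptions.</p>
<pre><code># Minimal sketch of model assertions as post-deployment monitoring checks.
def assert_confident(pred):
    # Very low confidence predictions should be routed to human review.
    return pred["confidence"] >= 0.2

def detections_flicker(prev_pred, pred):
    # Objects should not flicker in and out between consecutive video frames.
    return abs(len(pred["boxes"]) - len(prev_pred["boxes"])) > 1

def run_assertions(stream):
    """Yield (frame_id, failed_checks) so failures feed incident response."""
    prev = None
    for frame_id, pred in stream:
        failed = []
        if not assert_confident(pred):
            failed.append("low_confidence")
        if prev is not None and detections_flicker(prev, pred):
            failed.append("unstable_detections")
        if failed:
            yield frame_id, failed    # route to dashboards or an incident log
        prev = pred

frames = [
    (1, {"confidence": 0.9, "boxes": [1, 2]}),
    (2, {"confidence": 0.1, "boxes": [1, 2, 3, 4, 5]}),  # trips both checks
]
for frame_id, failed in run_assertions(frames):
    print(frame_id, failed)
</code></pre>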
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>To what extent has the entity documented the post-deployment AI system’s testing methodology, metrics, and performance outcomes?</li>
<li>How easily accessible and current is the information available to external stakeholders?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities, <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
<li>Datasheets for Datasets. <a href="https://arxiv.org/abs/1803.09010">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Navdeep Gill, Patrick Hall, Kim Montgomery, and Nicholas Schmidt. "A Responsible Machine Learning Workflow with Focus on Interpretable Models, Post-hoc Explanation, and Discrimination Testing." Information 11, no. 3 (2020): 137. <a href="https://www.mdpi.com/2078-2489/11/3/137">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%204.2"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 4.2"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 4.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">Measurable activities for continual improvements are integrated into AI system updates and include regular engagement with interested parties, including relevant AI actors.</p>
<div id="button-MANAGE 4.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Regular monitoring processes enable system updates to enhance performance and functionality in accordance with regulatory and legal frameworks, and organizational and contextual values and norms. These processes also facilitate analyses of root causes, system degradation, drift, near-misses, and failures, as well as incident response and documentation.</p>
<p>AI actors across the lifecycle have many opportunities to capture and incorporate external feedback about system performance, limitations, and impacts, and implement continuous improvements. Improvements may not always apply to the model pipeline or system processes, and may instead be based on metrics beyond accuracy or other quality performance measures. In these cases, improvements may entail adaptations to business or organizational procedures or practices. Organizations are encouraged to develop improvements that will maintain traceability and transparency for developers, end users, auditors, and relevant AI actors.</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Integrate trustworthiness characteristics into protocols and metrics used for continual improvement.</li>
<li>Establish processes for evaluating and integrating feedback into AI system improvements.</li>
<li>Assess and evaluate alignment of proposed improvements with relevant regulatory and legal frameworks.</li>
<li>Assess and evaluate alignment of proposed improvements with the values and norms within the context of use.</li>
<li>Document the basis for decisions made relative to tradeoffs between trustworthy characteristics, system risks, and system opportunities.</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>How will user and other forms of stakeholder engagement be integrated into the model development process and regular performance review once deployed?</li>
<li>To what extent can users or parties affected by the outputs of the AI system test the AI system and provide feedback?</li>
<li>To what extent has the entity defined and documented the regulatory environment—including minimum requirements in laws and regulations?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities, <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
<li>Artificial Intelligence Ethics Framework For The Intelligence Community. <a href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Yen, Po-Yin, et al. "Development and Evaluation of Socio-Technical Metrics to Inform HIT Adaptation." <a href="https://digital.ahrq.gov/sites/default/files/docs/citation/r21hs024767-yen-final-report-2019.pdf">URL</a></p>
<p>Carayon, Pascale, and Megan E. Salwei. "Moving toward a sociotechnical systems approach to continuous health information technology design: the path forward for improving electronic health record usability and reducing clinician burnout." Journal of the American Medical Informatics Association 28.5 (2021): 1026-1028. <a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8068435/pdf/ocab002.pdf">URL</a></p>
<p>Mishra, Deepa, et al. "Organizational capabilities that enable big data and predictive analytics diffusion and organizational performance: A resource-based perspective." Management Decision (2018).</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container">
<h3 id="Manage%204.3"
class="usa-accordion__heading pbindex__outer-accordion__heading">
<button type="button"
aria-expanded="false"
aria-controls="button-MANAGE 4.3"
class="usa-accordion__button pbindex__outer-accordion__button">
MANAGE 4.3
</button>
</h3>
<p class="pbindex__outer-accordion__description">Incidents and errors are communicated to relevant AI actors including affected communities. Processes for tracking, responding to, and recovering from incidents and errors are followed and documented.</p>
<div id="button-MANAGE 4.3"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul">
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Regularly documenting an accurate and transparent account of identified and reported errors can enhance AI risk management activities. Examples include:</p>
<ul>
<li>how errors were identified,</li>
<li>incidents related to the error,</li>
<li>whether the error has been repaired, and</li>
<li>how repairs can be distributed to all impacted stakeholders and users.</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<ul>
<li>Establish procedures to regularly share information about errors, incidents and negative impacts with relevant stakeholders, operators, practitioners and users, and impacted parties.</li>
<li>Maintain a database of reported errors, near-misses, incidents and negative impacts including date reported, number of reports, assessment of impact and severity, and responses (see the schema sketch after this list).</li>
<li>Maintain a database of system changes, reason for change, and details of how the change was made, tested and deployed.</li>
<li>Maintain version history information and metadata to enable continuous improvement processes.</li>
<li>Verify that relevant AI actors responsible for identifying complex or emergent risks are properly resourced and empowered.</li>
</ul>
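<p>As referenced in the database action above, the following is a minimal sketch of an incident log using only the Python standard library; the schema fields mirror the suggested action and the sample record is an illustrative assumption.</p>
<pre><code># Minimal sketch of an incident database for errors, near-misses, and impacts.
import sqlite3

conn = sqlite3.connect("ai_incidents.db")
conn.execute("""
    CREATE TABLE IF NOT EXISTS incidents (
        id INTEGER PRIMARY KEY,
        date_reported TEXT NOT NULL,
        system TEXT NOT NULL,
        description TEXT NOT NULL,
        report_count INTEGER DEFAULT 1,
        severity TEXT CHECK (severity IN ('low', 'medium', 'high')),
        response TEXT
    )
""")
conn.execute(
    "INSERT INTO incidents (date_reported, system, description, severity, response) "
    "VALUES (?, ?, ?, ?, ?)",
    ("2023-01-15", "loan-scoring-v2",              # hypothetical system name
     "near-miss: scores drifted for one postal region", "medium",
     "rolled back to prior model; root cause review opened"),
)
conn.commit()
# Severity counts feed the regular stakeholder communications described above.
for row in conn.execute("SELECT severity, COUNT(*) FROM incidents GROUP BY severity"):
    print(row)
</code></pre>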
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Transparency and Documentation</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<h5>Organizations can document the following</h5>
<ul>
<li>What corrective actions has the entity taken to enhance the quality, accuracy, reliability, and representativeness of the data?</li>
<li>To what extent does the entity communicate its AI strategic goals and objectives to the community of stakeholders? How easily accessible and current is the information available to external stakeholders?</li>
<li>What type of information is accessible on the design, operations, and limitations of the AI system to external stakeholders, including end users, consumers, regulators, and individuals impacted by use of the AI system?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>GAO-21-519SP - Artificial Intelligence: An Accountability Framework for Federal Agencies & Other Entities, <a href="https://www.gao.gov/products/gao-21-519sp">URL</a></li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div class="usa-collection__body pbindex__content_section_contentp_container">
<p>Wei, M., & Zhou, Z. (2022). AI Ethics Issues in Real World: Evidence from AI Incident Database. ArXiv, abs/2206.07635. <a href="https://arxiv.org/pdf/2206.07635.pdf">URL</a></p>
<p>McGregor, Sean. "Preventing repeated real world AI failures by cataloging incidents: The AI incident database." Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 35. No. 17. 2021. <a href="https://arxiv.org/pdf/2011.08512.pdf">URL</a></p>
<p>Macrae, Carl. "Learning from the failure of autonomous and intelligent systems: Accidents, safety, and sociotechnical sources of risk." Risk analysis 42.9 (2022): 1999-2025. <a href="https://onlinelibrary.wiley.com/doi/epdf/10.1111/risa.13850">URL</a></p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
</ul>
</div>
</div>
</main>
</div>
</div>
</div>
<footer class="nist-footer padding-bottom-4">
<div class="grid-container nist-footer__info">
<div class="grid-row">
<div class="tablet:grid-col-6 padding-left-0">
<div class="nist-footer__logo">
<a href="https://www.nist.gov/"
title="National Institute of Standards and Technology"
class="nist-footer__logo-link"
rel="home">
<img src="/img/nist_logo_brand_white.svg"
role="img"
alt="National Institute of Standards and Technology logo"
width="300px"
height="42px" />
</a>
</div>
<div class="nist-footer__contact">
<h3 class="nist-footer__contact-heading">HEADQUARTERS</h3>
<address>
100 Bureau Drive
<br>
Gaithersburg, MD 20899
<br>
<a href="tel:301-975-2000">301-975-2000</a>
</address>
<p>
<a href="mailto:do-webmaster@nist.gov">Webmaster</a> | <a href="https://www.nist.gov/about-nist/contact-us">Contact Us</a> | <a href="https://www.nist.gov/visit">Our Other Offices</a>
</p>
</div>
</div>
<div class="tablet:grid-col-6">
<div class="nist-footer__social-links grid-row">
<a class="nist-social nist-social--x-white ext"
href="https://x.com/NIST"
data-extlink="">
<span>X</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/x.svg"
alt="X" />
</a>
<a class="nist-social nist-social--facebook-white ext"
href="https://www.facebook.com/NIST"
data-extlink="">
<span>Facebook</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/facebook.svg"
alt="Facebook" />
</a>
<a class="nist-social nist-social--linkedin-white ext"
href="https://www.linkedin.com/company/nist"
data-extlink="">
<span>LinkedIn</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/linkedin.svg"
alt="LinkedIn" />
</a>
<a class="nist-social nist-social--instagram-white ext"
href="https://www.instagram.com/nist/"
data-extlink="">
<span>Instagram</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/instagram.svg"
alt="Instagram" />
</a>
<a class="nist-social nist-social--youtube-white ext"
href="https://www.youtube.com/NIST"
data-extlink="">
<span>YouTube</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/youtube.svg"
alt="YouTube" />
</a>
<a class="nist-social nist-social--rss-white"
href="https://www.nist.gov/news-events/nist-rss-feeds">
<span>RSS Feed</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/rss_feed.svg"
alt="RSS" />
</a>
<a class="nist-social nist-social--envelope-white ext"
href="https://public.govdelivery.com/accounts/USNIST/subscriber/new"
data-extlink="">
<span>Mailing List</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/mail.svg"
alt="Subscribe to Mailing List" />
</a>
</div>
<br>
<div class="nist-footer__feedback grid-row">
How are we doing? <a class="margin-left-2 usa-button"
rel="nofollow"
href="https://www.nist.gov/form/nist-gov-feedback?destination=/national-institute-standards-and-technology"
title="Provide feedback">Feedback</a>
</div>
</div>
</div>
</div>
<div class="nist-footer__inner">
<div class="nist-footer__menu" role="navigation">
<ul>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/privacy-policy">Site Privacy</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/oism/accessibility">Accessibility</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/privacy">Privacy Program</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/oism/copyrights">Copyrights</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.commerce.gov/vulnerability-disclosure-policy">Vulnerability Disclosure</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/no-fear-act-policy">No Fear Act Policy</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/foia">FOIA</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/environmental-policy-statement">Environmental Policy</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/summary-report-scientific-integrity">Scientific Integrity</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/nist-information-quality-standards">Information Quality Standards</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.commerce.gov/">Commerce.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.science.gov/">Science.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.usa.gov/">USA.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://vote.gov/">Vote.gov</a>
</li>
</ul>
</div>
</div>
</footer>
</div>
<script nonce="Bt+wUFLLXpM971wHzwK4Pw=="
defer
src="/dist/uswds.min.fce4bb2752d4.js"></script>
<script nonce="Bt+wUFLLXpM971wHzwK4Pw=="
type="text/javascript"
src="/dist/application.a74e10c27c89.js"
defer></script>
<script nonce="Bt+wUFLLXpM971wHzwK4Pw==" src="https://www.googletagmanager.com/gtag/js?id=G-3CZ2H64ZSV" async></script>
<script nonce="Bt+wUFLLXpM971wHzwK4Pw==" type="application/javascript" defer>window.dataLayer = window.dataLayer || [];
function gtag() {
dataLayer.push(arguments);
}
gtag('js', new Date());
gtag('config', 'G-3CZ2H64ZSV');</script>
<noscript>
<style nonce="Bt+wUFLLXpM971wHzwK4Pw==">
iframe {
display: none;
visibility: hidden;
}
</style>
<iframe src="https://www.googletagmanager.com/ns.html?id=G-3CZ2H64ZSV"
height="0"
width="0"
style="display:none;
visibility:hidden"></iframe>
</noscript>
</body>
</html>