nist-gov/airc.nist.gov/AI_RMF_Knowledge_Base/Playbook/Map
2025-03-05 18:59:57 +00:00

<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>
Map
</title>
<link rel="stylesheet"
type="text/css"
href="/dist/custom_fontawesome.ac60244596e1.css">
<link rel="stylesheet"
type="text/css"
href="/dist/application.ca391f6cb6ce.css">
<link rel="stylesheet" type="text/css" href="/dist/content.124706b6abde.css">
<script nonce="VGGJL6qR72rqyokScTQSXA=="
type="text/javascript"
src="/dist/uswds-init.min.0c5600cc9db1.js"></script>
<meta name="description"
content="" />
<link rel="canonical" href="/airmf-resources/playbook/map/" />
<meta property="og:url" content="/airmf-resources/playbook/map/" />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title"
content="Map " />
<meta name="twitter:description"
content="">
<meta property="og:type" content="website" />
<meta property="og:title" content="Map" />
<meta property="og:description"
content="" />
<meta property="og:site_name" content="" />
</head>
<body class="
layout-styleguide
">
<a class="usa-skipnav" href="#main-content">Skip to main content</a>
<div class="app-content">
<section class="usa-banner site-banner"
aria-label="Official website of the United States government">
<div class="usa-accordion">
<header class="usa-banner__header">
<div class="usa-banner__inner">
<div class="grid-col-auto">
<img aria-hidden="true"
class="usa-banner__header-flag"
src="/img/us_flag_small.png"
alt="Small US Flag" />
</div>
<div class="grid-col-fill tablet:grid-col-auto" aria-hidden="true">
<p class="usa-banner__header-text">An official website of the United States government</p>
<p class="usa-banner__header-action">Heres how you know</p>
</div>
<button type="button"
class="usa-accordion__button usa-banner__button"
aria-expanded="false"
aria-controls="gov-banner-default-default">
<span class="usa-banner__button-text">Heres how you know</span>
</button>
</div>
</header>
<div class="usa-banner__content usa-accordion__content"
id="gov-banner-default-default">
<div class="grid-row grid-gap-lg">
<div class="usa-banner__guidance tablet:grid-col-6">
<img class="usa-banner__icon usa-media-block__img"
src="/img/icon-dot-gov.svg"
role="img"
alt="Small Dot Gov Icon"
aria-hidden="true" />
<div class="usa-media-block__body">
<p>
<strong>Official websites use .gov</strong>
<br />
A
<strong>.gov</strong> website belongs to an official government
organization in the United States.
</p>
</div>
</div>
<div class="usa-banner__guidance tablet:grid-col-6">
<img class="usa-banner__icon usa-media-block__img"
src="/img/icon-https.svg"
role="img"
alt="Small HTTPS Lock Icon"
aria-hidden="true" />
<div class="usa-media-block__body">
<p>
<strong>Secure .gov websites use HTTPS</strong>
<br />
A
<strong>lock</strong> (
<span class="icon-lock">
<svg xmlns="http://www.w3.org/2000/svg"
width="52"
height="64"
viewBox="0 0 52 64"
class="usa-banner__lock-image"
role="img"
aria-labelledby="banner-lock-description-default"
focusable="false">
<title id="banner-lock-title-default">Lock</title>
<desc id="banner-lock-description-default">Locked padlock icon</desc>
<path fill="#000000" fill-rule="evenodd" d="M26 0c10.493 0 19 8.507 19 19v9h3a4 4 0 0 1 4 4v28a4 4 0 0 1-4 4H4a4 4 0 0 1-4-4V32a4 4 0 0 1 4-4h3v-9C7 8.507 15.507 0 26 0zm0 8c-5.979 0-10.843 4.77-10.996 10.712L15 19v9h22v-9c0-6.075-4.925-11-11-11z" />
</svg>
</span>) or <strong>https://</strong> means you've safely connected to
the .gov website. Share sensitive information only on official,
secure websites.
</p>
</div>
</div>
</div>
</div>
</div>
</section>
<a class="usa-skipnav" href="#main-content">Skip to main content</a>
<header class="usa-header usa-header--extended site-header site-header--dark"
role="banner">
<div class="usa-navbar site-header__navbar">
<a href="https://www.nist.gov"
title="National Institute of Standards and Technology"
aria-label="Home">
<img src="/img/nist_logo_brand_white.svg"
role="img"
class="nist-logo"
alt="National Institute of Standards and Technology logo" />
</a>
<button type="button" class="usa-menu-btn">Menu</button>
</div>
</header>
<div class="usa-overlay"></div>
<nav aria-label="Main Site navigation" class="usa-nav site-nav">
<div class="usa-nav__inner site-nav__inner nav-header">
<button type="button" class="usa-nav__close">
<img src="/img/usa-icons/close.svg" role="img" alt="Close" />
</button>
<ul class="usa-nav__primary usa-accordion airc-nav-list">
<li class="usa-nav__primary-item">
<div class="usa-logo site-logo" id="-logo">
<em class="usa-logo__text site-logo__text site-nav-text">
<a href="/" title="Trustworthy & Responsible AI Resource Center">
<span aria-hidden="true" class="site-title--short">AIRMF</span>
<span class="site-title--long">Trustworthy & Responsible AI Resource Center</span>
</a>
</em>
</div>
</li>
<!-- Mobile-nav -->
<ul class="usa-nav__primary usa-nav__primary--mobile usa-accordion emp-mobilenav"
id="airc-mobile-bar">
<li class="usa-nav__primary-item is-current ">
<a href="/"
class="usa-current"
>
<span>Home</span>
</a>
</li>
<li class="usa-nav__primary-item is-current ">
<a href="/airmf-resources/"
class="usa-current
sb-menu
"
id="mentgl_4"
>
<span>AI RMF Resources</span>
<span class="caret"></span>
</a>
<ul class="usa-sidenav__sublist" id="mentgl_4">
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/airmf/"
class="
sb-menu
"
id="mentgl_5"
>
<span>AI RMF</span>
<span class="caret"></span>
</a>
</li>
<li class="usa-nav__submenu-item is-current ">
<a href="/airmf-resources/playbook/"
class="usa-current
sb-menu
"
id="mentgl_18"
>
<span>Playbook</span>
<span class="caret"></span>
</a>
<ul class="usa-sidenav__sublist" id="mentgl_18">
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/playbook/govern/"
class=""
>
<span>Govern</span>
</a>
</li>
<li class="usa-nav__submenu-item is-current ">
<a href="/airmf-resources/playbook/map/"
class="usa-current"
>
<span>Map</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/playbook/measure/"
class=""
>
<span>Measure</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/playbook/manage/"
class=""
>
<span>Manage</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/playbook/audit-log/"
class=""
>
<span>Audit Log</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/playbook/faq/"
class=""
>
<span>FAQ</span>
</a>
</li>
</ul>
<style nonce="VGGJL6qR72rqyokScTQSXA==">
ul.usa-sidenav__sublist::marker,
li.usa-nav__submenu-item::marker {
content: ' ';
font-size: 1.2em;
}
</style>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/roadmap/"
class=""
>
<span>Roadmap</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/usecases/"
class=""
>
<span>Example of Use Cases</span>
</a>
</li>
<li class="usa-nav__submenu-item ">
<a href="/airmf-resources/crosswalks/"
class=""
>
<span>Crosswalk Documents</span>
</a>
</li>
</ul>
<style nonce="VGGJL6qR72rqyokScTQSXA==">
ul.usa-sidenav__sublist::marker,
li.usa-nav__submenu-item::marker {
content: ' ';
font-size: 1.2em;
}
</style>
</li>
<li class="usa-nav__primary-item ">
<a href="/glossary/"
class=""
>
<span>Glossary</span>
</a>
</li>
<li class="usa-nav__primary-item ">
<a href="/technical-reports/"
class=""
>
<span>Technical Reports</span>
</a>
</li>
</ul>
<!-- end Mobile nav -->
</ul>
<div class="usa-nav__secondary">
<form id="search_form-mobile"
class="site-search usa-search usa-search--small flex-fill"
action="/search"
accept-charset="UTF-8"
method="get">
<!-- input name="utf8" type="hidden" value="&#x2713;" /-->
<input type="hidden" name="affiliate" id="affiliate-mobile" value="uswds" />
<div role="search">
<label class="usa-sr-only" for="query-mobile">Search the AIRC Website</label>
<input id="query-mobile"
class="usa-input usagov-search-autocomplete"
name="query"
type="search"
placeholder="Search AIRC Website"
autocomplete="off" />
<button class="site-search__button usa-button margin-top-0"
type="submit"
name="commit">
<img src="/img/usa-icons-bg/search--white.svg"
class="usa-search__submit-icon"
alt="Search">
</button>
</div>
</form>
</div>
</div>
</nav>
<div class="default-container">
<nav class="usa-breadcrumb site-breadcrumbs" aria-label="Breadcrumbs">
<ol class="usa-breadcrumb__list">
<li class="usa-breadcrumb__list-item">
<span class="usa-breadcrumb__link">
<a href="/">Home</a>
</span>
</li>
<li class="usa-breadcrumb__list-item">
<span class="usa-breadcrumb__link">
<a href="/airmf-resources/">AI RMF Resources</a>
</span>
</li>
<li class="usa-breadcrumb__list-item">
<span class="usa-breadcrumb__link">
<a href="/airmf-resources/playbook/">Playbook</a>
</span>
</li>
<li class="usa-breadcrumb__link active">Map</li>
</ol>
</nav>
<div id="user-content-root">
<aside class="sidenav emp-sidenav padding-top-1"
id="page-side-navigation"
aria-label="Side Navigation">
<ul class="site-sidenav usa-sidenav usa-accordion" id="airc-sidebar">
<li class="usa-sidenav__item is-current">
<a href="/"
class="usa-current"
>
<span>Home</span>
</a>
</li>
<li class="usa-sidenav__item is-current">
<a href="/airmf-resources/"
class="usa-current
sb-menu
"
id="ddtoggle_4"
>
<span>AI RMF Resources</span>
<span class="caret"></span>
</a>
<ul class="usa-sidenav__sublist" id="ddtoggle_4">
<li class="usa-sitenav__item ">
<a href="/airmf-resources/airmf/"
class="
sb-menu
"
id="ddtoggle_5"
>
<span>AI RMF</span>
<span class="caret"></span>
</a>
</li>
<li class="usa-sitenav__item is-current">
<a href="/airmf-resources/playbook/"
class="usa-current
sb-menu
"
id="ddtoggle_18"
>
<span>Playbook</span>
<span class="caret"></span>
</a>
<ul class="usa-sidenav__sublist" id="ddtoggle_18">
<li class="usa-sitenav__item ">
<a href="/airmf-resources/playbook/govern/"
class=""
>
<span>Govern</span>
</a>
</li>
<li class="usa-sitenav__item is-current">
<a href="/airmf-resources/playbook/map/"
class="usa-current"
>
<span>Map</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/playbook/measure/"
class=""
>
<span>Measure</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/playbook/manage/"
class=""
>
<span>Manage</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/playbook/audit-log/"
class=""
>
<span>Audit Log</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/playbook/faq/"
class=""
>
<span>FAQ</span>
</a>
</li>
</ul>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/roadmap/"
class=""
>
<span>Roadmap</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/usecases/"
class=""
>
<span>Example of Use Cases</span>
</a>
</li>
<li class="usa-sitenav__item ">
<a href="/airmf-resources/crosswalks/"
class=""
>
<span>Crosswalk Documents</span>
</a>
</li>
</ul>
</li>
<li class="usa-sidenav__item">
<a href="/glossary/"
class=""
>
<span>Glossary</span>
</a>
</li>
<li class="usa-sidenav__item">
<a href="/technical-reports/"
class=""
>
<span>Technical Reports</span>
</a>
</li>
</ul>
</aside>
<div class="usa-in-page-nav-container site-in-page-nav-container">
<aside class="usa-in-page-nav">
</aside>
<main id="main-content" class="main-content">
<div class="grid-container"><style>
aside.usa-in-page-nav li.usa-in-page-nav__item--sub-item {
margin-left: 0.25rem;
}
</style>
<div class="pbindex grid-container" id="pbindex-Map">
<ul class="usa-button-group flex-justify-end">
<li class="usa-button-group__item">
<button
id="pbindex-button-expand"
class="usa-button usa-button--outline pbindex-event"
>
Expand All
</button>
</li>
<li class="usa-button-group__item">
<button
id="pbindex-button-collapse"
class="usa-button usa-button--outline pbindex-event"
>
Collapse All
</button>
</li>
</ul>
<h1>Map</h1>
<ul id="Map" class="pbindex__top-ul">
<li>
<h2 class="pbindex__top__heading">Map 1</h2>
<p class="usa-intro pbindex__top__title">
Context is established and understood.
</p>
<ul class="pbindex__subcat-ul">
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.1"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.1"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Intended purpose, potentially beneficial uses, context-specific
laws, norms and expectations, and prospective settings in which
the AI system will be deployed are understood and documented.
Considerations include: specific set or types of users along with
their expectations; potential positive and negative impacts of
system uses to individuals, communities, organizations, society,
and the planet; assumptions and related limitations about AI
system purposes; uses and risks across the development or product
AI lifecycle; TEVV and system metrics.
</p>
<div
id="button-MAP 1.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Highly accurate and optimized systems can cause harm.
Relatedly, organizations should expect broadly deployed AI
tools to be reused, repurposed, and potentially misused
regardless of intentions.
</p>
<p>
AI actors can work collaboratively, and with external
parties such as community groups, to help delineate the
bounds of acceptable deployment, consider preferable
alternatives, and identify principles and strategies to
manage likely risks. Context mapping is the first step in
this effort, and may include examination of the following:
</p>
<ul>
<li>intended purpose and impact of system use.</li>
<li>concept of operations.</li>
<li>
intended, prospective, and actual deployment setting.
</li>
<li>requirements for system deployment and operation.</li>
<li>end user and operator expectations.</li>
<li>specific set or types of end users.</li>
<li>
potential negative impacts to individuals, groups,
communities, organizations, and society or
context-specific impacts such as legal requirements or
impacts to the environment.
</li>
<li>
unanticipated, downstream, or other unknown contextual
factors.
</li>
<li>how AI system changes connect to impacts.</li>
</ul>
<p>
These types of processes can assist AI actors in
understanding how limitations, constraints, and other
realities associated with the deployment and use of AI
technology can create impacts once they are deployed or
operate in the real world. When coupled with the enhanced
organizational culture resulting from the established
policies and procedures in the Govern function, the Map
function can provide opportunities to foster and instill
new perspectives, activities, and skills for approaching
risks and impacts.
</p>
<p>
Context mapping also includes discussion and consideration
of non-AI or non-technology alternatives especially as
related to whether the given context is narrow enough to
manage AI and its potential negative impacts. Non-AI
alternatives may include capturing and evaluating
information using semi-autonomous or mostly-manual
methods.
</p>
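<p>
As an illustrative, non-normative sketch, the context elements above
could be captured in a lightweight machine-readable record. The field
names and values below are hypothetical examples, not a schema
prescribed by the AI RMF:
</p>
<pre><code># Illustrative context-mapping record (hypothetical fields and values)
context_map = {
    "intended_purpose": "rank job applications for recruiter review",
    "concept_of_operations": "a human recruiter reviews every ranked list before outreach",
    "deployment_settings": ["internal HR portal", "prospective: external job boards"],
    "end_users_and_operators": ["recruiters", "HR administrators"],
    "potential_negative_impacts": [
        "qualified candidates screened out due to biased training data",
        "disparate impact on protected groups",
    ],
    "non_ai_alternatives": ["manual screening", "rule-based keyword filter"],
    "unknown_contextual_factors": ["reuse of rankings by third-party staffing firms"],
}
</code></pre>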
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Maintain awareness of industry, technical, and
applicable legal standards.
</li>
<li>
Examine trustworthiness of AI system design and
consider non-AI solutions.
</li>
<li>
Consider intended AI system design tasks along with
unanticipated purposes in collaboration with human
factors and socio-technical domain experts.
</li>
<li>
Define and document the task, purpose, minimum
functionality, and benefits of the AI system to inform
considerations about the utility of the project, or its
lack thereof.
</li>
<li>
Identify whether there are non-AI or non-technology
alternatives that will lead to more trustworthy
outcomes.
</li>
<li>
Examine how changes in system performance affect
downstream events such as decision-making (e.g., what
types of impacts do changes in an AI model's objective
function create in how many candidates do or do not get a
job interview).
</li>
<li>
Determine actions to map and track post-decommissioning
stages of AI deployment and potential negative or
positive impacts to individuals, groups and communities.
</li>
<li>
Determine the end user and organizational requirements,
including business and technical requirements.
</li>
<li>
Determine and delineate the expected and acceptable AI
system context of use, including:
<ul>
<li>social norms</li>
<li>Impacted individuals, groups, and communities</li>
<li>
potential positive and negative impacts to
individuals, groups, communities, organizations, and
society
</li>
<li>operational environment</li>
</ul>
</li>
<li>
Perform context analysis related to time frame, safety
concerns, geographic area, physical environment,
ecosystems, social environment, and cultural norms
within the intended setting (or conditions that closely
approximate the intended setting).
</li>
<li>
Gain and maintain awareness about evaluating scientific
claims related to AI system performance and benefits
before launching into system design.
</li>
<li>
Identify human-AI interaction and/or roles, such as
whether the application will support or replace human
decision making.
</li>
<li>
Plan for risks related to human-AI configurations, and
document requirements, roles, and responsibilities for
human oversight of deployed systems.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
To what extent is the output of each component
appropriate for the operational context?
</li>
<li>
Which AI actors are responsible for the decisions of the
AI and is this person aware of the intended uses and
limitations of the analytic?
</li>
<li>
Which AI actors are responsible for maintaining,
re-verifying, monitoring, and updating this AI once
deployed?
</li>
<li>
Who is the person(s) accountable for the ethical
considerations across the AI lifecycle?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities,
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
“Stakeholders in Explainable AI,” Sep. 2018.
<a href="http://arxiv.org/abs/1810.00184">URL</a>
</li>
<li>
"Microsoft Responsible AI Standard, v2".
<a
href="https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4ZPmV"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Socio-technical systems</h5>
<p>
Andrew D. Selbst, danah boyd, Sorelle A. Friedler, et al.
2019. Fairness and Abstraction in Sociotechnical Systems.
In Proceedings of the Conference on Fairness,
Accountability, and Transparency (FAccT'19). Association
for Computing Machinery, New York, NY, USA, 59–68.
<a href="https://doi.org/10.1145/3287560.3287598">URL</a>
</p>
<h5>Problem formulation</h5>
<p>
Roel Dobbe, Thomas Krendl Gilbert, and Yonatan Mintz.
2021. Hard choices in artificial intelligence. Artificial
Intelligence 300 (14 July 2021), 103555, ISSN 0004-3702.
<a href="https://doi.org/10.1016/j.artint.2021.103555"
>URL</a
>
</p>
<p>
Samir Passi and Solon Barocas. 2019. Problem Formulation
and Fairness. In Proceedings of the Conference on
Fairness, Accountability, and Transparency (FAccT'19).
Association for Computing Machinery, New York, NY, USA,
39–48.
<a href="https://doi.org/10.1145/3287560.3287567">URL</a>
</p>
<h5>Context mapping</h5>
<p>
Emilio Gómez-González and Emilia Gómez. 2020. Artificial
intelligence in medicine and healthcare. Joint Research
Centre (European Commission).
<a
href="https://op.europa.eu/en/publication-detail/-/publication/b4b5db47-94c0-11ea-aac4-01aa75ed71a1/language-en"
>URL</a
>
</p>
<p>
Sarah Spiekermann and Till Winkler. 2020. Value-based
Engineering for Ethics by Design. arXiv:2004.13676.
<a href="https://arxiv.org/abs/2004.13676">URL</a>
</p>
<p>
Social Impact Lab. 2017. Framework for Context Analysis of
Technologies in Social Change Projects (Draft v2.0).
<a
href="https://www.alnap.org/system/files/content/resource/files/main/Draft%20SIMLab%20Context%20Analysis%20Framework%20v2.0.pdf"
>URL</a
>
</p>
<p>
Solon Barocas, Asia J. Biega, Margarita Boyarskaya, et al.
2021. Responsible computing during COVID-19 and beyond.
Commun. ACM 64, 7 (July 2021), 30–32.
<a href="https://doi.org/10.1145/3466612">URL</a>
</p>
<h5>Identification of harms</h5>
<p>
Harini Suresh and John V. Guttag. 2020. A Framework for
Understanding Sources of Harm throughout the Machine
Learning Life Cycle. arXiv:1901.10002.
<a href="https://arxiv.org/abs/1901.10002">URL</a>
</p>
<p>
Margarita Boyarskaya, Alexandra Olteanu, and Kate
Crawford. 2020. Overcoming Failures of Imagination in AI
Infused System Development and Deployment.
arXiv:2011.13416.
<a href="https://arxiv.org/abs/2011.13416">URL</a>
</p>
<p>
Microsoft. Foundations of assessing harm. 2022.
<a
href="https://docs.microsoft.com/en-us/azure/architecture/guide/responsible-innovation/harms-modeling/"
>URL</a
>
</p>
<h5>Understanding and documenting limitations in ML</h5>
<p>
Alexander D'Amour, Katherine Heller, Dan Moldovan, et al.
2020. Underspecification Presents Challenges for
Credibility in Modern Machine Learning. arXiv:2011.03395.
<a href="https://arxiv.org/abs/2011.03395">URL</a>
</p>
<p>
Arvind Narayanan. "How to Recognize AI Snake Oil." Arthur
Miller Lecture on Science and Ethics (2019).
<a
href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf"
>URL</a
>
</p>
<p>
Jessie J. Smith, Saleema Amershi, Solon Barocas, et al.
2022. REAL ML: Recognizing, Exploring, and Articulating
Limitations of Machine Learning Research.
arXiv:2205.08363.
<a href="https://arxiv.org/abs/2205.08363">URL</a>
</p>
<p>
Margaret Mitchell, Simone Wu, Andrew Zaldivar, et al.
2019. Model Cards for Model Reporting. In Proceedings of
the Conference on Fairness, Accountability, and
Transparency (FAT* '19). Association for Computing
Machinery, New York, NY, USA, 220–229.
<a href="https://doi.org/10.1145/3287560.3287596">URL</a>
</p>
<p>
Matthew Arnold, Rachel K. E. Bellamy, Michael Hind, et al.
2019. FactSheets: Increasing Trust in AI Services through
Supplier's Declarations of Conformity. arXiv:1808.07261.
<a href="https://arxiv.org/abs/1808.07261">URL</a>
</p>
<p>
Matthew J. Salganik, Ian Lundberg, Alexander T. Kindel,
Caitlin E. Ahearn, Khaled Al-Ghoneim, Abdullah Almaatouq,
Drew M. Altschul et al. "Measuring the Predictability of
Life Outcomes with a Scientific Mass Collaboration."
Proceedings of the National Academy of Sciences 117, No.
15 (2020): 8398-8403.
<a href="https://www.pnas.org/doi/10.1073/pnas.1915006117"
>URL</a
>
</p>
<p>
Michael A. Madaio, Luke Stark, Jennifer Wortman Vaughan,
and Hanna Wallach. 2020. Co-Designing Checklists to
Understand Organizational Challenges and Opportunities
around Fairness in AI. In Proceedings of the 2020 CHI
Conference on Human Factors in Computing Systems (CHI
'20). Association for Computing Machinery, New York, NY,
USA, 1–14.
<a href="https://doi.org/10.1145/3313831.3376445">URL</a>
</p>
<p>
Timnit Gebru, Jamie Morgenstern, Briana Vecchione, et al.
2021. Datasheets for Datasets. arXiv:1803.09010.
<a href="https://arxiv.org/abs/1803.09010">URL</a>
</p>
<p>
Bender, E. M., Friedman, B. &amp; McMillan-Major, A.,
(2022). A Guide for Writing Data Statements for Natural
Language Processing. University of Washington. Accessed
July 14, 2022.
<a
href="https://techpolicylab.uw.edu/wp-content/uploads/2021/11/Data_Statements_Guide_V2.pdf"
>URL</a
>
</p>
<p>
Meta AI. System Cards, a new resource for understanding
how AI systems work, 2021.
<a
href="https://ai.facebook.com/blog/system-cards-a-new-resource-for-understanding-how-ai-systems-work/"
>URL</a
>
</p>
<h5>When not to deploy</h5>
<p>
Solon Barocas, Asia J. Biega, Benjamin Fish, et al. 2020.
When not to design, build, or deploy. In Proceedings of
the 2020 Conference on Fairness, Accountability, and
Transparency (FAT* '20). Association for Computing
Machinery, New York, NY, USA, 695.
<a href="https://doi.org/10.1145/3351095.3375691">URL</a>
</p>
<h5>Post-decommission</h5>
<p>
Upol Ehsan, Ranjit Singh, Jacob Metcalf and Mark O. Riedl.
“The Algorithmic Imprint.” Proceedings of the 2022 ACM
Conference on Fairness, Accountability, and Transparency
(2022). <a href="https://arxiv.org/pdf/2206.03275v1">URL</a>
</p>
<h5>Statistical balance</h5>
<p>
Ziad Obermeyer, Brian Powers, Christine Vogeli, and
Sendhil Mullainathan. 2019. Dissecting racial bias in an
algorithm used to manage the health of populations.
Science 366, 6464 (25 Oct. 2019), 447-453.
<a href="https://doi.org/10.1126/science.aax2342">URL</a>
</p>
<h5>Assessment of science in AI</h5>
<p>
Arvind Narayanan. How to recognize AI snake oil.
<a
href="https://www.cs.princeton.edu/~arvindn/talks/MIT-STS-AI-snakeoil.pdf"
>URL</a
>
</p>
<p>
Emily M. Bender. 2022. On NYT Magazine on AI: Resist the
Urge to be Impressed. (April 17, 2022).
<a
href="https://medium.com/@emilymenonbender/on-nyt-magazine-on-ai-resist-the-urge-to-be-impressed-3d92fd9a0edd"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.2"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.2"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Inter-disciplinary AI actors, competencies, skills and capacities
for establishing context reflect demographic diversity and broad
domain and user experience expertise, and their participation is
documented. Opportunities for interdisciplinary collaboration are
prioritized.
</p>
<div
id="button-MAP 1.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Successfully mapping context requires a team of AI actors
with a diversity of experience, expertise, abilities and
backgrounds, and with the resources and independence to
engage in critical inquiry.
</p>
<p>
Having a diverse team contributes to more broad and open
sharing of ideas and assumptions about the purpose and
function of the technology being designed and developed
making these implicit aspects more explicit. The benefit
of a diverse staff in managing AI risks is not the beliefs
or presumed beliefs of individual workers, but the
behavior that results from a collective perspective. An
environment which fosters critical inquiry creates
opportunities to surface problems and identify existing
and emergent risks.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Establish interdisciplinary teams to reflect a wide
range of skills, competencies, and capabilities for AI
efforts. Verify that team membership includes
demographic diversity, broad domain expertise, and lived
experiences. Document team composition.
</li>
<li>
Create and empower interdisciplinary expert teams to
capture, learn, and engage the interdependencies of
deployed AI systems and related terminologies and
concepts from disciplines outside of AI practice such as
law, sociology, psychology, anthropology, public policy,
systems design, and engineering.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
To what extent do the teams responsible for developing
and maintaining the AI system reflect diverse opinions,
backgrounds, experiences, and perspectives?
</li>
<li>
Did the entity document the demographics of those
involved in the design and development of the AI system
to capture and communicate potential biases inherent to
the development process, according to forum
participants?
</li>
<li>
What specific perspectives did stakeholders share, and
how were they integrated across the design, development,
deployment, assessment, and monitoring of the AI system?
</li>
<li>
To what extent has the entity addressed stakeholder
perspectives on the potential negative impacts of the AI
system on end users and impacted populations?
</li>
<li>
What type of information is accessible on the design,
operations, and limitations of the AI system to external
stakeholders, including end users, consumers,
regulators, and individuals impacted by use of the AI
system?
</li>
<li>
Did your organization address usability problems and
test whether user interfaces served their intended
purposes? Consulting the community or end users at the
earliest stages of development helps ensure there is
transparency on the technology used and how it is
deployed.
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework,
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
<li>
AI policies and initiatives, in Artificial Intelligence
in Society, OECD, 2019.
<a
href="https://www.oecd.org/publications/artificial-intelligence-in-society-eedfee77-en.htm"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Sina Fazelpour and Maria De-Arteaga. 2022. Diversity in
sociotechnical machine learning systems. Big Data &amp;
Society 9, 1 (Jan. 2022).
<a href="https://doi.org/10.1177%2F20539517221082027"
>URL</a
>
</p>
<p>
Microsoft Community Jury, Azure Application Architecture
Guide.
<a
href="https://docs.microsoft.com/en-us/azure/architecture/guide/responsible-innovation/community-jury/"
>URL</a
>
</p>
<p>
Fernando Delgado, Stephen Yang, Michael Madaio, Qian Yang.
(2021). Stakeholder Participation in AI: Beyond "Add
Diverse Stakeholders and Stir".
<a
href="https://deepai.org/publication/stakeholder-participation-in-ai-beyond-add-diverse-stakeholders-and-stir"
>URL</a
>
</p>
<p>
Kush Varshney, Tina Park, Inioluwa Deborah Raji, Gaurush
Hiranandani, Narasimhan Harikrishna, Oluwasanmi Koyejo,
Brianna Richardson, and Min Kyung Lee. Participatory
specification of trustworthy machine learning, 2021.
</p>
<p>
Donald Martin, Vinodkumar Prabhakaran, Jill A. Kuhlberg,
Andrew Smart and William S. Isaac. “Participatory Problem
Formulation for Fairer Machine Learning Through Community
Based System Dynamics”, ArXiv abs/2005.07572 (2020).
<a href="https://arxiv.org/pdf/2005.07572.pdf">URL</a>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.3"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.3"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.3
</button>
</h3>
<p class="pbindex__outer-accordion__description">
The organization's mission and relevant goals for the AI
technology are understood and documented.
</p>
<div
id="button-MAP 1.3"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Defining and documenting the specific business purpose of
an AI system in a broader context of societal values helps
teams to evaluate risks and increases the clarity of
“go/no-go” decisions about whether to deploy.
</p>
<p>
Trustworthy AI technologies may present a demonstrable
business benefit beyond implicit or explicit costs,
provide added value, and don't lead to wasted resources.
Organizations can feel confident in avoiding risk when the
implicit or explicit risks of an AI system outweigh its
advantages, and in not implementing an AI solution whose
risks surpass its potential benefits.
</p>
<p>
For example, making AI systems more equitable can result
in better managed risk, and can help enhance consideration
of the business value of making inclusively designed,
accessible and more equitable AI systems.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Build transparent practices into AI system development
processes.
</li>
<li>
Review the documented system purpose from a
socio-technical perspective and in consideration of
societal values.
</li>
<li>
Determine possible misalignment between societal values
and stated organizational principles and code of ethics.
</li>
<li>
Flag latent incentives that may contribute to negative
impacts.
</li>
<li>
Evaluate AI system purpose in consideration of potential
risks, societal values, and stated organizational
principles.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
How does the AI system help the entity meet its goals
and objectives?
</li>
<li>
How do the technical specifications and requirements
align with the AI system's goals and objectives?
</li>
<li>
To what extent is the output appropriate for the
operational context?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI 2019,
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>.
</li>
<li>
Including Insights from the Comptroller General's Forum
on the Oversight of Artificial Intelligence: An
Accountability Framework for Federal Agencies and Other
Entities, 2021,
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>,
<a
href="https://www.gao.gov/assets/gao-21-519sp-highlights.pdf"
>PDF</a
>.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
M.S. Ackerman (2000). The Intellectual Challenge of CSCW:
The Gap Between Social Requirements and Technical
Feasibility. Human–Computer Interaction, 15, 179–203.
<a
href="https://socialworldsresearch.org/sites/default/files/hci.final_.pdf"
>URL</a
>
</p>
<p>
McKane Andrus, Sarah Dean, Thomas Gilbert, Nathan Lambert,
Tom Zick (2021). AI Development for the Public Interest:
From Abstraction Traps to Sociotechnical Risks.
<a href="https://arxiv.org/pdf/2102.04255.pdf">URL</a>
</p>
<p>
Abeba Birhane, Pratyusha Kalluri, Dallas Card, et al.
2022. The Values Encoded in Machine Learning Research.
arXiv:2106.15590.
<a href="https://arxiv.org/abs/2106.15590">URL</a>
</p>
<p>
Board of Governors of the Federal Reserve System. SR 11-7:
Guidance on Model Risk Management. (April 4, 2011).
<a
href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm"
>URL</a
>
</p>
<p>
Iason Gabriel, Artificial Intelligence, Values, and
Alignment. Minds &amp; Machines 30, 411–437 (2020).
<a href="https://doi.org/10.1007/s11023-020-09539-2"
>URL</a
>
</p>
<p>
PEAT “Business Case for Equitable AI”.
<a
href="https://www.peatworks.org/ai-disability-inclusion-toolkit/business-case-for-equitable-ai/"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.4"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.4"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.4
</button>
</h3>
<p class="pbindex__outer-accordion__description">
The business value or context of business use has been clearly
defined or, in the case of assessing existing AI systems,
re-evaluated.
</p>
<div
id="button-MAP 1.4"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Socio-technical AI risks emerge from the interplay between
technical development decisions and how a system is used,
who operates it, and the social context into which it is
deployed. Addressing these risks is complex and requires a
commitment to understanding how contextual factors may
interact with AI lifecycle actions. One such contextual
factor is how organizational mission and identified system
purpose create incentives within AI system design,
development, and deployment tasks that may result in
positive and negative impacts. By establishing a
comprehensive and explicit enumeration of an AI system's
context of business use and expectations, organizations
can identify and manage these types of risks.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Document business value or context of business use.
</li>
<li>
Reconcile documented concerns about the system's purpose
within the business context of use compared to the
organization's stated values, mission statements, social
responsibility commitments, and AI principles.
</li>
<li>
Reconsider the design, implementation strategy, or
deployment of AI systems with potential impacts that do
not reflect institutional values.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
What goals and objectives does the entity expect to
achieve by designing, developing, and/or deploying the
AI system?
</li>
<li>
To what extent are the system outputs consistent with
the entity's values and principles to foster public
trust and equity?
</li>
<li>
To what extent are the metrics consistent with system
goals, objectives, and constraints, including ethical
and compliance considerations?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Algorithm Watch. AI Ethics Guidelines Global Inventory.
<a href="https://inventory.algorithmwatch.org/">URL</a>
</p>
<p>
Ethical OS toolkit.
<a href="https://ethicalos.org/">URL</a>
</p>
<p>
Emanuel Moss and Jacob Metcalf. 2020. Ethics Owners: A New
Model of Organizational Responsibility in Data-Driven
Technology Companies. Data &amp; Society Research
Institute.
<a href="https://datasociety.net/pubs/Ethics-Owners.pdf"
>URL</a
>
</p>
<p>
Future of Life Institute. Asilomar AI Principles.
<a
href="https://futureoflife.org/2017/08/11/ai-principles/"
>URL</a
>
</p>
<p>
Leonard Haas, Sebastian Gießler, and Veronika Thiel. 2020.
In the realm of paper tigers – exploring the failings of
AI ethics guidelines. (April 28, 2020).
<a
href="https://algorithmwatch.org/en/ai-ethics-guidelines-inventory-upgrade-2020/"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.5"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.5"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.5
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Organizational risk tolerances are determined and documented.
</p>
<div
id="button-MAP 1.5"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Risk tolerance reflects the level and type of risk the
organization is willing to accept while conducting its
mission and carrying out its strategy.
</p>
<p>
Organizations can follow existing regulations and
guidelines for risk criteria, tolerance and response
established by organizational, domain, discipline, sector,
or professional requirements. Some sectors or industries
may have established definitions of harm or may have
established documentation, reporting, and disclosure
requirements.
</p>
<p>
Within sectors, risk management may depend on existing
guidelines for specific applications and use case
settings. Where established guidelines do not exist,
organizations will want to define reasonable risk
tolerance in consideration of different sources of risk
(e.g., financial, operational, safety and wellbeing,
business, reputational, and model risks) and different
levels of risk (e.g., from negligible to critical).
</p>
<p>
Risk tolerances inform and support decisions about whether
to continue with development or deployment - termed
“go/no-go”. Go/no-go decisions related to AI system risks
can take stakeholder feedback into account, but remain
independent from stakeholders' vested financial or
reputational interests.
</p>
<p>
If mapping risk is prohibitively difficult, a "no-go"
decision may be considered for the specific system.
</p>
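<p>
As a minimal, non-normative sketch (the sources, levels, and
thresholds below are hypothetical; every organization defines its
own), documented risk tolerances can be expressed as a simple
register that supports go/no-go decisions:
</p>
<pre><code># Illustrative risk-tolerance register (hypothetical values, not AI RMF requirements)
RISK_LEVELS = ["negligible", "low", "moderate", "high", "critical"]

# Maximum acceptable level per source of risk
risk_tolerances = {
    "safety_and_wellbeing": "low",
    "financial": "moderate",
    "operational": "moderate",
    "reputational": "moderate",
    "model": "high",
}

def go_no_go(assessed_risks):
    """Return 'no-go' if any assessed risk exceeds its documented tolerance."""
    for source, level in assessed_risks.items():
        limit = risk_tolerances.get(source, "negligible")
        if RISK_LEVELS.index(level) > RISK_LEVELS.index(limit):
            return "no-go"
    return "go"

# Example: a safety risk assessed as "high" exceeds the "low" tolerance, so "no-go"
print(go_no_go({"safety_and_wellbeing": "high", "financial": "low"}))
</code></pre>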
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Utilize existing regulations and guidelines for risk
criteria, tolerance and response established by
organizational, domain, discipline, sector, or
professional requirements.
</li>
<li>
Establish risk tolerance levels for AI systems and
allocate the appropriate oversight resources to each
level.
</li>
<li>
Establish risk criteria in consideration of different
sources of risk, (e.g., financial, operational, safety
and wellbeing, business, reputational, and model risks)
and different levels of risk (e.g., from negligible to
critical).
</li>
<li>
Identify maximum allowable risk tolerance above which
the system will not be deployed, or will need to be
prematurely decommissioned, within the contextual or
application setting.
</li>
<li>
Articulate and analyze tradeoffs across trustworthiness
characteristics as relevant to proposed context of use.
When tradeoffs arise, document them and plan for
traceable actions (e.g., impact mitigation, removal of
system from development or use) to inform management
decisions.
</li>
<li>
Review uses of AI systems for “off-label” purposes,
especially in settings that organizations have deemed as
high-risk. Document decisions, risk-related trade-offs,
and system limitations.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Which existing regulations and guidelines apply, and which
has the entity followed, in developing system risk
tolerances?
</li>
<li>
What criteria and assumptions has the entity utilized
when developing system risk tolerances?
</li>
<li>
How has the entity identified maximum allowable risk
tolerance?
</li>
<li>
What conditions and purposes are considered “off-label”
for system use?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework,
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Board of Governors of the Federal Reserve System. SR 11-7:
Guidance on Model Risk Management. (April 4, 2011).
<a
href="https://www.federalreserve.gov/supervisionreg/srletters/sr1107.htm"
>URL</a
>
</p>
<p>
The Office of the Comptroller of the Currency. Enterprise
Risk Appetite Statement. (Nov. 20, 2019).
<a
href="https://www.occ.treas.gov/publications-and-resources/publications/banker-education/files/pub-risk-appetite-statement.pdf"
>URL</a
>
</p>
<p>
Brenda Boultwood, How to Develop an Enterprise Risk-Rating
Approach (Aug. 26, 2021). Global Association of Risk
Professionals (garp.org). Accessed Jan. 4, 2023.
<a
href="https://www.garp.org/risk-intelligence/culture-governance/how-to-develop-an-enterprise-risk-rating-approach"
>URL</a
>
</p>
<p>
Virginia Eubanks. Automating Inequality: How High-tech
Tools Profile, Police, and Punish the Poor. New York, NY:
St. Martin's Press, 2018.
</p>
<p>
GAO-17-63: Enterprise Risk Management: Selected Agencies'
Experiences Illustrate Good Practices in Managing Risk.
<a href="https://www.gao.gov/assets/gao-17-63.pdf">URL</a>
See Table 3.
</p>
<p>
NIST Risk Management Framework.
<a
href="https://csrc.nist.gov/projects/risk-management/about-rmf"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%201.6"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 1.6"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 1.6
</button>
</h3>
<p class="pbindex__outer-accordion__description">
System requirements (e.g., “the system shall respect the privacy
of its users”) are elicited from and understood by relevant AI
actors. Design decisions take socio-technical implications into
account to address AI risks.
</p>
<div
id="button-MAP 1.6"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
AI system development requirements may outpace
documentation processes for traditional software. When
written requirements are unavailable or incomplete, AI
actors may inadvertently overlook business and stakeholder
needs, over-rely on implicit human biases such as
confirmation bias and groupthink, and maintain exclusive
focus on computational requirements.
</p>
<p>
Eliciting system requirements, designing for end users,
and considering societal impacts early in the design phase
is a priority that can enhance AI systems'
trustworthiness.
</p>
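<p>
A minimal sketch of how an elicited requirement might be recorded so
that it stays visible to AI actors throughout design. The structure
and field names are hypothetical, offered only as one way of
documenting requirements alongside trustworthiness considerations:
</p>
<pre><code># Illustrative requirements entry (hypothetical structure; organizations define their own templates)
requirement = {
    "id": "REQ-PRIV-001",
    "statement": "The system shall respect the privacy of its users.",
    "elicited_from": ["end-user interviews", "legal counsel", "impacted community feedback"],
    "trustworthiness_characteristics": ["privacy-enhanced", "accountable and transparent"],
    "socio_technical_implications": "limits on data retention constrain personalization features",
    "verification": "privacy impact assessment and data-minimization review before deployment",
    "owner": "product management",
}
</code></pre>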
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Proactively incorporate trustworthy characteristics into
system requirements.
</li>
<li>
Establish mechanisms for regular communication and
feedback between relevant AI actors and internal or
external stakeholders related to system design or
deployment decisions.
</li>
<li>
Develop and standardize practices to assess potential
impacts at all stages of the AI lifecycle, and in
collaboration with interdisciplinary experts, actors
external to the team that developed or deployed the AI
system, and potentially impacted communities.
</li>
<li>
Include potentially impacted groups, communities and
external entities (e.g. civil society organizations,
research institutes, local community groups, and trade
associations) in the formulation of priorities,
definitions and outcomes during impact assessment
activities.
</li>
<li>
Conduct qualitative interviews with end user(s) to
regularly evaluate expectations and design plans related
to Human-AI configurations and tasks.
</li>
<li>
Analyze dependencies between contextual factors and
system requirements. List potential impacts that may
arise from not fully considering the importance of
trustworthiness characteristics in any decision making.
</li>
<li>
Follow responsible design techniques in tasks such as
software engineering, product management, and
participatory engagement. Some examples for eliciting
and documenting stakeholder requirements include product
requirement documents (PRDs), user stories, user
interaction/user experience (UI/UX) research, systems
engineering, ethnography and related field methods.
</li>
<li>
Conduct user research to understand individuals, groups
and communities that will be impacted by the AI, their
values &amp; context, and the role of systemic and
historical biases. Integrate learnings into decisions
about data selection and representation.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
What type of information is accessible on the design,
operations, and limitations of the AI system to external
stakeholders, including end users, consumers,
regulators, and individuals impacted by use of the AI
system?
</li>
<li>
To what extent is this information sufficient and
appropriate to promote transparency? Promote
transparency by enabling external stakeholders to access
information on the design, operation, and limitations of
the AI system.
</li>
<li>
To what extent has relevant information been disclosed
regarding the use of AI systems, such as (a) what the
system is for, (b) what it is not for, (c) how it was
designed, and (d) what its limitations are?
(Documentation and external communication can offer a
way for entities to provide transparency.)
</li>
<li>
How will the relevant AI actor(s) address changes in
accuracy and precision due to either an adversary's
attempts to disrupt the AI system or unrelated changes
in the operational/business environment, which may
impact the accuracy of the AI system?
</li>
<li>
What metrics has the entity developed to measure
performance of the AI system?
</li>
<li>
What justifications, if any, has the entity provided for
the assumptions, boundaries, and limitations of the AI
system?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Stakeholders in Explainable AI, Sep. 2018.
<a href="http://arxiv.org/abs/1810.00184">URL</a>
</li>
<li>
High-Level Expert Group on Artificial Intelligence set
up by the European Commission, Ethics Guidelines for
Trustworthy AI.
<a
href="https://digital-strategy.ec.europa.eu/en/library/ethics-guidelines-trustworthy-ai"
>URL</a
>,
<a
href="https://www.aepd.es/sites/default/files/2019-12/ai-ethics-guidelines.pdf"
>PDF</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
National Academies of Sciences, Engineering, and Medicine
2022. Fostering Responsible Computing Research:
Foundations and Practices. Washington, DC: The National
Academies Press.
<a href="https://doi.org/10.17226/26507">URL</a>
</p>
<p>
Abeba Birhane, William S. Isaac, Vinodkumar Prabhakaran,
Mark Diaz, Madeleine Clare Elish, Iason Gabriel and Shakir
Mohamed. “Power to the People? Opportunities and
Challenges for Participatory AI.” Equity and Access in
Algorithms, Mechanisms, and Optimization (2022).
<a href="https://arxiv.org/pdf/2209.07572.pdf">URL</a>
</p>
<p>
Amit K. Chopra, Fabiano Dalpiaz, F. Başak Aydemir, et al.
2014. Protos: Foundations for engineering innovative
sociotechnical systems. In 2014 IEEE 22nd International
Requirements Engineering Conference (RE) (2014), 53-62.
<a href="https://doi.org/10.1109/RE.2014.6912247">URL</a>
</p>
<p>
Andrew D. Selbst, danah boyd, Sorelle A. Friedler, et al.
2019. Fairness and Abstraction in Sociotechnical Systems.
In Proceedings of the Conference on Fairness,
Accountability, and Transparency (FAT* '19). Association
                          for Computing Machinery, New York, NY, USA, 59-68.
<a href="https://doi.org/10.1145/3287560.3287598">URL</a>
</p>
<p>
Gordon Baxter and Ian Sommerville. 2011. Socio-technical
systems: From design methods to systems engineering.
                          Interacting with Computers, 23, 1 (Jan. 2011), 4-17.
<a href="https://doi.org/10.1016/j.intcom.2010.07.003"
>URL</a
>
</p>
<p>
Roel Dobbe, Thomas Krendl Gilbert, and Yonatan Mintz.
2021. Hard choices in artificial intelligence. Artificial
Intelligence 300 (14 July 2021), 103555, ISSN 0004-3702.
<a href="https://doi.org/10.1016/j.artint.2021.103555"
>URL</a
>
</p>
<p>
Yilin Huang, Giacomo Poderi, Sanja Šćepanović, et al.
2019. Embedding Internet-of-Things in Large-Scale
Socio-technical Systems: A Community-Oriented Design in
Future Smart Grids. In The Internet of Things for Smart
Urban Ecosystems (2019), 125-150. Springer, Cham.
<a href="https://doi.org/10.1007/978-3-319-96550-5_6"
>URL</a
>
</p>
<p>
                          Victor Udoewa (2022). An introduction to radical
                          participatory design: decolonising participatory design
                          processes. Design Science, 8. doi:10.1017/dsj.2022.24.
<a
href="https://www.cambridge.org/core/journals/design-science/article/an-introduction-to-radical-participatory-design-decolonising-participatory-design-processes/63F70ECC408844D3CD6C1A5AC7D35F4D"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Map 2</h2>
<p class="usa-intro pbindex__top__title">
Categorization of the AI system is performed.
</p>
<ul class="pbindex__subcat-ul">
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%202.1"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 2.1"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 2.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">
The specific task, and methods used to implement the task, that
the AI system will support is defined (e.g., classifiers,
generative models, recommenders).
</p>
<div
id="button-MAP 2.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
AI actors define the technical learning or decision-making
task(s) an AI system is designed to accomplish, or the
benefits that the system will provide. The clearer and
narrower the task definition, the easier it is to map its
benefits and risks, leading to more fulsome risk
management.
</p>
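                        <p>
                          As an illustration only, a team might capture the task
                          definition, assumptions, and known limitations in a small
                          machine-readable record that travels with other system
                          documentation. The Python sketch below is a hypothetical
                          example; the system name, fields, and values are
                          assumptions, not a prescribed schema.
                        </p>
                        <pre><code># Minimal, hypothetical sketch of a machine-readable task definition.
# Field names and values are illustrative only, not a prescribed format.
import json

task_definition = {
    "system_name": "loan-inquiry-router",  # hypothetical system
    "task_type": "classifier",  # e.g., classifier, generative model, recommender
    "task_statement": "Route customer loan inquiries to one of five service queues.",
    "intended_benefit": "Shorter response times for routine inquiries.",
    "assumptions": [
        "Inquiries are submitted in English.",
        "Queue definitions remain stable between re-trainings.",
    ],
    "known_limitations": [
        "Not validated for inquiries about commercial loans.",
        "Performance unverified for inquiries shorter than ten words.",
    ],
}

# Persist alongside other system documentation for later mapping and
# measurement activities.
print(json.dumps(task_definition, indent=2))
</code></pre>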
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
                            Define and document AI systems existing and potential
learning task(s) along with known assumptions and
limitations.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
To what extent has the entity clearly defined technical
specifications and requirements for the AI system?
</li>
<li>
To what extent has the entity documented the AI systems
development, testing methodology, metrics, and
performance outcomes?
</li>
<li>
How do the technical specifications and requirements
align with the AI systems goals and objectives?
</li>
<li>
Did your organization implement accountability-based
practices in data management and protection (e.g. the
PDPA and OECD Privacy Principles)?
</li>
<li>
How are outputs marked to clearly show that they came
from an AI?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Datasheets for Datasets.
<a href="http://arxiv.org/abs/1803.09010">URL</a>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework-
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
<li>
ATARC Model Transparency Assessment (WD) 2020.
<a
href="https://atarc.org/wp-content/uploads/2020/10/atarc_model_transparency_assessment-FINAL-092020-2.docx"
>URL</a
>
</li>
<li>
Transparency in Artificial Intelligence - S. Larsson and
F. Heintz 2020.
<a
href="https://lucris.lub.lu.se/ws/files/79208055/Larsson_Heintz_2020_Transparency_in_artificial_intelligence_2020_05_05.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Leong, Brenda (2020). The Spectrum of Artificial
Intelligence - An Infographic Tool. Future of Privacy
Forum.
<a
href="https://fpf.org/blog/the-spectrum-of-artificial-intelligence-an-infographic-tool/"
>URL</a
>
</p>
<p>
Brownlee, Jason (2020). A Tour of Machine Learning
Algorithms. Machine Learning Mastery.
<a
href="https://machinelearningmastery.com/a-tour-of-machine-learning-algorithms/"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%202.2"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 2.2"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 2.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Information about the AI systems knowledge limits and how system
output may be utilized and overseen by humans is documented.
Documentation provides sufficient information to assist relevant
AI actors when making informed decisions and taking subsequent
actions.
</p>
<div
id="button-MAP 2.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
An AI lifecycle consists of many interdependent activities
involving a diverse set of actors that often do not have
full visibility or control over other parts of the
lifecycle and its associated contexts or risks. The
interdependencies between these activities, and among the
relevant AI actors and organizations, can make it
difficult to reliably anticipate potential impacts of AI
systems. For example, early decisions in identifying the
purpose and objective of an AI system can alter its
behavior and capabilities, and the dynamics of deployment
setting (such as end users or impacted individuals) can
shape the positive or negative impacts of AI system
decisions. As a result, the best intentions within one
dimension of the AI lifecycle can be undermined via
interactions with decisions and conditions in other, later
activities. This complexity and varying levels of
visibility can introduce uncertainty. And, once deployed
and in use, AI systems may sometimes perform poorly,
manifest unanticipated negative impacts, or violate legal
or ethical norms. These risks and incidents can result
from a variety of factors. For example, downstream
decisions can be influenced by end user over-trust or
under-trust, and other complexities related to
AI-supported decision-making.
</p>
<p>
                          Anticipating, articulating, assessing and documenting AI
                          systems knowledge limits and how system output may be
utilized and overseen by humans can help mitigate the
uncertainty associated with the realities of AI system
deployments. Rigorous design processes include defining
system knowledge limits, which are confirmed and refined
based on TEVV processes.
</p>
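                        <p>
                          As a hedged illustration of how documented knowledge limits
                          can support human oversight, the Python sketch below checks a
                          request against hypothetical documented operating conditions
                          and defers to a human reviewer when the request falls outside
                          them or when model confidence is below a documented
                          threshold. The limits, threshold, and names are assumptions
                          made for this example only.
                        </p>
                        <pre><code># Hypothetical sketch: enforce documented knowledge limits at inference time.
# The limits and threshold below are illustrative assumptions, not prescribed values.
from dataclasses import dataclass


@dataclass
class KnowledgeLimits:
    supported_languages: tuple = ("en",)  # documented supported languages
    max_input_tokens: int = 512  # documented upper bound on input length
    min_confidence_for_autonomy: float = 0.90  # below this, defer to a human


def within_limits(language, token_count, limits):
    """Return True only when the request matches the documented intended use."""
    return (language in limits.supported_languages
            and limits.max_input_tokens >= token_count)


def route_output(prediction, confidence, language, token_count, limits):
    # Out-of-scope inputs or low-confidence outputs are routed to a human,
    # consistent with documenting how outputs may be utilized and overseen.
    if not within_limits(language, token_count, limits):
        return "DEFER_TO_HUMAN: input outside documented knowledge limits"
    if confidence >= limits.min_confidence_for_autonomy:
        return "AUTOMATED: " + prediction
    return "DEFER_TO_HUMAN: confidence below documented threshold"


if __name__ == "__main__":
    limits = KnowledgeLimits()
    print(route_output("approve", 0.97, "en", 120, limits))  # automated
    print(route_output("approve", 0.55, "en", 120, limits))  # deferred
    print(route_output("approve", 0.97, "fr", 120, limits))  # deferred
</code></pre>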
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Document settings, environments and conditions that are
                            outside the AI systems intended use.
</li>
<li>
Design for end user workflows and toolsets, concept of
operations, and explainability and interpretability
criteria in conjunction with end user(s) and associated
qualitative feedback.
</li>
<li>
Plan and test human-AI configurations under close to
real-world conditions and document results.
</li>
<li>
Follow stakeholder feedback processes to determine
whether a system achieved its documented purpose within
a given use context, and whether end users can correctly
comprehend system outputs or results.
</li>
<li>
Document dependencies on upstream data and other AI
systems, including if the specified system is an
upstream dependency for another AI system or other data.
</li>
<li>
Document connections the AI system or data will have to
external networks (including the internet), financial
markets, and critical infrastructure that have potential
for negative externalities. Identify and document
                            negative impacts as part of considering the broader risk
                            thresholds and subsequent go/no-go deployment decisions, as
                            well as post-deployment decommissioning decisions.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Does the AI system provide sufficient information to
assist the personnel to make an informed decision and
take actions accordingly?
</li>
<li>
What type of information is accessible on the design,
operations, and limitations of the AI system to external
stakeholders, including end users, consumers,
regulators, and individuals impacted by use of the AI
system?
</li>
<li>
Based on the assessment, did your organization implement
the appropriate level of human involvement in
AI-augmented decision-making?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Datasheets for Datasets.
<a href="http://arxiv.org/abs/1803.09010">URL</a>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework-
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
<li>
ATARC Model Transparency Assessment (WD) 2020.
<a
href="https://atarc.org/wp-content/uploads/2020/10/atarc_model_transparency_assessment-FINAL-092020-2.docx"
>URL</a
>
</li>
<li>
Transparency in Artificial Intelligence - S. Larsson and
F. Heintz 2020.
<a
href="https://lucris.lub.lu.se/ws/files/79208055/Larsson_Heintz_2020_Transparency_in_artificial_intelligence_2020_05_05.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Context of use</h5>
<p>
                          International Organization for Standardization (ISO). 2019. ISO
9241-210:2019 Ergonomics of human-system interaction —
Part 210: Human-centred design for interactive systems.
<a href="https://www.iso.org/standard/77520.html">URL</a>
</p>
<p>
National Institute of Standards and Technology (NIST),
Mary Theofanos, Yee-Yin Choong, et al. 2017. NIST Handbook
161 Usability Handbook for Public Safety Communications:
Ensuring Successful Systems for First Responders.
<a href="https://doi.org/10.6028/NIST.HB.161">URL</a>
</p>
<h5>Human-AI interaction</h5>
<p>
Committee on Human-System Integration Research Topics for
the 711th Human Performance Wing of the Air Force Research
Laboratory and the National Academies of Sciences,
Engineering, and Medicine. 2022. Human-AI Teaming:
State-of-the-Art and Research Needs. Washington, D.C.
National Academies Press.
<a
href="https://nap.nationalacademies.org/catalog/26355/human-ai-teaming-state-of-the-art-and-research-needs"
>URL</a
>
</p>
<p>
Human Readiness Level Scale in the System Development
Process, American National Standards Institute and Human
                          Factors and Ergonomics Society, ANSI/HFES 400-2021.
</p>
<p>
Microsoft Responsible AI Standard, v2.
<a
href="https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4ZPmV"
>URL</a
>
</p>
<p>
                          Saar Alon-Barkat, Madalina Busuioc, Human-AI Interactions
in Public Sector Decision Making: “Automation Bias” and
“Selective Adherence” to Algorithmic Advice, Journal of
                          Public Administration Research and Theory, 2022, muac007.
<a href="https://doi.org/10.1093/jopart/muac007">URL</a>
</p>
<p>
Zana Buçinca, Maja Barbara Malaya, and Krzysztof Z. Gajos.
2021. To Trust or to Think: Cognitive Forcing Functions
Can Reduce Overreliance on AI in AI-assisted
Decision-making. Proc. ACM Hum.-Comput. Interact. 5,
CSCW1, Article 188 (April 2021), 21 pages.
<a href="https://doi.org/10.1145/3449287">URL</a>
</p>
<p>
                          Mary L. Cummings. 2006. Automation and accountability in
                          decision support system interface design. The Journal of
                          Technology Studies 32(1): 23-31.
<a
href="https://scholar.lib.vt.edu/ejournals/JOTS/v32/v32n1/pdf/cummings.pdf"
>URL</a
>
</p>
<p>
Engstrom, D. F., Ho, D. E., Sharkey, C. M., &amp; Cuéllar,
M. F. (2020). Government by algorithm: Artificial
intelligence in federal administrative agencies. NYU
School of Law, Public Law Research Paper, (20-54).
<a
href="https://www.acus.gov/report/government-algorithm-artificial-intelligence-federal-administrative-agencies"
>URL</a
>
</p>
<p>
Susanne Gaube, Harini Suresh, Martina Raue, et al. 2021.
Do as AI say: susceptibility in deployment of clinical
decision-aids. npj Digital Medicine 4, Article 31 (2021).
<a href="https://doi.org/10.1038/s41746-021-00385-9"
>URL</a
>
</p>
<p>
Ben Green. 2021. The Flaws of Policies Requiring Human
Oversight of Government Algorithms. Computer Law &amp;
Security Review 45 (26 Apr. 2021).
<a href="https://dx.doi.org/10.2139/ssrn.3921216">URL</a>
</p>
<p>
Ben Green and Amba Kak. 2021. The False Comfort of Human
Oversight as an Antidote to A.I. Harm. (June 15, 2021).
<a
href="https://slate.com/technology/2021/06/human-oversight-artificial-intelligence-laws.html"
>URL</a
>
</p>
<p>
Grgić-Hlača, N., Engel, C., &amp; Gummadi, K. P. (2019).
Human decision making with machine assistance: An
experiment on bailing and jailing. Proceedings of the ACM
on Human-Computer Interaction, 3(CSCW), 1-25.
<a href="https://dl.acm.org/doi/pdf/10.1145/3359280"
>URL</a
>
</p>
<p>
Forough Poursabzi-Sangdeh, Daniel G Goldstein, Jake M
Hofman, et al. 2021. Manipulating and Measuring Model
Interpretability. In Proceedings of the 2021 CHI
Conference on Human Factors in Computing Systems (CHI
'21). Association for Computing Machinery, New York, NY,
                          USA, Article 237, 1-52.
<a href="https://doi.org/10.1145/3411764.3445315">URL</a>
</p>
<p>
C. J. Smith (2019). Designing trustworthy AI: A
human-machine teaming framework to guide development.
arXiv preprint arXiv:1910.03515.
<a
href="https://kilthub.cmu.edu/articles/conference_contribution/Designing_Trustworthy_AI_A_Human-Machine_Teaming_Framework_to_Guide_Development/12119847/1"
>URL</a
>
</p>
<p>
                          T. Warden, P. Carayon, et al. The National Academies
Board on Human System Integration (BOHSI) Panel:
Explainable AI, System Transparency, and Human Machine
Teaming. Proceedings of the Human Factors and Ergonomics
Society Annual Meeting. 2019;63(1):631-635.
doi:10.1177/1071181319631100.
<a
href="https://sites.nationalacademies.org/cs/groups/dbassesite/documents/webpage/dbasse_196735.pdf"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%202.3"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 2.3"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 2.3
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Scientific integrity and TEVV considerations are identified and
documented, including those related to experimental design, data
collection and selection (e.g., availability, representativeness,
suitability), system trustworthiness, and construct validation.
</p>
<div
id="button-MAP 2.3"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Standard testing and evaluation protocols provide a basis
to confirm assurance in a system that it is operating as
                          designed and claimed. AI systems complexities create
challenges for traditional testing and evaluation
methodologies, which tend to be designed for static or
isolated system performance. Opportunities for risk
continue well beyond design and deployment, into system
operation and application of system-enabled decisions.
Testing and evaluation methodologies and metrics therefore
address a continuum of activities. TEVV is enhanced when
key metrics for performance, safety, and reliability are
interpreted in a socio-technical context and not confined
to the boundaries of the AI system pipeline.
</p>
<p>
Other challenges for managing AI risks relate to
                          dependence on large-scale datasets, which can raise data
                          quality and validity concerns. The difficulty of finding
the “right” data may lead AI actors to select datasets
based more on accessibility and availability than on
suitability for operationalizing the phenomenon that the
AI system intends to support or inform. Such decisions
could contribute to an environment where the data used in
processes is not fully representative of the populations
or phenomena that are being modeled, introducing
                          downstream risks. Practices such as dataset reuse may also
                          lead to a disconnect from the social contexts and time
periods of their creation. This contributes to issues of
validity of the underlying dataset for providing proxies,
measures, or predictors within the model.
</p>
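                        <p>
                          As one hedged way to surface this risk, the Python sketch
                          below compares the subgroup composition of a candidate
                          dataset with an assumed reference distribution for the
                          deployment population and flags large gaps for review. The
                          subgroups, reference figures, and tolerance are illustrative
                          assumptions, not recommended values.
                        </p>
                        <pre><code># Hypothetical sketch: compare a dataset's subgroup shares against an
# assumed reference distribution for the deployment context.
from collections import Counter


def subgroup_shares(records, key):
    """Share of records per subgroup value, e.g., a region or age band."""
    counts = Counter(record[key] for record in records)
    total = sum(counts.values())
    return {group: n / total for group, n in counts.items()}


def representation_gaps(dataset_shares, reference_shares, tolerance=0.10):
    """Flag subgroups whose share differs from the reference by more than tolerance."""
    gaps = {}
    for group, expected in reference_shares.items():
        observed = dataset_shares.get(group, 0.0)
        if abs(observed - expected) > tolerance:
            gaps[group] = {"observed": round(observed, 3), "expected": expected}
    return gaps


if __name__ == "__main__":
    # Toy candidate dataset and an assumed reference distribution (illustrative).
    records = [{"region": "urban"}] * 80 + [{"region": "rural"}] * 20
    reference = {"urban": 0.60, "rural": 0.40}
    print(representation_gaps(subgroup_shares(records, "region"), reference))
</code></pre>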
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Identify and document experiment design and statistical
techniques that are valid for testing complex
socio-technical systems like AI, which involve human
factors, emergent properties, and dynamic context(s) of
use.
</li>
<li>
                            Develop and apply TEVV protocols for models, the system and
its subcomponents, deployment, and operation.
</li>
<li>
Demonstrate and document that AI system performance and
validation metrics are interpretable and unambiguous for
downstream decision making tasks, and take
socio-technical factors such as context of use into
consideration.
</li>
<li>
Identify and document assumptions, techniques, and
metrics used for testing and evaluation throughout the
AI lifecycle including experimental design techniques
for data collection, selection, and management practices
in accordance with data governance policies established
in GOVERN.
</li>
<li>
Identify testing modules that can be incorporated
throughout the AI lifecycle, and verify that processes
enable corroboration by independent evaluators.
</li>
<li>
Establish mechanisms for regular communication and
feedback among relevant AI actors and internal or
external stakeholders related to the validity of design
and deployment assumptions.
</li>
<li>
Establish mechanisms for regular communication and
feedback between relevant AI actors and internal or
external stakeholders related to the development of TEVV
approaches throughout the lifecycle to detect and assess
potentially harmful impacts
</li>
<li>
Document assumptions made and techniques used in data
selection, curation, preparation and analysis,
including:
<ul>
<li>
identification of constructs and proxy targets,
</li>
<li>
                                development of indices, especially those
                                operationalizing concepts that are inherently
                                unobservable (e.g. “hireability,” “criminality,”
                                “lendability”).
</li>
</ul>
</li>
<li>
Map adherence to policies that address data and
construct validity, bias, privacy and security for AI
systems and verify documentation, oversight, and
processes.
</li>
<li>
Identify and document transparent methods (e.g. causal
discovery methods) for inferring causal relationships
between constructs being modeled and dataset attributes
or proxies.
</li>
<li>
Identify and document processes to understand and trace
test and training data lineage and its metadata
resources for mapping risks.
</li>
<li>
Document known limitations, risk mitigation efforts
associated with, and methods used for, training data
collection, selection, labeling, cleaning, and analysis
(e.g. treatment of missing, spurious, or outlier data;
biased estimators).
</li>
<li>
Establish and document practices to check for
capabilities that are in excess of those that are
planned for, such as emergent properties, and to revisit
prior risk management steps in light of any new
capabilities.
</li>
<li>
Establish processes to test and verify that design
assumptions about the set of deployment contexts
continue to be accurate and sufficiently complete.
</li>
<li>
Work with domain experts and other external AI actors
to:
<ul>
<li>
Gain and maintain contextual awareness and knowledge
about how human behavior, organizational factors and
dynamics, and society influence, and are represented
in, datasets, processes, models, and system output.
</li>
<li>
Identify participatory approaches for responsible
Human-AI configurations and oversight tasks, taking
into account sources of cognitive bias.
</li>
<li>
Identify techniques to manage and mitigate sources
                                of bias (systemic, computational, human-cognitive)
                                in computational models and systems, and the
                                assumptions and decisions in their development.
</li>
</ul>
</li>
<li>
                            Investigate and document potential negative impacts
                            related to the full product lifecycle and associated
processes that may conflict with organizational values
and principles.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Are there any known errors, sources of noise, or
redundancies in the data?
</li>
<li>
Over what time-frame was the data collected? Does the
                            collection time-frame match the creation time-frame?
</li>
<li>
What is the variable selection and evaluation process?
</li>
<li>
How was the data collected? Who was involved in the data
collection process? If the dataset relates to people
(e.g., their attributes) or was generated by people,
were they informed about the data collection? (e.g.,
datasets that collect writing, photos, interactions,
transactions, etc.)
</li>
<li>
As time passes and conditions change, is the training
data still representative of the operational
environment?
</li>
<li>
Why was the dataset created? (e.g., were there specific
tasks in mind, or a specific gap that needed to be
filled?)
</li>
<li>
How does the entity ensure that the data collected are
adequate, relevant, and not excessive in relation to the
intended purpose?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Datasheets for Datasets.
<a href="http://arxiv.org/abs/1803.09010">URL</a>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework-
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
ATARC Model Transparency Assessment (WD) 2020.
<a
href="https://atarc.org/wp-content/uploads/2020/10/atarc_model_transparency_assessment-FINAL-092020-2.docx"
>URL</a
>
</li>
<li>
Transparency in Artificial Intelligence - S. Larsson and
F. Heintz 2020.
<a
href="https://lucris.lub.lu.se/ws/files/79208055/Larsson_Heintz_2020_Transparency_in_artificial_intelligence_2020_05_05.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Challenges with dataset selection</h5>
<p>
Alexandra Olteanu, Carlos Castillo, Fernando Diaz, and
Emre Kiciman. 2019. Social Data: Biases, Methodological
Pitfalls, and Ethical Boundaries. Front. Big Data 2, 13
(11 July 2019).
<a href="https://doi.org/10.3389/fdata.2019.00013">URL</a>
</p>
<p>
Amandalynne Paullada, Inioluwa Deborah Raji, Emily M.
Bender, et al. 2020. Data and its (dis)contents: A survey
of dataset development and use in machine learning
research. arXiv:2012.05345.
<a href="https://arxiv.org/abs/2012.05345">URL</a>
</p>
<p>
Catherine D'Ignazio and Lauren F. Klein. 2020. Data
Feminism. The MIT Press, Cambridge, MA.
<a href="https://data-feminism.mitpress.mit.edu/">URL</a>
</p>
<p>
Miceli, M., &amp; Posada, J. (2022). The Data-Production
Dispositif. ArXiv, abs/2205.11963.
</p>
<p>
Barbara Plank. 2016. What to do about non-standard (or
non-canonical) language in NLP. arXiv:1608.07836.
<a href="https://arxiv.org/abs/1608.07836">URL</a>
</p>
<h5>
Dataset and test, evaluation, validation and verification
(TEVV) processes in AI system development
</h5>
<p>
National Institute of Standards and Technology (NIST),
Reva Schwartz, Apostol Vassilev, et al. 2022. NIST Special
Publication 1270 Towards a Standard for Identifying and
Managing Bias in Artificial Intelligence.
<a
href="https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.1270.pdf"
>URL</a
>
</p>
<p>
Inioluwa Deborah Raji, Emily M. Bender, Amandalynne
Paullada, et al. 2021. AI and the Everything in the Whole
Wide World Benchmark. arXiv:2111.15366.
<a href="https://arxiv.org/abs/2111.15366">URL</a>
</p>
<h5>Statistical balance</h5>
<p>
Ziad Obermeyer, Brian Powers, Christine Vogeli, and
Sendhil Mullainathan. 2019. Dissecting racial bias in an
algorithm used to manage the health of populations.
Science 366, 6464 (25 Oct. 2019), 447-453.
<a href="https://doi.org/10.1126/science.aax2342">URL</a>
</p>
<p>
Amandalynne Paullada, Inioluwa Deborah Raji, Emily M.
Bender, et al. 2020. Data and its (dis)contents: A survey
of dataset development and use in machine learning
research. arXiv:2012.05345.
<a href="https://arxiv.org/abs/2012.05345">URL</a>
</p>
<p>
Solon Barocas, Anhong Guo, Ece Kamar, et al. 2021.
Designing Disaggregated Evaluations of AI Systems:
Choices, Considerations, and Tradeoffs. Proceedings of the
2021 AAAI/ACM Conference on AI, Ethics, and Society.
Association for Computing Machinery, New York, NY, USA,
                          368-378.
<a href="https://doi.org/10.1145/3461702.3462610">URL</a>
</p>
<h5>Measurement and evaluation</h5>
<p>
Abigail Z. Jacobs and Hanna Wallach. 2021. Measurement and
Fairness. In Proceedings of the 2021 ACM Conference on
                          Fairness, Accountability, and Transparency (FAccT 21).
Association for Computing Machinery, New York, NY, USA,
                          375-385.
<a href="https://doi.org/10.1145/3442188.3445901">URL</a>
</p>
<p>
Ben Hutchinson, Negar Rostamzadeh, Christina Greer, et al.
2022. Evaluation Gaps in Machine Learning Practice.
arXiv:2205.05256.
<a href="https://arxiv.org/abs/2205.05256">URL</a>
</p>
<p>
Laura Freeman, "Test and evaluation for artificial
intelligence." Insight 23.1 (2020): 27-30.
<a href="https://doi.org/10.1002/inst.12281">URL</a>
</p>
<h5>Existing frameworks</h5>
<p>
National Institute of Standards and Technology. (2018).
Framework for improving critical infrastructure
cybersecurity.
<a
href="https://nvlpubs.nist.gov/nistpubs/cswp/nist.cswp.04162018.pdf"
>URL</a
>
</p>
<p>
Kaitlin R. Boeckl and Naomi B. Lefkovitz. "NIST Privacy
Framework: A Tool for Improving Privacy Through Enterprise
Risk Management, Version 1.0." National Institute of
Standards and Technology (NIST), January 16, 2020.
<a
href="https://www.nist.gov/publications/nist-privacy-framework-tool-improving-privacy-through-enterprise-risk-management."
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Map 3</h2>
<p class="usa-intro pbindex__top__title">
AI capabilities, targeted usage, goals, and expected benefits and costs
compared with appropriate benchmarks are understood.
</p>
<ul class="pbindex__subcat-ul">
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%203.1"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 3.1"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 3.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Potential benefits of intended AI system functionality and
performance are examined and documented.
</p>
<div
id="button-MAP 3.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
AI systems have enormous potential to improve quality of
                          life, and enhance economic prosperity and security.
Organizations are encouraged to define and document system
purpose and utility, and its potential positive impacts
and benefits beyond current known performance benchmarks.
</p>
<p>
It is encouraged that risk management and assessment of
benefits and impacts include processes for regular and
meaningful communication with potentially affected groups
and communities. These stakeholders can provide valuable
input related to systems benefits and possible
limitations. Organizations may differ in the types and
number of stakeholders with which they engage.
</p>
<p>
Other approaches such as human-centered design (HCD) and
value-sensitive design (VSD) can help AI teams to engage
broadly with individuals and communities. This type of
engagement can enable AI teams to learn about how a given
                          technology may cause positive or negative impacts that
were not originally considered or intended.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Utilize participatory approaches and engage with system
                            end users to understand and document AI systems
potential benefits, efficacy and interpretability of AI
task output.
</li>
<li>
Maintain awareness and documentation of the individuals,
                            groups, or communities who make up the systems internal
and external stakeholders.
</li>
<li>
Verify that appropriate skills and practices are
available in-house for carrying out participatory
activities such as eliciting, capturing, and
synthesizing user, operator and external feedback, and
translating it for AI design and development functions.
</li>
<li>
Establish mechanisms for regular communication and
feedback between relevant AI actors and internal or
external stakeholders related to system design or
deployment decisions.
</li>
<li>
                            Consider performance relative to human baseline metrics or other
standard benchmarks.
</li>
<li>
                            Incorporate feedback from end users and potentially
                            impacted individuals and communities about perceived
                            system benefits.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Have the benefits of the AI system been communicated to
end users?
</li>
<li>
Have the appropriate training material and disclaimers
about how to adequately use the AI system been provided
to end users?
</li>
<li>
Has your organization implemented a risk management
system to address risks involved in deploying the
identified AI system (e.g. personnel risk or changes to
commercial objectives)?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Roel Dobbe, Thomas Krendl Gilbert, and Yonatan Mintz.
2021. Hard choices in artificial intelligence. Artificial
Intelligence 300 (14 July 2021), 103555, ISSN 0004-3702.
<a href="https://doi.org/10.1016/j.artint.2021.103555"
>URL</a
>
</p>
<p>
Samir Passi and Solon Barocas. 2019. Problem Formulation
and Fairness. In Proceedings of the Conference on
Fairness, Accountability, and Transparency (FAT* '19).
Association for Computing Machinery, New York, NY, USA,
                          39-48.
<a href="https://doi.org/10.1145/3287560.3287567">URL</a>
</p>
<p>
Vincent T. Covello. 2021. Stakeholder Engagement and
Empowerment. In Communicating in Risk, Crisis, and High
Stress Situations (Vincent T. Covello, ed.), 87-109.
<a href="https://ieeexplore.ieee.org/document/9648995"
>URL</a
>
</p>
<p>
Yilin Huang, Giacomo Poderi, Sanja Šćepanović, et al.
2019. Embedding Internet-of-Things in Large-Scale
Socio-technical Systems: A Community-Oriented Design in
Future Smart Grids. In The Internet of Things for Smart
Urban Ecosystems (2019), 125-150. Springer, Cham.
<a
href="https://link.springer.com/chapter/10.1007/978-3-319-96550-5_6"
>URL</a
>
</p>
<p>
Eloise Taysom and Nathan Crilly. 2017. Resilience in
Sociotechnical Systems: The Perspectives of Multiple
Stakeholders. She Ji: The Journal of Design, Economics,
and Innovation, 3, 3 (2017), 165-182, ISSN 2405-8726.
<a
href="https://www.sciencedirect.com/science/article/pii/S2405872617300758"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%203.2"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 3.2"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 3.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Potential costs, including non-monetary costs, which result from
expected or realized AI errors or system functionality and
trustworthiness - as connected to organizational risk tolerance -
are examined and documented.
</p>
<div
id="button-MAP 3.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Anticipating negative impacts of AI systems is a difficult
task. Negative impacts can be due to many factors, such as
system non-functionality or use outside of its operational
limits, and may range from minor annoyance to serious
injury, financial losses, or regulatory enforcement
actions. AI actors can work with a broad set of
stakeholders to improve their capacity for understanding
systems potential impacts and subsequently systems
risks.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Perform context analysis to map potential negative
impacts arising from not integrating trustworthiness
characteristics. When negative impacts are not direct or
obvious, AI actors can engage with stakeholders external
to the team that developed or deployed the AI system,
and potentially impacted communities, to examine and
document:
<ul>
<li>Who could be harmed?</li>
<li>What could be harmed?</li>
<li>When could harm arise?</li>
<li>How could harm arise?</li>
</ul>
</li>
<li>
Identify and implement procedures for regularly
evaluating the qualitative and quantitative costs of
internal and external AI system failures. Develop
actions to prevent, detect, and/or correct potential
risks and related impacts. Regularly evaluate failure
costs to inform go/no-go deployment decisions throughout
the AI system lifecycle.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
To what extent does the system/entity consistently
measure progress towards stated goals and objectives?
</li>
<li>
To what extent can users or parties affected by the
outputs of the AI system test the AI system and provide
feedback?
</li>
<li>
Have you documented and explained that machine errors
may differ from human errors?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Abagayle Lee Blank. 2019. Computer vision machine learning
and future-oriented ethics. Honors Project. Seattle
Pacific University (SPU), Seattle, WA.
<a
href="https://digitalcommons.spu.edu/cgi/viewcontent.cgi?article=1100&amp;context=honorsprojects"
>URL</a
>
</p>
<p>
Margarita Boyarskaya, Alexandra Olteanu, and Kate
Crawford. 2020. Overcoming Failures of Imagination in AI
Infused System Development and Deployment.
arXiv:2011.13416.
<a href="https://arxiv.org/abs/2011.13416">URL</a>
</p>
<p>
Jeff Patton. 2014. User Story Mapping. O'Reilly,
Sebastopol, CA.
<a href="https://www.jpattonassociates.com/story-mapping/"
>URL</a
>
</p>
<p>
Margarita Boenig-Liptsin, Anissa Tanweer &amp; Ari
Edmundson (2022) Data Science Ethos Lifecycle: Interplay
of ethical thinking and data science practice, Journal of
Statistics and Data Science Education, DOI:
10.1080/26939169.2022.2089411
</p>
<p>
J. Cohen, D. S. Katz, M. Barker, N. Chue Hong, R. Haines
and C. Jay, "The Four Pillars of Research Software
Engineering," in IEEE Software, vol. 38, no. 1, pp.
97-105, Jan.-Feb. 2021, doi: 10.1109/MS.2020.2973362.
</p>
<p>
National Academies of Sciences, Engineering, and Medicine
2022. Fostering Responsible Computing Research:
Foundations and Practices. Washington, DC: The National
Academies Press.
<a href="https://doi.org/10.17226/26507">URL</a>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%203.3"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 3.3"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 3.3
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Targeted application scope is specified and documented based on
the systems capability, established context, and AI system
categorization.
</p>
<div
id="button-MAP 3.3"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Systems that function in a narrow scope tend to enable
better mapping, measurement, and management of risks in
the learning or decision-making tasks and the system
context. A narrow application scope also helps ease TEVV
functions and related resources within an organization.
</p>
<p>
For example, large language models or open-ended chatbot
systems that interact with the public on the internet have
a large number of risks that may be difficult to map,
measure, and manage due to the variability from both the
decision-making task and the operational context. Instead,
a task-specific chatbot utilizing templated responses that
follow a defined “user journey” is a scope that can be
more easily mapped, measured and managed.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
                            Consider narrowing contexts for system deployment,
                            including factors related to:
                            <ul>
                              <li>
                                How outcomes may directly or indirectly affect
                                users, groups, communities and the environment.
                              </li>
                              <li>
                                Length of time the system is deployed in between
                                re-trainings.
                              </li>
                              <li>
                                Geographical regions in which the system operates.
                              </li>
                              <li>
                                Dynamics related to community standards or
                                likelihood of system misuse or abuses (either
                                purposeful or unanticipated).
                              </li>
                              <li>
                                How AI system features and capabilities can be
                                utilized within other applications, or in place of
                                other existing processes.
                              </li>
                            </ul>
</li>
<li>
Engage AI actors from legal and procurement functions
when specifying target application scope.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
To what extent has the entity clearly defined technical
specifications and requirements for the AI system?
</li>
<li>
How do the technical specifications and requirements
align with the AI systems goals and objectives?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
                          Mark J. Van der Laan and Sherri Rose (2018). Targeted
                          Learning in Data Science. Cham: Springer International
                          Publishing.
</p>
<p>
Alice Zheng. 2015. Evaluating Machine Learning Models
(2015). O'Reilly.
<a
href="https://www.oreilly.com/library/view/evaluating-machine-learning/9781492048756/"
>URL</a
>
</p>
<p>
Brenda Leong and Patrick Hall (2021). 5 things lawyers
should know about artificial intelligence. ABA Journal.
<a
href="https://www.abajournal.com/columns/article/5-things-lawyers-should-know-about-artificial-intelligence"
>URL</a
>
</p>
<p>
UK Centre for Data Ethics and Innovation, “The roadmap to
an effective AI assurance ecosystem”.
<a
href="https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/1039146/The_roadmap_to_an_effective_AI_assurance_ecosystem.pdf"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%203.4"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 3.4"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 3.4
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Processes for operator and practitioner proficiency with AI system
performance and trustworthiness and relevant technical standards
and certifications are defined, assessed and documented.
</p>
<div
id="button-MAP 3.4"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Human-AI configurations can span from fully autonomous to
fully manual. AI systems can autonomously make decisions,
defer decision-making to a human expert, or be used by a
human decision-maker as an additional opinion. In some
scenarios, professionals with expertise in a specific
domain work in conjunction with an AI system towards a
specific end goal—for example, a decision about another
individual(s). Depending on the purpose of the system, the
expert may interact with the AI system but is rarely part
of the design or development of the system itself. These
experts are not necessarily familiar with machine
learning, data science, computer science, or other fields
traditionally associated with AI design or development and
- depending on the application - will likely not require
such familiarity. For example, for AI systems that are
                          deployed in health care delivery, the experts are the
                          physicians, who bring their expertise about medicine—not
data science, data modeling and engineering, or other
computational factors. The challenge in these settings is
not educating the end user about AI system capabilities,
but rather leveraging, and not replacing, practitioner
domain expertise.
</p>
<p>
Questions remain about how to configure humans and
automation for managing AI risks. Risk management is
enhanced when organizations that design, develop or deploy
AI systems for use by professional operators and
practitioners:
</p>
<ul>
<li>
are aware of these knowledge limitations and strive to
identify risks in human-AI interactions and
configurations across all contexts, and the potential
resulting impacts,
</li>
<li>
define and differentiate the various human roles and
responsibilities when using or interacting with AI
systems, and
</li>
<li>
determine proficiency standards for AI system operation
in proposed context of use, as enumerated in MAP-1 and
established in GOVERN-3.2.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Identify and declare AI system features and capabilities
                            that may affect downstream AI actors decision-making in
                            deployment and operational settings, for example how
system features and capabilities may activate known
risks in various human-AI configurations, such as
selective adherence.
</li>
<li>
                            Identify skills and proficiency requirements for
                            operators, practitioners and other domain experts that
                            interact with AI systems.
                          </li>
                          <li>
                            Develop AI system operational documentation for AI
                            actors in deployed and operational environments,
                            including information about known risks, mitigation
                            criteria, and trustworthy characteristics enumerated in
                            MAP-1.
</li>
<li>
Define and develop training materials for proposed end
users, practitioners and operators about AI system use
and known limitations.
</li>
<li>
Define and develop certification procedures for
operating AI systems within defined contexts of use, and
information about what exceeds operational boundaries.
</li>
<li>
Include operators, practitioners and end users in AI
system prototyping and testing activities to help inform
operational boundaries and acceptable performance.
Conduct testing activities under scenarios similar to
deployment conditions.
</li>
<li>
Verify model output provided to AI system operators,
practitioners and end users is interactive, and
specified to context and user requirements defined in
MAP-1.
</li>
<li>
Verify AI system output is interpretable and unambiguous
for downstream decision making tasks.
</li>
<li>
Design AI system explanation complexity to match the
level of problem and context complexity.
</li>
<li>
Verify that design principles are in place for safe
operation by AI actors in decision-making environments.
</li>
<li>
Develop approaches to track human-AI configurations,
operator, and practitioner outcomes for integration into
continual improvement.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
What policies has the entity developed to ensure the use
of the AI system is consistent with its stated values
and principles?
</li>
<li>
How will the accountable human(s) address changes in
accuracy and precision due to either an adversarys
attempts to disrupt the AI or unrelated changes in
operational/business environment, which may impact the
accuracy of the AI?
</li>
<li>
How does the entity assess whether personnel have the
necessary skills, training, resources, and domain
knowledge to fulfill their assigned responsibilities?
</li>
<li>
Are the relevant staff dealing with AI systems properly
trained to interpret AI model output and decisions as
well as to detect and manage bias in data?
</li>
<li>
What metrics has the entity developed to measure
performance of various components?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
WEF Companion to the Model AI Governance Framework-
2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGIsago.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
National Academies of Sciences, Engineering, and Medicine.
2022. Human-AI Teaming: State-of-the-Art and Research
Needs. Washington, DC: The National Academies Press.
<a href="https://doi.org/10.17226/26355">URL</a>
</p>
<p>
Human Readiness Level Scale in the System Development
Process, American National Standards Institute and Human
Factors and Ergonomics Society, ANSI/HFES 400-2021.
</p>
<p>
Human-Machine Teaming Systems Engineering Guide. P
McDermott, C Dominguez, N Kasdaglis, M Ryan, I Trahan, A
Nelson. MITRE Corporation, 2018.
</p>
<p>
                          Saar Alon-Barkat, Madalina Busuioc, Human-AI Interactions
in Public Sector Decision Making: “Automation Bias” and
“Selective Adherence” to Algorithmic Advice, Journal of
                          Public Administration Research and Theory, 2022, muac007.
<a href="https://doi.org/10.1093/jopart/muac007">URL</a>
</p>
<p>
Breana M. Carter-Browne, Susannah B. F. Paletz, Susan G.
                          Campbell, Melissa J. Carraway, Sarah H. Vahlkamp, Jana
                          Schwartz, Polly ORourke, “There is No “AI” in Teams: A
                          Multidisciplinary Framework for AIs to Work in Human
                          Teams,” Applied Research Laboratory for Intelligence and
                          Security (ARLIS) Report, June 2021.
<a
href="https://www.arlis.umd.edu/sites/default/files/2022-03/No_AI_In_Teams_FinalReport%20(1).pdf"
>URL</a
>
</p>
<p>
R Crootof, ME Kaminski, and WN Price II. Humans in the
Loop (March 25, 2022). Vanderbilt Law Review, Forthcoming
2023, U of Colorado Law Legal Studies Research Paper No.
22-10, U of Michigan Public Law Research Paper No. 22-011.
<a
href="https://ssrn.com/abstract=4066781 or http://dx.doi.org/10.2139/ssrn.4066781"
>URL</a
>
</p>
<p>
S Mo Jones-Jang, Yong Jin Park, How do people react to AI
failure? Automation bias, algorithmic aversion, and
perceived controllability, Journal of Computer-Mediated
Communication, Volume 28, Issue 1, January 2023, zmac029.
<a href="https://doi.org/10.1093/jcmc/zmac029">URL</a>
</p>
<p>
A Knack, R Carter and A Babuta, "Human-Machine Teaming in
Intelligence Analysis: Requirements for developing trust
in machine learning systems," CETaS Research Reports
(December 2022).
<a
href="https://cetas.turing.ac.uk/sites/default/files/2022-12/cetas_research_report_-_hmt_and_intelligence_analysis_vfinal.pdf"
>URL</a
>
</p>
<p>
                          SD Ramchurn, S Stein, NR Jennings. Trustworthy human-AI
partnerships. iScience. 2021;24(8):102891. Published 2021
Jul 24. doi:10.1016/j.isci.2021.102891.
<a
href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8365362/pdf/main.pdf"
>URL</a
>
</p>
<p>
M. Veale, M. Van Kleek, and R. Binns, “Fairness and
Accountability Design Needs for Algorithmic Support in
High-Stakes Public Sector Decision-Making,” in Proceedings
of the 2018 CHI Conference on Human Factors in Computing
                          Systems - CHI 18. Montreal QC, Canada: ACM Press, 2018,
                          pp. 1-14.
<a
href="http://dl.acm.org/citation.cfm?doid=3173574.3174014"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%203.5"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 3.5"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 3.5
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Processes for human oversight are defined, assessed, and
documented in accordance with organizational policies from GOVERN
function.
</p>
<div
id="button-MAP 3.5"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
                          As AI systems have evolved in accuracy and precision,
                          computational systems have moved from being used purely
                          for decision support, or for explicit use by and under
                          the control of a human operator, to automated decision
                          making with limited input from humans. Computational
                          decision support systems augment another, typically
                          human, system in making decisions. These types of
                          configurations increase the likelihood of outputs being
                          produced with little human involvement.
</p>
<p>
Defining and differentiating various human roles and
responsibilities for AI systems governance, and
differentiating AI system overseers and those using or
interacting with AI systems can enhance AI risk management
activities.
</p>
<p>
In critical systems, high-stakes settings, and systems
                          deemed high-risk, it is of vital importance to evaluate
risks and effectiveness of oversight procedures before an
AI system is deployed.
</p>
<p>
Ultimately, AI system oversight is a shared
responsibility, and attempts to properly authorize or
govern oversight practices will not be effective without
organizational buy-in and accountability mechanisms, for
example those suggested in the GOVERN function.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
                            Identify and document AI systems features and
capabilities that require human oversight, in relation
to operational and societal contexts, trustworthy
characteristics, and risks identified in MAP-1.
</li>
<li>
Establish practices for AI systems oversight in
accordance with policies developed in GOVERN-1.
</li>
<li>
Define and develop training materials for relevant AI
Actors about AI system performance, context of use,
known limitations and negative impacts, and suggested
warning labels.
</li>
<li>
Include relevant AI Actors in AI system prototyping and
testing activities. Conduct testing activities under
scenarios similar to deployment conditions.
</li>
<li>
Evaluate AI system oversight practices for validity and
reliability. When oversight practices undergo extensive
updates or adaptations, retest, evaluate results, and
course correct as necessary.
</li>
<li>
Verify that model documents contain interpretable
descriptions of system mechanisms, enabling oversight
personnel to make informed, risk-based decisions about
system risks.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
What are the roles, responsibilities, and delegation of
authorities of personnel involved in the design,
development, deployment, assessment and monitoring of
the AI system?
</li>
<li>
How does the entity assess whether personnel have the
necessary skills, training, resources, and domain
knowledge to fulfill their assigned responsibilities?
</li>
<li>
Are the relevant staff dealing with AI systems properly
trained to interpret AI model output and decisions as
well as to detect and manage bias in data?
</li>
<li>
                            To what extent has the entity documented the AI systems
development, testing methodology, metrics, and
performance outcomes?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Ben Green, “The Flaws of Policies Requiring Human
Oversight of Government Algorithms,” SSRN Journal, 2021.
<a href="https://www.ssrn.com/abstract=3921216">URL</a>
</p>
<p>
Luciano Cavalcante Siebert, Maria Luce Lupetti, Evgeni
Aizenberg, Niek Beckers, Arkady Zgonnikov, Herman
Veluwenkamp, David Abbink, Elisa Giaccardi, Geert-Jan
Houben, Catholijn Jonker, Jeroen van den Hoven, Deborah
Forster, &amp; Reginald Lagendijk (2021). Meaningful human
control: actionable properties for AI system development.
AI and Ethics.
<a
href="https://link.springer.com/article/10.1007/s43681-022-00167-3"
>URL</a
>
</p>
<p>
                        Mary Cummings (2014). Automation and Accountability in
Decision Support System Interface Design. The Journal of
Technology Studies. 32. 10.21061/jots.v32i1.a.4.
<a
href="https://scholar.lib.vt.edu/ejournals/JOTS/v32/v32n1/pdf/cummings.pdf"
>URL</a
>
</p>
<p>
                        Madeleine Clare Elish (2016). Moral Crumple Zones:
Cautionary Tales in Human-Robot Interaction (WeRobot
2016). SSRN Electronic Journal. 10.2139/ssrn.2757236.
<a
href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2757236"
>URL</a
>
</p>
<p>
R Crootof, ME Kaminski, and WN Price II. Humans in the
Loop (March 25, 2022). Vanderbilt Law Review, Forthcoming
2023, U of Colorado Law Legal Studies Research Paper No.
22-10, U of Michigan Public Law Research Paper No. 22-011.
<a href="https://ssrn.com/abstract=4066781">LINK</a>,
<a href="http://dx.doi.org/10.2139/ssrn.4066781">URL</a>
</p>
<p>
Bogdana Rakova, Jingying Yang, Henriette Cramer, &amp;
Rumman Chowdhury (2020). Where Responsible AI meets
Reality. Proceedings of the ACM on Human-Computer
                        Interaction, 5, 1-23.
<a href="https://arxiv.org/pdf/2006.12358.pdf">URL</a>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Map 4</h2>
<p class="usa-intro pbindex__top__title">
Risks and benefits are mapped for all components of the AI system
including third-party software and data.
</p>
<ul class="pbindex__subcat-ul">
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%204.1"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 4.1"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 4.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Approaches for mapping AI technology and legal risks of its
components including the use of third-party data or software
are in place, followed, and documented, as are risks of
                    infringement of a third-partys intellectual property or other
rights.
</p>
<div
id="button-MAP 4.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
                          Technologies and personnel from third parties are
                          additional potential sources of risk to consider during
                          AI risk management activities. Such risks may be
                          difficult to map since risk priorities or tolerances may
                          not be the same as those of the deployer organization.
</p>
<p>
For example, the use of pre-trained models, which tend to
                          rely on large uncurated datasets or often have undisclosed
origins, has raised concerns about privacy, bias, and
unanticipated effects along with possible introduction of
increased levels of statistical uncertainty, difficulty
with reproducibility, and issues with scientific validity.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Review audit reports, testing results, product roadmaps,
warranties, terms of service, end user license
agreements, contracts, and other documentation related
to third-party entities to assist in value assessment
and risk management activities.
</li>
<li>
Review third-party software release schedules and
software change management plans (hotfixes, patches,
updates, forward- and backward- compatibility
guarantees) for irregularities that may contribute to AI
system risks.
</li>
<li>
                            Inventory third-party material (hardware, open-source
                            software, foundation models, open source data,
                            proprietary software, proprietary data, etc.) required
                            for system implementation and maintenance. An
                            illustrative sketch of one such inventory entry, in
                            Python, follows this list.
</li>
<li>
Review redundancies related to third-party technology
and personnel to assess potential risks due to lack of
adequate support.
</li>
</ul>
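                          <p>
                            The following is a minimal, hypothetical sketch (in
                            Python) of one way an organization might record an
                            entry in a third-party component inventory, as
                            suggested above. The field names and example values
                            are illustrative assumptions, not Playbook
                            requirements.
                          </p>
                          <pre><code>
# Hypothetical sketch only: recording an entry in a third-party component
# inventory. Field names and example values are illustrative assumptions.
from dataclasses import dataclass, asdict, field
import json


@dataclass
class ThirdPartyComponent:
    name: str             # e.g., an open-source library or pre-trained model
    component_type: str   # "software", "data", "model", "hardware", ...
    version: str
    license: str
    origin: str           # vendor, repository, or data source
    known_risks: list = field(default_factory=list)  # noted during review


inventory = [
    ThirdPartyComponent(
        name="example-pretrained-model",  # placeholder name
        component_type="model",
        version="1.2.0",
        license="Apache-2.0",
        origin="public model hub",
        known_risks=["undisclosed training data provenance"],
    ),
]

# Export the inventory for review alongside audit reports and contracts.
print(json.dumps([asdict(c) for c in inventory], indent=2))
</code></pre>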
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Did you establish a process for third parties (e.g.
suppliers, end users, subjects, distributors/vendors or
workers) to report potential vulnerabilities, risks or
biases in the AI system?
</li>
<li>
If your organization obtained datasets from a third
party, did your organization assess and manage the risks
of using such datasets?
</li>
<li>How will the results be independently verified?</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Language models</h5>
<p>
Emily M. Bender, Timnit Gebru, Angelina McMillan-Major,
and Shmargaret Shmitchell. 2021. On the Dangers of
Stochastic Parrots: Can Language Models Be Too Big? 🦜. In
Proceedings of the 2021 ACM Conference on Fairness,
Accountability, and Transparency (FAccT '21). Association
                        for Computing Machinery, New York, NY, USA, 610-623.
<a href="https://doi.org/10.1145/3442188.3445922">URL</a>
</p>
<p>
Julia Kreutzer, Isaac Caswell, Lisa Wang, et al. 2022.
Quality at a Glance: An Audit of Web-Crawled Multilingual
Datasets. Transactions of the Association for
                        Computational Linguistics 10 (2022), 50-72.
<a href="https://doi.org/10.1162/tacl_a_00447">URL</a>
</p>
<p>
Laura Weidinger, Jonathan Uesato, Maribeth Rauh, et al.
2022. Taxonomy of Risks posed by Language Models. In 2022
ACM Conference on Fairness, Accountability, and
Transparency (FAccT '22). Association for Computing
                        Machinery, New York, NY, USA, 214-229.
<a href="https://doi.org/10.1145/3531146.3533088">URL</a>
</p>
<p>
Office of the Comptroller of the Currency. 2021.
Comptroller's Handbook: Model Risk Management, Version
1.0, August 2021.
<a
href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html"
>URL</a
>
</p>
<p>
Rishi Bommasani, Drew A. Hudson, Ehsan Adeli, et al. 2021.
On the Opportunities and Risks of Foundation Models.
arXiv:2108.07258.
<a href="https://arxiv.org/abs/2108.07258">URL</a>
</p>
<p>
Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret
Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma,
Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori
Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, William
Fedus. “Emergent Abilities of Large Language Models.”
ArXiv abs/2206.07682 (2022).
<a href="https://arxiv.org/pdf/2206.07682.pdf">URL</a>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%204.2"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 4.2"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 4.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Internal risk controls for components of the AI system including
third-party AI technologies are identified and documented.
</p>
<div
id="button-MAP 4.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
In the course of their work, AI actors often utilize
open-source, or otherwise freely available, third-party
                          technologies, some of which may have privacy, bias, and
security risks. Organizations may consider internal risk
controls for these technology sources and build up
practices for evaluating third-party material prior to
deployment.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
                            Track third parties preventing or hampering risk-mapping
as indications of increased risk.
</li>
<li>
                            Supply resources such as model documentation templates
                            and software safelists to assist in third-party
                            technology inventory and approval activities. An
                            illustrative safelist-check sketch, in Python, follows
                            this list.
</li>
<li>
Review third-party material (including data and models)
for risks related to bias, data privacy, and security
vulnerabilities.
</li>
<li>
Apply traditional technology risk controls such as
procurement, security, and data privacy controls to
all acquired third-party technologies.
</li>
</ul>
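                          <p>
                            A minimal, hypothetical sketch (in Python) of the kind
                            of software safelist check referenced above. The
                            safelist contents and component names are assumptions
                            for illustration only.
                          </p>
                          <pre><code>
# Hypothetical sketch only: checking acquired third-party components against
# an organization-approved safelist. The safelist and component names are
# illustrative assumptions.
APPROVED_SAFELIST = {"numpy", "pandas", "scikit-learn"}

third_party_inventory = ["numpy", "unreviewed-model-package", "pandas"]

for component in third_party_inventory:
    if component in APPROVED_SAFELIST:
        print(f"{component}: approved")
    else:
        # Route to review for bias, data privacy, and security risks.
        print(f"{component}: not on safelist - requires review")
</code></pre>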
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
Can the AI system be audited by independent third
parties?
</li>
<li>
To what extent do these policies foster public trust and
confidence in the use of the AI system?
</li>
<li>
                            Are mechanisms established to facilitate the AI systems
                            auditability (e.g. traceability of the development
                            process, the sourcing of training data and the logging
                            of the AI systems processes, outcomes, positive and
negative impact)?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
WEF Model AI Governance Framework Assessment 2020.
<a
href="https://www.pdpc.gov.sg/-/media/Files/PDPC/PDF-Files/Resource-for-Organisation/AI/SGModelAIGovFramework2.pdf"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI - 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Office of the Comptroller of the Currency. 2021.
Comptroller's Handbook: Model Risk Management, Version
1.0, August 2021. Retrieved on July 7, 2022.
<a
href="https://www.occ.gov/publications-and-resources/publications/comptrollers-handbook/files/model-risk-management/index-model-risk-management.html"
>URL</a
>
</p>
<p>
Proposed Interagency Guidance on Third-Party
Relationships: Risk Management, 2021.
<a
href="https://www.occ.gov/news-issuances/news-releases/2021/nr-occ-2021-74a.pdf"
>URL</a
>
</p>
<p>
Kang, D., Raghavan, D., Bailis, P.D., &amp; Zaharia, M.A.
(2020). Model Assertions for Monitoring and Improving ML
Models. ArXiv, abs/2003.01668.
<a
href="https://proceedings.mlsys.org/paper/2020/file/a2557a7b2e94197ff767970b67041697-Paper.pdf"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
<li>
<h2 class="pbindex__top__heading">Map 5</h2>
<p class="usa-intro pbindex__top__title">
Impacts to individuals, groups, communities, organizations, and society
are characterized.
</p>
<ul class="pbindex__subcat-ul">
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%205.1"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 5.1"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 5.1
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Likelihood and magnitude of each identified impact (both
potentially beneficial and harmful) based on expected use, past
uses of AI systems in similar contexts, public incident reports,
feedback from those external to the team that developed or
deployed the AI system, or other data are identified and
documented.
</p>
<div
id="button-MAP 5.1"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
                          AI actors can evaluate, document, and triage the likelihood
                          of AI system impacts identified in Map 5.1. Likelihood
                          estimates may then be assessed and judged for go/no-go
decisions about deploying an AI system. If an organization
decides to proceed with deploying the system, the
likelihood and magnitude estimates can be used to assign
TEVV resources appropriate for the risk level.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
                            Establish assessment scales for measuring AI systems
                            impact. Scales may be qualitative, such as
                            red-amber-green (RAG), or may entail simulations or
                            econometric approaches. Document and apply scales
                            uniformly across the organizations AI portfolio. An
                            illustrative RAG-scale sketch, in Python, follows this
                            list.
</li>
<li>
Apply TEVV regularly at key stages in the AI lifecycle,
connected to system impacts and frequency of system
updates.
</li>
<li>
Identify and document likelihood and magnitude of system
benefits and negative impacts in relation to
trustworthiness characteristics.
</li>
<li>
Establish processes for red teaming to identify and
connect system limitations to AI lifecycle stage(s) and
                            potential downstream impacts.
</li>
</ul>
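                          <p>
                            A minimal, hypothetical sketch (in Python) of a
                            qualitative likelihood-by-magnitude assessment scale
                            mapped to a red-amber-green (RAG) rating. The labels
                            and thresholds shown are assumptions an organization
                            would define for itself, not Playbook-defined values.
                          </p>
                          <pre><code>
# Hypothetical sketch only: combining qualitative likelihood and magnitude
# ratings into a red-amber-green (RAG) impact rating. Labels and thresholds
# are illustrative assumptions.
LIKELIHOOD = {"rare": 1, "possible": 2, "likely": 3}
MAGNITUDE = {"minor": 1, "moderate": 2, "severe": 3}


def rag_rating(likelihood: str, magnitude: str) -> str:
    """Map a likelihood/magnitude pair to a RAG rating."""
    score = LIKELIHOOD[likelihood] * MAGNITUDE[magnitude]
    if score >= 6:
        return "red"    # e.g., escalate and assign additional TEVV resources
    if score >= 3:
        return "amber"  # e.g., monitor and document
    return "green"      # e.g., accept with routine monitoring


print(rag_rating("likely", "severe"))  # -> red
</code></pre>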
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>Which population(s) does the AI system impact?</li>
<li>
What assessments has the entity conducted on
                            trustworthiness characteristics, for example data
security and privacy impacts associated with the AI
system?
</li>
<li>
Can the AI system be tested by independent third
parties?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Datasheets for Datasets.
<a href="http://arxiv.org/abs/1803.09010">URL</a>
</li>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
AI policies and initiatives, in Artificial Intelligence
in Society, OECD, 2019.
<a
href="https://www.oecd.org/publications/artificial-intelligence-in-society-eedfee77-en.htm"
>URL</a
>
</li>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI - 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Emilio Gómez-González and Emilia Gómez. 2020. Artificial
intelligence in medicine and healthcare. Joint Research
Centre (European Commission).
<a
href="https://op.europa.eu/en/publication-detail/-/publication/b4b5db47-94c0-11ea-aac4-01aa75ed71a1/language-en"
>URL</a
>
</p>
<p>
Artificial Intelligence Incident Database. 2022.
<a href="https://incidentdatabase.ai/?lang=en">URL</a>
</p>
<p>
Anthony M. Barrett, Dan Hendrycks, Jessica Newman and
Brandie Nonnecke. “Actionable Guidance for
High-Consequence AI Risk Management: Towards Standards
Addressing AI Catastrophic Risks". ArXiv abs/2206.08966
(2022) <a href="https://arxiv.org/abs/2206.08966">URL</a>
</p>
<p>
                        Ganguli, D., et al. (2022). Red Teaming Language Models to
                        Reduce Harms: Methods, Scaling Behaviors, and Lessons
                        Learned. arXiv:2209.07858.
                        <a href="https://arxiv.org/abs/2209.07858">URL</a>
</p>
<p>
                        Upol Ehsan, Q. Vera Liao, Samir Passi, Mark O. Riedl, and
                        Hal Daumé. 2024. Seamful XAI: Operationalizing Seamful
                        Design in Explainable AI. Proc. ACM Hum.-Comput. Interact.
                        8, CSCW1, Article 119.
                        <a href="https://doi.org/10.1145/3637396">URL</a>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
<li>
<div
data-allow-multiple="data-allow-multiple"
class="usa-accordion usa-accordion-multiselectable pbindex__outer-accordion__container"
>
<h3
id="Map%205.2"
class="usa-accordion__heading pbindex__outer-accordion__heading"
>
<button
type="button"
aria-expanded="false"
aria-controls="button-MAP 5.2"
class="usa-accordion__button pbindex__outer-accordion__button"
>
MAP 5.2
</button>
</h3>
<p class="pbindex__outer-accordion__description">
Practices and personnel for supporting regular engagement with
relevant AI actors and integrating feedback about positive,
negative, and unanticipated impacts are in place and documented.
</p>
<div
id="button-MAP 5.2"
class="usa-accordion__content usa-prose pbindex__outer-accordion__content pbindex__outer-accordion__content__withul"
>
<ul class="pbindex__collection__ul usa-collection">
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">About</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
AI systems are socio-technical in nature and can have
positive, neutral, or negative implications that extend
                          beyond their stated purpose. Negative impacts can be
                          wide-ranging and affect individuals, groups, communities,
organizations, and society, as well as the environment and
national security.
</p>
<p>
Organizations can create a baseline for system monitoring
to increase opportunities for detecting emergent risks.
After an AI system is deployed, engaging different
stakeholder groups who may be aware of, or experience,
benefits or negative impacts that are unknown to AI actors
involved in the design, development and deployment
activities allows organizations to understand and
monitor system benefits and potential negative impacts
more readily.
</p>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">Suggested Actions</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<ul>
<li>
Establish and document stakeholder engagement processes
at the earliest stages of system formulation to identify
potential impacts from the AI system on individuals,
groups, communities, organizations, and society.
</li>
<li>
Employ methods such as value sensitive design (VSD) to
identify misalignments between organizational and
societal values, and system implementation and impact.
</li>
<li>
Identify approaches to engage, capture, and incorporate
input from system end users and other key stakeholders
to assist with continuous monitoring for potential
impacts and emergent risks.
</li>
<li>
Incorporate quantitative, qualitative, and mixed methods
in the assessment and documentation of potential impacts
to individuals, groups, communities, organizations, and
society.
</li>
<li>
Identify a team (internal or external) that is
independent of AI design and development functions to
assess AI system benefits, positive and negative impacts
and their likelihood and magnitude.
</li>
<li>
Evaluate and document stakeholder feedback to assess
potential impacts for actionable insights regarding
trustworthiness characteristics and changes in design
approaches and principles.
</li>
<li>
Develop TEVV procedures that incorporate socio-technical
elements and methods and plan to normalize across
organizational culture. Regularly review and refine TEVV
processes.
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">
Transparency and Documentation
</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<h5>Organizations can document the following</h5>
<ul>
<li>
If the AI system relates to people, does it unfairly
advantage or disadvantage a particular social group? In
what ways? How was this managed?
</li>
<li>
If the AI system relates to other ethically protected
groups, have appropriate obligations been met? (e.g.,
medical data might include information collected from
animals)
</li>
<li>
If the AI system relates to people, could this dataset
                            expose people to harm or legal action? (e.g., financial,
                            social, or otherwise) What was done to mitigate or reduce
the potential for harm?
</li>
</ul>
<h5>AI Transparency Resources</h5>
<ul>
<li>
Datasheets for Datasets.
<a href="http://arxiv.org/abs/1803.09010">URL</a>
</li>
<li>
GAO-21-519SP: AI Accountability Framework for Federal
Agencies &amp; Other Entities.
<a href="https://www.gao.gov/products/gao-21-519sp"
>URL</a
>
</li>
<li>
AI policies and initiatives, in Artificial Intelligence
in Society, OECD, 2019.
<a
href="https://www.oecd.org/publications/artificial-intelligence-in-society-eedfee77-en.htm"
>URL</a
>
</li>
<li>
Intel.gov: AI Ethics Framework for Intelligence
Community - 2020.
<a
href="https://www.intelligence.gov/artificial-intelligence-ethics-framework-for-the-intelligence-community"
>URL</a
>
</li>
<li>
Assessment List for Trustworthy AI (ALTAI) - The
High-Level Expert Group on AI - 2019.
<a href="https://altai.insight-centre.org/">LINK</a>,
<a
href="https://digital-strategy.ec.europa.eu/en/library/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment"
>URL</a
>
</li>
</ul>
</div>
</li>
<li class="usa-collection__item">
<div class="pbindex__content_section_heading_container">
<h4 class="usa-collection__heading">References</h4>
</div>
<div
class="usa-collection__body pbindex__content_section_contentp_container"
>
<p>
Susanne Vernim, Harald Bauer, Erwin Rauch, et al. 2022. A
value sensitive design approach for designing AI-based
worker assistance systems in manufacturing. Procedia
                        Comput. Sci. 200, C (2022), 505-516.
<a href="https://doi.org/10.1016/j.procs.2022.01.248"
>URL</a
>
</p>
<p>
Harini Suresh and John V. Guttag. 2020. A Framework for
Understanding Sources of Harm throughout the Machine
Learning Life Cycle. arXiv:1901.10002. Retrieved from
<a href="https://arxiv.org/abs/1901.10002">URL</a>
</p>
<p>
Margarita Boyarskaya, Alexandra Olteanu, and Kate
Crawford. 2020. Overcoming Failures of Imagination in AI
Infused System Development and Deployment.
arXiv:2011.13416.
<a href="https://arxiv.org/abs/2011.13416">URL</a>
</p>
<p>
Konstantinia Charitoudi and Andrew Blyth. A
Socio-Technical Approach to Cyber Risk Management and
Impact Assessment. Journal of Information Security 4, 1
(2013), 33-41.
<a href="http://dx.doi.org/10.4236/jis.2013.41005">URL</a>
</p>
<p>
Raji, I.D., Smart, A., White, R.N., Mitchell, M., Gebru,
T., Hutchinson, B., Smith-Loud, J., Theron, D., &amp;
Barnes, P. (2020). Closing the AI accountability gap:
defining an end-to-end framework for internal algorithmic
auditing. Proceedings of the 2020 Conference on Fairness,
Accountability, and Transparency.
</p>
<p>
Emanuel Moss, Elizabeth Anne Watkins, Ranjit Singh,
Madeleine Clare Elish, &amp; Jacob Metcalf. 2021.
                        Assembling Accountability: Algorithmic Impact Assessment
                        for the Public Interest. Data &amp; Society. Accessed
                        July 14, 2022.
<a
href="https://datasociety.net/library/assembling-accountability-algorithmic-impact-assessment-for-the-public-interest/"
>URL</a
>
</p>
<p>
Shari Trewin (2018). AI Fairness for People with
Disabilities: Point of View. ArXiv, abs/1811.10670.
<a href="https://arxiv.org/pdf/1811.10670.pdf">URL</a>
</p>
<p>
Ada Lovelace Institute. 2022. Algorithmic Impact
Assessment: A Case Study in Healthcare. Accessed July 14,
2022.
<a
href="https://www.adalovelaceinstitute.org/report/algorithmic-impact-assessment-case-study-healthcare/"
>URL</a
>
</p>
<p>
Microsoft Responsible AI Impact Assessment Template. 2022.
Accessed July 14, 2022.
<a
href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Template.pdf"
>URL</a
>
</p>
<p>
Microsoft Responsible AI Impact Assessment Guide. 2022.
Accessed July 14, 2022.
<a
href="https://blogs.microsoft.com/wp-content/uploads/prod/sites/5/2022/06/Microsoft-RAI-Impact-Assessment-Guide.pdf"
>URL</a
>
</p>
<p>
Microsoft Responsible AI Standard, v2.
<a
href="https://query.prod.cms.rt.microsoft.com/cms/api/am/binary/RE4ZPmV"
>URL</a
>
</p>
<p>
Microsoft Research AI Fairness Checklist.
<a
href="https://www.microsoft.com/en-us/research/project/ai-fairness-checklist/"
>URL</a
>
</p>
<p>
PEAT AI &amp; Disability Inclusion Toolkit Risks of Bias
and Discrimination in AI Hiring Tools.
<a
href="https://www.peatworks.org/ai-disability-inclusion-toolkit/risks-of-bias-and-discrimination-in-ai-hiring-tools/"
>URL</a
>
</p>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
</li>
</ul>
</div>
</div>
</main>
</div>
</div>
</div>
<footer class="nist-footer padding-bottom-4">
<div class="grid-container nist-footer__info">
<div class="grid-row">
<div class="tablet:grid-col-6 padding-left-0">
<div class="nist-footer__logo">
<a href="https://www.nist.gov/"
title="National Institute of Standards and Technology"
class="nist-footer__logo-link"
rel="home">
<img src="/img/nist_logo_brand_white.svg"
role="img"
alt="National Institute of Standards and Technology logo"
width="300px"
height="42px" />
</a>
</div>
<div class="nist-footer__contact">
<h3 class="nist-footer__contact-heading">HEADQUARTERS</h3>
<address>
100 Bureau Drive
<br>
Gaithersburg, MD 20899
<br>
<a href="tel:301-975-2000">301-975-2000</a>
</address>
<p>
<a href="mailto:do-webmaster@nist.gov">Webmaster</a> | <a href="https://www.nist.gov/about-nist/contact-us">Contact Us</a> | <a href="https://www.nist.gov/visit">Our Other Offices</a>
</p>
</div>
</div>
<div class="tablet:grid-col-6">
<div class="nist-footer__social-links grid-row">
<a class="nist-social nist-social--x-white ext"
href=" https://x.com/NIST"
data-extlink="">
<span>X</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/x.svg"
alt="X" />
</a>
<a class="nist-social nist-social--facebook-white ext"
href=" https://www.facebook.com/NIST"
data-extlink="">
<span>Facebook</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/facebook.svg"
alt="Facebook" />
</a>
<a class="nist-social nist-social--linkedin-white ext"
href=" https://www.linkedin.com/company/nist"
data-extlink="">
<span>LinkedIn</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/linkedin.svg"
alt="LinkedIn" />
</a>
<a class="nist-social nist-social--instagram-white ext"
href=" https://www.instagram.com/nist/"
data-extlink="">
<span>Instagram</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/instagram.svg"
alt="Instagram" />
</a>
<a class="nist-social nist-social--youtube-white ext"
href=" https://www.youtube.com/NIST"
data-extlink="">
<span>YouTube</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/youtube.svg"
alt="YouTube" />
</a>
<a class="nist-social nist-social--rss-white"
href=" https://www.nist.gov/news-events/nist-rss-feeds">
<span>RSS Feed</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/rss_feed.svg"
alt="RSS" />
</a>
<a class="nist-social nist-social--envelope-white ext"
href=" https://public.govdelivery.com/accounts/USNIST/subscriber/new"
data-extlink="">
<span>Mailing List</span>
<img class="nist-social-footer-icon filter-white"
src="/img/usa-icons/mail.svg"
alt="Subscribe to Mailing List" />
</a>
</div>
<br>
<div class="nist-footer__feedback grid-row">
How are we doing? <a class="margin-left-2 usa-button"
rel="nofollow"
href="https://www.nist.gov/form/nist-gov-feedback?destination=/national-institute-standards-and-technology"
title="Provide feedback">Feedback</a>
</div>
</div>
</div>
</div>
<div class="nist-footer__inner">
<div class="nist-footer__menu" role="navigation">
<ul>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/privacy-policy">Site Privacy</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/oism/accessibility">Accessibility</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/privacy">Privacy Program</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/oism/copyrights">Copyrights</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.commerce.gov/vulnerability-disclosure-policy">Vulnerability Disclosure</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/no-fear-act-policy">No Fear Act Policy</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/foia">FOIA</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.nist.gov/environmental-policy-statement">Environmental Policy</a>
</li>
<li class="nist-footer__menu-item ">
<a href="https://www.nist.gov/summary-report-scientific-integrity">Scientific Integrity</a>
</li>
<li class="nist-footer__menu-item ">
<a href="https://www.nist.gov/nist-information-quality-standards">Information Quality Standards</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.commerce.gov/">Commerce.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.science.gov/">Science.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://www.usa.gov/">USA.gov</a>
</li>
<li class="nist-footer__menu-item">
<a href="https://vote.gov/">Vote.gov</a>
</li>
</ul>
</div>
</div>
</footer>
</div>
<script nonce="VGGJL6qR72rqyokScTQSXA=="
defer
src="/dist/uswds.min.fce4bb2752d4.js"></script>
<script nonce="VGGJL6qR72rqyokScTQSXA=="
type="text/javascript"
src="/dist/application.a74e10c27c89.js"
defer></script>
<script nonce="VGGJL6qR72rqyokScTQSXA==" src="https://www.googletagmanager.com/gtag/js?id=G-3CZ2H64ZSV" async></script>
<script nonce="VGGJL6qR72rqyokScTQSXA==" type="application/javascript" defer>window.dataLayer = window.dataLayer || [];
function gtag() {
dataLayer.push(arguments);
}
gtag('js', new Date());
gtag('config', 'G-3CZ2H64ZSV');</script>
<noscript>
<style nonce="VGGJL6qR72rqyokScTQSXA==" >
iframe {
display: none;
visibility: hidden;
}
</style>
<iframe src="https://www.googletagmanager.com/ns.html?id=G-3CZ2H64ZSV"
height="0"
width="0"
style="display:none;
visibility:hidden"></iframe>
</noscript>
</body>
</html>