Merge branch 'main' into 1024-hud-housing-reformulation

Saran Ahluwalia 2021-12-13 05:50:39 -05:00 committed by GitHub
commit 0be4899895
89 changed files with 4169 additions and 10598 deletions

View file

@ -4,8 +4,3 @@ Feature: All links on the dataset cards should be functional
Given I am on the "Methodology" page
When I look for the "Datasets used in methodology" CTA
Then All links under "Datasets used in methodology" should work
Scenario: If I click on any link in the additional indicators dataset, they should work
Given I am on the "Methodology" page
When I look for the "Additional Indicators" CTA
Then All links under "Additional Indicators" should work

View file

@ -0,0 +1,8 @@
@use '../../styles/design-system.scss' as *;
.categoriesContainer {
display: flex;
justify-content: space-between;
flex-wrap: wrap;
@include u-margin-top(4);
}

View file

@ -0,0 +1,12 @@
declare namespace CategoriesNamespace {
export interface ICategoriesScss {
categoriesContainer: string;
}
}
declare const CategoriesScssModule: CategoriesNamespace.ICategoriesScss & {
/** WARNING: Only available when `css-loader` is used without `style-loader` or `mini-css-extract-plugin` */
locals: CategoriesNamespace.ICategoriesScss;
};
export = CategoriesScssModule;

View file

@ -0,0 +1,16 @@
import * as React from 'react';
import {render} from '@testing-library/react';
import {LocalizedComponent} from '../../test/testHelpers';
import Categories from './Categories';
describe('rendering of the Categories', () => {
const {asFragment} = render(
<LocalizedComponent>
<Categories />
</LocalizedComponent>,
);
it('checks if component renders', () => {
expect(asFragment()).toMatchSnapshot();
});
});

View file

@ -0,0 +1,43 @@
import React from 'react';
import {Grid} from '@trussworks/react-uswds';
import IndicatorCategory from '../CategoryCard';
import J40MainGridContainer from '../J40MainGridContainer';
import * as METHODOLOGY_COPY from '../../data/copy/methodology';
import * as styles from './Categories.module.scss';
const categories = [
METHODOLOGY_COPY.CATEGORIES.CLIMATE_CHANGE,
METHODOLOGY_COPY.CATEGORIES.CLEAN_ENERGY,
METHODOLOGY_COPY.CATEGORIES.CLEAN_TRANSPORT,
METHODOLOGY_COPY.CATEGORIES.AFFORDABLE_HOUSING,
METHODOLOGY_COPY.CATEGORIES.LEGACY_POLLUTION,
METHODOLOGY_COPY.CATEGORIES.CLEAN_WATER,
METHODOLOGY_COPY.CATEGORIES.HEALTH_BURDENS,
METHODOLOGY_COPY.CATEGORIES.WORKFORCE_DEV,
];
const Categories = () => {
return (
<>
<J40MainGridContainer className={styles.categoriesContainer}>
<Grid row>
<Grid col={12}>
<h2>{METHODOLOGY_COPY.CATEGORY.HEADING}</h2>
</Grid>
</Grid>
</J40MainGridContainer>
<J40MainGridContainer className={styles.categoriesContainer}>
{
categories.map((category, index) => <IndicatorCategory key={index} categoryInfo={category} />)
}
</J40MainGridContainer>
</>
);
};
export default Categories;

View file

@ -0,0 +1,403 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of the Categories checks if component renders 1`] = `
<DocumentFragment>
<div
class="grid-container-desktop-lg"
data-testid="gridContainer"
>
<div
class="grid-row"
data-testid="grid"
>
<div
class="grid-col-12"
data-testid="grid"
>
<h2>
Categories
</h2>
</div>
</div>
</div>
<div
class="grid-container-desktop-lg"
data-testid="gridContainer"
>
<div>
<h3>
Climate change
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#exp-agr-loss-rate"
>
expected agriculture loss rate
</a>
OR
<a
href="#exp-bld-loss-rate"
>
expected building loss rate
</a>
OR
<a
href="#exp-pop-loss-rate"
>
expected population loss rate
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Clean energy and energy efficiency
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#energy-burden"
>
energy cost burden score
</a>
OR
<a
href="#pm-25"
>
PM2.5
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Clean transportation
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#diesel-pm"
>
diesel particulate matter
</a>
or
<a
href="#traffic-vol"
>
traffic proximity and volume
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Affordable and sustainable housing
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#lead-paint"
>
lead paint
</a>
AND
<a
href="#median-home"
>
the median home value
</a>
is less than
90th percentile OR at or above the 90th percentile for the
<a
href="#house-burden"
>
housing cost burden
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Reduction and remediation of legacy pollution
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#prox-haz"
>
proximity to hazardous waste facilities
</a>
OR
<a
href="#prox-npl"
>
proximity to NPL sites
</a>
OR
<a
href="#prox-rmp"
>
proximity to RMP sites
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Critical clean water and waste infrastructure
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#waste-water"
>
wastewater discharge
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Health burdens
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#asthma"
>
asthma
</a>
OR
<a
href="#diabetes"
>
diabetes
</a>
OR
<a
href="#heart-disease"
>
heart disease
</a>
OR
<a
href="#life-exp"
>
low life expectancy
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
<div>
<h3>
Training and workforce development
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#low-med-inc"
>
low median income relative to area median income
</a>
OR
at or above the 90th percentile for
<a
href="#ling-iso"
>
linguistic isolation
</a>
OR
<a
href="#unemploy"
>
unemployment
</a>
OR
for percentage individuals in households at or below 100% federal
<a
href="#poverty"
>
poverty
</a>
level at or above 90%
</p>
<p>
<strong>
AND
</strong>
where
<a
href="#high-school"
>
the high school degree achievement rates
</a>
for adults 25 years and older is less than 90%
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
</div>
</DocumentFragment>
`;

View file

@ -0,0 +1,3 @@
import Categories from './Categories';
export default Categories;

View file

@ -0,0 +1,15 @@
@use '../../styles/design-system.scss' as *;
@mixin baseCard {
@include u-padding-top(0);
@include u-padding-right(6);
@include u-padding-bottom(6);
@include u-padding-left(6);
@include u-margin-bottom(6);
max-width: 34rem;
}
.categoryCard {
@include baseCard;
@include u-bg('blue-cool-5');
}

View file

@ -0,0 +1,12 @@
declare namespace IndicatorCategoryNamespace {
export interface IIndicatorCategoryScss {
categoryCard: string;
}
}
declare const IndicatorCategoryScssModule: IndicatorCategoryNamespace.IIndicatorCategoryScss & {
/** WARNING: Only available when `css-loader` is used without `style-loader` or `mini-css-extract-plugin` */
locals: IndicatorCategoryNamespace.IIndicatorCategoryScss;
};
export = IndicatorCategoryScssModule;

View file

@ -0,0 +1,18 @@
import * as React from 'react';
import {render} from '@testing-library/react';
import {LocalizedComponent} from '../../test/testHelpers';
import CategoryCard from './CategoryCard';
import * as METHODOLOGY_COPY from '../../data/copy/methodology';
describe('rendering of the CategoryCard', () => {
const {asFragment} = render(
<LocalizedComponent>
<CategoryCard categoryInfo={METHODOLOGY_COPY.CATEGORIES.CLIMATE_CHANGE}/>
</LocalizedComponent>,
);
it('checks if component renders', () => {
expect(asFragment()).toMatchSnapshot();
});
});

View file

@ -0,0 +1,32 @@
import React from 'react';
import * as styles from './CategoryCard.module.scss';
interface ICategoryInterface {
categoryInfo: {
TITLE: JSX.Element,
IF: JSX.Element,
AND: JSX.Element,
THEN: JSX.Element
}
}
const IndicatorCategory = ({categoryInfo}: ICategoryInterface) => {
return (
<div className={styles.categoryCard}>
<h3>
{categoryInfo.TITLE}
</h3>
<p>
{categoryInfo.IF}
</p>
<p>
{categoryInfo.AND}
</p>
<p>
{categoryInfo.THEN}
</p>
</div>
);
};
export default IndicatorCategory;

View file

@ -0,0 +1,53 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of the CategoryCard checks if component renders 1`] = `
<DocumentFragment>
<div>
<h3>
Climate change
</h3>
<p>
<strong>
IF
</strong>
at or above 90th percentile for
<a
href="#exp-agr-loss-rate"
>
expected agriculture loss rate
</a>
OR
<a
href="#exp-bld-loss-rate"
>
expected building loss rate
</a>
OR
<a
href="#exp-pop-loss-rate"
>
expected population loss rate
</a>
</p>
<p>
<strong>
AND
</strong>
is low income
<sup>
*
</sup>
</p>
<p>
<strong>
THEN
</strong>
the community is disadvantaged.
</p>
</div>
</DocumentFragment>
`;

View file

@ -0,0 +1,3 @@
import CategoryCard from './CategoryCard';
export default CategoryCard;

View file

@ -1,3 +1,4 @@
@use '../../styles/design-system.scss' as *;
@import '../utils.scss';
@mixin baseCard {
@ -8,7 +9,7 @@
.datasetCard {
@include baseCard;
background-color: white;
@include u-bg("gray-3");
}
.datasetCardAdditional {

View file

@ -5,15 +5,16 @@ import * as styles from './datasetCard.module.scss';
import * as METHODOLOGY_COPY from '../../data/copy/methodology';
interface IDatasetCardProps {
datasetCardProps: { [key:string]: string }
additionalIndicator: boolean
datasetCardProps: {
[key:string]: string
}
}
const DatasetCard = ({datasetCardProps, additionalIndicator}:IDatasetCardProps) => {
const DatasetCard = ({datasetCardProps}:IDatasetCardProps) => {
const intl = useIntl();
return (
<div className={additionalIndicator ? styles.datasetCardAdditional : styles.datasetCard}>
<div className={styles.datasetCard} id={datasetCardProps.domID}>
<h3 className={styles.datasetCardIndicator}>{datasetCardProps.indicator}</h3>
<div className={styles.datasetCardDescription}>
{datasetCardProps.description}
@ -22,23 +23,23 @@ const DatasetCard = ({datasetCardProps, additionalIndicator}:IDatasetCardProps)
<ul className={styles.datasetCardList}>
<li className={styles.datasetCardListItem}>
<span className={styles.datasetCardLabels}>
{intl.formatMessage(METHODOLOGY_COPY.DATASET_CARD_LABELS.SOURCE)}
{intl.formatMessage(METHODOLOGY_COPY.DATASET_CARD_LABELS.RESP_PARTY)}
</span>
<a href={datasetCardProps.dataSourceURL} target={'_blank'} rel="noreferrer">
{datasetCardProps.dataSourceLabel}
{datasetCardProps.respPartyLabel}
</a>
</li>
<li className={styles.datasetCardListItem}>
<span className={styles.datasetCardLabels}>
{intl.formatMessage(METHODOLOGY_COPY.DATASET_CARD_LABELS.RESOLUTION)}
</span>
{datasetCardProps.dataResolution}
</li>
<li className={styles.datasetCardListItem}>
<span className={styles.datasetCardLabels}>
{intl.formatMessage(METHODOLOGY_COPY.DATASET_CARD_LABELS.DATE_RANGE)}
</span>
{datasetCardProps.dataDateRange}
{datasetCardProps.dateRange}
</li>
<li className={styles.datasetCardListItem}>
<span className={styles.datasetCardLabels}>
{intl.formatMessage(METHODOLOGY_COPY.DATASET_CARD_LABELS.USED_IN)}
</span>
{datasetCardProps.usedIn}
</li>
</ul>
</div>

View file

@ -1,41 +1,43 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of additional indicator dataset card checks if component renders 1`] = `<DocumentFragment />`;
exports[`rendering of indicator dataset card checks if component renders 1`] = `
<DocumentFragment>
<div>
<div
id="low-income"
>
<h3>
Area Median Income
Low Income
</h3>
<div>
Median income of the census block group calculated as a percent
of the metropolitan area's or state's median income.
Percent of a block group's population in households where household income is at or below
200% of the federal poverty level.
</div>
<ul>
<li>
<span>
Data source:
Responsible Party:
</span>
<a
href="https://www.census.gov/programs-surveys/acs"
rel="noreferrer"
target="_blank"
>
Census's American Community Survey
Census's American Community Survey.
</a>
</li>
<li>
<span>
Data resolution:
Date range:
</span>
Census block group
2015-2019
</li>
<li>
<span>
Data date range:
Used in:
</span>
2015-2019
All methodologies except for training and workforce development
</li>
</ul>
</div>

View file

@ -8,19 +8,7 @@ import * as METHODOLOGY_COPY from '../../../data/copy/methodology';
describe('rendering of indicator dataset card', () => {
const {asFragment} = render(
<LocalizedComponent>
<DatasetCard key={0} datasetCardProps={METHODOLOGY_COPY.INDICATORS[0]} additionalIndicator={false}/>
</LocalizedComponent>,
);
it('checks if component renders', () => {
expect(asFragment()).toMatchSnapshot();
});
});
describe('rendering of additional indicator dataset card', () => {
const {asFragment} = render(
<LocalizedComponent>
<DatasetCard key={0} datasetCardProps={METHODOLOGY_COPY.ADDITIONAL_INDICATORS[0]} additionalIndicator={true}/>
<DatasetCard key={0} datasetCardProps={METHODOLOGY_COPY.INDICATORS[0]}/>
</LocalizedComponent>,
);

View file

@ -1,5 +1,14 @@
@use '../../styles/design-system.scss' as *;
.datasetCardsContainer {
@include u-margin-top(4);
display: flex;
justify-content: space-between;
flex-wrap: wrap;
}
.returnToTop {
display: flex;
justify-content: flex-end;
@include u-margin-bottom(4);
}

View file

@ -1,6 +1,7 @@
declare namespace DatasetContainerScssNamespace {
export interface IDatasetContainerScss {
datasetCardsContainer: string;
returnToTop: string;
}
}

View file

@ -1,5 +1,5 @@
import React from 'react';
import {useIntl} from 'gatsby-plugin-intl';
import {Link, useIntl} from 'gatsby-plugin-intl';
import {Grid} from '@trussworks/react-uswds';
import DatasetCard from '../DatasetCard';
@ -15,7 +15,7 @@ const DatasetContainer = () => {
return (
<>
<J40MainGridContainer fullWidth={true} blueBackground={true}>
<J40MainGridContainer fullWidth={true} blueBackground={false}>
<J40MainGridContainer
dataCy={`${hyphenizeString(METHODOLOGY_COPY.DATASETS.HEADING.defaultMessage)}-block`}>
@ -25,49 +25,26 @@ const DatasetContainer = () => {
</Grid>
</Grid>
<Grid row>
<Grid col={12} tablet={{col: 7}} className={'j40-mb-3'}>
<p>{intl.formatMessage(METHODOLOGY_COPY.DATASETS.INFO)}</p>
</Grid>
</Grid>
<div className={styles.datasetCardsContainer}>
{METHODOLOGY_COPY.INDICATORS.map((card) => <DatasetCard
key={card.indicator}
datasetCardProps={card}
additionalIndicator={false}
/>)}
</div>
</J40MainGridContainer>
</J40MainGridContainer>
<J40MainGridContainer fullWidth={true} blueBackground={false} >
<J40MainGridContainer
dataCy={`${hyphenizeString(METHODOLOGY_COPY.DATASETS.ADDITIONAL_HEADING.defaultMessage)}-block`}>
<Grid row>
<Grid col={12}>
<h2>{intl.formatMessage(METHODOLOGY_COPY.DATASETS.ADDITIONAL_HEADING)}</h2>
<div className={styles.datasetCardsContainer}>
{METHODOLOGY_COPY.INDICATORS.map((card) => <DatasetCard
key={card.indicator}
datasetCardProps={card}
/>)}
</div>
</Grid>
</Grid>
<Grid row>
<Grid col={12} tablet={{col: 7}} className={'j40-mb-3'}>
<p>{intl.formatMessage(METHODOLOGY_COPY.DATASETS.ADDITIONAL_INFO)}</p>
</Grid>
</Grid>
<div className={styles.datasetCardsContainer}>
{METHODOLOGY_COPY.ADDITIONAL_INDICATORS.map((card) => <DatasetCard
key={card.indicator}
datasetCardProps={card}
additionalIndicator={true}
/>)}
<div className={styles.returnToTop}>
<Link to={`/methodology`}>
{METHODOLOGY_COPY.RETURN_TO_TOP.LINK}
</Link>
</div>
</J40MainGridContainer>
</J40MainGridContainer>
</>
);
};

View file

@ -1,4 +1,4 @@
$primary-color: #112f4e;
@use '../../styles/design-system.scss' as *;
.downloadBoxContainer {
@ -6,7 +6,7 @@ $primary-color: #112f4e;
margin: auto;
.downloadBox {
background-color: $primary-color;
@include u-bg('blue-80v');
border-radius: 6px 6px;
.downloadBoxTextBox {
@ -15,12 +15,15 @@ $primary-color: #112f4e;
flex-direction: column;
.downloadBoxTitle {
font-weight: bold;
margin-bottom: 10px;
@include typeset('sans', 'xs', 3);
@include u-text('semibold');
@include u-margin-bottom(2);
}
.downloadBoxText {
margin-bottom: 20px;
@include typeset('sans', 'xs', 3);
@include u-margin-bottom(4);
span {
font-style: italic;
}
@ -42,8 +45,8 @@ $primary-color: #112f4e;
}
.downloadBoxButton{
background-color: white;
color: $primary-color;
@include u-bg('white');
@include u-color('blue-80v');
display: flex;
.downloadPacketText {

View file

@ -0,0 +1,20 @@
@use '../../styles/design-system.scss' as *;
.lowIncomeContainer {
border: 1px solid #DFE1E2;
@include u-margin-top(4);
@include u-padding-left(4);
@include u-padding-right(3);
@include u-padding-bottom(4);
.lowIncomeTitle {
@include typeset('sans', 'xs', 3);
@include u-text('semibold');
}
.lowIncomeText {
@include typeset('sans', 'xs', 3);
@include u-text('light');
}
}

View file

@ -0,0 +1,14 @@
declare namespace LowIncomeNamespace {
export interface ILowIncomeScss {
lowIncomeContainer: string;
lowIncomeTitle: string;
lowIncomeText: string;
}
}
declare const LowIncomeScssModule: LowIncomeNamespace.ILowIncomeScss & {
/** WARNING: Only available when `css-loader` is used without `style-loader` or `mini-css-extract-plugin` */
locals: LowIncomeNamespace.ILowIncomeScss;
};
export = LowIncomeScssModule;

View file

@ -0,0 +1,16 @@
import * as React from 'react';
import {render} from '@testing-library/react';
import {LocalizedComponent} from '../../test/testHelpers';
import LowIncome from './LowIncome';
describe('rendering of the LowIncome', () => {
const {asFragment} = render(
<LocalizedComponent>
<LowIncome />
</LocalizedComponent>,
);
it('checks if component renders', () => {
expect(asFragment()).toMatchSnapshot();
});
});

View file

@ -0,0 +1,26 @@
import React from 'react';
import {useIntl} from 'gatsby-plugin-intl';
import * as METHODOLOGY_COPY from '../../data/copy/methodology';
import * as styles from './LowIncome.module.scss';
const LowIncome = () => {
const intl = useIntl();
return (
<div className={styles.lowIncomeContainer}>
<p className={styles.lowIncomeTitle}>
<sup>*</sup>
{' '}
{intl.formatMessage(METHODOLOGY_COPY.LOW_INCOME.HEADING)}
</p>
<p className={styles.lowIncomeText}>
{intl.formatMessage(METHODOLOGY_COPY.LOW_INCOME.INFO)}
</p>
</div>
);
};
export default LowIncome;

View file

@ -0,0 +1,20 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of the LowIncome checks if component renders 1`] = `
<DocumentFragment>
<div>
<p>
<sup>
*
</sup>
Low Income
</p>
<p>
At or above 65th percentile for percent of census tract population of households where household
income is at or below 200% of the federal poverty level
</p>
</div>
</DocumentFragment>
`;

View file

@ -0,0 +1,3 @@
import LowIncome from './LowIncome';
export default LowIncome;

View file

@ -0,0 +1,12 @@
@use '../../styles/design-system.scss' as *;
.formulaContainer {
@include u-margin-top(5);
p:not(:first-child) {
font-style: italic;
span {
@include u-text('bold');
}
}
}

View file

@ -0,0 +1,12 @@
declare namespace MethodologyFormulaNamespace {
export interface IMethodologyFormulaScss {
formulaContainer: string;
}
}
declare const MethodologyFormulaScssModule: MethodologyFormulaNamespace.IMethodologyFormulaScss & {
/** WARNING: Only available when `css-loader` is used without `style-loader` or `mini-css-extract-plugin` */
locals: MethodologyFormulaNamespace.IMethodologyFormulaScss;
};
export = MethodologyFormulaScssModule;

View file

@ -0,0 +1,16 @@
import * as React from 'react';
import {render} from '@testing-library/react';
import {LocalizedComponent} from '../../test/testHelpers';
import MethodologyFormula from './MethodologyFormula';
describe('rendering of the MethodologyFormula', () => {
const {asFragment} = render(
<LocalizedComponent>
<MethodologyFormula />
</LocalizedComponent>,
);
it('checks if component renders', () => {
expect(asFragment()).toMatchSnapshot();
});
});

View file

@ -0,0 +1,35 @@
import React from 'react';
import {useIntl} from 'gatsby-plugin-intl';
import * as METHODOLOGY_COPY from '../../data/copy/methodology';
import * as styles from './MethodologyFormula.module.scss';
// The site shows the formula used in the methodology. The constants seen
// below aim to capture the three parts of that formula. These are not
// reserved words.
const MethodologyFormula = () => {
const intl = useIntl();
return (
<section className={styles.formulaContainer}>
<p>
{intl.formatMessage(METHODOLOGY_COPY.PAGE.FORMULA_INTRO)}
</p>
<p>
{METHODOLOGY_COPY.FORMULA.IF}
</p>
<p>
{METHODOLOGY_COPY.FORMULA.AND}
</p>
<p>
{METHODOLOGY_COPY.FORMULA.THEN}
</p>
</section>
);
};
export default MethodologyFormula;

View file

@ -0,0 +1,31 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of the MethodologyFormula checks if component renders 1`] = `
<DocumentFragment>
<section>
<p>
Under the current formula, a census tract will be considered disadvantaged:
</p>
<p>
<span>
IF
</span>
it is above the threshold for one or more climate or environmental indicator
</p>
<p>
<span>
AND
</span>
it is above the threshold for one or more socioeconomic indicator
</p>
<p>
<span>
THEN
</span>
the community is considered disadvantaged.
</p>
</section>
</DocumentFragment>
`;
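Conceptually, the formula rendered in this snapshot reduces to two boolean tests. A schematic sketch of that logic (illustrative only, not project code):

```
# Schematic of the rendered formula (illustrative, not the project's code).
def is_disadvantaged(env_percentiles, socio_percentiles, threshold=0.90):
    above_env = any(p >= threshold for p in env_percentiles)
    above_socio = any(p >= threshold for p in socio_percentiles)
    return above_env and above_socio
```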

View file

@ -0,0 +1,3 @@
import MethodologyFormula from './MethodologyFormula';
export default MethodologyFormula;

View file

@ -1,125 +0,0 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`rendering of the component should match the snapshot of the MapIntroduction component 1`] = `
<DocumentFragment>
<h2>
Methodology
</h2>
<div
class="grid-row"
data-testid="grid"
>
<div
class="grid-col-7"
data-testid="grid"
>
<p>
The methodology for identifying communities of focus is calculated at the census block group level. Census block geographical boundaries are determined by the U.S. Census Bureau once every ten years. This tool utilizes the census block boundaries from 2010.
</p>
<p>
The following describes the process for identifying communities of focus.
</p>
</div>
</div>
<ol
class="usa-process-list"
>
<li
class="usa-process-list__item"
>
<h3
class="usa-process-list__heading"
data-testid="processListHeading"
>
Gather datasets
</h3>
<p>
</p>
<p
class="flush"
>
The methodology includes the following inputs that are equally weighted.
</p>
<h4>
Percent of Area Median Income
</h4>
<p
class="flush"
>
If a census block group is in a metropolitan area, this value is the median income of the census block group calculated as a percent of the metropolitan area's median income.
</p>
<p>
If a census block group is not in a metropolitan area, this value is the median income of the census block group calculated as a percent of the state's median income.
</p>
<h4>
Percent of households below or at 100% of the federal poverty line
</h4>
This is the percent of households in a state with a household income
below or at 100% of the
<a
href="https://www.census.gov/topics/income-poverty/poverty/guidance/poverty-measures.html"
rel="noreferrer"
target="_blank"
>
federal poverty line
</a>
. This federal poverty line is calculated
based on the composition of each household (e.g., based on household size), but it does not vary geographically.
<h4>
The high school degree achievement rate for adults 25 years and older
</h4>
<p
class="flush"
>
The percent of individuals who are 25 or older who have received a high school degree.
</p>
</li>
<li
class="usa-process-list__item"
>
<h3
class="usa-process-list__heading"
data-testid="processListHeading"
>
Determine communities of focus
</h3>
<p>
</p>
<p
class="flush"
>
Under the existing formula, a census block group will be considered a community of focus if:
</p>
<p>
(The median income is less than 80% of the area median income
</p>
<p
class="flush"
>
OR
</p>
<p
class="flush"
>
households living in poverty (at or below 100% of the federal poverty level) is greater than 20%)
</p>
<p
class="flush"
>
AND
</p>
<p
class="flush"
>
The high school degree achievement rate for adults 25 years and older is greater than 95%
</p>
</li>
</ol>
</DocumentFragment>
`;

View file

@ -1,28 +0,0 @@
import * as React from 'react';
import {render} from '@testing-library/react';
import ScoreStepsList from './scoreStepsList';
import {LocalizedComponent} from '../test/testHelpers';
// TODO: Move this to a location where it will detect console errors on all tests
// See ticket: #550
beforeAll(() => {
jest.spyOn(global.console, 'error').mockImplementation((...params) => {
console.error(params);
});
});
describe('rendering of the component', () => {
const {asFragment} = render(
<LocalizedComponent>
<ScoreStepsList/>
</LocalizedComponent>,
);
it('should match the snapshot of the MapIntroduction component', () => {
expect(asFragment()).toMatchSnapshot();
});
it('No console errors', () => {
expect(console.error).toBeCalledTimes(0);
});
});

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View file

@ -2,11 +2,14 @@ import * as React from 'react';
import {Grid} from '@trussworks/react-uswds';
import {useIntl} from 'gatsby-plugin-intl';
import Categories from '../components/Categories';
import DatasetContainer from '../components/DatasetContainer';
import DownloadPacket from '../components/DownloadPacket';
import J40MainGridContainer from '../components/J40MainGridContainer';
import MethodologyFormula from '../components/MethodologyFormula';
import Layout from '../components/layout';
import ScoreStepsList from '../components/scoreStepsList';
import LowIncome from '../components/LowIncome';
// import ScoreStepsList from '../components/scoreStepsList';
import * as METHODOLOGY_COPY from '../data/copy/methodology';
@ -14,7 +17,6 @@ interface MethodPageProps {
location: Location;
}
// markup
const IndexPage = ({location}: MethodPageProps) => {
const intl = useIntl();
@ -25,29 +27,44 @@ const IndexPage = ({location}: MethodPageProps) => {
<h1>{intl.formatMessage(METHODOLOGY_COPY.PAGE.HEADING)}</h1>
{/* First column */}
<Grid row gap className={'j40-mb-5'}>
<Grid col={12} tablet={{col: 6}}>
<Grid col={12} tablet={{col: 8}}>
<section>
<p>
{intl.formatMessage(METHODOLOGY_COPY.PAGE.DESCRIPTION)}
</p>
</section>
{/* Formula section */}
<MethodologyFormula />
{/* Category description */}
<section className={`j40-mt-7`}>
<p>
{intl.formatMessage(METHODOLOGY_COPY.PAGE.CATEGORY_TEXT)}
</p>
</section>
</Grid>
<Grid col={12} tablet={{col: 6}}>
{/* Second column */}
<Grid col={12} tablet={{col: 4}}>
<DownloadPacket />
<LowIncome />
</Grid>
</Grid>
</J40MainGridContainer>
<Categories />
<DatasetContainer/>
<J40MainGridContainer>
{/* <J40MainGridContainer>
<Grid row>
<Grid col>
<ScoreStepsList/>
</Grid>
</Grid>
</J40MainGridContainer>
</J40MainGridContainer> */}
</Layout>
);
};

View file

@ -81,14 +81,19 @@ p.flush {
@include j40-element('sm', 4, 'normal', 0);
}
// 24 pixel margin-bottom
.j40-mb-3 {
@include u-margin-bottom(3)
}
// 40 pixel margin-bottom
.j40-mb-5 {
@include u-margin-bottom(5)
}
// 24 pixel margin-bottom
.j40-mb-3 {
@include u-margin-bottom(3)
// 56 pixel margin-top
.j40-mt-7 {
@include u-margin-top(7)
}
.j40-footer-ceq-font {

View file

@ -319,13 +319,13 @@ Or use `false` for unneeded weights.
----------------------------------------
*/
$theme-font-weight-thin: false;
$theme-font-weight-thin: 100;
$theme-font-weight-light: 300;
$theme-font-weight-normal: 400;
$theme-font-weight-medium: false;
$theme-font-weight-semibold: false;
$theme-font-weight-medium: 500;
$theme-font-weight-semibold: 600;
$theme-font-weight-bold: 700;
$theme-font-weight-heavy: false;
$theme-font-weight-heavy: 800;
// If USWDS is generating your @font-face rules,
// should we generate all available weights

View file

@ -309,7 +309,7 @@ If you update the score in any way, it is necessary to create new pickles so tha
It starts with the `data_pipeline/etl/score/tests/sample_data/score_data_initial.csv`, which is the first two rows of the `score/full/usa.csv`.
To update this file, run a full score generation and then update the file as follows:
To update this file, run a full score generation, then open a Python shell from the `data-pipeline` directory (e.g. `poetry run python3`), and then update the file with the following commands:
```
import pickle
from pathlib import Path
@ -322,6 +322,8 @@ score_initial_df = pd.read_csv(score_csv_path, dtype={"GEOID10_TRACT": "string"}
score_initial_df.to_csv(data_path / "data_pipeline" / "etl" / "score" / "tests" / "sample_data" /"score_data_initial.csv", index=False)
```
Now you can move on to updating individual pickles for the tests. Note that it is helpful to do them in this order:
We have four pickle files that correspond to expected files:
- `score_data_expected.pkl`: Initial score without counties
- `score_transformed_expected.pkl`: Intermediate score with `etl._extract_score` and `etl._transform_score` applied. There's no file for this intermediate process, so we need to capture the pickle mid-process.
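As a rough sketch of what refreshing one of these pickles can look like from that same Python shell (the paths and the choice of `score_data_expected.pkl` are assumptions based on the description above, not the project's verbatim procedure):

```
import pandas as pd
from pathlib import Path

# Assumed layout, mirroring the paths used earlier in this section.
sample_data = (
    Path.cwd() / "data_pipeline" / "etl" / "score" / "tests" / "sample_data"
)

# Reload the two-row sample with the same dtype override, then store it as
# the expected pickle that the tests compare against.
df = pd.read_csv(
    sample_data / "score_data_initial.csv",
    dtype={"GEOID10_TRACT": "string"},
)
df.to_pickle(sample_data / "score_data_expected.pkl")
```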

View file

@ -81,7 +81,7 @@ TILES_SCORE_COLUMNS = [
field_names.DIABETES_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.ASTHMA_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.HEART_DISEASE_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.LIFE_EXPECTANCY_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.LOW_LIFE_EXPECTANCY_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.TRAFFIC_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.FEMA_RISK_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.ENERGY_BURDEN_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
@ -89,7 +89,7 @@ TILES_SCORE_COLUMNS = [
field_names.LEAD_PAINT_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.DIESEL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.PM25_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
field_names.POVERTY_LESS_THAN_200_FPL_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
@ -115,7 +115,7 @@ TILES_SCORE_FLOAT_COLUMNS = [
field_names.DIABETES_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.ASTHMA_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.HEART_DISEASE_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.LIFE_EXPECTANCY_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.LOW_LIFE_EXPECTANCY_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.TRAFFIC_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.FEMA_RISK_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.ENERGY_BURDEN_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
@ -123,7 +123,7 @@ TILES_SCORE_FLOAT_COLUMNS = [
field_names.LEAD_PAINT_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.DIESEL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.PM25_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
field_names.POVERTY_LESS_THAN_200_FPL_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
@ -137,7 +137,6 @@ DOWNLOADABLE_SCORE_INDICATOR_COLUMNS_BASIC = [
field_names.DIABETES_FIELD,
field_names.ASTHMA_FIELD,
field_names.HEART_DISEASE_FIELD,
field_names.LIFE_EXPECTANCY_FIELD,
field_names.TRAFFIC_FIELD,
field_names.FEMA_RISK_FIELD,
field_names.ENERGY_BURDEN_FIELD,
@ -149,11 +148,11 @@ DOWNLOADABLE_SCORE_INDICATOR_COLUMNS_BASIC = [
field_names.TOTAL_POP_FIELD,
]
# For every indicator above, we want to include percentile and min-max normalized variants also
# For every indicator above, we also want to include the percentile.
DOWNLOADABLE_SCORE_INDICATOR_COLUMNS_FULL = list(
pd.core.common.flatten(
[
[p, f"{p} (percentile)"]
[p, f"{p}{field_names.PERCENTILE_FIELD_SUFFIX}"]
for p in DOWNLOADABLE_SCORE_INDICATOR_COLUMNS_BASIC
]
)
@ -165,8 +164,15 @@ DOWNLOADABLE_SCORE_COLUMNS = [
field_names.COUNTY_FIELD,
field_names.STATE_FIELD,
field_names.SCORE_G_COMMUNITIES,
# Note: the reverse percentile fields get moved down here because
# we put the raw value in the download along with the *reversed* percentile.
# All other fields we put in f"{field_name}" and
# f"{field_name}{field_names.PERCENTILE_FIELD_SUFFIX}", which doesn't work for the
# reversed percentile fields.
field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD,
field_names.MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD
field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
field_names.LIFE_EXPECTANCY_FIELD,
field_names.LOW_LIFE_EXPECTANCY_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
*DOWNLOADABLE_SCORE_INDICATOR_COLUMNS_FULL,
]

View file

@ -404,9 +404,7 @@ class ScoreETL(ExtractTransformLoad):
field_names.POVERTY_LESS_THAN_150_FPL_FIELD,
field_names.POVERTY_LESS_THAN_200_FPL_FIELD,
field_names.AMI_FIELD,
field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD,
field_names.MEDIAN_INCOME_FIELD,
field_names.LIFE_EXPECTANCY_FIELD,
field_names.ENERGY_BURDEN_FIELD,
field_names.FEMA_RISK_FIELD,
field_names.URBAN_HEURISTIC_FIELD,
@ -439,7 +437,6 @@ class ScoreETL(ExtractTransformLoad):
field_names.CENSUS_UNEMPLOYMENT_FIELD_2010,
field_names.CENSUS_POVERTY_LESS_THAN_100_FPL_FIELD_2010,
field_names.CENSUS_DECENNIAL_TOTAL_POPULATION_FIELD_2009,
field_names.CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009,
field_names.EXTREME_HEAT_FIELD,
field_names.HEALTHY_FOOD_FIELD,
field_names.IMPENETRABLE_SURFACES_FIELD,
@ -468,7 +465,19 @@ class ScoreETL(ExtractTransformLoad):
ReversePercentile(
field_name=field_names.READING_FIELD,
low_field_name=field_names.LOW_READING_FIELD,
)
),
ReversePercentile(
field_name=field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD,
low_field_name=field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD,
),
ReversePercentile(
field_name=field_names.LIFE_EXPECTANCY_FIELD,
low_field_name=field_names.LOW_LIFE_EXPECTANCY_FIELD,
),
ReversePercentile(
field_name=field_names.CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009,
low_field_name=field_names.LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009,
),
]
columns_to_keep = (
@ -505,10 +514,6 @@ class ScoreETL(ExtractTransformLoad):
max_value = df_copy[numeric_column].max(skipna=True)
logger.info(
f"For data set {numeric_column}, the min value is {min_value} and the max value is {max_value}."
)
df_copy[f"{numeric_column}{field_names.MIN_MAX_FIELD_SUFFIX}"] = (
df_copy[numeric_column] - min_value
) / (max_value - min_value)
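For intuition about the `ReversePercentile` entries added above: each "low" field ranks tracts in the opposite direction, so the shared "at or above the Nth percentile" test flags tracts with *low* raw values. A minimal sketch of the idea (simplified and assumed; the actual percentile computation is not shown in this diff):

```
import pandas as pd

df = pd.DataFrame({"Life expectancy (years)": [71.0, 78.5, 82.3, 69.2]})

# Ordinary percentile rank: high life expectancy -> high percentile.
percentile = df["Life expectancy (years)"].rank(pct=True)

# Reversed percentile: high values now mean *low* life expectancy, so the
# shared ">= threshold" logic can be reused without inverting thresholds.
df["Low life expectancy (percentile)"] = 1 - percentile
```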

File diff suppressed because one or more lines are too long

View file

@ -8,6 +8,7 @@
"outputs": [],
"source": [
"import IPython\n",
"import os\n",
"import pandas as pd\n",
"import os, sys, pathlib\n",
"\n",
@ -29,12 +30,8 @@
"outputs": [],
"source": [
"# Load\n",
"path_to_score_file_1 = (\n",
" DATA_DIR / \"score\" / \"csv\" / \"full\" / \"usa1.csv\"\n",
")\n",
"path_to_score_file_2 = (\n",
" DATA_DIR / \"score\" / \"csv\" / \"full\" / \"usa2.csv\"\n",
")\n",
"path_to_score_file_1 = DATA_DIR / \"compare_two_score_csvs/usa (pre 970).csv\"\n",
"path_to_score_file_2 = DATA_DIR / \"compare_two_score_csvs/usa (post 970).csv\"\n",
"\n",
"score_1_df = pd.read_csv(\n",
" path_to_score_file_1,\n",
@ -56,7 +53,7 @@
"metadata": {},
"outputs": [],
"source": [
"# List columns in one but not the other \n",
"# List columns in one but not the other\n",
"score_2_df.columns.difference(score_1_df.columns)"
]
},
@ -69,11 +66,16 @@
"source": [
"# List rows in one but not the other\n",
"\n",
"if len(score_2_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]) != len(score_1_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]):\n",
"if len(score_2_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]) != len(\n",
" score_1_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]\n",
"):\n",
" print(\"Different lengths!\")\n",
"\n",
"print(\"Difference in tract IDs:\")\n",
"print(set(score_2_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]) ^ set(score_1_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME]))\n"
"print(\n",
" set(score_2_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME])\n",
" ^ set(score_1_df[ExtractTransformLoad.GEOID_TRACT_FIELD_NAME])\n",
")"
]
},
{
@ -83,8 +85,13 @@
"metadata": {},
"outputs": [],
"source": [
"# Join \n",
"merged_df = score_1_df.merge(score_2_df, how=\"outer\", on=ExtractTransformLoad.GEOID_TRACT_FIELD_NAME, suffixes=('_1', '_2'))\n",
"# Join\n",
"merged_df = score_1_df.merge(\n",
" score_2_df,\n",
" how=\"outer\",\n",
" on=ExtractTransformLoad.GEOID_TRACT_FIELD_NAME,\n",
" suffixes=(\"_1\", \"_2\"),\n",
")\n",
"merged_df"
]
},
@ -95,14 +102,32 @@
"metadata": {},
"outputs": [],
"source": [
"# Check each duplicate column: \n",
"# Check each duplicate column:\n",
"# Remove the suffix \"_1\"\n",
"duplicate_columns = [x[:-2] for x in merged_df.columns if \"_1\" in x]\n",
"\n",
"for duplicate_column in duplicate_columns:\n",
" print(f\"Checking duplicate column {duplicate_column}\")\n",
" if not merged_df[f\"{duplicate_column}_1\"].equals(merged_df[f\"{duplicate_column}_2\"]):\n",
" print(merged_df[f\"{duplicate_column}_1\"].compare(merged_df[f\"{duplicate_column}_2\"]))\n",
" raise ValueError(f\"Error! Different values in {duplicate_column}\")"
"columns_to_exclude_from_duplicates_check = [\n",
" \"Total threshold criteria exceeded\"\n",
"]\n",
"\n",
"columns_to_check = [column for column in duplicate_columns if column not in columns_to_exclude_from_duplicates_check]\n",
"\n",
"any_errors_found = False\n",
"for column_to_check in columns_to_check:\n",
" print(f\"Checking duplicate column {column_to_check}\")\n",
" if not merged_df[f\"{column_to_check}_1\"].equals(\n",
" merged_df[f\"{column_to_check}_2\"]\n",
" ):\n",
" print(f\"Error! Different values in {column_to_check}\")\n",
" print(\n",
" merged_df[f\"{column_to_check}_1\"].compare(\n",
" merged_df[f\"{column_to_check}_2\"]\n",
" )\n",
" )\n",
" any_errors_found = True\n",
"\n",
"if any_errors_found:\n",
" raise ValueError(f\"Error! Different values in one or more columns.\")"
]
}
],

View file

@ -60,11 +60,15 @@ MEDIAN_INCOME_FIELD = "Median household income in the past 12 months"
MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD = (
"Median household income (% of state median household income)"
)
MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = "Median household income (% of AMI)"
PERSISTENT_POVERTY_FIELD = "Persistent Poverty Census Tract"
AMI_FIELD = "Area Median Income (State or metropolitan)"
COLLEGE_ATTENDANCE_FIELD = "Percent enrollment in college or graduate school"
MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = (
"Median household income as a percent of area median income"
)
LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = (
"Low median household income as a percent of area median income"
)
# Climate
FEMA_RISK_FIELD = "FEMA Risk Index Expected Annual Loss Score"
@ -105,7 +109,6 @@ ENERGY_BURDEN_FIELD = "Energy burden"
DIABETES_FIELD = "Diagnosed diabetes among adults aged >=18 years"
ASTHMA_FIELD = "Current asthma among adults aged >=18 years"
HEART_DISEASE_FIELD = "Coronary heart disease among adults aged >=18 years"
LIFE_EXPECTANCY_FIELD = "Life expectancy (years)"
CANCER_FIELD = "Cancer (excluding skin cancer) among adults aged >=18 years"
HEALTH_INSURANCE_FIELD = (
"Current lack of health insurance among adults aged 18-64 years"
@ -113,6 +116,8 @@ HEALTH_INSURANCE_FIELD = (
PHYS_HEALTH_NOT_GOOD_FIELD = (
"Physical health not good for >=14 days among adults aged >=18 years"
)
LIFE_EXPECTANCY_FIELD = "Life expectancy (years)"
LOW_LIFE_EXPECTANCY_FIELD = "Low life expectancy"
# Other Demographics
TOTAL_POP_FIELD = "Total population"
@ -130,9 +135,6 @@ OVER_64_FIELD = "Individuals over 64 years old"
# Fields from 2010 decennial census (generally only loaded for the territories)
CENSUS_DECENNIAL_MEDIAN_INCOME_2009 = "Median household income in 2009 ($)"
CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009 = (
"Median household income as a percent of territory median income in 2009"
)
CENSUS_DECENNIAL_POVERTY_LESS_THAN_100_FPL_FIELD_2009 = (
"Percentage households below 100% of federal poverty line in 2009"
)
@ -141,7 +143,10 @@ CENSUS_DECENNIAL_UNEMPLOYMENT_FIELD_2009 = (
"Unemployed civilians (percent) in 2009"
)
CENSUS_DECENNIAL_TOTAL_POPULATION_FIELD_2009 = "Total population in 2009"
CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009 = (
"Median household income as a percent of territory median income in 2009"
)
LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009 = "Low median household income as a percent of territory median income in 2009"
# Fields from 2010 ACS (loaded for comparison with the territories)
CENSUS_UNEMPLOYMENT_FIELD_2010 = "Unemployed civilians (percent) in 2010"
CENSUS_POVERTY_LESS_THAN_100_FPL_FIELD_2010 = (
@ -225,16 +230,12 @@ IMPENETRABLE_SURFACES_FIELD = "Percent impenetrable surface areas"
READING_FIELD = "Third grade reading proficiency"
LOW_READING_FIELD = "Low third grade reading proficiency"
#####
# Names for individual factors being exceeded
# Climate Change
EXPECTED_POPULATION_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected population loss rate and is low income"
EXPECTED_AGRICULTURE_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected agriculture loss rate and is low income"
EXPECTED_BUILDING_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected building loss rate and is low income"
EXTREME_HEAT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for summer days above 90F and "
f"the median house value is less than {MEDIAN_HOUSE_VALUE_PERCENTILE}th "
f"percentile and is low income"
)
# Clean energy and efficiency
PM25_EXPOSURE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for PM2.5 exposure and is low income"
@ -252,17 +253,10 @@ LEAD_PAINT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD = (
)
HOUSING_BURDEN_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for housing burden and is low income"
IMPENETRABLE_SURFACES_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for impenetrable surfaces and is low "
f"income"
)
# Remediation and Reduction of Legacy Pollution
RMP_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to RMP sites and is low income"
SUPERFUND_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to superfund sites and is low income"
HAZARDOUS_WASTE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to hazardous waste facilities and is low income"
AIR_TOXICS_CANCER_RISK_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for air toxics cancer risk and is low income"
RESPIRATORY_HAZARD_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for respiratory hazard index and is low income"
# Critical Clean Water and Waste Infrastructure
WASTEWATER_DISCHARGE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for wastewater discharge and is low income"
@ -276,11 +270,9 @@ ASTHMA_LOW_INCOME_FIELD = (
)
HEART_DISEASE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for heart disease and is low income"
LIFE_EXPECTANCY_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for life expectancy and is low income"
HEALTHY_FOOD_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for low "
f"access to healthy food and is low income"
LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile "
f"for low life expectancy and is low income"
)
# Workforce
@ -304,11 +296,30 @@ LOW_READING_LOW_HS_EDUCATION_FIELD = (
" and has low HS education"
)
MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD = (
f"At or below the {PERCENTILE}th percentile for median income"
" and has low HS education"
LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD = (
f"At or below the {PERCENTILE}th percentile for low median household income as a "
f"percent of area median income and has low HS education"
)
# Not currently used in a factor
EXTREME_HEAT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for summer days above 90F and "
f"the median house value is less than {MEDIAN_HOUSE_VALUE_PERCENTILE}th "
f"percentile and is low income"
)
IMPENETRABLE_SURFACES_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for impenetrable surfaces and is low "
f"income"
)
AIR_TOXICS_CANCER_RISK_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for air toxics cancer risk and is low income"
RESPIRATORY_HAZARD_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for respiratory hazard index and is low income"
HEALTHY_FOOD_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile for low "
f"access to healthy food and is low income"
)
THRESHOLD_COUNT = "Total threshold criteria exceeded"
FPL_200_SERIES = "Is low income"
# End of names for individual factors being exceeded
####

View file

@ -44,6 +44,8 @@ class ScoreL(Score):
robustness over 1-year ACS.
"""
# Create the combined field.
# TODO: move this combined field percentile calculation to `etl_score`,
# since most other percentile logic is there.
# There should only be one entry in either 2009 or 2019 fields, not one in both.
# But just to be safe, we take the mean and ignore null values so if there
# *were* entries in both, this result would make sense.
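As a minimal illustration of that combining step (the column names here are invented for the sketch):

```
import pandas as pd

# Hypothetical 2009/2019 columns: each row should have at most one entry.
df = pd.DataFrame({
    "poverty (2009)": [0.30, None],
    "poverty (2019)": [None, 0.25],
})

# The row-wise mean with nulls ignored picks whichever vintage is present,
# and would average the two if both were somehow populated.
df["poverty (combined)"] = df[["poverty (2009)", "poverty (2019)"]].mean(
    axis=1, skipna=True
)
```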
@ -169,7 +171,7 @@ class ScoreL(Score):
def _climate_factor(self) -> bool:
# In Xth percentile or above for FEMA's Risk Index (Source: FEMA)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -225,7 +227,7 @@ class ScoreL(Score):
def _energy_factor(self) -> bool:
# In Xth percentile or above for DOE's energy cost burden score (Source: LEAD Score)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -268,7 +270,7 @@ class ScoreL(Score):
# or
# In Xth percentile or above for traffic proximity and volume (Source: 2017 U.S. Department of Transportation (DOT) traffic data)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -315,7 +317,7 @@ class ScoreL(Score):
# or
# In Xth percentile or above for housing cost burden (Source: HUD's Comprehensive Housing Affordability Strategy dataset)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -363,7 +365,7 @@ class ScoreL(Score):
def _pollution_factor(self) -> bool:
# Proximity to Risk Management Plan sites is > X
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -410,7 +412,7 @@ class ScoreL(Score):
def _water_factor(self) -> bool:
# In Xth percentile or above for wastewater discharge (Source: EPA Risk-Screening Environmental Indicators (RSEI) Model)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -441,7 +443,7 @@ class ScoreL(Score):
# or
# In Xth percentile or above for low life expectancy (Source: CDC Places)
# AND
# Low income: In 60th percentile or above for percent of block group population
# Low income: In Nth percentile or above for percent of block group population
# of households where household income is less than or equal to twice the federal
# poverty level. Source: Census's American Community Survey
@ -449,8 +451,7 @@ class ScoreL(Score):
field_names.DIABETES_LOW_INCOME_FIELD,
field_names.ASTHMA_LOW_INCOME_FIELD,
field_names.HEART_DISEASE_LOW_INCOME_FIELD,
field_names.HEALTHY_FOOD_LOW_INCOME_FIELD,
field_names.LIFE_EXPECTANCY_LOW_INCOME_FIELD,
field_names.LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD,
]
diabetes_threshold = (
@ -475,24 +476,14 @@ class ScoreL(Score):
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
healthy_food_threshold = (
low_life_expectancy_threshold = (
self.df[
field_names.HEALTHY_FOOD_FIELD
field_names.LOW_LIFE_EXPECTANCY_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX
]
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
life_expectancy_threshold = (
self.df[
field_names.LIFE_EXPECTANCY_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX
]
# Note: a high life expectancy is good, so take 1 minus the threshold to invert it,
# and then look for life expectancies lower than that (not greater than).
<= 1 - self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
self.df[field_names.DIABETES_LOW_INCOME_FIELD] = (
diabetes_threshold & self.df[field_names.FPL_200_SERIES]
)
@ -502,11 +493,8 @@ class ScoreL(Score):
self.df[field_names.HEART_DISEASE_LOW_INCOME_FIELD] = (
heart_disease_threshold & self.df[field_names.FPL_200_SERIES]
)
self.df[field_names.LIFE_EXPECTANCY_LOW_INCOME_FIELD] = (
life_expectancy_threshold & self.df[field_names.FPL_200_SERIES]
)
self.df[field_names.HEALTHY_FOOD_LOW_INCOME_FIELD] = (
healthy_food_threshold & self.df[field_names.FPL_200_SERIES]
self.df[field_names.LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD] = (
low_life_expectancy_threshold & self.df[field_names.FPL_200_SERIES]
)
self._increment_total_eligibility_exceeded(health_eligibility_columns)
@ -514,23 +502,25 @@ class ScoreL(Score):
return self.df[health_eligibility_columns].any(axis="columns")
def _workforce_factor(self) -> bool:
# Where unemployment is above X%
# Where unemployment is above Xth percentile
# or
# Where median income is less than Y% of the area median income
# Where median income as a percent of area median income is above Xth percentile
# or
# Where the percent of households at or below 100% of the federal poverty level is greater than Z%
# Where the percent of households at or below 100% of the federal poverty level
# is above Xth percentile
# or
# Where linguistic isolation is greater than Y%
# Where linguistic isolation is above Xth percentile
# AND
# Where the high school degree achievement rate for adults 25 years and older is less than 95%
# (necessary to screen out university block groups)
# Where the high school degree achievement rate for adults 25 years and older
# is less than Y%
# (necessary to screen out university tracts)
# Workforce criteria for states fields.
workforce_eligibility_columns = [
field_names.UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD,
field_names.POVERTY_LOW_HS_EDUCATION_FIELD,
field_names.LINGUISTIC_ISOLATION_LOW_HS_EDUCATION_FIELD,
field_names.MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD,
field_names.LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD,
]
high_school_achievement_rate_threshold = (
@ -546,14 +536,12 @@ class ScoreL(Score):
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
median_income_threshold = (
low_median_income_threshold = (
self.df[
field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX
]
# Note: a high median income as a % of AMI is good, so take 1 minus the threshold to invert it,
# and then look for median income lower than that (not greater than).
<= 1 - self.ENVIRONMENTAL_BURDEN_THRESHOLD
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
linguistic_isolation_threshold = (
@ -581,8 +569,8 @@ class ScoreL(Score):
poverty_threshold & high_scool_achievement_rate_threshold
)
self.df[field_names.MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD] = (
median_income_threshold & high_scool_achievement_rate_threshold
self.df[field_names.LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD] = (
low_median_income_threshold & high_scool_achievement_rate_threshold
)
self.df[field_names.UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD] = (
@ -624,23 +612,31 @@ class ScoreL(Score):
threshold_cutoff_for_island_areas=self.ENVIRONMENTAL_BURDEN_THRESHOLD,
)
# Also check whether low area median income is 90th percentile or higher
# within the islands.
low_median_income_as_a_percent_of_ami_island_areas_criteria_field_name = (
f"{field_names.LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009} exceeds "
f"{field_names.PERCENTILE}th percentile"
)
self.df[
low_median_income_as_a_percent_of_ami_island_areas_criteria_field_name
] = (
self.df[
field_names.LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009
+ field_names.PERCENTILE_FIELD_SUFFIX
]
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
workforce_combined_criteria_for_island_areas = (
self.df[unemployment_island_areas_criteria_field_name]
| self.df[poverty_island_areas_criteria_field_name]
# Also check whether area median income is 10th percentile or lower
# within the islands.
| (
self.df[
field_names.CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009
+ field_names.PERCENTILE_FIELD_SUFFIX
]
# Note: a high median income as a % of AMI is good, so take 1 minus the threshold to invert it,
# and then look for median income lower than that (not greater than).
< 1 - self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
| self.df[
low_median_income_as_a_percent_of_ami_island_areas_criteria_field_name
]
) & (
self.df[field_names.CENSUS_DECENNIAL_HIGH_SCHOOL_ED_FIELD_2009]
> self.LACK_OF_HIGH_SCHOOL_MINIMUM_THRESHOLD
>= self.LACK_OF_HIGH_SCHOOL_MINIMUM_THRESHOLD
)
percent_of_island_tracts_highlighted = (

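Stepping back, each `_*_factor` method in this diff follows the same pattern: test a percentile column against `ENVIRONMENTAL_BURDEN_THRESHOLD`, combine it with the low-income series, and store the resulting flag. A condensed sketch of that pattern (schematic only, using names that appear above; not the verbatim implementation):

```
# Schematic of the per-factor pattern inside ScoreL (illustrative).
burden_threshold = (
    self.df[field_names.ASTHMA_FIELD + field_names.PERCENTILE_FIELD_SUFFIX]
    >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
)
# A tract is flagged when it exceeds the burden threshold AND is low income.
self.df[field_names.ASTHMA_LOW_INCOME_FIELD] = (
    burden_threshold & self.df[field_names.FPL_200_SERIES]
)
```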
View file

@ -1,153 +0,0 @@
# Overview
This document describes our "data roadmap", which serves several purposes.
# Data roadmap goals
The goals of the data roadmap are as follows:
- Tracking data sets being considered for inclusion in the Climate and Economic Justice Screening Tool (CEJST), either as a data set that is included in the cumulative impacts score or a reference data set that is not included in the score
- Prioritizing data sets, so that it's obvious to developers working on the CEJST which data sets to incorporate next into the tool
- Gathering important details about each data set, such as its geographic resolution and the year it was last updated, so that the CEJST team can make informed decisions about what data to prioritize
- Tracking the problem areas that each data set relates to (e.g., a certain data set may relate to the problem of pesticide exposure amongst migrant farm workers)
- Enabling members of the public to submit ideas for problem areas or data sets to be considered for inclusion in the CEJST, with easy-to-use and accessible tools
- Enabling members of the public to submit revisions to the information about each problem area or data set, with easy-to-use and accessible tools
- Enabling the CEJST development team to review suggestions before incorporating them officially into the data roadmap, to filter out potential noise and spam, or consider how requests may lead to changes in software features and documentation
# User stories
These goals can map onto several user stories for the data roadmap, such as:
- As a community member, I want to suggest a new idea for a dataset.
- As a community member, I want to understand what happened with my suggestion for a new dataset.
- As a community member, I want to edit the details of a dataset proposal to add more information.
- As a WHEJAC board member, I want to vote on what data sources should be prioritized next.
- As a product manager, I want to filter based on characteristics of the data.
- As a developer, I want to know what to work on next.
# Data set descriptions
There are lots of details that are important to track for each data set. This
information helps us prepare to integrate a data set into the tool and prioritize
between different options for data in the data roadmap.
In order to support a process of peer review on edits and updates, these details are
tracked in one `YAML` file per data set description in the directory
[data_roadmap/data_set_descriptions](data_roadmap/data_set_descriptions).
Each data set description includes a number of fields, some of which are required.
The schema defining these fields is written in [Yamale](https://github.com/23andMe/Yamale)
and lives at [data_roadmap/data_set_description_schema.yaml](data_roadmap/data_set_description_schema.yaml).
Because `Yamale` does not provide a method for describing fields, we've created an
additional file that includes written descriptions of the meaning of each field in
the schema. These live in [data_roadmap/data_set_description_field_descriptions.yaml](data_roadmap/data_set_description_field_descriptions.yaml).
In order to provide a helpful starting point for people who are ready to contribute
ideas for a new data set for consideration, there is an auto-generated data set
description template that lives at [data_roadmap/data_set_description_template.yaml](data_roadmap/data_set_description_template.yaml).
# Steps to add a new data set description: the "easy" way
Soon we will create a Google Form that contributors can use to submit ideas for new
data sets. The Google Form will match the schema of the data set descriptions. Please
see [this ticket](https://app.zenhub.com/workspaces/justice40-60993f6e05473d0010ec44e3/issues/usds/justice40-tool/39)
for tracking this work.
# Steps to add a new data set description: the git-savvy way
For those who are comfortable using `git` and `Markdown`, these are the steps to
contribute a new data set description to the data roadmap:
1. Research and learn about the data set you're proposing for consideration.
2. Clone the repository and learn about the [contribution guidelines for this
project](../docs/CONTRIBUTING.md).
3. In your local version of the repository, copy the template from
`data_roadmap/data_set_description_template.yaml` into a new file that lives in
`data_roadmap/data_set_descriptions` and has the name of the data set as the name of the file.
4. Edit this file to ensure it has all of the appropriate details about the data set.
5. If you'd like, you can run the validations in `run_validations_and_write_template`
to ensure your contribution is valid according to the schema (see the sketch after
this list). These checks will also run automatically on each commit.
6. Create a pull request with your new data set description and submit it for peer
review.
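For reference, here is a minimal sketch of running those validations locally, using the entry point defined in this package:

```python
# Validates the schema and field descriptions, checks every data set
# description against the schema, and regenerates the template file.
from data_roadmap.utils.utils_data_set_description_schema import (
    run_validations_and_write_template,
)

run_validations_and_write_template()
```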
Thank you for contributing!
# Tooling proposal and milestones
There is no single tool that supports all the goals and user stories described above.
Therefore we've proposed combining a number of tools in a way that can support them all.
We've also proposed various "milestones" that will allow us to iteratively and
sequentially build the data roadmap in a way that supports the entire vision but
starts with small and achievable steps. These milestones are proposed in order.
This work is most accurately tracked in [this epic](https://app.zenhub.com/workspaces/justice40-60993f6e05473d0010ec44e3/issues/usds/justice40-tool/38).
We've also verbally described them below.
## Milestone: YAML files for data sets and linter (Done)
To start, we'll create a folder in this repository that can
house YAML files, one per data set. Each file will describe the characteristics of the data.
The benefit of using a YAML file for this is that it's easy to subject changes to these files to peer review through the pull request process. This allows external collaborators from the open source community to submit suggested changes, which can be reviewed by the core CEJST team.
We'll use a Python-based script to load all the files in the directory, and then run a schema validator to ensure all the files have valid entries.
For schema validation, we propose using [Yamale](https://github.com/23andMe/Yamale). This provides a lightweight schema and validator, and [integrates nicely with GitHub actions](https://github.com/nrkno/yaml-schema-validator-github-action).
If there's an improper format in any of the files, the schema validator will throw an error.
As part of this milestone, we will also set this up to run automatically with each commit to any branch as part of CI/CD.
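As an illustration, validating a single description file with `Yamale` looks roughly like this (the description file name here is hypothetical):

```python
import yamale

# Load the schema and one data set description, then validate; `yamale`
# raises a YamaleError listing every violation if the file is invalid.
schema = yamale.make_schema(path="data_roadmap/data_set_description_schema.yaml")
data = yamale.make_data("data_roadmap/data_set_descriptions/particulate_matter_2_5.yaml")
yamale.validate(schema=schema, data=data)
```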
## Milestone: Google forms integration
To make it easy for non-engineer members of the public and advisory bodies such as the WHEJAC to submit suggestions for data sets, we will configure a Google Form that maps to the schema of the data set files.
This will enable members of the public to fill out a simple form suggesting data without needing to understand GitHub or other engineering concepts.
At first, these responses can just go into a resulting Google Sheet and be manually copied and converted into data set description files. Later, we can write a script that converts new entries in the Google Sheet automatically into data set files. This can be set up to run as a trigger on the addition of new rows to the Google Sheet (see the sketch below).
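A sketch of that conversion step, assuming the row has already been fetched from the Sheet as a plain dictionary (the Sheets API call itself is omitted):

```python
import pathlib

import yaml


def write_description_from_row(row: dict) -> None:
    """Write one Google Sheet row as a data set description YAML file."""
    file_name = row["name"].lower().replace(" ", "_") + ".yaml"
    out_path = pathlib.Path("data_roadmap/data_set_descriptions") / file_name
    out_path.write_text(yaml.safe_dump(row, sort_keys=False))
```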
## Milestone: Post data in tabular format
Add a script that runs the schema validator on all files and, if successful, posts the results in a tabular format. There are straightforward packages to post a Python dictionary / `pandas` dataframe to Google Sheets and/or Airtable. As part of this milestone, we will also set this up to run automatically with each commit to `main` as part of CI/CD.
This will make it easier to filter the data to answer questions like, "which data sources are available at the census block group level".
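For example, a sketch (not the final implementation) of loading the descriptions into a `pandas` dataframe for filtering:

```python
import pathlib

import pandas as pd
import yaml

# Load every data set description into a single dataframe.
descriptions = [
    yaml.safe_load(path.read_text())
    for path in pathlib.Path("data_roadmap/data_set_descriptions").glob("*.yaml")
]
df = pd.DataFrame(descriptions)

# "Which data sources are available at the census block group level?"
print(df.loc[df["spatial_resolution"] == "Census block group", ["name", "source"]])
```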
## Milestone: Tickets created for incorporating data sets
For each data set that is being considered for inclusion soon in the tool, the project management team will create a ticket for "Incorporating \_\_\_ data set into the database", with a link to the data set detail document. This ticket will be created in the ticket tracking system used by the open source repository, which is ZenHub. This project management system will be public.
At the initial launch, we are not planning for members of the open source community to be able to create tickets, but we would like to consider a process for members of the open source community creating tickets that can go through review by the CEJST team.
This will help developers know what to work on next, and open source community members can also pick up tickets and work to integrate the data sets.
## Milestone: Add problem areas
We'll need to somehow track "problem areas" that describe problems in climate, environmental, and economic justice, even without specific proposals of data sets. For instance, a problem area may be "food insecurity", and a number of data sets can have this as their problem area.
We can change the linter to validate that every data set description maps to one or more known problem areas.
The benefit of this is that some non-data-focused members of the public or the WHEJAC advisory body may want to suggest we prioritize certain problem areas, with or without ideas for specific data sets that may best address that problem area.
It is not yet clear what the best path forward is for implementing these problem area descriptions. One option is to create a folder for descriptions of problem areas, which contains YAML files that get validated according to a schema. Another option would be simply to add these as an array in the description of data sets, or add labels to the tickets once data sets are tracked in GitHub tickets. The linter change is sketched below.
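A lightweight sketch of that linter change (the `problem_areas` field name and the known-areas list are hypothetical, since this design is not settled):

```python
KNOWN_PROBLEM_AREAS = {"food insecurity", "pesticide exposure"}


def validate_problem_areas(description: dict) -> None:
    """Raise if a data set description references an unknown problem area."""
    unknown = set(description.get("problem_areas", [])) - KNOWN_PROBLEM_AREAS
    if unknown:
        raise ValueError(f"Unknown problem areas: {sorted(unknown)}")
```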
## Milestone: Add prioritization voting for WHEJAC and members of the public
This milestone is currently the least well-defined. It's important that members of advisory bodies like the WHEJAC and members of the public be able to "upvote" certain data sets for inclusion in the tool.
One potential approach is to use the [Stanford Participatory Budgeting Platform](https://pbstanford.org/). Here's an [example of voting on proposals within a limited budget](https://pbstanford.org/nyc8/knapsack).
For instance, going into a quarterly planning cycle, the CEJST development team could estimate the amount of time (in developer-weeks) that it would take to clean, analyze, and incorporate each potential data set. For instance, incorporating some already-cleaned census data may take 1 week of a developer's time, while incorporating new asthma data from CMS that's never been publicly released could take 5 weeks. Given a "budget" of the number of developer weeks available (e.g., 2 developers for 13 weeks, or 26 developer-weeks), advisors can vote on their top priorities for inclusion in the tool within the available "budget".
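A toy sketch of that kind of selection within a budget (the greedy rule and the vote counts are illustrations, not the platform's actual algorithm):

```python
def select_within_budget(candidates, budget_weeks):
    """Greedily keep the most-upvoted data sets that fit within the budget.

    candidates: list of (name, votes, estimated developer-weeks of work).
    """
    selected, spent = [], 0
    for name, votes, cost in sorted(candidates, key=lambda c: -c[1]):
        if spent + cost <= budget_weeks:
            selected.append(name)
            spent += cost
    return selected


# 2 developers for 13 weeks = 26 developer-weeks of budget; vote counts are made up.
print(select_within_budget(
    [("already-cleaned census data", 10, 1), ("new CMS asthma data", 12, 5)],
    budget_weeks=26,
))
```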

View file

@@ -1,39 +0,0 @@
# There is no method for adding field descriptions to `yamale` schemas.
# Therefore, we've created a dictionary here of fields and their descriptions.
name: A short name of the data set.
source: The URL pointing towards the data set itself or more information about the
data set.
relevance_to_environmental_justice: It's useful to spell out why this data is
relevant to EJ issues and/or can be used to identify EJ communities.
spatial_resolution: Dev team needs to know if the resolution is granular enough to be useful
public_status: Whether a dataset has already gone through public release process
(like Census data) or may need a lengthy review process (like Medicaid data).
sponsor: Whether there's a federal agency or non-governmental agency that is working
to provide and maintain this data.
subjective_rating_of_data_quality: Sometimes we don't have statistics on data
quality, but we know it is likely to be accurate or not. How much has it been
vetted by an agency; is this the de facto data set for the topic?
estimated_margin_of_error: Estimated margin of error on measurement, if known. Often
more narrow geographic measures have a higher margin of error due to a smaller sample
for each measurement.
known_data_quality_issues: It can be helpful to write out known problems.
geographic_coverage_percent: We want to think about data that is comprehensive across
America.
geographic_coverage_description: A verbal description of geographic coverage.
data_formats: Developers need to know what formats the data is available in
last_updated_date: When was the data last updated / refreshed? (In format YYYY-MM-DD.
If exact date is not known, use YYYY-01-01.)
frequency_of_updates: How often is this data updated? Is it updated on a reliable
cadence?
documentation: Link to docs. Also, is the documentation good enough? Can we get the
info we need?
data_can_go_in_cloud: Some datasets cannot legally go in the cloud
discussion: Review of other topics, such as
peer review (Overview or links out to peer review done on this dataset),
where and how data is available (e.g., Geoplatform.gov? Is it available from multiple
sources?),
risk assessment of the data (e.g. a vendor-processed version of the dataset might not
be open or good enough),
legal considerations (Legal disclaimers, assumption of risk, proprietary?),
accreditation (Is this source accredited?)

View file

@@ -1,24 +0,0 @@
# `yamale` schema for descriptions of data sets.
name: str(required=True)
source: str(required=True)
relevance_to_environmental_justice: str(required=False)
data_formats: enum('GeoJSON', 'Esri Shapefile (SHP, DBF, SHX)', 'GML', 'KML/KMZ',
'GPX', 'CSV/XLSX', 'GDB', 'MBTILES', 'LAS', required=True)
spatial_resolution: enum('State/territory', 'County', 'Zip code', 'Census tract',
'Census block group', 'Exact address or lat/long', 'Other', required=True)
public_status: enum('Not Released', 'Public', 'Public for certain audiences', 'Other',
required=True)
sponsor: str(required=True)
subjective_rating_of_data_quality: enum('Low Quality', 'Medium Quality', 'High
Quality', required=False)
estimated_margin_of_error: num(required=False)
known_data_quality_issues: str(required=False)
geographic_coverage_percent: num(required=False)
geographic_coverage_description: str(required=False)
last_updated_date: day(min='2001-01-01', max='2100-01-01', required=True)
frequency_of_updates: enum('Less than annually', 'Approximately annually',
'Once every 1-6 months',
'Daily or more frequently than daily', 'Unknown', required=True)
documentation: str(required=False)
data_can_go_in_cloud: bool(required=False)
discussion: str(required=False)

View file

@@ -1,94 +0,0 @@
# Note: This template is automatically generated by the function
# `write_data_set_description_template_file` from the schema
# and field descriptions files. Do not manually edit this file.
name:
# Description: A short name of the data set.
# Required field: True
# Field type: str
source:
# Description: The URL pointing towards the data set itself or more information about the data set.
# Required field: True
# Field type: str
relevance_to_environmental_justice:
# Description: It's useful to spell out why this data is relevant to EJ issues and/or can be used to identify EJ communities.
# Required field: False
# Field type: str
data_formats:
# Description: Developers need to know what formats the data is available in
# Required field: True
# Field type: enum
# Valid choices are one of the following: ('GeoJSON', 'Esri Shapefile (SHP, DBF, SHX)', 'GML', 'KML/KMZ', 'GPX', 'CSV/XLSX', 'GDB', 'MBTILES', 'LAS')
spatial_resolution:
# Description: Dev team needs to know if the resolution is granular enough to be useful
# Required field: True
# Field type: enum
# Valid choices are one of the following: ('State/territory', 'County', 'Zip code', 'Census tract', 'Census block group', 'Exact address or lat/long', 'Other')
public_status:
# Description: Whether a dataset has already gone through public release process (like Census data) or may need a lengthy review process (like Medicaid data).
# Required field: True
# Field type: enum
# Valid choices are one of the following: ('Not Released', 'Public', 'Public for certain audiences', 'Other')
sponsor:
# Description: Whether there's a federal agency or non-governmental agency that is working to provide and maintain this data.
# Required field: True
# Field type: str
subjective_rating_of_data_quality:
# Description: Sometimes we don't have statistics on data quality, but we know it is likely to be accurate or not. How much has it been vetted by an agency; is this the de facto data set for the topic?
# Required field: False
# Field type: enum
# Valid choices are one of the following: ('Low Quality', 'Medium Quality', 'High Quality')
estimated_margin_of_error:
# Description: Estimated margin of error on measurement, if known. Often more narrow geographic measures have a higher margin of error due to a smaller sample for each measurement.
# Required field: False
# Field type: num
known_data_quality_issues:
# Description: It can be helpful to write out known problems.
# Required field: False
# Field type: str
geographic_coverage_percent:
# Description: We want to think about data that is comprehensive across America.
# Required field: False
# Field type: num
geographic_coverage_description:
# Description: A verbal description of geographic coverage.
# Required field: False
# Field type: str
last_updated_date:
# Description: When was the data last updated / refreshed? (In format YYYY-MM-DD. If exact date is not known, use YYYY-01-01.)
# Required field: True
# Field type: day
frequency_of_updates:
# Description: How often is this data updated? Is it updated on a reliable cadence?
# Required field: True
# Field type: enum
# Valid choices are one of the following: ('Less than annually', 'Approximately annually', 'Once every 1-6 months', 'Daily or more frequently than daily', 'Unknown')
documentation:
# Description: Link to docs. Also, is the documentation good enough? Can we get the info we need?
# Required field: False
# Field type: str
data_can_go_in_cloud:
# Description: Some datasets cannot legally go in the cloud
# Required field: False
# Field type: bool
discussion:
# Description: Review of other topics, such as peer review (Overview or links out to peer review done on this dataset), where and how data is available (e.g., Geoplatform.gov? Is it available from multiple sources?), risk assessment of the data (e.g. a vendor-processed version of the dataset might not be open or good enough), legal considerations (Legal disclaimers, assumption of risk, proprietary?), accreditation (Is this source accredited?)
# Required field: False
# Field type: str

View file

@@ -1,35 +0,0 @@
name: Particulate Matter 2.5
source: https://gaftp.epa.gov/EJSCREEN/
relevance_to_environmental_justice: Particulate matter has a lot of adverse impacts
on health.
data_formats: CSV/XLSX
spatial_resolution: Census block group
public_status: Public
sponsor: EPA
subjective_rating_of_data_quality: Medium Quality
estimated_margin_of_error:
known_data_quality_issues: Many PM 2.5 stations are known to be pretty far apart, so
averaging them can lead to data quality loss.
geographic_coverage_percent:
geographic_coverage_description:
last_updated_date: 2017-01-01
frequency_of_updates: Less than annually
documentation: https://www.epa.gov/sites/production/files/2015-05/documents/ejscreen_technical_document_20150505.pdf#page=13
data_can_go_in_cloud: True
discussion:

View file

@@ -1 +0,0 @@
yamale==3.0.8

View file

@@ -1,21 +0,0 @@
"""Setup script for `data_roadmap` package."""
import os
from setuptools import find_packages
from setuptools import setup
# TODO: replace this with `poetry`. https://github.com/usds/justice40-tool/issues/57
_PACKAGE_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_PACKAGE_DIRECTORY, "requirements.txt")) as f:
requirements = f.readlines()
setup(
name="data_roadmap",
description="Data roadmap package",
author="CEJST Development Team",
author_email="justice40open@usds.gov",
install_requires=requirements,
include_package_data=True,
packages=find_packages(),
)

View file

@@ -1,151 +0,0 @@
import importlib_resources
import pathlib
import yamale
import yaml
# Set directories.
DATA_ROADMAP_DIRECTORY = importlib_resources.files("data_roadmap")
UTILS_DIRECTORY = DATA_ROADMAP_DIRECTORY / "utils"
DATA_SET_DESCRIPTIONS_DIRECTORY = DATA_ROADMAP_DIRECTORY / "data_set_descriptions"
# Set file paths.
DATA_SET_DESCRIPTION_SCHEMA_FILE_PATH = (
DATA_ROADMAP_DIRECTORY / "data_set_description_schema.yaml"
)
DATA_SET_DESCRIPTION_FIELD_DESCRIPTIONS_FILE_PATH = (
DATA_ROADMAP_DIRECTORY / "data_set_description_field_descriptions.yaml"
)
DATA_SET_DESCRIPTION_TEMPLATE_FILE_PATH = (
DATA_ROADMAP_DIRECTORY / "data_set_description_template.yaml"
)
def load_data_set_description_schema(
file_path: pathlib.PosixPath = DATA_SET_DESCRIPTION_SCHEMA_FILE_PATH,
) -> yamale.schema.schema.Schema:
"""Load from file the data set description schema."""
schema = yamale.make_schema(path=file_path)
return schema
def load_data_set_description_field_descriptions(
file_path: pathlib.PosixPath = DATA_SET_DESCRIPTION_FIELD_DESCRIPTIONS_FILE_PATH,
) -> dict:
"""Load from file the descriptions of fields in the data set description."""
# Load field descriptions.
with open(file_path, "r") as stream:
data_set_description_field_descriptions = yaml.safe_load(stream=stream)
return data_set_description_field_descriptions
def validate_descriptions_for_schema(
schema: yamale.schema.schema.Schema,
field_descriptions: dict,
) -> None:
"""Validate descriptions for schema.
Checks that every field in the `yamale` schema also has a field
description in the `field_descriptions` dict.
"""
for field_name in schema.dict.keys():
if field_name not in field_descriptions:
raise ValueError(
f"Field `{field_name}` does not have a "
f"description. Please add one to file `{DATA_SET_DESCRIPTION_FIELD_DESCRIPTIONS_FILE_PATH}`"
)
for field_name in field_descriptions.keys():
if field_name not in schema.dict.keys():
raise ValueError(
f"Field `{field_name}` has a description but is not in the " f"schema."
)
def validate_all_data_set_descriptions(
data_set_description_schema: yamale.schema.schema.Schema,
) -> None:
"""Validate data set descriptions.
Validate each file in the `data_set_descriptions` directory
against the provided schema.
"""
data_set_description_file_paths_generator = DATA_SET_DESCRIPTIONS_DIRECTORY.glob(
"*.yaml"
)
# Validate each file
for file_path in data_set_description_file_paths_generator:
print(f"Validating {file_path}...")
# Create a yamale Data object
data_set_description = yamale.make_data(file_path)
# TODO: explore collecting all errors and raising them at once. - Lucas
yamale.validate(schema=data_set_description_schema, data=data_set_description)
def write_data_set_description_template_file(
data_set_description_schema: yamale.schema.schema.Schema,
data_set_description_field_descriptions: dict,
template_file_path: str = DATA_SET_DESCRIPTION_TEMPLATE_FILE_PATH,
) -> None:
"""Write an example data set description with helpful comments."""
template_file_lines = []
# Write comments at the top of the template
template_file_lines.append(
"# Note: This template is automatically generated by the function\n"
"# `write_data_set_description_template_file` from the schema\n"
"# and field descriptions files. Do not manually edit this file.\n\n"
)
schema_dict = data_set_description_schema.dict
for field_name, field_schema in schema_dict.items():
template_file_lines.append(f"{field_name}: \n")
template_file_lines.append(
f"# Description: {data_set_description_field_descriptions[field_name]}\n"
)
template_file_lines.append(f"# Required field: {field_schema.is_required}\n")
template_file_lines.append(f"# Field type: {field_schema.get_name()}\n")
if type(field_schema) is yamale.validators.validators.Enum:
template_file_lines.append(
f"# Valid choices are one of the following: {field_schema.enums}\n"
)
# Add an empty linebreak to separate fields.
template_file_lines.append("\n")
with open(template_file_path, "w") as file:
file.writelines(template_file_lines)
def run_validations_and_write_template() -> None:
"""Run validations of schema and descriptions, and write a template file."""
# Load the schema and a separate dictionary
data_set_description_schema = load_data_set_description_schema()
data_set_description_field_descriptions = (
load_data_set_description_field_descriptions()
)
validate_descriptions_for_schema(
schema=data_set_description_schema,
field_descriptions=data_set_description_field_descriptions,
)
# Validate all data set descriptions in the directory against schema.
validate_all_data_set_descriptions(
data_set_description_schema=data_set_description_schema
)
# Write an example template for data set descriptions.
write_data_set_description_template_file(
data_set_description_schema=data_set_description_schema,
data_set_description_field_descriptions=data_set_description_field_descriptions,
)
if __name__ == "__main__":
run_validations_and_write_template()

View file

@@ -1,248 +0,0 @@
import unittest
from unittest import mock
import yamale
from data_roadmap.utils.utils_data_set_description_schema import (
load_data_set_description_schema,
load_data_set_description_field_descriptions,
validate_descriptions_for_schema,
validate_all_data_set_descriptions,
write_data_set_description_template_file,
)
class UtilsDataSetDescriptionSchema(unittest.TestCase):
@mock.patch("yamale.make_schema")
def test_load_data_set_description_schema(self, make_schema_mock):
load_data_set_description_schema(file_path="mock.yaml")
make_schema_mock.assert_called_once_with(path="mock.yaml")
@mock.patch("yaml.safe_load")
def test_load_data_set_description_field_descriptions(self, yaml_safe_load_mock):
# Note: this isn't a great test, we could mock the actual YAML to
# make it better. - Lucas
mock_dict = {
"name": "The name of the thing.",
"age": "The age of the thing.",
"height": "The height of the thing.",
"awesome": "The awesome of the thing.",
"field": "The field of the thing.",
}
yaml_safe_load_mock.return_value = mock_dict
field_descriptions = load_data_set_description_field_descriptions()
yaml_safe_load_mock.assert_called_once()
self.assertDictEqual(field_descriptions, mock_dict)
def test_validate_descriptions_for_schema(self):
# Test when all descriptions are present.
field_descriptions = {
"name": "The name of the thing.",
"age": "The age of the thing.",
"height": "The height of the thing.",
"awesome": "The awesome of the thing.",
"field": "The field of the thing.",
}
schema = yamale.make_schema(
content="""
name: str()
age: int(max=200)
height: num()
awesome: bool()
field: enum('option 1', 'option 2')
"""
)
# Should pass.
validate_descriptions_for_schema(
schema=schema, field_descriptions=field_descriptions
)
field_descriptions_missing_one = {
"name": "The name of the thing.",
"age": "The age of the thing.",
"height": "The height of the thing.",
"awesome": "The awesome of the thing.",
}
# Should fail because of the missing field description.
with self.assertRaises(ValueError) as context_manager:
validate_descriptions_for_schema(
schema=schema, field_descriptions=field_descriptions_missing_one
)
# Using `assertIn` because the file path is returned in the error
# message, and it varies based on environment.
self.assertIn(
"Field `field` does not have a description. Please add one to file",
str(context_manager.exception),
)
field_descriptions_extra_one = {
"name": "The name of the thing.",
"age": "The age of the thing.",
"height": "The height of the thing.",
"awesome": "The awesome of the thing.",
"field": "The field of the thing.",
"extra": "Extra description.",
}
# Should fail because of the extra field description.
with self.assertRaises(ValueError) as context_manager:
validate_descriptions_for_schema(
schema=schema, field_descriptions=field_descriptions_extra_one
)
# This error message contains no file path, so the full string can be
# compared exactly.
self.assertEqual(
"Field `extra` has a description but is not in the schema.",
str(context_manager.exception),
)
def test_validate_all_data_set_descriptions(self):
# Setup a few examples of `yamale` data *before* we mock the `make_data`
# function.
valid_data = yamale.make_data(
content="""
name: Bill
age: 26
height: 6.2
awesome: True
field: option 1
"""
)
invalid_data_1 = yamale.make_data(
content="""
name: Bill
age: asdf
height: 6.2
awesome: asdf
field: option 1
"""
)
invalid_data_2 = yamale.make_data(
content="""
age: 26
height: 6.2
awesome: True
field: option 1
"""
)
# Mock `make_data`.
with mock.patch.object(
yamale, "make_data", return_value=None
) as yamale_make_data_mock:
schema = yamale.make_schema(
content="""
name: str()
age: int(max=200)
height: num()
awesome: bool()
field: enum('option 1', 'option 2')
"""
)
# Make the `make_data` method return valid data.
yamale_make_data_mock.return_value = valid_data
# Should pass.
validate_all_data_set_descriptions(data_set_description_schema=schema)
# Make some of the data invalid.
yamale_make_data_mock.return_value = invalid_data_1
# Should fail because of the invalid field values.
with self.assertRaises(yamale.YamaleError) as context_manager:
validate_all_data_set_descriptions(data_set_description_schema=schema)
self.assertEqual(
str(context_manager.exception),
"""Error validating data
age: 'asdf' is not a int.
awesome: 'asdf' is not a bool.""",
)
# Make some of the data missing.
yamale_make_data_mock.return_value = invalid_data_2
# Should fail because of the missing fields.
with self.assertRaises(yamale.YamaleError) as context_manager:
validate_all_data_set_descriptions(data_set_description_schema=schema)
self.assertEqual(
str(context_manager.exception),
"""Error validating data
name: Required field missing""",
)
@mock.patch("builtins.open", new_callable=mock.mock_open)
def test_write_data_set_description_template_file(self, builtins_writelines_mock):
schema = yamale.make_schema(
content="""
name: str()
age: int(max=200)
height: num()
awesome: bool()
field: enum('option 1', 'option 2')
"""
)
data_set_description_field_descriptions = {
"name": "The name of the thing.",
"age": "The age of the thing.",
"height": "The height of the thing.",
"awesome": "The awesome of the thing.",
"field": "The field of the thing.",
}
write_data_set_description_template_file(
data_set_description_schema=schema,
data_set_description_field_descriptions=data_set_description_field_descriptions,
template_file_path="mock_template.yaml",
)
call_to_writelines = builtins_writelines_mock.mock_calls[2][1][0]
self.assertListEqual(
call_to_writelines,
[
"# Note: This template is automatically generated by the function\n"
"# `write_data_set_description_template_file` from the schema\n"
"# and field descriptions files. Do not manually edit this file.\n\n",
"name: \n",
"# Description: The name of the thing.\n",
"# Required field: True\n",
"# Field type: str\n",
"\n",
"age: \n",
"# Description: The age of the thing.\n",
"# Required field: True\n",
"# Field type: int\n",
"\n",
"height: \n",
"# Description: The height of the thing.\n",
"# Required field: True\n",
"# Field type: num\n",
"\n",
"awesome: \n",
"# Description: The awesome of the thing.\n",
"# Required field: True\n",
"# Field type: bool\n",
"\n",
"field: \n",
"# Description: The field of the thing.\n",
"# Required field: True\n",
"# Field type: enum\n",
"# Valid choices are one of the following: ('option 1', 'option 2')\n",
"\n",
],
)

View file

@@ -1,40 +0,0 @@
## create acm certificate
This only needs to be run once for the `sit` environment. For `stg` and `prd`, we're assuming some other certificate ARN will be used.
npx serverless create-cert
You'll have to grab the ARN of the certificate from the log output or go into the console to get it; it looks like the plugin doesn't work any more. Set `CLOUDFRONT_CERTIFICATE_ARN` in `sit` to that value.
## deploy
sls deploy --aws-profile geoplatform --stage sit --verbose
If it's the first time deploying, you'll have to create a DNS entry that points to the CloudFront distribution.
## testing
The examples can be run several different ways
### local
The `package.json` file includes several examples to run against the local source code. The actual
tasks will execute within AWS, so an `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` must be set in
the `test.env` file.
```bash
$ cd ./functions/detect-changes-for-worker
$ npm run test:gdal
```
### lambda invoke
The deployed lambda functions can be directly invoked with the `serverless invoke` command.
```bash
$ cat ./functions/detect-changes-for-worker/events/gdal.json | sls invoke -s sit -f DetectChangesForWorker
```
New event files can be created to perform one-off data processes.
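The same invocation can also be done from Python with `boto3` (a sketch; the function name assumes the `sit` stage naming used above, and the region is an assumption):

```python
import json

import boto3

client = boto3.client("lambda", region_name="us-east-1")

# Reuse one of the event files checked into the repo as the payload.
with open("functions/detect-changes-for-worker/events/gdal.json") as f:
    payload = f.read()

response = client.invoke(FunctionName="sit-DetectChangesForWorker", Payload=payload)
print(json.loads(response["Payload"].read()))
```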

View file

@@ -1,11 +0,0 @@
Conditions:
ShouldOnlyCreateResourcesInSIT:
Fn::Equals:
- ${self:provider.stage}
- sit
ShouldOnlyCreateResourcesInPRD:
Fn::Equals:
- ${self:provider.stage}
- prd

View file

@@ -1,37 +0,0 @@
sit:
DEPLOYMENT_BUCKET_PREFIX: ${self:custom.namespaceShort}
STACK_NAME_PREFIX: "${self:custom.namespaceShort}-"
DATA_BUCKET: ${self:custom.environment.DEPLOYMENT_BUCKET_PREFIX}-${self:provider.stage}-${self:service}-data
SHOULD_CREATE_SSL_CERTIFICATE: true
HOSTED_ZONE_ID_DOMAIN: Z104704314NAAG3GV4SN1
HOSTED_ZONE_SUBDOMAIN: ${self:provider.stage}-${self:service}
HOSTED_ZONE_DOMAIN: geoplatform.info
CLOUDFRONT_CERTIFICATE_ARN: arn:aws:acm:us-east-1:998343784597:certificate/083641d4-9df6-4f89-b79d-6697f428f5b9
GDAL_TASK_DEFINITION_NAME: ${self:provider.stage}-${self:service}-gdal
GDAL_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-osgeo-gdal
TIPPECANOE_TASK_DEFINITION_NAME: ${self:provider.stage}-${self:service}-tippecanoe
TIPPECANOE_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-mgiddens-tippecanoe
stg:
DEPLOYMENT_BUCKET_PREFIX: ${self:custom.namespaceShort}
STACK_NAME_PREFIX: "${self:custom.namespaceShort}-"
DATA_BUCKET: ${self:custom.environment.DEPLOYMENT_BUCKET_PREFIX}-${self:provider.stage}-${self:service}-data
SHOULD_CREATE_SSL_CERTIFICATE: false
HOSTED_ZONE_ID_DOMAIN: TBD
HOSTED_ZONE_SUBDOMAIN: ${self:provider.stage}-${self:service}
HOSTED_ZONE_DOMAIN: TBD
CLOUDFRONT_CERTIFICATE_ARN: TBD
GDAL_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-osgeo-gdal
TIPPECANOE_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-tippecanoe
prd:
DEPLOYMENT_BUCKET_PREFIX: ${self:custom.namespaceShort}
STACK_NAME_PREFIX: "${self:custom.namespaceShort}-"
DATA_BUCKET: ${self:custom.environment.DEPLOYMENT_BUCKET_PREFIX}-${self:provider.stage}-${self:service}-data
SHOULD_CREATE_SSL_CERTIFICATE: false
HOSTED_ZONE_ID_DOMAIN: TBD
HOSTED_ZONE_SUBDOMAIN: ${self:service}
HOSTED_ZONE_DOMAIN: TBD
CLOUDFRONT_CERTIFICATE_ARN: TBD
GDAL_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-osgeo-gdal
TIPPECANOE_CONTAINER_DEFINITION_NAME: ${self:provider.stage}-${self:service}-tippecanoe

View file

@@ -1,71 +0,0 @@
DetectChangesForWorker:
handler: functions/detect-changes-for-worker/index.handler
name: ${self:provider.stage}-DetectChangesForWorker
description: Scans an S3 bucket (with prefix) for items that have changes recently and sends them to ECS Tasks for processing
runtime: nodejs12.x
memorySize: 512
timeout: 900
environment:
REGION: ${self:provider.region}
STAGE: ${self:provider.stage}
ECS_CLUSTER: !Ref ECSCluster
VPC_SUBNET_ID:
Fn::ImportValue: ${self:provider.stage}-PrivateSubnetOne
GDAL_TASK_DEFINITION: ${self:custom.environment.GDAL_TASK_DEFINITION_NAME}
GDAL_CONTAINER_DEFINITION: ${self:custom.environment.GDAL_CONTAINER_DEFINITION_NAME}
TIPPECANOE_TASK_DEFINITION: ${self:custom.environment.TIPPECANOE_TASK_DEFINITION_NAME}
TIPPECANOE_CONTAINER_DEFINITION: ${self:custom.environment.TIPPECANOE_CONTAINER_DEFINITION_NAME}
# The ECS Tasks can be kicked off by invoking the lambda on a schedule. This can provide the
# ability to do nightly refreshes of the data.
# events:
# - schedule:
# rate: cron(*/2 * * * ? *) # Fire every 2 minutes
# input:
# action: "gdal"
# command:
# - "ogrinfo"
# - "-al"
# - "-so"
# - "-ro"
# - "/vsizip//vsicurl/https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/census/tabblock2010_01_pophu.zip"
# - schedule:
# rate: cron(0 5 * * ? *) # Scan for updated data at Midnight Eastern Time
# input:
# action: enrichment
# sourceBucketName: !Ref DataBucket
# sourceBucketPrefix: usds/custom.csv
# age: 86400 # Seconds
# censusBucketName: j40-sit-justice40-data-harvester-data
# censusBucketPrefix: census/tabblock2010_01_pophu.zip
# pre:
# - Fn::Join: ['', ["wget https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/usds/$", "{source.Key} -O /tmp/custom.csv"]]
# command:
# - "-f"
# - "GeoJSON"
# - "-sql"
# - Fn::Join: ['', ["SELECT * FROM $", "{census.Key:base} LEFT JOIN '/tmp/custom.csv'.custom ON $", "{census.Key:base}.BLOCKID10 = custom.BLOCKID10"]]
# - Fn::Join: ['', ["/vsis3/j40-sit-justice40-data-harvester-data/joined/$", "{source.Key:base}-$", "{census.Key:base}.json"]]
# - Fn::Join: ['', ["/vsizip//vsicurl/https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/census/$", "{census.Key}"]]
# - schedule:
# rate: cron(0 7 * * ? *) # Run two hours after the generating any GeoJSON
# input:
# action: tippecanoe
# pre:
# - "curl https://gp-sit-tileservice-tile-cache.s3.amazonaws.com/usds/usa.csv -o /tmp/usa.csv"
# - "curl https://gp-sit-tileservice-tile-cache.s3.amazonaws.com/usds/tristate.mbtiles -o /tmp/tristate.mbtiles"
# post:
# - "aws s3 cp /tmp/tl_2010_bg_with_data.mbtiles s3://j40-sit-justice40-data-harvester-data/output/tl_2010_bg_with_data.mbtiles"
# - "tile-join --force -pk -pC -n tl_2010_bg -e /tmp/tiles /tmp/tl_2010_bg_with_data.mbtiles"
# - "aws s3 sync /tmp/tiles s3://j40-sit-justice40-data-harvester-data/output/tiles"
# command:
# - "tile-join"
# - "--force"
# - "-pk"
# - "-n"
# - "tl_2010_bg"
# - "-o"
# - "/tmp/tl_2010_bg_with_data.mbtiles"
# - "-c"
# - "/tmp/usa.csv"
# - "/tmp/tristate.mbtiles"

View file

@@ -1,13 +0,0 @@
module.exports = {
"env": {
"node": true,
"commonjs": true,
"es2020": true
},
"extends": "eslint:recommended",
"parserOptions": {
"ecmaVersion": 11
},
"rules": {
}
};

View file

@@ -1,187 +0,0 @@
/**
* Load an ECS Task Definition template and apply variable substitution
*/
async function createECSTaskDefinition(options, templateName, taskVars) {
const { fs, path } = options.deps;
const { util } = options.deps.local;
// Load the task template
const templatePath = path.join(__dirname, 'taskDefinitions', `${templateName}.json`);
const rawTaskTemplate = await fs.promises.readFile(templatePath, 'utf8');
// Perform variable substitution
const taskTemplate = util.applyVariableSubstitution(options, taskVars, rawTaskTemplate);
// Parse into a JSON object and return
return JSON.parse(taskTemplate);
}
/**
* Takes the event parameters and performs some variable substitution for the
* SQL query based on the actual S3 items being processed.
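 *
 * For example (illustrative), an S3 record for `census/tabblock2010_01_pophu.zip`
 * yields variables such as `${s3.Key}` (and derived forms like `${s3.Key:base}`),
 * which are then substituted into `event.sql`.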
*/
function createECSTaskVariablesFromS3Record(options, record) {
const { event } = options;
const { REGION } = options.env;
const { util } = options.deps;
// Create substitution variables from the S3 record
const vars = util.createSubstitutionVariablesFromS3Record(options, record, 's3');
// Apply them to the SQL clause
const sql = util.applyVariableSubstitution(options, vars, event.sql);
// Return the modified event record
return {
...event,
REGION,
sql
};
}
/**
* Small utility function to look at a bash command line element and decide if it needs to
* be quoted and/or any characters escaped.
*
* Currently, it just takes care of double-quotes and does not do full nested escapes.
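*
* For example (illustrative): quoteAndEscape('a b') === '"a b"', while
* quoteAndEscape('ab') === 'ab'.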
*/
function quoteAndEscape(s) {
// Throw an exception if there are double-quotes in the command itself; handling them
// would require nested escaping (see https://stackoverflow.com/a/42341860/332406),
// which we don't attempt for now.
if (s.includes('"')) {
throw new Error(`Double-quotes are not allowed in the container arguments`);
}
// If there are any spaces in the string, wrap it in double-quotes
if (s.includes(' ')) {
return `"${s}"`;
}
return s;
}
/**
* Take an array of command arguments and wrap it with lists of pre- and
* post-commands to run in the image. This is primarily used to move files in
* and out of the container ephemeral storage.
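*
* For example (illustrative):
*   wrapContainerCommand(options, ['wget a -O /tmp/x'], ['ls', '-l'], null)
*   // => ['/bin/sh', '-c', 'wget a -O /tmp/x && ls -l']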
*/
function wrapContainerCommand(options, pre, command, post) {
// We will run all of the commands as a chained bash command, so merge everything
// together using '&&' chaining.
//
// We expect the pre/post arrays to be full commands, while the command array is a list
// of individual pieces of a single command.
if (!pre && !post) {
return command;
}
const allCommands = [];
// Pre-commands come first
allCommands.push(...(pre || []));
// Turn the primary array of command line arguments into a single command line string. Be sure to
// quote/escape elements with spaces and double-quotes
allCommands.push(command.map(c => quoteAndEscape(c)).join(' '));
// And add in the post-commands last
allCommands.push(...(post || []));
// Return a new array of commands with everything chained using '&&' so that execution will terminate
// as soon as any command in the chain fails.
return ['/bin/sh', '-c', allCommands.join(' && ')];
}
/**
* Returns the appropriate ECS Task Definition name based on the current action
*/
function getTaskDefinitionName(options, event) {
const { GDAL_TASK_DEFINITION, TIPPECANOE_TASK_DEFINITION } = options.env;
const { action } = event;
switch (action) {
case 'gdal':
case 'ogr2ogr':
case 'enrichment':
return GDAL_TASK_DEFINITION;
case 'tippecanoe':
return TIPPECANOE_TASK_DEFINITION;
}
throw new Error(`No Fargate Task Definition defined for ${action}`);
}
/**
* Returns the appropriate ECS Container Definition name based on the current action
*/
function getFargateContainerDefinitionName(options, event) {
const { GDAL_CONTAINER_DEFINITION, TIPPECANOE_CONTAINER_DEFINITION } = options.env;
const { action } = event;
switch (action) {
case 'gdal':
case 'ogr2ogr':
case 'enrichment':
return GDAL_CONTAINER_DEFINITION;
case 'tippecanoe':
return TIPPECANOE_CONTAINER_DEFINITION;
}
throw new Error(`No Fargate Container Definition Name defined for ${action}`);
}
/**
* Executes a (known) container in Fargate with the provided command line parameters.
*/
async function executeRawCommand(options, event) {
const { ecs, logger } = options.deps;
const { env } = options;
const { ECS_CLUSTER, VPC_SUBNET_ID } = env;
// If there are pre- or post- commands defined, wrap up the primary command
const containerCommand = wrapContainerCommand(options, event.pre, event.command, event.post);
// Get the name of the container that we are using
const containerDefinitionName = getFargateContainerDefinitionName(options, event);
// Create the full Task parameter object and execute
const params = {
taskDefinition: getTaskDefinitionName(options, event),
cluster: ECS_CLUSTER,
launchType: 'FARGATE',
count: 1,
networkConfiguration: { // Must be specified for tasks with `awsvpc` networking, which FARGATE launch types require
awsvpcConfiguration: {
subnets: [
VPC_SUBNET_ID
],
assignPublicIp: 'DISABLED',
securityGroups: []
}
},
overrides: {
containerOverrides: [
{
name: containerDefinitionName,
command: containerCommand
}
]
}
};
logger.info(`Executing ECS Task...`, JSON.stringify(params, null, 2));
return await ecs.runTask(params).promise();
}
module.exports = {
createECSTaskDefinition,
createECSTaskVariablesFromS3Record,
executeRawCommand,
getFargateContainerDefinitionName,
getTaskDefinitionName,
wrapContainerCommand
};

View file

@@ -1,18 +0,0 @@
{
"action": "enrichment",
"sourceBucketName": "j40-sit-justice40-data-harvester-data",
"sourceBucketPrefix": "usds/custom.csv",
"age": 86400,
"censusBucketName": "j40-sit-justice40-data-harvester-data",
"censusBucketPrefix": "census/tabblock2010_01_pophu.zip",
"pre": [
"wget https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/usds/${source.Key} -O /tmp/custom.csv"
],
"command": [
"--debug", "ON",
"-f", "GeoJSON",
"-sql", "SELECT * FROM ${census.Key:base} LEFT JOIN '/tmp/custom.csv'.custom ON ${census.Key:base}.BLOCKID10 = custom.BLOCKID10",
"/vsis3/j40-sit-justice40-data-harvester-data/joined/${source.Key:base}-${census.Key:base}.json",
"/vsizip//vsicurl/https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/census/${census.Key}"
]
}

View file

@@ -1,10 +0,0 @@
{
"action": "gdal",
"command": [
"ogrinfo",
"-al",
"-so",
"-ro",
"/vsizip//vsicurl/https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/census/tabblock2010_01_pophu.zip"
]
}

View file

@@ -1,10 +0,0 @@
{
"action": "ogr2ogr",
"command": [
"--debug", "ON",
"-f",
"GeoJSON",
"/vsis3/j40-sit-justice40-data-harvester-data/sources/tabblock2010_01_pophu.json",
"/vsizip//vsicurl/https://j40-sit-justice40-data-harvester-data.s3.amazonaws.com/census/tabblock2010_01_pophu.zip"
]
}

View file

@@ -1,24 +0,0 @@
{
"action": "tippecanoe",
"pre": [
"curl https://gp-sit-tileservice-tile-cache.s3.amazonaws.com/usds/usa.csv -o /tmp/usa.csv",
"curl https://gp-sit-tileservice-tile-cache.s3.amazonaws.com/usds/tristate.mbtiles -o /tmp/tristate.mbtiles"
],
"post": [
"aws s3 cp /tmp/tl_2010_bg_with_data.mbtiles s3://j40-sit-justice40-data-harvester-data/output/tl_2010_bg_with_data.mbtiles",
"tile-join --force -pk -pC -n tl_2010_bg -e /tmp/tiles /tmp/tl_2010_bg_with_data.mbtiles",
"aws s3 sync /tmp/tiles s3://j40-sit-justice40-data-harvester-data/output/tiles"
],
"command": [
"tile-join",
"--force",
"-pk",
"-n",
"tl_2010_bg",
"-o",
"/tmp/tl_2010_bg_with_data.mbtiles",
"-c",
"/tmp/usa.csv",
"/tmp/tristate.mbtiles"
]
}

View file

@@ -1,11 +0,0 @@
/**
* Create an appropriate GDAL path to an S3 object
*
function buildDestinationVSIS3Path(_options, name) {
return `/vsis3/${bucket}/${key}`;
}
*/
module.exports = {
// buildDestinationVSIS3Path
}

View file

@@ -1,130 +0,0 @@
// Standard modules
const fs = require('fs');
const path = require('path');
const { DateTime } = require('luxon');
const logger = console;
// AWS APIs
const AWS = require('aws-sdk');
// Local modules
const util = require('./util');
const gdal = require('./gdal');
const s3 = require('./s3');
const ecs = require('./ecs');
async function handler(event) {
// Build the options for the lambda
const options = initialize(event);
// Determine what action to take
switch (event.action) {
// Execute a raw command against the gdal container
case 'gdal':
return await ecs.executeRawCommand(options, event);
// Assume that we're running ogr2ogr
case 'ogr2ogr':
return await ecs.executeRawCommand(options, {
...event,
command: ['ogr2ogr', ...event.command]
});
case 'tippecanoe':
return await ecs.executeRawCommand(options, event);
// Combine USDS data with external data sources
case 'enrichment':
return await enrichDataWithUSDSAttributes(options, event);
default:
logger.warn(`Unknown action ${event.action}. Exiting`);
break;
}
}
async function enrichDataWithUSDSAttributes(options, event) {
const { logger } = options.deps;
const { util, ecs, s3 } = options.deps.local;
// Use the event.age to calculate the cutoff for any input files
const cutoff = util.getTimestampCutoff(options);
logger.info(`Cutoff time of ${cutoff}`);
// Scan the source S3 bucket for items that need to be processed
const { sourceBucketName, sourceBucketPrefix } = event;
const sourceS3Records = await s3.fetchUpdatedS3Objects(options, sourceBucketName, sourceBucketPrefix, cutoff);
// If there are no input records, exit early
if (sourceS3Records.length === 0) {
logger.info(`There are no objects in s3://${sourceBucketName}/${sourceBucketPrefix} that have been modified after the cutoff date`);
return;
}
// Scan for the census records
const { censusBucketName, censusBucketPrefix } = event;
const censusS3Records = await s3.fetchS3Objects(options, censusBucketName, censusBucketPrefix);
// If there are no census datasets, exit early
if (censusS3Records.length === 0) {
logger.info(`There are no objects in s3://${censusBucketName}/${censusBucketPrefix}`);
return;
}
// Create a set of substitution variables for each S3 record that will be applied to the
// action template
const censusVariables = censusS3Records.map(r => util.createSubstitutionVariablesFromS3Record(options, r, 'census'));
const sourceVariables = sourceS3Records.map(r => util.createSubstitutionVariablesFromS3Record(options, r, 'source'));
// Kick off an ECS task for each (source, census) pair.
for ( const census of censusVariables ) {
for ( const source of sourceVariables) {
// Merge the variables together
const vars = { ...census, ...source };
// Let the logs know what's happening
logger.info(`Enriching ${vars['census.Key']} with ${vars['source.Key']}...`);
// Apply the substitutions to the pre, post, and command arrays
const pre = util.applyVariableSubstitutionToArray(options, vars, event.pre);
const post = util.applyVariableSubstitutionToArray(options, vars, event.post);
const command = util.applyVariableSubstitutionToArray(options, vars, event.command);
await ecs.executeRawCommand(options, {
...event,
pre,
command: ['ogr2ogr', ...command],
post
});
}
}
}
/**
* Wrap all dependencies in an object in order to inject as appropriate.
*/
function initialize(event) {
logger.debug('event:', JSON.stringify(event, null, 2));
return {
deps: {
DateTime,
fs,
logger,
path,
s3: new AWS.S3(),
ecs: new AWS.ECS(),
local: {
ecs,
gdal,
s3,
util
}
},
env: process.env,
event
};
}
module.exports = {
handler
};

View file

@@ -1,968 +0,0 @@
{
"name": "detect-changes-for-worker",
"version": "0.0.1",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"@babel/code-frame": {
"version": "7.12.11",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz",
"integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==",
"dev": true,
"requires": {
"@babel/highlight": "^7.10.4"
}
},
"@babel/helper-validator-identifier": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz",
"integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==",
"dev": true
},
"@babel/highlight": {
"version": "7.14.5",
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz",
"integrity": "sha512-qf9u2WFWVV0MppaL877j2dBtQIDgmidgjGk5VIMw3OadXvYaXn66U1BFlH2t4+t3i+8PhedppRv+i40ABzd+gg==",
"dev": true,
"requires": {
"@babel/helper-validator-identifier": "^7.14.5",
"chalk": "^2.0.0",
"js-tokens": "^4.0.0"
},
"dependencies": {
"chalk": {
"version": "2.4.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
"integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
"dev": true,
"requires": {
"ansi-styles": "^3.2.1",
"escape-string-regexp": "^1.0.5",
"supports-color": "^5.3.0"
}
},
"escape-string-regexp": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
"integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
"dev": true
}
}
},
"@eslint/eslintrc": {
"version": "0.4.2",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz",
"integrity": "sha512-8nmGq/4ycLpIwzvhI4tNDmQztZ8sp+hI7cyG8i1nQDhkAbRzHpXPidRAHlNvCZQpJTKw5ItIpMw9RSToGF00mg==",
"dev": true,
"requires": {
"ajv": "^6.12.4",
"debug": "^4.1.1",
"espree": "^7.3.0",
"globals": "^13.9.0",
"ignore": "^4.0.6",
"import-fresh": "^3.2.1",
"js-yaml": "^3.13.1",
"minimatch": "^3.0.4",
"strip-json-comments": "^3.1.1"
}
},
"acorn": {
"version": "7.4.1",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
"integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
"dev": true
},
"acorn-jsx": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz",
"integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==",
"dev": true
},
"ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
"integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
"dev": true,
"requires": {
"fast-deep-equal": "^3.1.1",
"fast-json-stable-stringify": "^2.0.0",
"json-schema-traverse": "^0.4.1",
"uri-js": "^4.2.2"
}
},
"ansi-colors": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz",
"integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==",
"dev": true
},
"ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true
},
"ansi-styles": {
"version": "3.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
"dev": true,
"requires": {
"color-convert": "^1.9.0"
}
},
"argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dev": true,
"requires": {
"sprintf-js": "~1.0.2"
}
},
"astral-regex": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz",
"integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==",
"dev": true
},
"balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true
},
"brace-expansion": {
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"dev": true,
"requires": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"callsites": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
"integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
"dev": true
},
"chalk": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz",
"integrity": "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==",
"dev": true,
"requires": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
},
"has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true
},
"supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"requires": {
"has-flag": "^4.0.0"
}
}
}
},
"color-convert": {
"version": "1.9.3",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
"integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
"dev": true,
"requires": {
"color-name": "1.1.3"
}
},
"color-name": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
"dev": true
},
"concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
"dev": true
},
"cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
"integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
"dev": true,
"requires": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
}
},
"debug": {
"version": "4.3.1",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
"integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
"dev": true,
"requires": {
"ms": "2.1.2"
}
},
"deep-is": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz",
"integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=",
"dev": true
},
"doctrine": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
"integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
"dev": true,
"requires": {
"esutils": "^2.0.2"
}
},
"emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
"dev": true
},
"enquirer": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz",
"integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==",
"dev": true,
"requires": {
"ansi-colors": "^4.1.1"
}
},
"escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"dev": true
},
"eslint": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-7.29.0.tgz",
"integrity": "sha512-82G/JToB9qIy/ArBzIWG9xvvwL3R86AlCjtGw+A29OMZDqhTybz/MByORSukGxeI+YPCR4coYyITKk8BFH9nDA==",
"dev": true,
"requires": {
"@babel/code-frame": "7.12.11",
"@eslint/eslintrc": "^0.4.2",
"ajv": "^6.10.0",
"chalk": "^4.0.0",
"cross-spawn": "^7.0.2",
"debug": "^4.0.1",
"doctrine": "^3.0.0",
"enquirer": "^2.3.5",
"escape-string-regexp": "^4.0.0",
"eslint-scope": "^5.1.1",
"eslint-utils": "^2.1.0",
"eslint-visitor-keys": "^2.0.0",
"espree": "^7.3.1",
"esquery": "^1.4.0",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
"file-entry-cache": "^6.0.1",
"functional-red-black-tree": "^1.0.1",
"glob-parent": "^5.1.2",
"globals": "^13.6.0",
"ignore": "^4.0.6",
"import-fresh": "^3.0.0",
"imurmurhash": "^0.1.4",
"is-glob": "^4.0.0",
"js-yaml": "^3.13.1",
"json-stable-stringify-without-jsonify": "^1.0.1",
"levn": "^0.4.1",
"lodash.merge": "^4.6.2",
"minimatch": "^3.0.4",
"natural-compare": "^1.4.0",
"optionator": "^0.9.1",
"progress": "^2.0.0",
"regexpp": "^3.1.0",
"semver": "^7.2.1",
"strip-ansi": "^6.0.0",
"strip-json-comments": "^3.1.0",
"table": "^6.0.9",
"text-table": "^0.2.0",
"v8-compile-cache": "^2.0.3"
}
},
"eslint-scope": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
"integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
"dev": true,
"requires": {
"esrecurse": "^4.3.0",
"estraverse": "^4.1.1"
}
},
"eslint-utils": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
"integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
"dev": true,
"requires": {
"eslint-visitor-keys": "^1.1.0"
},
"dependencies": {
"eslint-visitor-keys": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
"integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
"dev": true
}
}
},
"eslint-visitor-keys": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz",
"integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==",
"dev": true
},
"espree": {
"version": "7.3.1",
"resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz",
"integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==",
"dev": true,
"requires": {
"acorn": "^7.4.0",
"acorn-jsx": "^5.3.1",
"eslint-visitor-keys": "^1.3.0"
},
"dependencies": {
"eslint-visitor-keys": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
"integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
"dev": true
}
}
},
"esprima": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
"integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
"dev": true
},
"esquery": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz",
"integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==",
"dev": true,
"requires": {
"estraverse": "^5.1.0"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
"dev": true
}
}
},
"esrecurse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
"integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
"dev": true,
"requires": {
"estraverse": "^5.2.0"
},
"dependencies": {
"estraverse": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
"integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
"dev": true
}
}
},
"estraverse": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
"integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
"dev": true
},
"esutils": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
"integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
"dev": true
},
"fast-deep-equal": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true
},
"fast-json-stable-stringify": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
"integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
"dev": true
},
"fast-levenshtein": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
"integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=",
"dev": true
},
"file-entry-cache": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
"integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
"dev": true,
"requires": {
"flat-cache": "^3.0.4"
}
},
"flat-cache": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
"integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
"dev": true,
"requires": {
"flatted": "^3.1.0",
"rimraf": "^3.0.2"
}
},
"flatted": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.1.1.tgz",
"integrity": "sha512-zAoAQiudy+r5SvnSw3KJy5os/oRJYHzrzja/tBDqrZtNhUw8bt6y8OBzMWcjWr+8liV8Eb6yOhw8WZ7VFZ5ZzA==",
"dev": true
},
"fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
"dev": true
},
"functional-red-black-tree": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
"integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=",
"dev": true
},
"glob": {
"version": "7.1.7",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
"integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
"dev": true,
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.0.4",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"requires": {
"is-glob": "^4.0.1"
}
},
"globals": {
"version": "13.9.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.9.0.tgz",
"integrity": "sha512-74/FduwI/JaIrr1H8e71UbDE+5x7pIPs1C2rrwC52SszOo043CsWOZEMW7o2Y58xwm9b+0RBKDxY5n2sUpEFxA==",
"dev": true,
"requires": {
"type-fest": "^0.20.2"
}
},
"has-flag": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
"integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
"dev": true
},
"ignore": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
"integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
"dev": true
},
"import-fresh": {
"version": "3.3.0",
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
"integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
"dev": true,
"requires": {
"parent-module": "^1.0.0",
"resolve-from": "^4.0.0"
}
},
"imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
"dev": true
},
"inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
"dev": true,
"requires": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true
},
"is-extglob": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
"integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
"dev": true
},
"is-fullwidth-code-point": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
"integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
"dev": true
},
"is-glob": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
"integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
"dev": true,
"requires": {
"is-extglob": "^2.1.1"
}
},
"isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
"dev": true
},
"js-tokens": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
"dev": true
},
"js-yaml": {
"version": "3.14.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
"integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
"dev": true,
"requires": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
}
},
"json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
"integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
"dev": true
},
"json-stable-stringify-without-jsonify": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
"integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
"dev": true
},
"levn": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
"integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
"dev": true,
"requires": {
"prelude-ls": "^1.2.1",
"type-check": "~0.4.0"
}
},
"lodash.clonedeep": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz",
"integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8=",
"dev": true
},
"lodash.merge": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
"integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
"dev": true
},
"lodash.truncate": {
"version": "4.4.2",
"resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz",
"integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=",
"dev": true
},
"lru-cache": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
"requires": {
"yallist": "^4.0.0"
}
},
"luxon": {
"version": "1.27.0",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-1.27.0.tgz",
"integrity": "sha512-VKsFsPggTA0DvnxtJdiExAucKdAnwbCCNlMM5ENvHlxubqWd0xhZcdb4XgZ7QFNhaRhilXCFxHuoObP5BNA4PA=="
},
"minimatch": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
"integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
"dev": true,
"requires": {
"brace-expansion": "^1.1.7"
}
},
"ms": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
"integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
"dev": true
},
"natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
"integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
"dev": true
},
"once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
"dev": true,
"requires": {
"wrappy": "1"
}
},
"optionator": {
"version": "0.9.1",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz",
"integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==",
"dev": true,
"requires": {
"deep-is": "^0.1.3",
"fast-levenshtein": "^2.0.6",
"levn": "^0.4.1",
"prelude-ls": "^1.2.1",
"type-check": "^0.4.0",
"word-wrap": "^1.2.3"
}
},
"parent-module": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
"integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
"dev": true,
"requires": {
"callsites": "^3.0.0"
}
},
"path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
"dev": true
},
"path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true
},
"prelude-ls": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
"integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
"dev": true
},
"progress": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
"integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
"dev": true
},
"punycode": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
"integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
"dev": true
},
"regexpp": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
"integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==",
"dev": true
},
"require-from-string": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true
},
"resolve-from": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
"integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
"dev": true
},
"rimraf": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"requires": {
"glob": "^7.1.3"
}
},
"semver": {
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"dev": true,
"requires": {
"lru-cache": "^6.0.0"
}
},
"shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"requires": {
"shebang-regex": "^3.0.0"
}
},
"shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true
},
"slice-ansi": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz",
"integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==",
"dev": true,
"requires": {
"ansi-styles": "^4.0.0",
"astral-regex": "^2.0.0",
"is-fullwidth-code-point": "^3.0.0"
},
"dependencies": {
"ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"dev": true,
"requires": {
"color-convert": "^2.0.1"
}
},
"color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"dev": true,
"requires": {
"color-name": "~1.1.4"
}
},
"color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"dev": true
}
}
},
"sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
"integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
"dev": true
},
"string-width": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.0"
}
},
"strip-ansi": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
"integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
"dev": true,
"requires": {
"ansi-regex": "^5.0.0"
}
},
"strip-json-comments": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
"integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
"dev": true
},
"supports-color": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
"integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
"dev": true,
"requires": {
"has-flag": "^3.0.0"
}
},
"table": {
"version": "6.7.1",
"resolved": "https://registry.npmjs.org/table/-/table-6.7.1.tgz",
"integrity": "sha512-ZGum47Yi6KOOFDE8m223td53ath2enHcYLgOCjGr5ngu8bdIARQk6mN/wRMv4yMRcHnCSnHbCEha4sobQx5yWg==",
"dev": true,
"requires": {
"ajv": "^8.0.1",
"lodash.clonedeep": "^4.5.0",
"lodash.truncate": "^4.4.2",
"slice-ansi": "^4.0.0",
"string-width": "^4.2.0",
"strip-ansi": "^6.0.0"
},
"dependencies": {
"ajv": {
"version": "8.6.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.6.0.tgz",
"integrity": "sha512-cnUG4NSBiM4YFBxgZIj/In3/6KX+rQ2l2YPRVcvAMQGWEPKuXoPIhxzwqh31jA3IPbI4qEOp/5ILI4ynioXsGQ==",
"dev": true,
"requires": {
"fast-deep-equal": "^3.1.1",
"json-schema-traverse": "^1.0.0",
"require-from-string": "^2.0.2",
"uri-js": "^4.2.2"
}
},
"json-schema-traverse": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true
}
}
},
"text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
"integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
"dev": true
},
"type-check": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
"integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
"dev": true,
"requires": {
"prelude-ls": "^1.2.1"
}
},
"type-fest": {
"version": "0.20.2",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
"integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
"dev": true
},
"uri-js": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
"integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
"dev": true,
"requires": {
"punycode": "^2.1.0"
}
},
"v8-compile-cache": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz",
"integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==",
"dev": true
},
"which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"requires": {
"isexe": "^2.0.0"
}
},
"word-wrap": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
"integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
"dev": true
},
"wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
"dev": true
},
"yallist": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
"dev": true
}
}
}

View file

@@ -1,18 +0,0 @@
{
"name": "detect-changes-for-worker",
"version": "0.0.1",
"description": "",
"scripts": {
"test:gdal": "cat ./events/gdal.json | docker run --rm -v ${PWD}:/var/task --env-file ./test.env -i -e DOCKER_LAMBDA_USE_STDIN=1 lambci/lambda:nodejs12.x index.handler",
"test:tippecanoe": "cat ./events/tippecanoe.json | docker run --rm -v ${PWD}:/var/task --env-file ./test.env -i -e DOCKER_LAMBDA_USE_STDIN=1 lambci/lambda:nodejs12.x index.handler",
"test:enrichment": "cat ./events/enrichment.json | docker run --rm -v ${PWD}:/var/task --env-file ./test.env -i -e DOCKER_LAMBDA_USE_STDIN=1 lambci/lambda:nodejs12.x index.handler"
},
"author": "Xentity",
"license": "ISC",
"dependencies": {
"luxon": "^1.27.0"
},
"devDependencies": {
"eslint": "^7.29.0"
}
}

View file

@@ -1,72 +0,0 @@
/**
* Helper function to determine if we should interpret an S3 object as
* a "simple" file or a folder.
*/
function isSimpleObject(c, prefix) {
// If the object ends with a separator character, interpret that as a folder
if (c.Key.endsWith('/')) {
return false;
}
// If the object is more deeply nested than the prefix, then ignore, e.g.
// prefix = /foo/bar = two separators
// c.Key = /foo/bar/baz = three separators [skip]
// c.Key = /foo/bar.txt = two separators [pass]
// This doesn't give the *exact* count, but all we really care about is that
// the value is the same for the prefix and the S3 Key.
const separatorCount = c.Key.split('/').length;
const prefixSeparatorCount = prefix.split('/').length;
return separatorCount === prefixSeparatorCount;
}
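// Illustrative behavior (examples added for clarity; not in the original source),
// assuming prefix = 'foo/bar':
//   isSimpleObject({ Key: 'foo/bar.txt' }, 'foo/bar')     // true:  same separator count
//   isSimpleObject({ Key: 'foo/bar/baz.txt' }, 'foo/bar') // false: nested one level deeper
//   isSimpleObject({ Key: 'foo/bar/' }, 'foo/bar')        // false: trailing separator marks a folder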
/**
* Return all of the simple S3 objects from a prefix that have a LastModified
* date after a cutoff date.
*
* This returns objects that have recently changed for re-processing.
*/
function fetchUpdatedS3Objects (options, bucket, prefix, cutoff) {
// Define a filter function that only looks at objects on a single level of the
// bucket and removes any objects with a LastModified timestamp prior to the cutoff
const threshold = cutoff.toMillis();
const filterFunc = (c) => isSimpleObject(c, prefix) && (threshold < c.LastModified.getTime());
return fetchS3Objects(options, bucket, prefix, filterFunc);
}
/**
* Basic utility function to return S3 objects from a bucket that match a given prefix. An
* optional filtering function can be passed in.
*/
async function fetchS3Objects (options, bucket, prefix, filterFunc = () => true) {
const { s3 } = options.deps;
const objects = [];
// Limit the results to items in this bucket with a specific prefix
const params = {
Bucket: bucket,
Prefix: prefix
};
do {
// Fetch the next page of objects
const response = await s3.listObjectsV2(params).promise();
// Optionally, filter out objects
const contents = response.Contents.filter(filterFunc);
objects.push(...contents);
params.ContinuationToken = response.IsTruncated
? response.NextContinuationToken
: null;
} while (params.ContinuationToken);
return objects;
}
module.exports = {
isSimpleObject,
fetchS3Objects,
fetchUpdatedS3Objects
};
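A minimal usage sketch (added for illustration; not part of this commit). It assumes the AWS SDK v2 `S3` client is injected through `options.deps`, matching how the module reads it, and the module path and bucket name below are hypothetical:

const AWS = require('aws-sdk');
const { DateTime } = require('luxon');
const { fetchUpdatedS3Objects } = require('./s3-utils'); // hypothetical path to the module above

async function listRecentCsvObjects() {
  const options = { deps: { s3: new AWS.S3() } };
  // Only top-level objects under "csv/" modified within the last hour
  const cutoff = DateTime.now().minus({ hours: 1 });
  const objects = await fetchUpdatedS3Objects(options, 'example-data-bucket', 'csv/', cutoff);
  return objects.map((o) => o.Key);
}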

View file

@@ -1,25 +0,0 @@
{
"containerDefinitions": [
{
"name": "ECSUSDSJustice40Worker",
"image": "osgeo/gdal:alpine-small-latest",
"cpu": 1024,
"environment": [
{
"name": "AWS_REGION",
"value": "${REGION}"
}
],
"command": [
"ogr2ogr",
"-f", "GeoJSON",
"-sql", "${sql}",
"${output}",
"${input}"
],
"memory": 1024,
"essential": true
}
],
"family": ""
}

View file

@@ -1,21 +0,0 @@
{
"containerDefinitions": [
{
"name": "ECSUSDSJustice40Worker",
"image": "osgeo/gdal:alpine-small-latest",
"cpu": 1024,
"environment": [
{
"name": "AWS_REGION",
"value": "${REGION}"
}
],
"command": [
"ogr2ogr"
],
"memory": 1024,
"essential": true
}
],
"family": ""
}

View file

@@ -1,19 +0,0 @@
REGION=us-east-1
STAGE=sit
# ECS Cluster name that will run the containers
ECS_CLUSTER=j40-sit-justice40-data-harvester-ECSCluster-ktXGGU9zjwkb
# VPC Private Subnet that has a NAT Gateway
VPC_SUBNET_ID=subnet-07e68cb57322f7b1f
# Names of the task and container definitions.
GDAL_TASK_DEFINITION=sit-justice40-data-harvester-gdal
GDAL_CONTAINER_DEFINITION=sit-justice40-data-harvester-osgeo-gdal
TIPPECANOE_TASK_DEFINITION=sit-justice40-data-harvester-tippecanoe
TIPPECANOE_CONTAINER_DEFINITION=sit-justice40-data-harvester-mgiddens-tippecanoe
# AWS Credentials
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_DEFAULT_REGION=us-east-1

View file

@@ -1,70 +0,0 @@
/**
* Create a luxon object representing a cutoff based on the `age`
* passed in from the event.
*
* A null or zero value for age returns the epoch, so every object passes the cutoff.
*/
function getTimestampCutoff(options) {
const { event } = options;
const { DateTime } = options.deps;
if (!event.age) {
return DateTime.fromMillis(0);
}
return DateTime.now().minus({ seconds: event.age });
}
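// For example (illustrative): event.age = 3600 yields a cutoff one hour before now,
// while a missing or zero age yields DateTime.fromMillis(0) (the epoch), so callers
// treat every object as "recently changed".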
/**
* Create a set of substitution variables from an S3 record
*/
function createSubstitutionVariablesFromS3Record(options, record, prefix) {
const { path } = options.deps;
const fullKey = record.Key;
const baseKey = path.basename(fullKey);
const baseKeyExt = path.extname(baseKey);
const baseKeyNoExt = path.basename(baseKey, baseKeyExt);
// Define all of the valid substitution variables
const vars = {};
vars[`${prefix}.Key:full`] = fullKey;
vars[`${prefix}.Key`] = baseKey;
vars[`${prefix}.Key:base`] = baseKeyNoExt;
vars[`${prefix}.Key:ext`] = baseKeyExt;
return vars;
}
/**
* Given a collection of key/value input variables, replace
* occurrences of ${key} in the input with the corresponding
* values
*/
function applyVariableSubstitution(options, vars, input) {
let result = input;
for (const [key, value] of Object.entries(vars)) {
const token = '${' + key + '}';
// Use the split/join method because the tokens contain special characters which
// would confuse the Regular Expression constructor
// @see https://stackoverflow.com/a/17606289/332406
result = result.split(token).join(value);
}
return result;
}
/**
* Generalization of the previous function, applied to an array of inputs.
*/
function applyVariableSubstitutionToArray(options, vars, inputs) {
return (inputs || []).map(input => applyVariableSubstitution(options, vars, input));
}
module.exports = {
applyVariableSubstitution,
applyVariableSubstitutionToArray,
createSubstitutionVariablesFromS3Record,
getTimestampCutoff
};
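A short sketch of how these helpers fit together (illustrative only; the record, module path, and command template below are hypothetical, not taken from this commit):

const path = require('path');
const {
  createSubstitutionVariablesFromS3Record,
  applyVariableSubstitutionToArray,
} = require('./substitution'); // hypothetical path to the module above

const options = { deps: { path } };
const record = { Key: 'csv/usa.csv' };
// Build the variable set keyed by the "source" prefix
const vars = createSubstitutionVariablesFromS3Record(options, record, 'source');
// vars['source.Key:full'] === 'csv/usa.csv', vars['source.Key:base'] === 'usa'
const command = applyVariableSubstitutionToArray(options, vars, [
  'ogr2ogr', '-f', 'GeoJSON',
  '/tmp/${source.Key:base}.geojson',
  '/vsis3/example-bucket/${source.Key:full}',
]);
// => ['ogr2ogr', '-f', 'GeoJSON', '/tmp/usa.geojson', '/vsis3/example-bucket/csv/usa.csv']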

File diff suppressed because it is too large

View file

@@ -1,20 +0,0 @@
{
"name": "infrastructure",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"repository": {
"type": "git",
"url": "none"
},
"author": "",
"license": "UNLICENSED",
"devDependencies": {
"serverless": "^2.48.0",
"serverless-certificate-creator": "^1.5.3",
"serverless-pseudo-parameters": "^2.5.0"
}
}

View file

@@ -1,84 +0,0 @@
Resources:
S3DataBucketPolicyCDN:
Type: AWS::S3::BucketPolicy
Properties:
Bucket:
Ref: DataBucket
PolicyDocument:
Statement:
- Effect: "Allow"
Action:
- "s3:GetObject"
Resource:
Fn::Join:
- ""
- - "arn:aws:s3:::"
- Ref: DataBucket
- "/*"
Principal: "*"
DataBucketCachePolicy:
Type: AWS::CloudFront::CachePolicy
Properties:
CachePolicyConfig:
Name: ${self:provider.stage}-${self:service}-cloudfront-cache-policy
Comment: CloudFront Cache Policy for justice40 data harvester
DefaultTTL: "86400" # one day, only if Origin does _not_ send `Cache-Control` or `Expires` headers
MaxTTL: "31536000" # one year, used to validate when origin sends `Cache-Control` or `Expires` headers
MinTTL: "1" # one second
ParametersInCacheKeyAndForwardedToOrigin:
EnableAcceptEncodingGzip: false
EnableAcceptEncodingBrotli: false
CookiesConfig:
CookieBehavior: none
HeadersConfig:
HeaderBehavior: none
QueryStringsConfig:
QueryStringBehavior: none
DataDistribution:
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Origins:
- Id: DataBucket
DomainName:
# e.g. j40-sit-justice40-data-harvester-data.s3-website-us-east-1.amazonaws.com
Fn::Join:
- ""
- - ${self:custom.namespaceShort}-
- ${self:provider.stage}-
- ${self:service}-
- data
- ".s3-website-"
- Ref: AWS::Region
- ".amazonaws.com"
CustomOriginConfig:
HTTPPort: '80'
HTTPSPort: '443'
OriginProtocolPolicy: http-only
OriginSSLProtocols: [ "TLSv1", "TLSv1.1", "TLSv1.2" ]
OriginCustomHeaders:
- HeaderName: Origin # if the `Origin` header isn't present, S3 won't send CORS headers, this forces CORS to always be included
HeaderValue: geoplatform.gov # this doesn't need to be anything specific, since Allow-Origin: * is our CORS policy, it just has to have a value
Enabled: true
HttpVersion: http2
Comment: CDN for justice40 data bucket
Aliases:
- ${self:custom.environment.HOSTED_ZONE_SUBDOMAIN}.${self:custom.environment.HOSTED_ZONE_DOMAIN}
PriceClass: PriceClass_All
DefaultCacheBehavior:
AllowedMethods: [HEAD, GET, OPTIONS]
CachedMethods: [HEAD, GET]
CachePolicyId:
Ref: DataBucketCachePolicy
MinTTL: '0'
DefaultTTL: '0'
TargetOriginId: DataBucket
ViewerProtocolPolicy: redirect-to-https
CustomErrorResponses: []
ViewerCertificate:
AcmCertificateArn: ${self:custom.environment.CLOUDFRONT_CERTIFICATE_ARN}
SslSupportMethod: sni-only

View file

@@ -1,164 +0,0 @@
Parameters:
ServiceNameOgr2Ogr:
Type: String
Default: ogr2ogr-gdal-3.6
Description: The name of the service
Resources:
ECSCluster:
Type: AWS::ECS::Cluster
Properties:
Tags:
- Key: Stage
Value: ${self:provider.stage}
- Key: Namespace
Value: ${self:custom.namespace}
- Key: Name
Value: ${self:custom.namespaceShort}-${self:provider.stage}-ecs-cluster
# Task execution role allowing access to resources.
ECSTaskExecutionRoleShared:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: [ecs-tasks.amazonaws.com]
Action: ['sts:AssumeRole']
Path: /
Policies:
- PolicyName: AmazonECSTaskExecutionRolePolicy
PolicyDocument:
Statement:
- Effect: Allow
Action:
# Allow the ECS tasks to upload logs to CloudWatch
- 'logs:CreateLogStream'
- 'logs:PutLogEvents'
- 'logs:DescribeLogStreams'
Resource: '*'
ECSTaskRoleShared:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Statement:
- Effect: Allow
Principal:
Service: "ecs-tasks.amazonaws.com"
Action: ['sts:AssumeRole']
Path: /
Policies:
- PolicyName: "${self:provider.stage}-${self:service}-task-policy"
PolicyDocument:
Statement:
- Effect: Allow
Action:
- secretsmanager:GetSecretValue
Resource: "*"
- Effect: Allow
Action:
- kms:Decrypt
Resource: "*"
# EventBridge permissions.
- Effect: Allow
Action:
- events:PutEvents
Resource:
- arn:aws:events:${self:provider.region}:#{AWS::AccountId}:*
# Allow the ECS Tasks to access our specific S3 bucket
# @see https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html for Multi-Part Upload requirement
- Effect: Allow
Action:
- s3:GetBucketAcl
- s3:ListBucket
- s3:ListBucketMultipartUploads
Resource:
- arn:aws:s3:::${self:custom.environment.DATA_BUCKET}
- Effect: Allow
Action:
- s3:PutObject
- s3:PutObjectAcl
- s3:GetObject
- s3:GetObjectAcl
- s3:GetObjectVersion
- s3:GetObjectVersionAcl
- s3:DeleteObject
- s3:DeleteObjectVersion
- s3:AbortMultipartUpload
- s3:ListMultipartUploadParts
Resource:
- arn:aws:s3:::${self:custom.environment.DATA_BUCKET}/*
TaskDefinitionOgr2Ogr:
Type: AWS::ECS::TaskDefinition
Properties:
Family: ${self:custom.environment.GDAL_TASK_DEFINITION_NAME}
Cpu: 1024
Memory: 2048
NetworkMode: awsvpc
RequiresCompatibilities:
- FARGATE
ExecutionRoleArn:
Fn::GetAtt: [ ECSTaskExecutionRoleShared, Arn ]
TaskRoleArn:
Fn::GetAtt: [ ECSTaskRoleShared, Arn ]
ContainerDefinitions:
- Name: ${self:custom.environment.GDAL_CONTAINER_DEFINITION_NAME}
Cpu: 1024
Memory: 2048
Image: osgeo/gdal:alpine-small-latest
Environment:
- Name: REGION
Value: ${self:provider.region}
- Name: STAGE
Value: ${self:provider.stage}
- Name: NODE_ENV
Value: ${self:provider.stage}
- Name: ENV_NAME
Value: ${self:provider.stage}
LogConfiguration:
LogDriver: 'awslogs'
Options:
awslogs-group: ${self:provider.stage}-${self:service}
awslogs-region: ${self:provider.region}
awslogs-stream-prefix: ${self:service}
TaskDefinitionTippecanoe:
Type: AWS::ECS::TaskDefinition
Properties:
Family: ${self:custom.environment.TIPPECANOE_TASK_DEFINITION_NAME}
Cpu: 1024
Memory: 2048
NetworkMode: awsvpc
RequiresCompatibilities:
- FARGATE
ExecutionRoleArn:
Fn::GetAtt: [ ECSTaskExecutionRoleShared, Arn ]
TaskRoleArn:
Fn::GetAtt: [ ECSTaskRoleShared, Arn ]
ContainerDefinitions:
- Name: ${self:custom.environment.TIPPECANOE_CONTAINER_DEFINITION_NAME}
Cpu: 1024
Memory: 2048
Image: mikegiddens/tippecanoe:latest
Environment:
- Name: REGION
Value: ${self:provider.region}
- Name: STAGE
Value: ${self:provider.stage}
- Name: NODE_ENV
Value: ${self:provider.stage}
- Name: ENV_NAME
Value: ${self:provider.stage}
LogConfiguration:
LogDriver: 'awslogs'
Options:
awslogs-group: ${self:provider.stage}-${self:service}
awslogs-region: ${self:provider.region}
awslogs-stream-prefix: ${self:service}

View file

@@ -1,17 +0,0 @@
Resources:
ARecordDataHarvester:
Type: AWS::Route53::RecordSetGroup
Condition: ShouldOnlyCreateResourcesInSIT
DependsOn:
- DataDistribution
Properties:
HostedZoneId: ${self:custom.environment.HOSTED_ZONE_ID_DOMAIN}
RecordSets:
- Name: ${self:custom.environment.HOSTED_ZONE_SUBDOMAIN}.${self:custom.environment.HOSTED_ZONE_DOMAIN}.
Type: A
AliasTarget:
HostedZoneId: Z2FDTNDATAQYW2 # AWS global value https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-route53-aliastarget.html#cfn-route53-aliastarget-hostedzoneid
DNSName:
Fn::GetAtt: [ DataDistribution, DomainName ]

View file

@@ -1,18 +0,0 @@
Resources:
DataBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: ${self:custom.environment.DATA_BUCKET}
AccessControl: PublicRead
CorsConfiguration:
CorsRules:
- AllowedOrigins:
- "*"
AllowedMethods:
- GET
AllowedHeaders:
- Content-Length
WebsiteConfiguration:
IndexDocument: index.html
ErrorDocument: error.html

View file

@@ -1,87 +0,0 @@
service: justice40-data-harvester
configValidationMode: error
frameworkVersion: ">=2.48.0"
provider:
name: aws
runtime: nodejs12.x
stage: ${opt:stage, 'sit'}
region: ${opt:region, 'us-east-1'}
profile: ${self:provider.stage}
lambdaHashingVersion: "20201221"
deploymentBucket:
name: ${self:custom.environment.DEPLOYMENT_BUCKET_PREFIX}-${self:provider.stage}-${self:provider.region}-${self:service}
blockPublicAccess: true
maxPreviousDeploymentArtifacts: 5
stackName: ${self:custom.environment.STACK_NAME_PREFIX}${self:provider.stage}-${self:service}
iam:
role:
statements:
- Effect: "Allow"
# Condition:
# ArnEquals:
# ecs:cluster:
# Fn::GetAtt: [ ECSCluster, Arn ]
Action: "ecs:RunTask"
Resource: "*"
- Effect: "Allow"
# Condition:
# ArnEquals:
# ecs:cluster:
# Fn::GetAtt: [ ECSCluster, Arn ]
Action:
- "iam:ListInstanceProfiles"
- "iam:ListRoles"
- "iam:PassRole"
Resource: "*"
- Effect: Allow
Action:
- "s3:ListBucket"
Resource:
- Fn::Join:
- ""
- - "arn:aws:s3:::"
- Ref: DataBucket
- "/*"
- Effect: Allow
Action:
- "s3:DeleteObject"
- "s3:GetObject"
- "s3:PutObject"
- "s3:PutObjectAcl"
Resource:
- Fn::Join:
- ""
- - "arn:aws:s3:::"
- Ref: DataBucket
plugins:
- serverless-certificate-creator
- serverless-pseudo-parameters
custom:
environment: ${file(./environment.yml):${self:provider.stage}}
namespace: justice40 # Used to tag resources with a "Namespace".
namespaceShort: j40 # Used to prefix stack name, deployment bucket, resource "Name" tags, etc.
customCertificate:
certificateName: ${self:provider.stage}-${self:service}.${self:custom.environment.HOSTED_ZONE_DOMAIN}
hostedZoneIds: ${self:custom.environment.HOSTED_ZONE_ID_DOMAIN}
region: ${self:provider.region}
tags:
Name: ${self:provider.stage}-${self:service}.${self:custom.environment.HOSTED_ZONE_DOMAIN}
Environment: ${self:provider.stage}
rewriteRecords: true
enabled: ${self:custom.environment.SHOULD_CREATE_SSL_CERTIFICATE}
functions: ${file(./functions.yml)}
resources:
- ${file(./conditions.yml)}
- ${file(./resources-s3.yml)}
- ${file(./resources-cloudfront.yml)}
- ${file(./resources-ecs.yml)}
- ${file(./resources-route53.yml)}