Ticket 492: Integrate Area Median Income and Poverty measures into ETL (#660)

* Loading AMI and poverty data
Lucas Merrill Brown 2021-09-13 15:36:35 -05:00 committed by GitHub
commit 7d13be7651
12 changed files with 474 additions and 91 deletions


@@ -39,7 +39,9 @@ DATA_SCORE_TILES_FILE_PATH = DATA_SCORE_TILES_DIR / "usa.csv"
SCORE_DOWNLOADABLE_DIR = DATA_SCORE_DIR / "downloadable"
SCORE_DOWNLOADABLE_CSV_FILE_PATH = SCORE_DOWNLOADABLE_DIR / "usa.csv"
SCORE_DOWNLOADABLE_EXCEL_FILE_PATH = SCORE_DOWNLOADABLE_DIR / "usa.xlsx"
SCORE_DOWNLOADABLE_ZIP_FILE_PATH = (
SCORE_DOWNLOADABLE_DIR / "Screening_Tool_Data.zip"
)
# Column subsets
CENSUS_COUNTIES_COLUMNS = ["USPS", "GEOID", "NAME"]


@@ -31,10 +31,28 @@ class ScoreETL(ExtractTransformLoad):
"Poverty (Less than 200% of federal poverty line)"
)
self.HIGH_SCHOOL_FIELD_NAME = "Percent individuals age 25 or over with less than high school degree"
self.STATE_MEDIAN_INCOME_FIELD_NAME: str = (
"Median household income (State; 2019 inflation-adjusted dollars)"
)
self.MEDIAN_INCOME_FIELD_NAME = (
"Median household income in the past 12 months"
)
self.MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD_NAME = (
"Median household income (% of state median household income)"
)
# Note: these variable names are slightly different (missing the word
# `PERCENT`) from those in the source ETL, to avoid pylint's
# duplicate-code error. - LMB
self.POVERTY_LESS_THAN_100_FPL_FIELD_NAME = (
"Percent of individuals < 100% Federal Poverty Line"
)
self.POVERTY_LESS_THAN_150_FPL_FIELD_NAME = (
"Percent of individuals < 150% Federal Poverty Line"
)
self.POVERTY_LESS_THAN_200_FPL_FIELD_NAME = (
"Percent of individuals < 200% Federal Poverty Line"
)
# There's another aggregation level (a second level of "buckets").
self.AGGREGATION_POLLUTION = "Pollution Burden"
self.AGGREGATION_POPULATION = "Population Characteristics"
@@ -51,6 +69,7 @@ class ScoreETL(ExtractTransformLoad):
self.housing_and_transportation_df: pd.DataFrame
self.hud_housing_df: pd.DataFrame
self.cdc_places_df: pd.DataFrame
self.census_acs_median_incomes_df: pd.DataFrame
def data_sets(self) -> list:
# Define a named tuple that will be used for each data set input.
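# A plausible sketch of that named tuple, inferred from the DataSet(...)
# calls below (the actual definition sits outside this hunk, so treat the
# exact spelling as an assumption):
import collections
DataSet = collections.namedtuple(
"DataSet", ["input_field", "renamed_field", "bucket"]
)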
@@ -112,6 +131,21 @@ class ScoreETL(ExtractTransformLoad):
renamed_field="Physical health not good for >=14 days among adults aged >=18 years",
bucket=None,
),
DataSet(
input_field=self.POVERTY_LESS_THAN_100_FPL_FIELD_NAME,
renamed_field=self.POVERTY_LESS_THAN_100_FPL_FIELD_NAME,
bucket=None,
),
DataSet(
input_field=self.POVERTY_LESS_THAN_150_FPL_FIELD_NAME,
renamed_field=self.POVERTY_LESS_THAN_150_FPL_FIELD_NAME,
bucket=None,
),
DataSet(
input_field=self.POVERTY_LESS_THAN_200_FPL_FIELD_NAME,
renamed_field=self.POVERTY_LESS_THAN_200_FPL_FIELD_NAME,
bucket=None,
),
# The following data sets have buckets, because they're used in Score C
DataSet(
input_field="CANCER",
@@ -211,6 +245,7 @@ class ScoreETL(ExtractTransformLoad):
]
def extract(self) -> None:
logger.info("Loading data sets from disk.")
# EJScreen CSV load
ejscreen_csv = self.DATA_PATH / "dataset" / "ejscreen_2019" / "usa.csv"
self.ejscreen_df = pd.read_csv(
@@ -257,6 +292,19 @@
low_memory=False,
)
# Load census AMI data
census_acs_median_incomes_csv = (
self.DATA_PATH
/ "dataset"
/ "census_acs_median_income_2019"
/ "usa.csv"
)
self.census_acs_median_incomes_df = pd.read_csv(
census_acs_median_incomes_csv,
dtype={self.GEOID_FIELD_NAME: "string"},
low_memory=False,
)
def _join_cbg_dfs(self, census_block_group_dfs: list) -> pd.DataFrame:
logger.info("Joining Census Block Group dataframes")
census_block_group_df = functools.reduce(
@@ -275,7 +323,7 @@
f"One of the input CSVs uses {self.GEOID_FIELD_NAME} with a different length."
)
return census_block_group_df
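# The reduce pattern above folds the list of dataframes into one frame,
# one merge at a time. A minimal sketch of the same idea (the actual
# lambda and merge arguments sit outside this hunk, so the join key and
# the "how" below are assumptions):
example_merged = functools.reduce(
lambda left, right: left.merge(right, on=self.GEOID_FIELD_NAME, how="outer"),
census_block_group_dfs,
)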
def _join_tract_dfs(self, census_tract_dfs: list) -> pd.DataFrame:
logger.info("Joining Census Tract dataframes")
census_tract_df = functools.reduce(
@@ -350,11 +398,10 @@
# Multiply the "Pollution Burden" score and the "Population Characteristics"
# together to produce the cumulative impact score.
df["Score C"] = (
df[self.AGGREGATION_POLLUTION] * df[self.AGGREGATION_POPULATION]
)
return df
def _add_scores_d_and_e(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Adding Scores D and E")
fields_to_use_in_score = [
@@ -434,9 +481,7 @@
)
| (df["Proximity to RMP sites (percentile)"] > 0.9)
| (
df["Current asthma among adults aged >=18 years (percentile)"]
> 0.9
)
| (
@@ -476,15 +521,21 @@
)
return df
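# The "either/or" pattern above builds one boolean mask per threshold and
# combines them with |; a row is flagged when any mask is True. A tiny
# self-contained illustration (toy column names, not from this commit):
toy_f = pd.DataFrame({"x (percentile)": [0.95, 0.50], "y (percentile)": [0.10, 0.99]})
toy_f["flag"] = (toy_f["x (percentile)"] > 0.9) | (toy_f["y (percentile)"] > 0.9)
# -> both rows flagged: row 0 via x, row 1 via y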
def _add_score_g(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Adding Score G")
# TODO: add scoring
return df
# TODO Move a lot of this to the ETL part of the pipeline
def _prepare_initial_df(self, data_sets: list) -> pd.DataFrame:
logger.info("Preparing initial dataframe")
# Join all the data sources that use census block groups
census_block_group_dfs = [
self.ejscreen_df,
self.census_df,
self.housing_and_transportation_df,
self.census_acs_median_incomes_df,
]
census_block_group_df = self._join_cbg_dfs(census_block_group_dfs)
@@ -504,11 +555,23 @@
census_tract_df, on=self.GEOID_TRACT_FIELD_NAME
)
# If GEOID10s are read as numbers instead of strings, the initial 0 is dropped,
# and then we get too many CBG rows (one for 012345 and one for 12345).
if len(census_block_group_df) > self.EXPECTED_MAX_CENSUS_BLOCK_GROUPS:
raise ValueError(
f"Too many rows in the join: {len(census_block_group_df)}"
)
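# A quick illustration of the leading-zero hazard noted above: parsed as
# a number, "012345" collapses to 12345 and would collide with a
# different block group.
assert int("012345") == 12345
assert str(int("012345")) != "012345"  # the leading zero is gone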
# Calculate median income variables.
# First, calculate the income of the block group as a fraction of the state income.
# TODO: handle null values for CBG median income, which are `-666666666`.
df[self.MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD_NAME] = (
df[self.MEDIAN_INCOME_FIELD_NAME]
/ df[self.STATE_MEDIAN_INCOME_FIELD_NAME]
)
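# A minimal sketch of the null handling the TODO above describes: ACS
# uses -666666666 as a sentinel where median income is suppressed, so one
# option (an assumption, not implemented in this commit) is to map the
# sentinel to NaN before dividing:
toy_income = pd.Series([50000.0, -666666666.0, 75000.0])
toy_income = toy_income.replace(-666666666.0, float("nan"))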
# TODO: Calculate the income of the block group as a fraction of the AMI (either state or metropolitan, depending on reference).
# TODO Refactor to no longer use the data_sets list and do all renaming in ETL step
# Rename columns:
renaming_dict = {
@@ -537,9 +600,9 @@
# calculate percentiles
for data_set in data_sets:
df[f"{data_set.renamed_field}{self.PERCENTILE_FIELD_SUFFIX}"] = df[
data_set.renamed_field
].rank(pct=True)
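# rank(pct=True) converts each value to its percentile rank within the
# column. A tiny worked example (toy values, not from this data):
toy_ranks = pd.Series([10, 20, 30, 40]).rank(pct=True)
# -> 0.25, 0.50, 0.75, 1.00: each rank divided by the number of values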
# Do some math:
# (
@@ -567,14 +630,14 @@
df[f"{data_set.renamed_field}{self.MIN_MAX_FIELD_SUFFIX}"] = (
df[data_set.renamed_field] - min_value
) / (max_value - min_value)
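# Min-max scaling maps each column onto [0, 1]. A worked example (toy
# values): with min=2 and max=10, 4 becomes (4 - 2) / (10 - 2) = 0.25.
toy_vals = pd.Series([2.0, 4.0, 10.0])
toy_min_max = (toy_vals - toy_vals.min()) / (toy_vals.max() - toy_vals.min())
# -> 0.0, 0.25, 1.0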
return df
def transform(self) -> None:
## IMPORTANT: THIS METHOD IS CLOSE TO THE LIMIT OF STATEMENTS
logger.info("Transforming Score Data")
# get data sets list
data_sets = self.data_sets()
@@ -600,9 +663,10 @@
# Calculate "Score F", which uses "either/or" thresholds.
self.df = self._add_score_f(self.df)
# Calculate "Score G", which uses AMI and poverty.
self.df = self._add_score_g(self.df)
def load(self) -> None:
logger.info("Saving Score CSV")
# write nationwide csv
self.SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(self.SCORE_CSV_PATH / "usa.csv", index=False)


@@ -57,11 +57,15 @@ class PostScoreETL(ExtractTransformLoad):
def _extract_score(self, score_path: Path) -> pd.DataFrame:
logger.info("Reading Score CSV")
df = pd.read_csv(score_path, dtype={"GEOID10": "string"})
# Convert total population to an int:
df["Total population"] = df["Total population"].astype(
int, errors="ignore"
)
return df
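# Note on the cast above: with errors="ignore", astype returns the column
# unchanged if the conversion fails (for example, when NaNs are present).
# A nullable integer dtype is one alternative sketch (an assumption, not
# part of this commit):
toy_pop = pd.Series([100.0, float("nan")]).astype("Int64")  # NaN survives as <NA>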
def _extract_national_cbg(self, national_cbg_path: Path) -> pd.DataFrame:
logger.info("Reading national CBG")
return pd.read_csv(
@@ -91,18 +95,23 @@ class PostScoreETL(ExtractTransformLoad):
constants.DATA_CENSUS_CSV_FILE_PATH
)
def _transform_counties(
self, initial_counties_df: pd.DataFrame
) -> pd.DataFrame:
"""
Necessary modifications to the counties dataframe
"""
# Rename some of the columns to prepare for merge
new_df = initial_counties_df[constants.CENSUS_COUNTIES_COLUMNS]
new_df.rename(
columns={"USPS": "State Abbreviation", "NAME": "County Name"}, inplace=True
columns={"USPS": "State Abbreviation", "NAME": "County Name"},
inplace=True,
)
return new_df
def _transform_states(
self, initial_states_df: pd.DataFrame
) -> pd.DataFrame:
"""
Necessary modifications to the states dataframe
"""
@@ -174,7 +183,9 @@ class PostScoreETL(ExtractTransformLoad):
def _create_tile_data(
self, score_county_state_merged_df: pd.DataFrame
) -> pd.DataFrame:
score_tiles = score_county_state_merged_df[
constants.TILES_SCORE_COLUMNS
]
decimals = pd.Series(
[constants.TILES_ROUND_NUM_DECIMALS]
* len(constants.TILES_SCORE_FLOAT_COLUMNS),
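# The hunk cuts off mid-call, but a decimals Series like the one above is
# typically passed to DataFrame.round, which accepts a per-column mapping
# of digits. A sketch under that assumption (toy column names):
toy_tiles = pd.DataFrame({"score": [0.123456], "name": ["a"]})
toy_tiles = toy_tiles.round(pd.Series([2], index=["score"]))  # -> score 0.12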
@@ -185,7 +196,9 @@ class PostScoreETL(ExtractTransformLoad):
def _create_downloadable_data(
self, score_county_state_merged_df: pd.DataFrame
) -> pd.DataFrame:
return score_county_state_merged_df[
constants.DOWNLOADABLE_SCORE_COLUMNS
]
def transform(self) -> None:
logger.info("Transforming data sources for Score + County CSV")
@@ -206,7 +219,9 @@ class PostScoreETL(ExtractTransformLoad):
self.output_downloadable_df = self._create_downloadable_data(
output_score_county_state_merged_df
)
self.output_score_county_state_merged_df = (
output_score_county_state_merged_df
)
def _load_score_csv(
self, score_county_state_merged: pd.DataFrame, score_csv_path: Path


@@ -83,7 +83,9 @@ def states_transformed_expected():
return pd.DataFrame.from_dict(
data={
"State Code": pd.Series(["01", "02", "04"], dtype="string"),
"State Name": pd.Series(["Alabama", "Alaska", "Arizona"], dtype="object"),
"State Name": pd.Series(
["Alabama", "Alaska", "Arizona"], dtype="object"
),
"State Abbreviation": pd.Series(["AL", "AK", "AZ"], dtype="string"),
},
)
@@ -91,14 +93,18 @@ def states_transformed_expected():
@pytest.fixture()
def score_transformed_expected():
return pd.read_pickle(pytest.SNAPSHOT_DIR / "score_transformed_expected.pkl")
return pd.read_pickle(
pytest.SNAPSHOT_DIR / "score_transformed_expected.pkl"
)
@pytest.fixture()
def national_cbg_df():
return pd.DataFrame.from_dict(
data={
"GEOID10": pd.Series(["010010201001", "010010201002"], dtype="string"),
"GEOID10": pd.Series(
["010010201001", "010010201002"], dtype="string"
),
},
)
@@ -115,4 +121,6 @@ def tile_data_expected():
@pytest.fixture()
def downloadable_data_expected():
return pd.read_pickle(pytest.SNAPSHOT_DIR / "downloadable_data_expected.pkl")
return pd.read_pickle(
pytest.SNAPSHOT_DIR / "downloadable_data_expected.pkl"
)


@@ -33,16 +33,22 @@ def test_extract_score(etl, score_data_initial):
# Transform Tests
def test_transform_counties(
etl, county_data_initial, counties_transformed_expected
):
extracted_counties = etl._extract_counties(county_data_initial)
counties_transformed_actual = etl._transform_counties(extracted_counties)
pdt.assert_frame_equal(
counties_transformed_actual, counties_transformed_expected
)
def test_transform_states(etl, state_data_initial, states_transformed_expected):
extracted_states = etl._extract_states(state_data_initial)
states_transformed_actual = etl._transform_states(extracted_states)
pdt.assert_frame_equal(
states_transformed_actual, states_transformed_expected
)
def test_transform_score(etl, score_data_initial, score_transformed_expected):
@@ -82,8 +88,12 @@ def test_create_tile_data(etl, score_data_expected, tile_data_expected):
)
def test_create_downloadable_data(
etl, score_data_expected, downloadable_data_expected
):
output_downloadable_df_actual = etl._create_downloadable_data(
score_data_expected
)
pdt.assert_frame_equal(
output_downloadable_df_actual,
downloadable_data_expected,
@@ -101,7 +111,9 @@ def test_load_score_csv(etl, score_data_expected):
def test_load_tile_csv(etl, tile_data_expected):
reload(constants)
etl._load_score_csv(
tile_data_expected, constants.DATA_SCORE_TILES_FILE_PATH
)
assert constants.DATA_SCORE_TILES_FILE_PATH.is_file()