Refactor DOE Energy Burden and COI to use YAML (#1796)

* added tribalId for Supplemental dataset (#1804)

* Setting zoom levels for tribal map (#1810)

* NRI dataset and initial score YAML configuration (#1534)

* update be staging gha

* NRI dataset and initial score YAML configuration

* checkpoint

* adding data checks for release branch

* passing tests

* adding INPUT_EXTRACTED_FILE_NAME to base class

* lint

* columns to keep and tests

* update be staging gha

* checkpoint

* PR Review

* removing source url

* tests

* stop execution of ETL if there's a YAML schema issue

* update be staging gha

* adding source url as class var again

* clean up

* force cache bust

* gha cache bust

* dynamically set score vars from YAML

* docstrings

* removing last updated year - optional reverse percentile

* passing tests

* sort order

* column ordering

* PR review

* class level vars

* Updating DatasetsConfig

* fix pylint errors

* moving metadata hint back to code

Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>

* Correct copy typo (#1809)

* Add basic test suite for COI (#1518)

* Update COI to use new yaml (#1518)

* Add tests for DOE energy burden (#1518)

* Add dataset config for energy burden (#1518)

* Refactor ETL to use datasets.yml (#1518)

* Add fake GEOIDs to COI tests (#1518)

* Refactor _setup_etl_instance_and_run_extract to base (#1518)

For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract works fine. For the moment we can
reuse the same setup method until we decide future classes need more
flexibility --- but they can always subclass if they do (a minimal
sketch of the idea follows after the commit list).

* Add output-path tests (#1518)

* Update YAML to match constant (#1518)

* Don't blindly set float format (#1518)

* Add defaults for extract (#1518)

* Run YAML load on all subclasses (#1518)

* Update description fields (#1518)

* Update YAML per final format (#1518)

* Update fixture tract IDs (#1518)

* Update base class refactor (#1518)

Now that NRI is final I needed to make a small number of updates to my
refactored code.

* Remove old comment (#1518)

* Fix type signature and return (#1518)

* Update per code review (#1518)

Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
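
To make the shared-setup idea above concrete: a minimal, self-contained sketch of the pattern, assuming a pytest suite. FakeETL and the test class names are illustrative stand-ins, not the project's actual test API; only the _setup_etl_instance_and_run_extract name comes from the commit above.

from pathlib import Path


class FakeETL:
    """Stand-in for an ExtractTransformLoad subclass (illustrative only)."""

    SOURCE_URL = "https://example.com/data.zip"  # hypothetical URL

    def __init__(self):
        self.extracted = None

    def extract(self, source_url=None, extract_path=None):
        # Record what would have been downloaded and unzipped.
        self.extracted = (source_url or self.SOURCE_URL, extract_path)


class TestETLBase:
    # Each dataset's test class points this at its own ETL class.
    _ETL_CLASS = FakeETL

    def _setup_etl_instance_and_run_extract(self, tmp_path: Path):
        # Generic setup shared by every ETL test class; subclasses can
        # override it if they ever need more flexibility.
        etl = self._ETL_CLASS()
        etl.extract(extract_path=tmp_path)
        return etl


class TestFakeETL(TestETLBase):
    def test_extract_records_source_url(self, tmp_path):
        etl = self._setup_etl_instance_and_run_extract(tmp_path)
        assert etl.extracted == (FakeETL.SOURCE_URL, tmp_path)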
Matt Bowen, 2022-08-10 16:02:59 -04:00, committed by GitHub
commit 9635ef5ee2
44 changed files with 698 additions and 3640 deletions

data_pipeline/etl/base.py

@@ -1,12 +1,15 @@
import enum
import pathlib
import sys
import typing
from typing import Optional
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.score.schemas.datasets import DatasetsConfig
from data_pipeline.utils import (
load_yaml_dict_from_file,
unzip_file_from_url,
remove_all_from_dir,
get_module_logger,
@@ -30,6 +33,9 @@ class ExtractTransformLoad:
Attributes:
DATA_PATH (pathlib.Path): Local path where all data will be stored
TMP_PATH (pathlib.Path): Local path where temporary data will be stored
TODO: Fill missing attrs here
GEOID_FIELD_NAME (str): The common column name for a Census Block Group identifier
GEOID_TRACT_FIELD_NAME (str): The common column name for a Census Tract identifier
"""
@@ -40,6 +46,8 @@ class ExtractTransformLoad:
DATA_PATH: pathlib.Path = APP_ROOT / "data"
TMP_PATH: pathlib.Path = DATA_PATH / "tmp"
CONTENT_CONFIG: pathlib.Path = APP_ROOT / "content" / "config"
DATASET_CONFIG_PATH: pathlib.Path = APP_ROOT / "etl" / "score" / "config"
DATASET_CONFIG: Optional[dict] = None
# Parameters
GEOID_FIELD_NAME: str = "GEOID10"
@@ -55,6 +63,9 @@ class ExtractTransformLoad:
# SOURCE_URL is used to extract source data in extract().
SOURCE_URL: str = None
# INPUT_EXTRACTED_FILE_NAME is the name of the file after extract().
INPUT_EXTRACTED_FILE_NAME: str = None
# GEO_LEVEL is used to identify whether output data is at the unit of the tract or
# census block group.
# TODO: add tests that enforce seeing the expected geographic identifier field
@@ -64,6 +75,13 @@ class ExtractTransformLoad:
# COLUMNS_TO_KEEP is used to identify which columns to keep in the output df.
COLUMNS_TO_KEEP: typing.List[str] = None
# INPUT_GEOID_TRACT_FIELD_NAME is the field name that identifies the Census Tract ID
# on the input file
INPUT_GEOID_TRACT_FIELD_NAME: str = None
# NULL_REPRESENTATION is how nulls are represented on the input field
NULL_REPRESENTATION: str = None
# Thirteen digits in a census block group ID.
EXPECTED_CENSUS_BLOCK_GROUPS_CHARACTER_LENGTH: int = 13
# TODO: investigate. Census says there are only 217,740 CBGs in the US. This might
@@ -77,8 +95,56 @@ class ExtractTransformLoad:
# periods. https://github.com/usds/justice40-tool/issues/964
EXPECTED_MAX_CENSUS_TRACTS: int = 74160
# We use output_df as the final dataframe to use to write to the CSV
# It is used on the "load" base class method
output_df: pd.DataFrame = None
def __init_subclass__(cls) -> None:
cls.DATASET_CONFIG = cls.yaml_config_load()
@classmethod
def yaml_config_load(cls) -> Optional[dict]:
"""Generate config dictionary and set instance variables from YAML dataset."""
if cls.NAME is not None:
# check if the class instance has score YAML definitions
datasets_config = load_yaml_dict_from_file(
cls.DATASET_CONFIG_PATH / "datasets.yml",
DatasetsConfig,
)
# get the config for this dataset
try:
dataset_config = next(
item
for item in datasets_config.get("datasets")
if item["module_name"] == cls.NAME
)
except StopIteration:
# Note: it'd be nice to log the name of the dataframe, but that's not accessible in this scope.
logger.error(
f"Exception encountered while extracting dataset config for dataset {cls.NAME}"
)
sys.exit()
# set some of the basic fields
cls.INPUT_GEOID_TRACT_FIELD_NAME = dataset_config[
"input_geoid_tract_field_name"
]
# get the columns to write on the CSV
# and set the constants
cls.COLUMNS_TO_KEEP = [
cls.GEOID_TRACT_FIELD_NAME, # always index with geoid tract id
]
for field in dataset_config["load_fields"]:
cls.COLUMNS_TO_KEEP.append(field["long_name"])
# set the constants for the class
setattr(cls, field["df_field_name"], field["long_name"])
return dataset_config
return None
# This is a classmethod so it can be used by `get_data_frame` without
# needing to create an instance of the class. This is a use case in `etl_score`.
@classmethod
@@ -87,16 +153,10 @@ class ExtractTransformLoad:
if cls.NAME is None:
raise NotImplementedError(
f"Child ETL class needs to specify `cls.NAME` (currently "
f"{cls.NAME}) and `cls.LAST_UPDATED_YEAR` (currently "
f"{cls.LAST_UPDATED_YEAR})."
f"{cls.NAME})."
)
output_file_path = (
cls.DATA_PATH
/ "dataset"
/ f"{cls.NAME}_{cls.LAST_UPDATED_YEAR}"
/ "usa.csv"
)
output_file_path = cls.DATA_PATH / "dataset" / f"{cls.NAME}" / "usa.csv"
return output_file_path
def get_tmp_path(self) -> pathlib.Path:
@@ -120,14 +180,18 @@ class ExtractTransformLoad:
to get the file from a source url, unzips it and stores it on an
extract_path."""
# this can be accessed via super().extract()
if source_url and extract_path:
unzip_file_from_url(
file_url=source_url,
download_path=self.get_tmp_path(),
unzipped_file_path=extract_path,
verify=verify,
)
if source_url is None:
source_url = self.SOURCE_URL
if extract_path is None:
extract_path = self.get_tmp_path()
unzip_file_from_url(
file_url=source_url,
download_path=self.get_tmp_path(),
unzipped_file_path=extract_path,
verify=verify,
)
def transform(self) -> None:
"""Transform the data extracted into a format that can be consumed by the
@@ -229,8 +293,7 @@ class ExtractTransformLoad:
Data is written in the specified local data folder or remote AWS S3 bucket.
Uses the directory from `self.OUTPUT_DIR` and the file name from
`self._get_output_file_path`.
Uses the directory and the file name from `self._get_output_file_path`.
"""
logger.info(f"Saving `{self.NAME}` CSV")

data_pipeline/etl/score/config/datasets.yml

@@ -0,0 +1,131 @@
---
datasets:
- long_name: "FEMA National Risk Index"
short_name: "nri"
module_name: national_risk_index
input_geoid_tract_field_name: "TRACTFIPS"
load_fields:
- short_name: "ex_loss"
df_field_name: "RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_FIELD_NAME"
long_name: "FEMA Risk Index Expected Annual Loss Score"
field_type: float
number_of_decimals_in_output: 6
- short_name: "ex_pop_loss"
df_field_name: "EXPECTED_POPULATION_LOSS_RATE_FIELD_NAME"
long_name: "Expected population loss rate (Natural Hazards Risk Index)"
description_short:
"Rate of fatalities and injuries resulting from natural hazards each year"
description_long:
"Rate relative to the population of fatalities and injuries due to fourteen
types of natural hazards each year that have some link to climate change:
avalanche, coastal flooding, cold wave, drought, hail, heat wave, hurricane,
ice storm, landslide, riverine flooding, strong wind, tornado, wildfire, and
winter weather. Population loss is defined as the Spatial Hazard Events and
Losses and National Centers for Environmental Information (NCEI) reported
number of fatalities and injuries caused by the hazard occurrence. To combine
fatalities and injuries for the computation of population loss value, an
injury is counted as one-tenth (1/10) of a fatality. The NCEI Storm Events
Database classifies injuries and fatalities as direct or indirect. Both direct
and indirect injuries and fatalities are counted as population loss. This
total number of injuries and fatalities is then divided by the population in
the census tract to get a per-capita rate of population risk."
field_type: float
number_of_decimals_in_output: 6
include_in_tiles: true
include_in_downloadable_files: true
create_percentile: true
- short_name: "ex_ag_loss"
df_field_name: "EXPECTED_AGRICULTURE_LOSS_RATE_FIELD_NAME"
long_name: "Expected agricultural loss rate (Natural Hazards Risk Index)"
description_short:
"Economic loss rate to agricultural value resulting from natural hazards each
year"
description_long:
"Percent of agricultural value at risk from losses due to fourteen types of
natural hazards that have some link to climate change: avalanche, coastal
flooding, cold wave, drought, hail, heat wave, hurricane, ice storm,
landslide, riverine flooding, strong wind, tornado, wildfire, and winter
weather. Rate calculated by dividing the agricultural value at risk in a
census tract by the total agricultural value in that census tract."
field_type: float
number_of_decimals_in_output: 6
include_in_tiles: true
include_in_downloadable_files: true
create_percentile: true
- short_name: "ex_bldg_loss"
df_field_name: "EXPECTED_BUILDING_LOSS_RATE_FIELD_NAME"
long_name: "Expected building loss rate (Natural Hazards Risk Index)"
description_short:
"Economic loss rate to building value resulting from natural hazards each year"
description_long:
"Percent of building value at risk from losses due to fourteen types of
natural hazards that have some link to climate change: avalanche, coastal
flooding, cold wave, drought, hail, heat wave, hurricane, ice storm,
landslide, riverine flooding, strong wind, tornado, wildfire, and winter
weather. Rate calculated by dividing the building value at risk in a census
tract by the total building value in that census tract."
field_type: float
number_of_decimals_in_output: 6
include_in_tiles: true
include_in_downloadable_files: true
create_percentile: true
- short_name: "has_ag_val"
df_field_name: "CONTAINS_AGRIVALUE"
long_name: "Contains agricultural value"
field_type: bool
- long_name: "Child Opportunity Index 2.0 database"
short_name: "coi"
module_name: "child_opportunity_index"
input_geoid_tract_field_name: "geoid"
load_fields:
- short_name: "he_heat"
df_field_name: "EXTREME_HEAT_FIELD"
long_name: "Summer days above 90F"
field_type: float
include_in_downloadable_files: true
include_in_tiles: true
- short_name: "he_food"
long_name: "Percent low access to healthy food"
df_field_name: "HEALTHY_FOOD_FIELD"
field_type: float
include_in_downloadable_files: true
include_in_tiles: true
- short_name: "he_green"
long_name: "Percent impenetrable surface areas"
df_field_name: "IMPENETRABLE_SURFACES_FIELD"
field_type: float
include_in_downloadable_files: true
include_in_tiles: true
- short_name: "ed_reading"
df_field_name: "READING_FIELD"
long_name: "Third grade reading proficiency"
field_type: float
include_in_downloadable_files: true
include_in_tiles: true
- long_name: "Low-Income Energy Affordabililty Data"
short_name: "LEAD"
module_name: "doe_energy_burden"
input_geoid_tract_field_name: "FIP"
load_fields:
- short_name: "EBP_PFS"
df_field_name: "REVISED_ENERGY_BURDEN_FIELD_NAME"
long_name: "Energy burden"
field_type: float
include_in_downloadable_files: true
include_in_tiles: true
- long_name: "Example ETL"
short_name: "Example"
module_name: "example_dataset"
input_geoid_tract_field_name: "GEOID10_TRACT"
load_fields:
- short_name: "EXAMPLE_FIELD"
df_field_name: "Input Field 1"
long_name: "Example Field 1"
field_type: float
include_in_tiles: true
include_in_downloadable_files: true
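
As a sanity check of the format, the Example ETL entry above round-trips through a standard YAML parser. A short sketch assuming PyYAML is installed; per the schema docstring's `nri__ex_loss` example below, tile/arcgis short variable names presumably compose as <dataset short_name>__<field short_name>.

import yaml  # assumes PyYAML is available

snippet = """
datasets:
  - long_name: "Example ETL"
    short_name: "Example"
    module_name: "example_dataset"
    input_geoid_tract_field_name: "GEOID10_TRACT"
    load_fields:
      - short_name: "EXAMPLE_FIELD"
        df_field_name: "Input Field 1"
        long_name: "Example Field 1"
        field_type: float
"""

config = yaml.safe_load(snippet)
dataset = config["datasets"][0]
load_field = dataset["load_fields"][0]

# Compose the short variable name from dataset and field short names.
print(f"{dataset['short_name']}__{load_field['short_name']}")
# -> Example__EXAMPLE_FIELD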

data_pipeline/etl/score/etl_score.py

@@ -442,6 +442,7 @@ class ScoreETL(ExtractTransformLoad):
# for instance, 3rd grade reading level : Low 3rd grade reading level.
# This low field will not exist yet, it is only calculated for the
# percentile.
# TODO: This will come from the YAML dataset config
ReversePercentile(
field_name=field_names.READING_FIELD,
low_field_name=field_names.LOW_READING_FIELD,

data_pipeline/etl/score/schemas/datasets.py

@@ -0,0 +1,83 @@
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
class FieldType(Enum):
STRING = "string"
INT64 = "int64"
BOOL = "bool"
FLOAT = "float"
PERCENTAGE = "percentage"
@dataclass
class DatasetsConfig:
@dataclass
class Dataset:
"""A class that defines a dataset and its load variables.
Attributes:
long_name (str): A human readable title for the dataset.
short_name (str): Used to compose the short variable names for tiles/arcgis. All short variable names will be prepended
with the short name of the dataset they come from, e.g. `nri__ex_loss`.
module_name (str): A string that matches both the Python module name for the dataset and the `NAME` property on the ETL class.
load_fields (List[LoadField]): A list of LoadFields that will drive the score ETL and side effects (tiles, downloadables).
"""
@dataclass
class LoadField:
"""A class to define the fields to be saved on the dataset's output.
These fields will be then imported by the score generation ETL.
Attributes:
short_name (str): Used in conjunction with the dataset's `short_name` for files where short names are needed.
df_field_name (str): Name for the field in the etl class.
long_name (str): Column name for the dataset's output csv.
field_type (FieldType): An enum that dictates what type of field this is.
description_short (Optional str): Description used if the field appears in the side panel.
description_long (Optional str): Description used if the field appears in the Methodology page.
number_of_decimals_in_output (Optional int): Used to represent number of decimals in side effects, like Excel. Defaults to 2 decimals.
include_in_tiles (Optional bool): Include this field on the tile export. Defaults to False.
include_in_downloadable_files (Optional bool): Include this field on the CSV and Excel exports. Defaults to False.
create_percentile (Optional bool): Whether or not the backend processing should create a percentile field (ranked in ascending order)
from the values in this field. Defaults to False.
create_reverse_percentile (Optional bool): Whether or not the backend processing should create a "reverse percentile" field (ranked in
descending order) from the values in this field. Defaults to False.
include_in_comparison_tool_as_index (Optional bool): Whether or not to include this field in the comparison tool
as an index used as comparison (e.g., this field might be a state or national index that identifies priority communities).
The field itself must be a boolean for the comparison tool to work appropriately. Defaults to False.
include_in_comparison_tool_as_statistical_descriptor (Optional bool): Whether or not to include this field in the comparison tool as a
statistical descriptor of census tracts (e.g., this field might be income levels, life expectancy, etc.). This will be
used to generate reports that produce information such as: tracts identified by Index A but not Index B have higher
income levels but lower life expectancy. Defaults to False.
"""
short_name: str
df_field_name: str
long_name: str
# Used by `etl_score_post` for data manipulation. The `by_value` metadata
# prop loads the field type's Enum value (e.g. "string") instead of its
# name (STRING).
field_type: FieldType = field(metadata={"by_value": True})
description_short: Optional[str] = None
description_long: Optional[str] = None
number_of_decimals_in_output: Optional[int] = 2
include_in_tiles: Optional[bool] = False
include_in_downloadable_files: Optional[bool] = False
create_percentile: Optional[bool] = False
create_reverse_percentile: Optional[bool] = False
include_in_comparison_tool_as_index: Optional[bool] = False
include_in_comparison_tool_as_statistical_descriptor: Optional[
bool
] = False
long_name: str
short_name: str
module_name: str
input_geoid_tract_field_name: str
load_fields: List[LoadField]
datasets: List[Dataset]
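
For validation, the by_value metadata hint suggests a marshmallow-based loader. A hedged sketch of how load_yaml_dict_from_file might validate a parsed datasets.yml dict against this schema, assuming the marshmallow_dataclass package; the project's actual loader may differ.

import marshmallow_dataclass  # assumed dependency for this sketch

schema = marshmallow_dataclass.class_schema(DatasetsConfig)()
validated = schema.load(
    {
        "datasets": [
            {
                "long_name": "Example ETL",
                "short_name": "Example",
                "module_name": "example_dataset",
                "input_geoid_tract_field_name": "GEOID10_TRACT",
                "load_fields": [
                    {
                        "short_name": "EXAMPLE_FIELD",
                        "df_field_name": "Input Field 1",
                        "long_name": "Example Field 1",
                        "field_type": "float",
                    }
                ],
            }
        ]
    }
)
# A malformed datasets.yml raises marshmallow.ValidationError here, which is
# what lets the pipeline "stop execution of ETL if there's a YAML schema
# issue" (see the commit list above).
assert validated.datasets[0].load_fields[0].field_type is FieldType.FLOAT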

data_pipeline/etl/sources/child_opportunity_index/etl.py

@@ -1,9 +1,8 @@
from pathlib import Path
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)
@@ -21,15 +20,27 @@ class ChildOpportunityIndex(ExtractTransformLoad):
Full technical documents: https://www.diversitydatakids.org/sites/default/files/2020-02/ddk_coi2.0_technical_documentation_20200212.pdf.
Github repo: https://github.com/diversitydatakids/COI/
"""
# Metadata for the baseclass
NAME = "child_opportunity_index"
GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
# Define these for easy code completion
EXTREME_HEAT_FIELD: str
HEALTHY_FOOD_FIELD: str
IMPENETRABLE_SURFACES_FIELD: str
READING_FIELD: str
def __init__(self):
self.COI_FILE_URL = (
self.SOURCE_URL = (
"https://data.diversitydatakids.org/datastore/zip/f16fff12-b1e5-4f60-85d3-"
"3a0ededa30a0?format=csv"
)
# TODO: Decide about nixing this
self.TRACT_INPUT_COLUMN_NAME = self.INPUT_GEOID_TRACT_FIELD_NAME
self.OUTPUT_PATH: Path = (
self.DATA_PATH / "dataset" / "child_opportunity_index"
)
@@ -40,31 +51,19 @@ class ChildOpportunityIndex(ExtractTransformLoad):
self.IMPENETRABLE_SURFACES_INPUT_FIELD = "HE_GREEN"
self.READING_INPUT_FIELD = "ED_READING"
# Constants for output
self.COLUMNS_TO_KEEP = [
self.GEOID_TRACT_FIELD_NAME,
field_names.EXTREME_HEAT_FIELD,
field_names.HEALTHY_FOOD_FIELD,
field_names.IMPENETRABLE_SURFACES_FIELD,
field_names.READING_FIELD,
]
self.raw_df: pd.DataFrame
self.output_df: pd.DataFrame
def extract(self) -> None:
logger.info("Starting 51MB data download.")
unzip_file_from_url(
file_url=self.COI_FILE_URL,
download_path=self.get_tmp_path(),
unzipped_file_path=self.get_tmp_path() / "child_opportunity_index",
super().extract(
source_url=self.SOURCE_URL,
extract_path=self.get_tmp_path(),
)
self.raw_df = pd.read_csv(
filepath_or_buffer=self.get_tmp_path()
/ "child_opportunity_index"
/ "raw.csv",
def transform(self) -> None:
logger.info("Starting transforms.")
raw_df = pd.read_csv(
filepath_or_buffer=self.get_tmp_path() / "raw.csv",
# The following need to remain as strings for all of their digits, not get
# converted to numbers.
dtype={
@@ -73,16 +72,13 @@ class ChildOpportunityIndex(ExtractTransformLoad):
low_memory=False,
)
def transform(self) -> None:
logger.info("Starting transforms.")
output_df = self.raw_df.rename(
output_df = raw_df.rename(
columns={
self.TRACT_INPUT_COLUMN_NAME: self.GEOID_TRACT_FIELD_NAME,
self.EXTREME_HEAT_INPUT_FIELD: field_names.EXTREME_HEAT_FIELD,
self.HEALTHY_FOOD_INPUT_FIELD: field_names.HEALTHY_FOOD_FIELD,
self.IMPENETRABLE_SURFACES_INPUT_FIELD: field_names.IMPENETRABLE_SURFACES_FIELD,
self.READING_INPUT_FIELD: field_names.READING_FIELD,
self.EXTREME_HEAT_INPUT_FIELD: self.EXTREME_HEAT_FIELD,
self.HEALTHY_FOOD_INPUT_FIELD: self.HEALTHY_FOOD_FIELD,
self.IMPENETRABLE_SURFACES_INPUT_FIELD: self.IMPENETRABLE_SURFACES_FIELD,
self.READING_INPUT_FIELD: self.READING_FIELD,
}
)
@@ -95,8 +91,8 @@ class ChildOpportunityIndex(ExtractTransformLoad):
# Convert percents from 0-100 to 0-1 to standardize with our other fields.
percent_fields_to_convert = [
field_names.HEALTHY_FOOD_FIELD,
field_names.IMPENETRABLE_SURFACES_FIELD,
self.HEALTHY_FOOD_FIELD,
self.IMPENETRABLE_SURFACES_FIELD,
]
for percent_field_to_convert in percent_fields_to_convert:
@@ -105,11 +101,3 @@ class ChildOpportunityIndex(ExtractTransformLoad):
)
self.output_df = output_df
def load(self) -> None:
logger.info("Saving CSV")
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
self.output_df[self.COLUMNS_TO_KEEP].to_csv(
path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False
)
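
Taken together, the COI diff above shows the shape a dataset ETL reduces to after this refactor. A hedged sketch of a hypothetical new dataset (MyNewDataset, my_new_dataset, and the column names are invented; the YAML-declared attributes arrive via the base class, and extract()/load() are inherited):

import pandas as pd

from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class MyNewDataset(ExtractTransformLoad):
    # NAME must match a module_name entry in datasets.yml; the base class
    # then fills in COLUMNS_TO_KEEP, INPUT_GEOID_TRACT_FIELD_NAME, and the
    # per-field constants at subclass definition time.
    NAME = "my_new_dataset"  # hypothetical
    SOURCE_URL = "https://example.com/my_new_dataset.csv.zip"  # hypothetical
    GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT

    # Declared for code completion/linting; the value is set from the YAML.
    MY_FIELD: str

    def transform(self) -> None:
        logger.info("Starting transforms.")
        raw_df = pd.read_csv(
            filepath_or_buffer=self.get_tmp_path() / "raw.csv",
            dtype={self.INPUT_GEOID_TRACT_FIELD_NAME: "string"},
            low_memory=False,
        )
        # Rename input columns to the canonical names; extract() and load()
        # come from the base class.
        self.output_df = raw_df.rename(
            columns={
                self.INPUT_GEOID_TRACT_FIELD_NAME: self.GEOID_TRACT_FIELD_NAME,
                "my_input_column": self.MY_FIELD,  # hypothetical input column
            }
        )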

data_pipeline/etl/sources/doe_energy_burden/etl.py

@@ -2,63 +2,48 @@ from pathlib import Path
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)
class DOEEnergyBurden(ExtractTransformLoad):
def __init__(self):
self.DOE_FILE_URL = (
settings.AWS_JUSTICE40_DATASOURCES_URL
+ "/DOE_LEAD_AMI_TRACT_2018_ALL.csv.zip"
)
NAME = "doe_energy_burden"
SOURCE_URL: str = (
settings.AWS_JUSTICE40_DATASOURCES_URL
+ "/DOE_LEAD_AMI_TRACT_2018_ALL.csv.zip"
)
GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
REVISED_ENERGY_BURDEN_FIELD_NAME: str
def __init__(self):
self.OUTPUT_PATH: Path = (
self.DATA_PATH / "dataset" / "doe_energy_burden"
)
self.TRACT_INPUT_COLUMN_NAME = "FIP"
self.INPUT_ENERGY_BURDEN_FIELD_NAME = "BURDEN"
self.REVISED_ENERGY_BURDEN_FIELD_NAME = "Energy burden"
# Constants for output
self.COLUMNS_TO_KEEP = [
self.GEOID_TRACT_FIELD_NAME,
self.REVISED_ENERGY_BURDEN_FIELD_NAME,
]
self.raw_df: pd.DataFrame
self.output_df: pd.DataFrame
def extract(self) -> None:
logger.info("Starting data download.")
unzip_file_from_url(
file_url=self.DOE_FILE_URL,
download_path=self.get_tmp_path(),
unzipped_file_path=self.get_tmp_path() / "doe_energy_burden",
)
self.raw_df = pd.read_csv(
def transform(self) -> None:
logger.info("Starting DOE Energy Burden transforms.")
raw_df: pd.DataFrame = pd.read_csv(
filepath_or_buffer=self.get_tmp_path()
/ "doe_energy_burden"
/ "DOE_LEAD_AMI_TRACT_2018_ALL.csv",
# The following need to remain as strings for all of their digits, not get converted to numbers.
dtype={
self.TRACT_INPUT_COLUMN_NAME: "string",
self.INPUT_GEOID_TRACT_FIELD_NAME: "string",
},
low_memory=False,
)
def transform(self) -> None:
logger.info("Starting transforms.")
output_df = self.raw_df.rename(
logger.info("Renaming columns and ensuring output format is correct")
output_df = raw_df.rename(
columns={
self.INPUT_ENERGY_BURDEN_FIELD_NAME: self.REVISED_ENERGY_BURDEN_FIELD_NAME,
self.TRACT_INPUT_COLUMN_NAME: self.GEOID_TRACT_FIELD_NAME,
self.INPUT_GEOID_TRACT_FIELD_NAME: self.GEOID_TRACT_FIELD_NAME,
}
)
@@ -75,7 +60,4 @@ class DOEEnergyBurden(ExtractTransformLoad):
def load(self) -> None:
logger.info("Saving DOE Energy Burden CSV")
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
self.output_df[self.COLUMNS_TO_KEEP].to_csv(
path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False
)
super().load()

data_pipeline/etl/sources/national_risk_index/etl.py

@@ -15,10 +15,16 @@ class NationalRiskIndexETL(ExtractTransformLoad):
"""ETL class for the FEMA National Risk Index dataset"""
NAME = "national_risk_index"
LAST_UPDATED_YEAR = 2020
SOURCE_URL = "https://hazards.fema.gov/nri/Content/StaticDocuments/DataDownload//NRI_Table_CensusTracts/NRI_Table_CensusTracts.zip"
GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
# Output score variables (values set on datasets.yml) for linting purposes
RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_FIELD_NAME: str
EXPECTED_BUILDING_LOSS_RATE_FIELD_NAME: str
EXPECTED_AGRICULTURE_LOSS_RATE_FIELD_NAME: str
EXPECTED_POPULATION_LOSS_RATE_FIELD_NAME: str
CONTAINS_AGRIVALUE: str
## TEMPORARILY HERE
## To get this value up in time for launch, we've hard coded it. We would like
## to, in the future, have this pull the 10th percentile (or nth percentile)
@@ -27,54 +33,31 @@ class NationalRiskIndexETL(ExtractTransformLoad):
AGRIVALUE_LOWER_BOUND = 408000
def __init__(self):
# define the full path for the input CSV file
self.INPUT_CSV = self.get_tmp_path() / "NRI_Table_CensusTracts.csv"
# this is the main dataframe
self.df: pd.DataFrame
# Start dataset-specific vars here
self.RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_INPUT_FIELD_NAME = (
"EAL_SCORE"
)
self.RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_FIELD_NAME = (
"FEMA Risk Index Expected Annual Loss Score"
)
self.EXPECTED_ANNUAL_LOSS_BUILDING_VALUE_INPUT_FIELD_NAME = "EAL_VALB"
self.EXPECTED_ANNUAL_LOSS_AGRICULTURAL_VALUE_INPUT_FIELD_NAME = (
"EAL_VALA"
)
self.EXPECTED_ANNUAL_LOSS_POPULATION_VALUE_INPUT_FIELD_NAME = "EAL_VALP"
self.AGRICULTURAL_VALUE_INPUT_FIELD_NAME = "AGRIVALUE"
self.POPULATION_INPUT_FIELD_NAME = "POPULATION"
self.BUILDING_VALUE_INPUT_FIELD_NAME = "BUILDVALUE"
self.EXPECTED_BUILDING_LOSS_RATE_FIELD_NAME = (
"Expected building loss rate (Natural Hazards Risk Index)"
)
self.EXPECTED_AGRICULTURE_LOSS_RATE_FIELD_NAME = (
"Expected agricultural loss rate (Natural Hazards Risk Index)"
)
self.EXPECTED_POPULATION_LOSS_RATE_FIELD_NAME = (
"Expected population loss rate (Natural Hazards Risk Index)"
)
self.CONTAINS_AGRIVALUE = "Contains agricultural value"
self.COLUMNS_TO_KEEP = [
self.GEOID_TRACT_FIELD_NAME,
self.RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_FIELD_NAME,
self.EXPECTED_POPULATION_LOSS_RATE_FIELD_NAME,
self.EXPECTED_AGRICULTURE_LOSS_RATE_FIELD_NAME,
self.EXPECTED_BUILDING_LOSS_RATE_FIELD_NAME,
self.CONTAINS_AGRIVALUE,
]
self.df: pd.DataFrame
def extract(self) -> None:
"""Unzips NRI dataset from the FEMA data source and writes the files
to the temporary data folder for use in the transform() method
"""
logger.info("Downloading 405MB National Risk Index Data")
super().extract(
source_url=self.SOURCE_URL,
extract_path=self.get_tmp_path(),
@@ -90,19 +73,18 @@ class NationalRiskIndexETL(ExtractTransformLoad):
"""
logger.info("Transforming National Risk Index Data")
NRI_TRACT_COL = "TRACTFIPS" # Census Tract Column in NRI data
# read in the unzipped csv from NRI data source then rename the
# Census Tract column for merging
df_nri: pd.DataFrame = pd.read_csv(
self.INPUT_CSV,
dtype={NRI_TRACT_COL: "string"},
dtype={self.INPUT_GEOID_TRACT_FIELD_NAME: "string"},
na_values=["None"],
low_memory=False,
)
df_nri.rename(
columns={
NRI_TRACT_COL: self.GEOID_TRACT_FIELD_NAME,
self.INPUT_GEOID_TRACT_FIELD_NAME: self.GEOID_TRACT_FIELD_NAME,
self.RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_INPUT_FIELD_NAME: self.RISK_INDEX_EXPECTED_ANNUAL_LOSS_SCORE_FIELD_NAME,
},
inplace=True,
@@ -170,6 +152,7 @@ class NationalRiskIndexETL(ExtractTransformLoad):
].clip(
lower=self.AGRIVALUE_LOWER_BOUND
)
# This produces a boolean that is True in the case of non-zero agricultural value
df_nri[self.CONTAINS_AGRIVALUE] = (
df_nri[self.AGRICULTURAL_VALUE_INPUT_FIELD_NAME] > 0
@@ -185,6 +168,7 @@ class NationalRiskIndexETL(ExtractTransformLoad):
# Note: `round` is smart enough to only apply to float columns.
df_nri = df_nri.round(10)
# Assign the final df to the class' output_df for the load method
self.output_df = df_nri
def load(self) -> None:

data_pipeline/etl/sources/tribal/etl.py

@@ -81,13 +81,13 @@ class TribalETL(ExtractTransformLoad):
bia_aian_supplemental_df = gpd.read_file(tribal_geojson_path)
bia_aian_supplemental_df.drop(
["OBJECTID", "GISAcres", "Source", "Shape_Length", "Shape_Area"],
["GISAcres", "Source", "Shape_Length", "Shape_Area"],
axis=1,
inplace=True,
)
bia_aian_supplemental_df.rename(
columns={"Land_Area_": "landAreaName"},
columns={"OBJECTID": "tribalId", "Land_Area_": "landAreaName"},
inplace=True,
)