Add ability to cache ETL data sources (#2169)

* Add a rough prototype allowing a developer to pre-download data sources for all ETLs

* Update code to be closer to production quality

* Move fetch to the Extract part of the ETL
* Create a downloader to house all downloading operations (a hypothetical interface sketch follows below)
* Remove the unnecessary "name" in data source
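
As a rough sketch of the interface these bullets describe: the class names and the source=/destination= keywords come from the diff below, but the fetch method, the dataclass layout, and the unzip behavior here are assumptions, not the committed implementation (the real classes live in data_pipeline/etl/datasource.py).

    # Hypothetical sketch only; the committed classes may differ.
    import urllib.request
    import zipfile
    from dataclasses import dataclass
    from pathlib import Path

    @dataclass
    class DataSource:
        """A remote file an ETL needs, plus the local directory to fetch it into."""
        source: str        # URL of the remote file
        destination: Path  # local cache directory

        def fetch(self) -> None:
            # Download the file into the destination directory.
            self.destination.mkdir(parents=True, exist_ok=True)
            urllib.request.urlretrieve(
                self.source, self.destination / Path(self.source).name
            )

    @dataclass
    class ZIPDataSource(DataSource):
        """A zip archive that is downloaded and then unpacked in place."""
        def fetch(self) -> None:
            super().fetch()
            with zipfile.ZipFile(self.destination / Path(self.source).name) as zf:
                zf.extractall(self.destination)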

* Format source files with black

* Fix issues from pylint and get the tests working with the new folder structure

* Clean up files with black

* Fix unzip test

* Add caching notes to README

* Fix tests (linting and a case-sensitivity bug)

* Address PR comments and add Census API keys where missing

* Merge comparator changes from main into this branch for the sake of the PR

* Add a note on using the cache flag (-u) during pipeline runs
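
For context on that flag, here is a hedged sketch of how -u could be wired through to each ETL's extract(). The real CLI lives in data_pipeline/application.py; the option wiring, command name, and runner below are assumptions, not the committed code — only the -u flag and the use_cached_data_sources parameter appear in this commit.

    # Hypothetical wiring only; the actual CLI may differ.
    import click

    @click.command("etl-run")
    @click.option(
        "-u",
        "--use-cached-data-sources",
        is_flag=True,
        default=False,
        help="Reuse already-downloaded data sources instead of re-fetching them.",
    )
    def etl_run(use_cached_data_sources: bool) -> None:
        etl = HistoricRedliningETL()  # one ETL shown; the real runner loops over all ETLs
        etl.extract(use_cached_data_sources=use_cached_data_sources)
        etl.transform()
        etl.load()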
commit 6f39033dde
Travis Newby, 2023-03-03 12:26:24 -06:00 (committed via GitHub)
52 changed files with 1787 additions and 686 deletions


@@ -3,12 +3,16 @@ from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
+from data_pipeline.etl.datasource import DataSource
+from data_pipeline.etl.datasource import ZIPDataSource
 
 logger = get_module_logger(__name__)
 
 
 class HistoricRedliningETL(ExtractTransformLoad):
     NAME = "historic_redlining"
     GEO_LEVEL: ValidGeoLevel = ValidGeoLevel.CENSUS_TRACT
     EXPECTED_MISSING_STATES = [
         "10",
@@ -25,14 +29,14 @@ class HistoricRedliningETL(ExtractTransformLoad):
     ]
     PUERTO_RICO_EXPECTED_IN_DATA = False
     ALASKA_AND_HAWAII_EXPECTED_IN_DATA: bool = False
-    SOURCE_URL = settings.AWS_JUSTICE40_DATASOURCES_URL + "/HRS_2010.zip"
 
     def __init__(self):
-        self.CSV_PATH = self.DATA_PATH / "dataset" / "historic_redlining"
-        self.HISTORIC_REDLINING_FILE_PATH = (
-            self.get_tmp_path() / "HRS_2010.xlsx"
-        )
+        # fetch
+        self.hrs_url = settings.AWS_JUSTICE40_DATASOURCES_URL + "/HRS_2010.zip"
+
+        # input
+        self.hrs_source = self.get_sources_path() / "HRS_2010.xlsx"
 
         self.REDLINING_SCALAR = "Tract-level redlining score"
@@ -40,30 +44,47 @@
             self.GEOID_TRACT_FIELD_NAME,
             self.REDLINING_SCALAR,
         ]
-        self.df: pd.DataFrame
+        self.historic_redlining_data: pd.DataFrame
+
+    def get_data_sources(self) -> [DataSource]:
+        return [
+            ZIPDataSource(
+                source=self.hrs_url, destination=self.get_sources_path()
+            )
+        ]
+
+    def extract(self, use_cached_data_sources: bool = False) -> None:
+
+        super().extract(
+            use_cached_data_sources
+        )  # download and extract data sources
+
+        self.historic_redlining_data = pd.read_excel(self.hrs_source)
 
     def transform(self) -> None:
-        # this is obviously temporary
-        historic_redlining_data = pd.read_excel(
-            self.HISTORIC_REDLINING_FILE_PATH
-        )
-        historic_redlining_data[self.GEOID_TRACT_FIELD_NAME] = (
-            historic_redlining_data["GEOID10"].astype(str).str.zfill(11)
-        )
-        historic_redlining_data = historic_redlining_data.rename(
+        self.historic_redlining_data[self.GEOID_TRACT_FIELD_NAME] = (
+            self.historic_redlining_data["GEOID10"].astype(str).str.zfill(11)
+        )
+
+        self.historic_redlining_data = self.historic_redlining_data.rename(
             columns={"HRS2010": self.REDLINING_SCALAR}
         )
-        logger.debug(f"{historic_redlining_data.columns}")
+        logger.debug(f"{self.historic_redlining_data.columns}")
 
         # Calculate lots of different score thresholds for convenience
         for threshold in [3.25, 3.5, 3.75]:
-            historic_redlining_data[
+            self.historic_redlining_data[
                 f"{self.REDLINING_SCALAR} meets or exceeds {round(threshold, 2)}"
-            ] = (historic_redlining_data[self.REDLINING_SCALAR] >= threshold)
+            ] = (
+                self.historic_redlining_data[self.REDLINING_SCALAR] >= threshold
+            )
             ## NOTE We add to columns to keep here
             self.COLUMNS_TO_KEEP.append(
                 f"{self.REDLINING_SCALAR} meets or exceeds {round(threshold, 2)}"
            )
 
-        self.output_df = historic_redlining_data
+        self.output_df = self.historic_redlining_data
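
Taken together, the pattern this diff establishes is: each ETL declares its remote files via get_data_sources(), and the shared extract() in the base class fetches them unless the cache flag says to reuse what is already on disk. A minimal sketch of that base-class behavior, inferred from the super().extract(...) call above and not shown in this diff:

    # Assumed behavior of ExtractTransformLoad.extract; the actual base class may differ.
    def extract(self, use_cached_data_sources: bool = False) -> None:
        for data_source in self.get_data_sources():
            # With -u, trust whatever already sits in the sources directory.
            if use_cached_data_sources and data_source.destination.exists():
                continue
            data_source.fetch()  # download (and, for ZIPDataSource, unzip)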