Mirror of https://github.com/DOI-DO/j40-cejst-2.git (synced 2025-07-29 02:31:17 -07:00)
Add ability to cache ETL data sources (#2169)
* Add a rough prototype allowing a developer to pre-download data sources for all ETLs
* Update code to be more production-ish
* Move fetch to Extract part of ETL
* Create a downloader to house all downloading operations
* Remove unnecessary "name" in data source
* Format source files with black
* Fix issues from pylint and get the tests working with the new folder structure
* Clean up files with black
* Fix unzip test
* Add caching notes to README
* Fix tests (linting and case sensitivity bug)
* Address PR comments and add API keys for census where missing
* Merging comparator changes from main into this branch for the sake of the PR
* Add note on using cache (-u) during pipeline
This commit is contained in:
parent 4d9c1dd11e
commit 6f39033dde

52 changed files with 1787 additions and 686 deletions
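The pattern is easiest to see in a single ETL class; the diff below shows FloodRiskETL. Each ETL now declares its remote inputs as DataSource objects via get_data_sources(), and the Extract step downloads them (or reuses a cached copy) before reading. For orientation, here is a minimal sketch of what a ZIPDataSource-style source could look like. The class name and its source/destination fields come from the diff; the fetch body is a hypothetical stand-in built on the Python standard library, not the repo's actual downloader:

```python
# Minimal sketch of a cacheable ZIP data source. The ZIPDataSource
# name and its source/destination fields appear in the diff below;
# this download/unzip body is illustrative only.
import io
import urllib.request
import zipfile
from dataclasses import dataclass
from pathlib import Path


@dataclass
class ZIPDataSource:
    source: str        # remote URL of the zip archive
    destination: Path  # local folder the archive is extracted into

    def fetch(self) -> None:
        """Download the archive and unzip it under `destination`.

        If the destination already holds the extracted files, a caller
        can skip this call and reuse the cached copy instead.
        """
        self.destination.mkdir(parents=True, exist_ok=True)
        with urllib.request.urlopen(self.source) as response:
            archive = zipfile.ZipFile(io.BytesIO(response.read()))
            archive.extractall(self.destination)
```

Putting every fetch behind one small interface like this is what makes the caching possible: a single downloader can pre-fetch the sources for all ETLs, and extract() can skip the network when the files are already on disk.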
```diff
@@ -5,6 +5,8 @@ from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
+from data_pipeline.etl.datasource import DataSource
+from data_pipeline.etl.datasource import ZIPDataSource
 
 logger = get_module_logger(__name__)
 
@@ -15,7 +17,7 @@ class FloodRiskETL(ExtractTransformLoad):
     NAME = "fsf_flood_risk"
     # These data were emailed to the J40 team while first street got
     # their official data sharing channels setup.
-    SOURCE_URL = settings.AWS_JUSTICE40_DATASOURCES_URL + "/fsf_flood.zip"
+
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     LOAD_YAML_CONFIG: bool = True
 
@@ -27,13 +29,16 @@ class FloodRiskETL(ExtractTransformLoad):
     SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_IN_30_YEARS: str
 
     def __init__(self):
-        # define the full path for the input CSV file
-        self.INPUT_CSV = (
-            self.get_tmp_path() / "fsf_flood" / "flood-tract2010.csv"
-        )
+
+        # fetch
+        self.flood_tract_url = (
+            settings.AWS_JUSTICE40_DATASOURCES_URL + "/fsf_flood.zip"
+        )
 
-        # this is the main dataframe
-        self.df: pd.DataFrame
+        # input
+        self.flood_tract_source = (
+            self.get_sources_path() / "fsf_flood" / "flood-tract2010.csv"
+        )
 
         # Start dataset-specific vars here
         self.COUNT_PROPERTIES_NATIVE_FIELD_NAME = "count_properties"
@@ -41,6 +46,29 @@ class FloodRiskETL(ExtractTransformLoad):
         self.COUNT_PROPERTIES_AT_RISK_30_YEARS = "mid_depth_100_year30"
         self.CLIP_PROPERTIES_COUNT = 250
 
+        self.df_fsf_flood: pd.DataFrame
+
+    def get_data_sources(self) -> [DataSource]:
+        return [
+            ZIPDataSource(
+                source=self.flood_tract_url, destination=self.get_sources_path()
+            )
+        ]
+
+    def extract(self, use_cached_data_sources: bool = False) -> None:
+
+        super().extract(
+            use_cached_data_sources
+        )  # download and extract data sources
+
+        # read in the unzipped csv data source then rename the
+        # Census Tract column for merging
+        self.df_fsf_flood = pd.read_csv(
+            self.flood_tract_source,
+            dtype={self.INPUT_GEOID_TRACT_FIELD_NAME: str},
+            low_memory=False,
+        )
+
     def transform(self) -> None:
         """Reads the unzipped data file into memory and applies the following
         transformations to prepare it for the load() method:
@@ -49,35 +77,29 @@ class FloodRiskETL(ExtractTransformLoad):
         - Calculates share of properties at risk, left-clipping number of properties at 250
         """
 
-        # read in the unzipped csv data source then rename the
-        # Census Tract column for merging
-        df_fsf_flood: pd.DataFrame = pd.read_csv(
-            self.INPUT_CSV,
-            dtype={self.INPUT_GEOID_TRACT_FIELD_NAME: str},
-            low_memory=False,
-        )
-
-        df_fsf_flood[self.GEOID_TRACT_FIELD_NAME] = df_fsf_flood[
+        self.df_fsf_flood[self.GEOID_TRACT_FIELD_NAME] = self.df_fsf_flood[
             self.INPUT_GEOID_TRACT_FIELD_NAME
         ].str.zfill(11)
 
-        df_fsf_flood[self.COUNT_PROPERTIES] = df_fsf_flood[
+        self.df_fsf_flood[self.COUNT_PROPERTIES] = self.df_fsf_flood[
             self.COUNT_PROPERTIES_NATIVE_FIELD_NAME
         ].clip(lower=self.CLIP_PROPERTIES_COUNT)
 
-        df_fsf_flood[self.SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_TODAY] = (
-            df_fsf_flood[self.COUNT_PROPERTIES_AT_RISK_TODAY]
-            / df_fsf_flood[self.COUNT_PROPERTIES]
+        self.df_fsf_flood[
+            self.SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_TODAY
+        ] = (
+            self.df_fsf_flood[self.COUNT_PROPERTIES_AT_RISK_TODAY]
+            / self.df_fsf_flood[self.COUNT_PROPERTIES]
         )
-        df_fsf_flood[
+        self.df_fsf_flood[
             self.SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_IN_30_YEARS
         ] = (
-            df_fsf_flood[self.COUNT_PROPERTIES_AT_RISK_30_YEARS]
-            / df_fsf_flood[self.COUNT_PROPERTIES]
+            self.df_fsf_flood[self.COUNT_PROPERTIES_AT_RISK_30_YEARS]
+            / self.df_fsf_flood[self.COUNT_PROPERTIES]
        )
 
         # Assign the final df to the class' output_df for the load method with rename
-        self.output_df = df_fsf_flood.rename(
+        self.output_df = self.df_fsf_flood.rename(
             columns={
                 self.COUNT_PROPERTIES_AT_RISK_TODAY: self.PROPERTIES_AT_RISK_FROM_FLOODING_TODAY,
                 self.COUNT_PROPERTIES_AT_RISK_30_YEARS: self.PROPERTIES_AT_RISK_FROM_FLOODING_IN_30_YEARS,
```
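With the sources declared, the extract step can run either against the network or against a pre-populated cache. A rough sketch of the call pattern, using names from the diff above (the module path for FloodRiskETL is an assumption, and a real run requires the repo's settings and the AWS-hosted data to be reachable):

```python
# Sketch of the new extract flow; FloodRiskETL, get_data_sources(),
# and use_cached_data_sources come from the diff above, while the
# import path below is assumed rather than taken from this page.
from data_pipeline.etl.sources.fsf_flood_risk.etl import FloodRiskETL

etl = FloodRiskETL()

# Each ETL now lists its remote inputs up front, so a separate tool
# can pre-download (cache) the sources for every ETL in one pass.
for data_source in etl.get_data_sources():
    print(data_source.source)  # the fsf_flood.zip URL

# extract() first downloads the declared sources -- or, with the flag
# set, reuses cached copies -- then reads them into a dataframe for
# transform() to use.
etl.extract(use_cached_data_sources=True)
```

The cache flag (-u) noted in the commit message presumably wires this same use_cached_data_sources switch into the pipeline command line.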