Mirror of https://github.com/DOI-DO/j40-cejst-2.git (synced 2025-08-03 17:24:19 -07:00)
Add ability to cache ETL data sources (#2169)
* Add a rough prototype allowing a developer to pre-download data sources for all ETLs
* Update the code to be more production-ready
* Move fetching to the Extract part of the ETL
* Create a downloader to house all downloading operations
* Remove the unnecessary "name" field in data sources
* Format source files with black
* Fix issues from pylint and get the tests working with the new folder structure
* Clean up files with black
* Fix the unzip test
* Add caching notes to the README
* Fix tests (linting and a case-sensitivity bug)
* Address PR comments and add API keys for census where missing
* Merge comparator changes from main into this branch for the sake of the PR
* Add a note on using the cache (-u) during pipeline runs (see the sketch below)
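Moving the fetch into the Extract step means the shared base class, rather than each individual ETL, decides whether to hit the network or reuse files already on disk. Below is a minimal sketch of that dispatch, assuming each data source exposes a `fetch()` method and a `destination` path; the repo's actual `ExtractTransformLoad.extract()` may differ.

```python
# Hedged sketch, not the repo's actual base-class extract(): shows how a
# cache flag can short-circuit downloads on a per-data-source basis.
from pathlib import Path
from typing import List, Protocol


class SourceLike(Protocol):
    """Stand-in for the DataSource interface assumed by this sketch."""

    destination: Path

    def fetch(self) -> None: ...


def extract_sources(sources: List[SourceLike], use_cached: bool = False) -> None:
    for src in sources:
        # Reuse files already on disk when the cache flag is set.
        if use_cached and src.destination.exists():
            continue
        src.fetch()
```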
parent 4d9c1dd11e
commit 6f39033dde

52 changed files with 1787 additions and 686 deletions
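The diff that follows shows the per-ETL side of the change for `TravelCompositeETL`: the class-level `SOURCE_URL` gives way to a `get_data_sources()` declaration, and `extract()` defers downloading to the base class. For orientation, here is a rough sketch of what a ZIP-backed data source could look like. The real classes live in `data_pipeline.etl.datasource`; everything beyond the `source`/`destination` fields visible in the diff (the `fetch` method, the temporary file name) is an assumption for illustration.

```python
# Sketch of the DataSource pattern introduced by this commit. Class names
# and the source/destination fields mirror the diff below; fetch() and the
# download file name are illustrative assumptions, not the repo's API.
import urllib.request
import zipfile
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path


@dataclass
class DataSource(ABC):
    """A single remote input for an ETL: where it comes from, where it lands."""

    source: str  # remote URL
    destination: Path  # local directory for the fetched files

    @abstractmethod
    def fetch(self) -> None:
        ...


@dataclass
class ZIPDataSource(DataSource):
    """Download a ZIP archive and unpack it into `destination`."""

    def fetch(self) -> None:
        self.destination.mkdir(parents=True, exist_ok=True)
        zip_path = self.destination / "download.zip"
        urllib.request.urlretrieve(self.source, zip_path)
        with zipfile.ZipFile(zip_path) as zf:
            zf.extractall(self.destination)
```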
@@ -3,6 +3,8 @@
 import geopandas as gpd
 import pandas as pd
 from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.datasource import DataSource
+from data_pipeline.etl.datasource import ZIPDataSource
 from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 from data_pipeline.config import settings
@@ -15,14 +17,6 @@ class TravelCompositeETL(ExtractTransformLoad):

     NAME = "travel_composite"

-    if settings.DATASOURCE_RETRIEVAL_FROM_AWS:
-        SOURCE_URL = (
-            f"{settings.AWS_JUSTICE40_DATASOURCES_URL}/raw-data-sources/"
-            "dot_travel_composite/Shapefile_and_Metadata.zip"
-        )
-    else:
-        SOURCE_URL = "https://www.transportation.gov/sites/dot.gov/files/Shapefile_and_Metadata.zip"
-
     GEO_LEVEL = ValidGeoLevel.CENSUS_TRACT
     PUERTO_RICO_EXPECTED_IN_DATA = False
     LOAD_YAML_CONFIG: bool = True
@@ -31,14 +25,29 @@ class TravelCompositeETL(ExtractTransformLoad):
     TRAVEL_BURDEN_FIELD_NAME: str

     def __init__(self):

+        # fetch
+        if settings.DATASOURCE_RETRIEVAL_FROM_AWS:
+            self.travel_composite_url = (
+                f"{settings.AWS_JUSTICE40_DATASOURCES_URL}/raw-data-sources/"
+                "dot_travel_composite/Shapefile_and_Metadata.zip"
+            )
+        else:
+            self.travel_composite_url = "https://www.transportation.gov/sites/dot.gov/files/Shapefile_and_Metadata.zip"
+
         # input
         # define the full path for the input CSV file
-        self.INPUT_SHP = (
-            self.get_tmp_path() / "DOT_Disadvantage_Layer_Final_April2022.shp"
+        self.disadvantage_layer_shape_source = (
+            self.get_sources_path()
+            / "DOT_Disadvantage_Layer_Final_April2022.shp"
         )

         # output
         # this is the main dataframe
         self.df: pd.DataFrame

+        self.df_dot: pd.DataFrame
+
         # Start dataset-specific vars here
         ## Average of Transportation Indicator Percentiles (calculated)
         ## Calculated: Average of (EPL_TCB+EPL_NWKI+EPL_NOVEH+EPL_COMMUTE) excluding NULLS
@@ -46,6 +55,22 @@ class TravelCompositeETL(ExtractTransformLoad):
         self.INPUT_TRAVEL_DISADVANTAGE_FIELD_NAME = "Transp_TH"
         self.INPUT_GEOID_TRACT_FIELD_NAME = "FIPS"

+    def get_data_sources(self) -> [DataSource]:
+        return [
+            ZIPDataSource(
+                source=self.travel_composite_url,
+                destination=self.get_sources_path(),
+            )
+        ]
+
+    def extract(self, use_cached_data_sources: bool = False) -> None:
+
+        super().extract(
+            use_cached_data_sources
+        )  # download and extract data sources
+
+        self.df_dot = gpd.read_file(self.disadvantage_layer_shape_source)
+
     def transform(self) -> None:
         """Reads the unzipped data file into memory and applies the following
         transformations to prepare it for the load() method:
@@ -54,15 +79,15 @@ class TravelCompositeETL(ExtractTransformLoad):
         - Converts to CSV
         """

-        # read in the unzipped shapefile from data source
         # reformat it to be standard df, remove unassigned rows, and
         # then rename the Census Tract column for merging
-        df_dot: pd.DataFrame = gpd.read_file(self.INPUT_SHP)
-        df_dot = df_dot.rename(
+        self.df_dot = self.df_dot.rename(
             columns={
                 self.INPUT_GEOID_TRACT_FIELD_NAME: self.GEOID_TRACT_FIELD_NAME,
                 self.INPUT_TRAVEL_DISADVANTAGE_FIELD_NAME: self.TRAVEL_BURDEN_FIELD_NAME,
             }
         ).dropna(subset=[self.GEOID_TRACT_FIELD_NAME])

         # Assign the final df to the class' output_df for the load method
-        self.output_df = df_dot
+        self.output_df = self.df_dot
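Downstream of this diff, a hypothetical driver for the reworked class might look like the following. The module path and the presence of `load()` on the base class are assumptions from context, and the pipeline's `-u` flag mentioned in the commit message corresponds to passing `use_cached_data_sources=True` here.

```python
# Hypothetical usage of the reworked ETL; the module path is assumed and
# does not appear in the diff above.
from data_pipeline.etl.sources.dot_travel_composite.etl import TravelCompositeETL

etl = TravelCompositeETL()

# Inspect the inputs the ETL now declares instead of downloading ad hoc.
for data_source in etl.get_data_sources():
    print(data_source.source, "->", data_source.destination)

# The first run downloads into the sources path; later runs can reuse the
# cached copies (what the pipeline's -u flag requests).
etl.extract(use_cached_data_sources=True)
etl.transform()
etl.load()
```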