Add ability to cache ETL data sources (#2169)

* Add a rough prototype allowing a developer to pre-download data sources for all ETLs

* Update code to be more production-ish

* Move fetch to Extract part of ETL
* Create a downloader to house all downloading operations (see the sketch after these notes)
* Remove unnecessary "name" in data source

* Format source files with black

* Fix issues from pylint and get the tests working with the new folder structure

* Clean up files with black

* Fix unzip test

* Add caching notes to README

* Fix tests (linting and case sensitivity bug)

* Address PR comments and add API keys for census where missing

* Merge comparator changes from main into this branch for the sake of the PR

* Add note on using cache (-u) during pipeline
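
As a rough illustration of the data source abstraction these notes describe, the sketch below shows one way a DataSource / FileDataSource pair could be shaped: each ETL declares the remote files it needs and where they should land locally, and the downloader (or a cache pre-fetch step) calls fetch() on each entry. This is a hedged sketch only; the field names, method names, and timeout value are assumptions, not the actual contents of data_pipeline/etl/datasource.py.

    # Illustrative sketch (not the committed implementation) of a file-backed data source.
    from dataclasses import dataclass
    from pathlib import Path

    import requests


    @dataclass
    class DataSource:
        """A remote file an ETL depends on, plus the local path it should be written to."""

        source: str        # URL of the raw data
        destination: Path  # local file the ETL reads during extract()

        def fetch(self) -> None:
            raise NotImplementedError


    @dataclass
    class FileDataSource(DataSource):
        """Downloads a single file from `source` to `destination`."""

        def fetch(self) -> None:
            self.destination.parent.mkdir(parents=True, exist_ok=True)
            response = requests.get(self.source, timeout=60)  # timeout value is an assumption
            response.raise_for_status()
            self.destination.write_bytes(response.content)

With each ETL returning its sources from get_data_sources(), a single command can pre-download (cache) everything up front, and a later pipeline run can pass the use-cached-data-sources flag (the -u option noted above) so extract() reads from the cache instead of re-downloading.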
Travis Newby 2023-03-03 12:26:24 -06:00 committed by GitHub
commit 6f39033dde
52 changed files with 1787 additions and 686 deletions

@@ -1,7 +1,9 @@
 import pandas as pd
 import requests
 from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.datasource import DataSource
+from data_pipeline.etl.datasource import FileDataSource
 from data_pipeline.utils import get_module_logger
@@ -11,44 +13,51 @@ logger = get_module_logger(__name__)
 class HudRecapETL(ExtractTransformLoad):
     def __init__(self):
+        # fetch
         if settings.DATASOURCE_RETRIEVAL_FROM_AWS:
-            self.HUD_RECAP_CSV_URL = (
+            self.hud_recap_csv_url = (
                 f"{settings.AWS_JUSTICE40_DATASOURCES_URL}/raw-data-sources/"
                 "hud_recap/Racially_or_Ethnically_Concentrated_Areas_of_Poverty__R_ECAPs_.csv"
             )
         else:
-            self.HUD_RECAP_CSV_URL = (
+            self.hud_recap_csv_url = (
                 "https://opendata.arcgis.com/api/v3/datasets/"
                 "56de4edea8264fe5a344da9811ef5d6e_0/downloads/data?format=csv&spatialRefId=4326"
             )
-        self.HUD_RECAP_CSV = (
-            self.get_tmp_path()
+        # input
+        self.hud_recap_source = (
+            self.get_sources_path()
             / "Racially_or_Ethnically_Concentrated_Areas_of_Poverty__R_ECAPs_.csv"
         )
+        # output
         self.CSV_PATH = self.DATA_PATH / "dataset" / "hud_recap"
-        # Definining some variable names
+        # Defining some variable names
         self.HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME = (
             "hud_recap_priority_community"
        )
         self.df: pd.DataFrame
-    def extract(self) -> None:
-        download = requests.get(
-            self.HUD_RECAP_CSV_URL,
-            verify=None,
-            timeout=settings.REQUESTS_DEFAULT_TIMOUT,
-        )
-        file_contents = download.content
-        csv_file = open(self.HUD_RECAP_CSV, "wb")
-        csv_file.write(file_contents)
-        csv_file.close()
+    def get_data_sources(self) -> [DataSource]:
+        return [
+            FileDataSource(
+                source=self.hud_recap_csv_url, destination=self.hud_recap_source
+            )
+        ]
+    def extract(self, use_cached_data_sources: bool = False) -> None:
+        super().extract(
+            use_cached_data_sources
+        )  # download and extract data sources
+        # Load comparison index (CalEnviroScreen 4)
+        self.df = pd.read_csv(self.hud_recap_source, dtype={"GEOID": "string"})
     def transform(self) -> None:
-        # Load comparison index (CalEnviroScreen 4)
-        self.df = pd.read_csv(self.HUD_RECAP_CSV, dtype={"GEOID": "string"})
         self.df.rename(
             columns={