Issue 308: Python linting (#443)

* Adds flake8, pylint, and liccheck to dependencies for data-pipeline

* Sets up and runs black autoformatting

* Adds flake8 to tox linting

* Fixes flake8 error F541 (f-string missing placeholders)

* Fixes flake8 error E501 (line too long)

* Fixes flake8 error F401 (imported but not used)
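
For reference, a minimal illustration of the three flake8 errors fixed above. This snippet is hypothetical, not taken from this codebase:

    import logging

    logger = logging.getLogger(__name__)

    # F541: an f-string with no placeholders; the fix is to drop the f prefix,
    # e.g. logger.info(f"Saving Score CSV") becomes:
    logger.info("Saving Score CSV")

    # F401 flags an import that is never referenced; the fix is to delete the
    # import, as several diffs below do.

    # E501 flags lines longer than the configured limit (flake8 defaults to 79
    # characters; black wraps at 88). Here the fix is mostly to let black
    # rewrap the line, which is what the bulk of the diffs below show.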

* Adds pylint to tox and disables the following pylint checks (see the suppression sketch after this list):
- C0114: missing module docstring
- R0201: method could be a function (no-self-use)
- R0903: too few public methods
- C0103: naming-convention violations (invalid-name)
- W0511: fixme (TODO/FIXME comments)
- W1203: f-string interpolation in logging
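
The checks above are disabled project-wide; the diffs below also silence single occurrences with per-line pragmas such as # noqa: F841 and # pylint: disable=line-too-long. A minimal sketch of that per-line style, using hypothetical names:

    import logging

    logger = logging.getLogger(__name__)


    class ScoreBucket:  # pylint: disable=too-few-public-methods
        """R0903 silenced for one class instead of project-wide."""

        EXPOSURES = "Exposures"


    def build_fields():
        # F841 (local variable assigned but never used), silenced for one line:
        min_max_fields = []  # noqa: F841
        # Passing a plain format string and args (instead of an f-string)
        # avoids W1203 without disabling the check:
        logger.info("Building fields for %s", ScoreBucket.EXPOSURES)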

* Adds utils.py to tox.ini linting and runs black on utils.py

* Fixes import-related pylint errors (C0411 and C0412)

* Fixes or ignores remaining pylint errors (for discussion later)

* Adds safety and liccheck to tox.ini
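
As a rough sketch, the tox.ini wiring for these tools might look like the following; the env names, targets, and file paths are assumptions for illustration, not copied from this commit:

    # Hypothetical tox.ini excerpt; the repo's actual env names and
    # commands may differ.
    [testenv:lint]
    deps =
        black
        flake8
        pylint
    commands =
        black --check etl utils.py
        flake8 etl utils.py
        pylint etl utils.py

    [testenv:checks]
    deps =
        safety
        liccheck
    commands =
        safety check
        liccheck -s liccheck.ini -r requirements.txt
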
Commit 5504528fdf, authored by Billy Daly on 2021-08-02 12:16:38 -04:00 and committed via GitHub.
22 changed files with 709 additions and 228 deletions

--- first file ---

@@ -1,11 +1,10 @@
from pathlib import Path
import pathlib
from config import settings
from utils import unzip_file_from_url, remove_all_from_dir
class ExtractTransformLoad(object):
class ExtractTransformLoad:
"""
A class used to instantiate an ETL object to retrieve and process data from
datasets.
@@ -34,9 +33,7 @@ class ExtractTransformLoad(object):
pass
def extract(
self, source_url: str = None, extract_path: Path = None
) -> None:
def extract(self, source_url: str = None, extract_path: Path = None) -> None:
"""Extract the data from
a remote source. By default it provides code to get the file from a source url,
unzips it and stores it on an extract_path."""

--- next file ---

@@ -67,9 +67,7 @@ def etl_runner(dataset_to_run: str = None) -> None:
# Run the ETLs for the dataset_list
for dataset in dataset_list:
etl_module = importlib.import_module(
f"etl.sources.{dataset['module_dir']}.etl"
)
etl_module = importlib.import_module(f"etl.sources.{dataset['module_dir']}.etl")
etl_class = getattr(etl_module, dataset["class_name"])
etl_instance = etl_class()

--- next file ---

@@ -4,7 +4,6 @@ import pandas as pd
from etl.base import ExtractTransformLoad
from utils import get_module_logger
from etl.sources.census.etl_utils import get_state_fips_codes
logger = get_module_logger(__name__)
@@ -28,10 +27,10 @@ class ScoreETL(ExtractTransformLoad):
self.UNEMPLOYED_FIELD_NAME = "Unemployed civilians (percent)"
self.LINGUISTIC_ISOLATION_FIELD_NAME = "Linguistic isolation (percent)"
self.HOUSING_BURDEN_FIELD_NAME = "Housing burden (percent)"
self.POVERTY_FIELD_NAME = (
"Poverty (Less than 200% of federal poverty line)"
self.POVERTY_FIELD_NAME = "Poverty (Less than 200% of federal poverty line)"
self.HIGH_SCHOOL_FIELD_NAME = (
"Percent individuals age 25 or over with less than high school degree"
)
self.HIGH_SCHOOL_FIELD_NAME = "Percent individuals age 25 or over with less than high school degree"
# There's another aggregation level (a second level of "buckets").
self.AGGREGATION_POLLUTION = "Pollution Burden"
@@ -55,9 +54,7 @@ class ScoreETL(ExtractTransformLoad):
self.ejscreen_df = pd.read_csv(
ejscreen_csv, dtype={"ID": "string"}, low_memory=False
)
self.ejscreen_df.rename(
columns={"ID": self.GEOID_FIELD_NAME}, inplace=True
)
self.ejscreen_df.rename(columns={"ID": self.GEOID_FIELD_NAME}, inplace=True)
# Load census data
census_csv = self.DATA_PATH / "dataset" / "census_acs_2019" / "usa.csv"
@@ -69,10 +66,7 @@ class ScoreETL(ExtractTransformLoad):
# Load housing and transportation data
housing_and_transportation_index_csv = (
self.DATA_PATH
/ "dataset"
/ "housing_and_transportation_index"
/ "usa.csv"
self.DATA_PATH / "dataset" / "housing_and_transportation_index" / "usa.csv"
)
self.housing_and_transportation_df = pd.read_csv(
housing_and_transportation_index_csv,
@@ -89,7 +83,7 @@ class ScoreETL(ExtractTransformLoad):
)
def transform(self) -> None:
logger.info(f"Transforming Score Data")
logger.info("Transforming Score Data")
# Join all the data sources that use census block groups
census_block_group_dfs = [
@@ -106,10 +100,7 @@ class ScoreETL(ExtractTransformLoad):
)
# Sanity check the join.
if (
len(census_block_group_df[self.GEOID_FIELD_NAME].str.len().unique())
!= 1
):
if len(census_block_group_df[self.GEOID_FIELD_NAME].str.len().unique()) != 1:
raise ValueError(
f"One of the input CSVs uses {self.GEOID_FIELD_NAME} with a different length."
)
@@ -119,9 +110,9 @@ class ScoreETL(ExtractTransformLoad):
census_tract_df = self.hud_housing_df
# Calculate the tract for the CBG data.
census_block_group_df[
self.GEOID_TRACT_FIELD_NAME
] = census_block_group_df[self.GEOID_FIELD_NAME].str[0:11]
census_block_group_df[self.GEOID_TRACT_FIELD_NAME] = census_block_group_df[
self.GEOID_FIELD_NAME
].str[0:11]
self.df = census_block_group_df.merge(
census_tract_df, on=self.GEOID_TRACT_FIELD_NAME
@@ -254,8 +245,7 @@ class ScoreETL(ExtractTransformLoad):
# Rename columns:
renaming_dict = {
data_set.input_field: data_set.renamed_field
for data_set in data_sets
data_set.input_field: data_set.renamed_field for data_set in data_sets
}
self.df.rename(
@@ -310,7 +300,7 @@ class ScoreETL(ExtractTransformLoad):
) / (max_value - min_value)
# Graph distributions and correlations.
min_max_fields = [
min_max_fields = [ # noqa: F841
f"{data_set.renamed_field}{self.MIN_MAX_FIELD_SUFFIX}"
for data_set in data_sets
if data_set.renamed_field != self.GEOID_FIELD_NAME
@@ -324,9 +314,7 @@ class ScoreETL(ExtractTransformLoad):
]
].mean(axis=1)
self.df["Score B"] = (
self.df[
"Poverty (Less than 200% of federal poverty line) (percentile)"
]
self.df["Poverty (Less than 200% of federal poverty line) (percentile)"]
* self.df[
"Percent individuals age 25 or over with less than high school degree (percentile)"
]
@@ -342,21 +330,26 @@ class ScoreETL(ExtractTransformLoad):
]
self.df[f"{bucket}"] = self.df[fields_in_bucket].mean(axis=1)
# Combine the score from the two Exposures and Environmental Effects buckets into a single score called "Pollution Burden". The math for this score is: (1.0 * Exposures Score + 0.5 * Environment Effects score) / 1.5.
# Combine the score from the two Exposures and Environmental Effects buckets
# into a single score called "Pollution Burden".
# The math for this score is:
# (1.0 * Exposures Score + 0.5 * Environment Effects score) / 1.5.
self.df[self.AGGREGATION_POLLUTION] = (
1.0 * self.df[f"{self.BUCKET_EXPOSURES}"]
+ 0.5 * self.df[f"{self.BUCKET_ENVIRONMENTAL}"]
) / 1.5
# Average the score from the two Sensitive populations and Socioeconomic factors buckets into a single score called "Population Characteristics".
# Average the score from the two Sensitive populations and
# Socioeconomic factors buckets into a single score called
# "Population Characteristics".
self.df[self.AGGREGATION_POPULATION] = self.df[
[f"{self.BUCKET_SENSITIVE}", f"{self.BUCKET_SOCIOECONOMIC}"]
].mean(axis=1)
# Multiply the "Pollution Burden" score and the "Population Characteristics" together to produce the cumulative impact score.
# Multiply the "Pollution Burden" score and the "Population Characteristics"
# together to produce the cumulative impact score.
self.df["Score C"] = (
self.df[self.AGGREGATION_POLLUTION]
* self.df[self.AGGREGATION_POPULATION]
self.df[self.AGGREGATION_POLLUTION] * self.df[self.AGGREGATION_POPULATION]
)
if len(census_block_group_df) > 220333:
@@ -371,12 +364,10 @@ class ScoreETL(ExtractTransformLoad):
]
fields_min_max = [
f"{field}{self.MIN_MAX_FIELD_SUFFIX}"
for field in fields_to_use_in_score
f"{field}{self.MIN_MAX_FIELD_SUFFIX}" for field in fields_to_use_in_score
]
fields_percentile = [
f"{field}{self.PERCENTILE_FIELD_SUFFIX}"
for field in fields_to_use_in_score
f"{field}{self.PERCENTILE_FIELD_SUFFIX}" for field in fields_to_use_in_score
]
# Calculate "Score D", which uses min-max normalization
@@ -396,17 +387,22 @@ class ScoreETL(ExtractTransformLoad):
"Score E",
"Poverty (Less than 200% of federal poverty line)",
]:
self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] = self.df[score_field].rank(pct=True)
self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] = self.df[
score_field
].rank(pct=True)
for threshold in [0.25, 0.3, 0.35, 0.4]:
fraction_converted_to_percent = int(100 * threshold)
self.df[f"{score_field} (top {fraction_converted_to_percent}th percentile)"] = (
self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] >= 1 - threshold
self.df[
f"{score_field} (top {fraction_converted_to_percent}th percentile)"
] = (
self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"]
>= 1 - threshold
)
def load(self) -> None:
logger.info(f"Saving Score CSV")
logger.info("Saving Score CSV")
# write nationwide csv
self.SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(self.SCORE_CSV_PATH / f"usa.csv", index=False)
self.df.to_csv(self.SCORE_CSV_PATH / "usa.csv", index=False)

--- next file ---

@@ -1,6 +1,7 @@
import math
import pandas as pd
import geopandas as gpd
import math
from etl.base import ExtractTransformLoad
from utils import get_module_logger
@@ -21,9 +22,7 @@ class GeoScoreETL(ExtractTransformLoad):
self.SCORE_CSV_PATH = self.DATA_PATH / "score" / "csv"
self.TILE_SCORE_CSV = self.SCORE_CSV_PATH / "tiles" / "usa.csv"
self.CENSUS_USA_GEOJSON = (
self.DATA_PATH / "census" / "geojson" / "us.json"
)
self.CENSUS_USA_GEOJSON = self.DATA_PATH / "census" / "geojson" / "us.json"
self.TARGET_SCORE_NAME = "Score E (percentile)"
self.TARGET_SCORE_RENAME_TO = "E_SCORE"
@@ -36,7 +35,7 @@ class GeoScoreETL(ExtractTransformLoad):
self.geojson_score_usa_low: gpd.GeoDataFrame
def extract(self) -> None:
logger.info(f"Reading US GeoJSON (~6 minutes)")
logger.info("Reading US GeoJSON (~6 minutes)")
self.geojson_usa_df = gpd.read_file(
self.CENSUS_USA_GEOJSON,
dtype={"GEOID10": "string"},
@@ -45,7 +44,7 @@ class GeoScoreETL(ExtractTransformLoad):
)
self.geojson_usa_df.head()
logger.info(f"Reading score CSV")
logger.info("Reading score CSV")
self.score_usa_df = pd.read_csv(
self.TILE_SCORE_CSV,
dtype={"GEOID10": "string"},
@@ -53,11 +52,11 @@ class GeoScoreETL(ExtractTransformLoad):
)
def transform(self) -> None:
logger.info(f"Pruning Census GeoJSON")
logger.info("Pruning Census GeoJSON")
fields = ["GEOID10", "geometry"]
self.geojson_usa_df = self.geojson_usa_df[fields]
logger.info(f"Merging and compressing score CSV with USA GeoJSON")
logger.info("Merging and compressing score CSV with USA GeoJSON")
self.geojson_score_usa_high = self.score_usa_df.merge(
self.geojson_usa_df, on="GEOID10", how="left"
)
@@ -75,7 +74,7 @@ class GeoScoreETL(ExtractTransformLoad):
inplace=True,
)
logger.info(f"Aggregating into tracts (~5 minutes)")
logger.info("Aggregating into tracts (~5 minutes)")
usa_tracts = self._aggregate_to_tracts(usa_simplified)
usa_tracts = gpd.GeoDataFrame(
@@ -84,17 +83,15 @@ class GeoScoreETL(ExtractTransformLoad):
crs="EPSG:4326",
)
logger.info(f"Creating buckets from tracts")
logger.info("Creating buckets from tracts")
usa_bucketed = self._create_buckets_from_tracts(
usa_tracts, self.NUMBER_OF_BUCKETS
)
logger.info(f"Aggregating buckets")
logger.info("Aggregating buckets")
usa_aggregated = self._aggregate_buckets(usa_bucketed, agg_func="mean")
compressed = self._breakup_multipolygons(
usa_aggregated, self.NUMBER_OF_BUCKETS
)
compressed = self._breakup_multipolygons(usa_aggregated, self.NUMBER_OF_BUCKETS)
self.geojson_score_usa_low = gpd.GeoDataFrame(
compressed,
@@ -118,9 +115,7 @@ class GeoScoreETL(ExtractTransformLoad):
# assign tracts to buckets by D_SCORE
state_tracts.sort_values(self.TARGET_SCORE_RENAME_TO, inplace=True)
SCORE_bucket = []
bucket_size = math.ceil(
len(state_tracts.index) / self.NUMBER_OF_BUCKETS
)
bucket_size = math.ceil(len(state_tracts.index) / self.NUMBER_OF_BUCKETS)
for i in range(len(state_tracts.index)):
SCORE_bucket.extend([math.floor(i / bucket_size)])
state_tracts[f"{self.TARGET_SCORE_RENAME_TO}_bucket"] = SCORE_bucket
@@ -155,14 +150,10 @@ class GeoScoreETL(ExtractTransformLoad):
return compressed
def load(self) -> None:
logger.info(f"Writing usa-high (~9 minutes)")
self.geojson_score_usa_high.to_file(
self.SCORE_HIGH_GEOJSON, driver="GeoJSON"
)
logger.info(f"Completed writing usa-high")
logger.info("Writing usa-high (~9 minutes)")
self.geojson_score_usa_high.to_file(self.SCORE_HIGH_GEOJSON, driver="GeoJSON")
logger.info("Completed writing usa-high")
logger.info(f"Writing usa-low (~9 minutes)")
self.geojson_score_usa_low.to_file(
self.SCORE_LOW_GEOJSON, driver="GeoJSON"
)
logger.info(f"Completed writing usa-low")
logger.info("Writing usa-low (~9 minutes)")
self.geojson_score_usa_low.to_file(self.SCORE_LOW_GEOJSON, driver="GeoJSON")
logger.info("Completed writing usa-low")

--- next file ---

@@ -19,9 +19,7 @@ class PostScoreETL(ExtractTransformLoad):
self.CENSUS_USA_CSV = self.DATA_PATH / "census" / "csv" / "us.csv"
self.SCORE_CSV_PATH = self.DATA_PATH / "score" / "csv"
self.STATE_CSV = (
self.DATA_PATH / "census" / "csv" / "fips_states_2010.csv"
)
self.STATE_CSV = self.DATA_PATH / "census" / "csv" / "fips_states_2010.csv"
self.FULL_SCORE_CSV = self.SCORE_CSV_PATH / "full" / "usa.csv"
self.TILR_SCORE_CSV = self.SCORE_CSV_PATH / "tile" / "usa.csv"
@@ -49,7 +47,7 @@ class PostScoreETL(ExtractTransformLoad):
self.TMP_PATH,
)
logger.info(f"Reading Counties CSV")
logger.info("Reading Counties CSV")
self.counties_df = pd.read_csv(
self.CENSUS_COUNTIES_TXT,
sep="\t",
@@ -58,16 +56,14 @@ class PostScoreETL(ExtractTransformLoad):
encoding="latin-1",
)
logger.info(f"Reading States CSV")
logger.info("Reading States CSV")
self.states_df = pd.read_csv(
self.STATE_CSV, dtype={"fips": "string", "state_code": "string"}
)
self.score_df = pd.read_csv(
self.FULL_SCORE_CSV, dtype={"GEOID10": "string"}
)
self.score_df = pd.read_csv(self.FULL_SCORE_CSV, dtype={"GEOID10": "string"})
def transform(self) -> None:
logger.info(f"Transforming data sources for Score + County CSV")
logger.info("Transforming data sources for Score + County CSV")
# rename some of the columns to prepare for merge
self.counties_df = self.counties_df[["USPS", "GEOID", "NAME"]]
@@ -101,7 +97,7 @@ class PostScoreETL(ExtractTransformLoad):
)
# check if there are census cbgs without score
logger.info(f"Removing CBG rows without score")
logger.info("Removing CBG rows without score")
## load cbgs
cbg_usa_df = pd.read_csv(
@@ -121,19 +117,19 @@ class PostScoreETL(ExtractTransformLoad):
null_cbg_df = merged_df[merged_df["Score E (percentile)"].isnull()]
# subtract data sets
removed_df = pd.concat(
[merged_df, null_cbg_df, null_cbg_df]
).drop_duplicates(keep=False)
removed_df = pd.concat([merged_df, null_cbg_df, null_cbg_df]).drop_duplicates(
keep=False
)
# set the score to the new df
self.score_county_state_merged = removed_df
def load(self) -> None:
logger.info(f"Saving Full Score CSV with County Information")
logger.info("Saving Full Score CSV with County Information")
self.SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
self.score_county_state_merged.to_csv(self.FULL_SCORE_CSV, index=False)
logger.info(f"Saving Tile Score CSV")
logger.info("Saving Tile Score CSV")
# TODO: check which are the columns we'll use
# Related to: https://github.com/usds/justice40-tool/issues/302
score_tiles = self.score_county_state_merged[self.TILES_SCORE_COLUMNS]

--- next file ---

@@ -9,16 +9,12 @@ logger = get_module_logger(__name__)
class CalEnviroScreenETL(ExtractTransformLoad):
def __init__(self):
self.CALENVIROSCREEN_FTP_URL = "https://justice40-data.s3.amazonaws.com/data-sources/CalEnviroScreen_4.0_2021.zip"
self.CALENVIROSCREEN_CSV = (
self.TMP_PATH / "CalEnviroScreen_4.0_2021.csv"
)
self.CALENVIROSCREEN_CSV = self.TMP_PATH / "CalEnviroScreen_4.0_2021.csv"
self.CSV_PATH = self.DATA_PATH / "dataset" / "calenviroscreen4"
# Defining some variable names
self.CALENVIROSCREEN_SCORE_FIELD_NAME = "calenviroscreen_score"
self.CALENVIROSCREEN_PERCENTILE_FIELD_NAME = (
"calenviroscreen_percentile"
)
self.CALENVIROSCREEN_PERCENTILE_FIELD_NAME = "calenviroscreen_percentile"
self.CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD_NAME = (
"calenviroscreen_priority_community"
)
@@ -30,14 +26,14 @@ class CalEnviroScreenETL(ExtractTransformLoad):
self.df: pd.DataFrame
def extract(self) -> None:
logger.info(f"Downloading CalEnviroScreen Data")
logger.info("Downloading CalEnviroScreen Data")
super().extract(
self.CALENVIROSCREEN_FTP_URL,
self.TMP_PATH,
)
def transform(self) -> None:
logger.info(f"Transforming CalEnviroScreen Data")
logger.info("Transforming CalEnviroScreen Data")
# Data from https://calenviroscreen-oehha.hub.arcgis.com/#Data, specifically:
# https://oehha.ca.gov/media/downloads/calenviroscreen/document/calenviroscreen40resultsdatadictionaryd12021.zip
@@ -67,7 +63,7 @@ class CalEnviroScreenETL(ExtractTransformLoad):
)
def load(self) -> None:
logger.info(f"Saving CalEnviroScreen CSV")
logger.info("Saving CalEnviroScreen CSV")
# write nationwide csv
self.CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(self.CSV_PATH / f"data06.csv", index=False)
self.df.to_csv(self.CSV_PATH / "data06.csv", index=False)

--- next file ---

@@ -1,11 +1,12 @@
import csv
import os
import csv
import json
from pathlib import Path
import geopandas as gpd
from .etl_utils import get_state_fips_codes
from utils import unzip_file_from_url, get_module_logger
from .etl_utils import get_state_fips_codes
logger = get_module_logger(__name__)
@@ -29,9 +30,7 @@ def download_census_csvs(data_path: Path) -> None:
for fips in state_fips_codes:
# check if file exists
shp_file_path = (
data_path / "census" / "shp" / fips / f"tl_2010_{fips}_bg10.shp"
)
shp_file_path = data_path / "census" / "shp" / fips / f"tl_2010_{fips}_bg10.shp"
logger.info(f"Checking if {fips} file exists")
if not os.path.isfile(shp_file_path):
@@ -110,7 +109,7 @@ def download_census_csvs(data_path: Path) -> None:
)
## create national geojson
logger.info(f"Generating national geojson file")
logger.info("Generating national geojson file")
usa_df = gpd.GeoDataFrame()
for file_name in geojson_dir_path.rglob("*.json"):
@@ -119,7 +118,7 @@ def download_census_csvs(data_path: Path) -> None:
usa_df = usa_df.append(state_gdf)
usa_df = usa_df.to_crs("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
logger.info(f"Writing national geojson file")
logger.info("Writing national geojson file")
usa_df.to_file(geojson_dir_path / "us.json", driver="GeoJSON")
logger.info("Census block groups downloading complete")

--- next file ---

@@ -1,7 +1,8 @@
from pathlib import Path
import csv
import pandas as pd
import os
import csv
from pathlib import Path
import pandas as pd
from config import settings
from utils import (
@@ -35,7 +36,7 @@ def get_state_fips_codes(data_path: Path) -> list:
# check if file exists
if not os.path.isfile(fips_csv_path):
logger.info(f"Downloading fips from S3 repository")
logger.info("Downloading fips from S3 repository")
unzip_file_from_url(
settings.AWS_JUSTICE40_DATA_URL + "/Census/fips_states_2010.zip",
data_path / "tmp",

--- next file ---

@@ -11,14 +11,10 @@ logger = get_module_logger(__name__)
class CensusACSETL(ExtractTransformLoad):
def __init__(self):
self.ACS_YEAR = 2019
self.OUTPUT_PATH = (
self.DATA_PATH / "dataset" / f"census_acs_{self.ACS_YEAR}"
)
self.OUTPUT_PATH = self.DATA_PATH / "dataset" / f"census_acs_{self.ACS_YEAR}"
self.UNEMPLOYED_FIELD_NAME = "Unemployed civilians (percent)"
self.LINGUISTIC_ISOLATION_FIELD_NAME = "Linguistic isolation (percent)"
self.LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME = (
"Linguistic isolation (total)"
)
self.LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME = "Linguistic isolation (total)"
self.LINGUISTIC_ISOLATION_FIELDS = [
"C16002_001E",
"C16002_004E",
@@ -28,9 +24,7 @@ class CensusACSETL(ExtractTransformLoad):
]
self.df: pd.DataFrame
def _fips_from_censusdata_censusgeo(
self, censusgeo: censusdata.censusgeo
) -> str:
def _fips_from_censusdata_censusgeo(self, censusgeo: censusdata.censusgeo) -> str:
"""Create a FIPS code from the proprietary censusgeo index."""
fips = "".join([value for (key, value) in censusgeo.params()])
return fips
@@ -38,9 +32,7 @@ class CensusACSETL(ExtractTransformLoad):
def extract(self) -> None:
dfs = []
for fips in get_state_fips_codes(self.DATA_PATH):
logger.info(
f"Downloading data for state/territory with FIPS code {fips}"
)
logger.info(f"Downloading data for state/territory with FIPS code {fips}")
dfs.append(
censusdata.download(
@@ -65,13 +57,11 @@
)
def transform(self) -> None:
logger.info(f"Starting Census ACS Transform")
logger.info("Starting Census ACS Transform")
# Calculate percent unemployment.
# TODO: remove small-sample data that should be `None` instead of a high-variance fraction.
self.df[self.UNEMPLOYED_FIELD_NAME] = (
self.df.B23025_005E / self.df.B23025_003E
)
self.df[self.UNEMPLOYED_FIELD_NAME] = self.df.B23025_005E / self.df.B23025_003E
# Calculate linguistic isolation.
individual_limited_english_fields = [
@@ -92,7 +82,7 @@ class CensusACSETL(ExtractTransformLoad):
self.df[self.LINGUISTIC_ISOLATION_FIELD_NAME].describe()
def load(self) -> None:
logger.info(f"Saving Census ACS Data")
logger.info("Saving Census ACS Data")
# mkdir census
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
@@ -108,6 +98,6 @@ class CensusACSETL(ExtractTransformLoad):
)
def validate(self) -> None:
logger.info(f"Validating Census ACS Data")
logger.info("Validating Census ACS Data")
pass

--- next file ---

@@ -8,20 +8,22 @@ logger = get_module_logger(__name__)
class EJScreenETL(ExtractTransformLoad):
def __init__(self):
self.EJSCREEN_FTP_URL = "https://gaftp.epa.gov/EJSCREEN/2019/EJSCREEN_2019_StatePctile.csv.zip"
self.EJSCREEN_FTP_URL = (
"https://gaftp.epa.gov/EJSCREEN/2019/EJSCREEN_2019_StatePctile.csv.zip"
)
self.EJSCREEN_CSV = self.TMP_PATH / "EJSCREEN_2019_StatePctiles.csv"
self.CSV_PATH = self.DATA_PATH / "dataset" / "ejscreen_2019"
self.df: pd.DataFrame
def extract(self) -> None:
logger.info(f"Downloading EJScreen Data")
logger.info("Downloading EJScreen Data")
super().extract(
self.EJSCREEN_FTP_URL,
self.TMP_PATH,
)
def transform(self) -> None:
logger.info(f"Transforming EJScreen Data")
logger.info("Transforming EJScreen Data")
self.df = pd.read_csv(
self.EJSCREEN_CSV,
dtype={"ID": "string"},
@@ -31,7 +33,7 @@ class EJScreenETL(ExtractTransformLoad):
)
def load(self) -> None:
logger.info(f"Saving EJScreen CSV")
logger.info("Saving EJScreen CSV")
# write nationwide csv
self.CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(self.CSV_PATH / f"usa.csv", index=False)
self.df.to_csv(self.CSV_PATH / "usa.csv", index=False)

--- next file ---

@@ -35,9 +35,7 @@ class HousingTransportationETL(ExtractTransformLoad):
)
# New file name:
tmp_csv_file_path = (
zip_file_dir / f"htaindex_data_blkgrps_{fips}.csv"
)
tmp_csv_file_path = zip_file_dir / f"htaindex_data_blkgrps_{fips}.csv"
tmp_df = pd.read_csv(filepath_or_buffer=tmp_csv_file_path)
dfs.append(tmp_df)
@@ -45,16 +43,16 @@ class HousingTransportationETL(ExtractTransformLoad):
self.df = pd.concat(dfs)
def transform(self) -> None:
logger.info(f"Transforming Housing and Transportation Data")
logger.info("Transforming Housing and Transportation Data")
# Rename and reformat block group ID
self.df.rename(columns={"blkgrp": self.GEOID_FIELD_NAME}, inplace=True)
self.df[self.GEOID_FIELD_NAME] = self.df[
self.GEOID_FIELD_NAME
].str.replace('"', "")
self.df[self.GEOID_FIELD_NAME] = self.df[self.GEOID_FIELD_NAME].str.replace(
'"', ""
)
def load(self) -> None:
logger.info(f"Saving Housing and Transportation Data")
logger.info("Saving Housing and Transportation Data")
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False)

--- next file ---

@@ -1,8 +1,7 @@
import pandas as pd
from etl.base import ExtractTransformLoad
from etl.sources.census.etl_utils import get_state_fips_codes
from utils import get_module_logger, unzip_file_from_url, remove_all_from_dir
from utils import get_module_logger
logger = get_module_logger(__name__)
@@ -11,33 +10,37 @@ class HudHousingETL(ExtractTransformLoad):
def __init__(self):
self.OUTPUT_PATH = self.DATA_PATH / "dataset" / "hud_housing"
self.GEOID_TRACT_FIELD_NAME = "GEOID10_TRACT"
self.HOUSING_FTP_URL = "https://www.huduser.gov/portal/datasets/cp/2012thru2016-140-csv.zip"
self.HOUSING_FTP_URL = (
"https://www.huduser.gov/portal/datasets/cp/2012thru2016-140-csv.zip"
)
self.HOUSING_ZIP_FILE_DIR = self.TMP_PATH / "hud_housing"
# We measure households earning less than 80% of HUD Area Median Family Income by county
# and paying greater than 30% of their income to housing costs.
self.HOUSING_BURDEN_FIELD_NAME = "Housing burden (percent)"
self.HOUSING_BURDEN_NUMERATOR_FIELD_NAME = "HOUSING_BURDEN_NUMERATOR"
self.HOUSING_BURDEN_DENOMINATOR_FIELD_NAME = (
"HOUSING_BURDEN_DENOMINATOR"
)
self.HOUSING_BURDEN_DENOMINATOR_FIELD_NAME = "HOUSING_BURDEN_DENOMINATOR"
# Note: some variable definitions.
# HUD-adjusted median family income (HAMFI).
# The four housing problems are: incomplete kitchen facilities, incomplete plumbing facilities, more than 1 person per room, and cost burden greater than 30%.
# The four housing problems are:
# - incomplete kitchen facilities,
# - incomplete plumbing facilities,
# - more than 1 person per room,
# - cost burden greater than 30%.
# Table 8 is the desired table.
self.df: pd.DataFrame
def extract(self) -> None:
logger.info(f"Extracting HUD Housing Data")
logger.info("Extracting HUD Housing Data")
super().extract(
self.HOUSING_FTP_URL,
self.HOUSING_ZIP_FILE_DIR,
)
def transform(self) -> None:
logger.info(f"Transforming HUD Housing Data")
logger.info("Transforming HUD Housing Data")
# New file name:
tmp_csv_file_path = (
@@ -53,9 +56,7 @@ class HudHousingETL(ExtractTransformLoad):
)
# Rename and reformat block group ID
self.df.rename(
columns={"geoid": self.GEOID_TRACT_FIELD_NAME}, inplace=True
)
self.df.rename(columns={"geoid": self.GEOID_TRACT_FIELD_NAME}, inplace=True)
# The CHAS data has census tract ids such as `14000US01001020100`
# Whereas the rest of our data uses, for the same tract, `01001020100`.
@@ -70,69 +71,177 @@ class HudHousingETL(ExtractTransformLoad):
# Owner occupied numerator fields
OWNER_OCCUPIED_NUMERATOR_FIELDS = [
# Key: Column Name Line_Type Tenure Household income Cost burden Facilities
# T8_est7 Subtotal Owner occupied less than or equal to 30% of HAMFI greater than 30% but less than or equal to 50% All
# Column Name
# Line_Type
# Tenure
# Household income
# Cost burden
# Facilities
"T8_est7",
# T8_est10 Subtotal Owner occupied less than or equal to 30% of HAMFI greater than 50% All
# Subtotal
# Owner occupied
# less than or equal to 30% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est10",
# T8_est20 Subtotal Owner occupied greater than 30% but less than or equal to 50% of HAMFI greater than 30% but less than or equal to 50% All
# Subtotal
# Owner occupied
# less than or equal to 30% of HAMFI
# greater than 50%
# All
"T8_est20",
# T8_est23 Subtotal Owner occupied greater than 30% but less than or equal to 50% of HAMFI greater than 50% All
# Subtotal
# Owner occupied
# greater than 30% but less than or equal to 50% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est23",
# T8_est33 Subtotal Owner occupied greater than 50% but less than or equal to 80% of HAMFI greater than 30% but less than or equal to 50% All
# Subtotal
# Owner occupied
# greater than 30% but less than or equal to 50% of HAMFI
# greater than 50%
# All
"T8_est33",
# T8_est36 Subtotal Owner occupied greater than 50% but less than or equal to 80% of HAMFI greater than 50% All
# Subtotal
# Owner occupied
# greater than 50% but less than or equal to 80% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est36",
# Subtotal
# Owner occupied
# greater than 50% but less than or equal to 80% of HAMFI
# greater than 50%
# All
]
# These rows have the values where HAMFI was not computed, b/c of no or negative income.
OWNER_OCCUPIED_NOT_COMPUTED_FIELDS = [
# Key: Column Name Line_Type Tenure Household income Cost burden Facilities
# T8_est13 Subtotal Owner occupied less than or equal to 30% of HAMFI not computed (no/negative income) All
# Column Name
# Line_Type
# Tenure
# Household income
# Cost burden
# Facilities
"T8_est13",
# T8_est26 Subtotal Owner occupied greater than 30% but less than or equal to 50% of HAMFI not computed (no/negative income) All
# Subtotal
# Owner occupied
# less than or equal to 30% of HAMFI
# not computed (no/negative income)
# All
"T8_est26",
# T8_est39 Subtotal Owner occupied greater than 50% but less than or equal to 80% of HAMFI not computed (no/negative income) All
# Subtotal
# Owner occupied
# greater than 30% but less than or equal to 50% of HAMFI
# not computed (no/negative income)
# All
"T8_est39",
# T8_est52 Subtotal Owner occupied greater than 80% but less than or equal to 100% of HAMFI not computed (no/negative income) All
# Subtotal
# Owner occupied
# greater than 50% but less than or equal to 80% of HAMFI
# not computed (no/negative income)
# All
"T8_est52",
# T8_est65 Subtotal Owner occupied greater than 100% of HAMFI not computed (no/negative income) All
# Subtotal
# Owner occupied
# greater than 80% but less than or equal to 100% of HAMFI
# not computed (no/negative income)
# All
"T8_est65",
# Subtotal
# Owner occupied
# greater than 100% of HAMFI
# not computed (no/negative income)
# All
]
# T8_est2 Subtotal Owner occupied All All All
OWNER_OCCUPIED_POPULATION_FIELD = "T8_est2"
# Subtotal
# Owner occupied
# All
# All
# All
# Renter occupied numerator fields
RENTER_OCCUPIED_NUMERATOR_FIELDS = [
# Key: Column Name Line_Type Tenure Household income Cost burden Facilities
# T8_est73 Subtotal Renter occupied less than or equal to 30% of HAMFI greater than 30% but less than or equal to 50% All
# Column Name
# Line_Type
# Tenure
# Household income
# Cost burden
# Facilities
"T8_est73",
# T8_est76 Subtotal Renter occupied less than or equal to 30% of HAMFI greater than 50% All
# Subtotal
# Renter occupied
# less than or equal to 30% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est76",
# T8_est86 Subtotal Renter occupied greater than 30% but less than or equal to 50% of HAMFI greater than 30% but less than or equal to 50% All
# Subtotal
# Renter occupied
# less than or equal to 30% of HAMFI
# greater than 50%
# All
"T8_est86",
# T8_est89 Subtotal Renter occupied greater than 30% but less than or equal to 50% of HAMFI greater than 50% All
# Subtotal
# Renter occupied
# greater than 30% but less than or equal to 50% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est89",
# T8_est99 Subtotal Renter occupied greater than 50% but less than or equal to 80% of HAMFI greater than 30% but less than or equal to 50% All
# Subtotal
# Renter occupied
# greater than 30% but less than or equal to 50% of HAMFI
# greater than 50%
# All
"T8_est99",
# T8_est102 Subtotal Renter occupied greater than 50% but less than or equal to 80% of HAMFI greater than 50% All
# Subtotal
# Renter occupied greater than 50% but less than or equal to 80% of HAMFI
# greater than 30% but less than or equal to 50%
# All
"T8_est102",
# Subtotal
# Renter occupied
# greater than 50% but less than or equal to 80% of HAMFI
# greater than 50%
# All
]
# These rows have the values where HAMFI was not computed, b/c of no or negative income.
RENTER_OCCUPIED_NOT_COMPUTED_FIELDS = [
# Key: Column Name Line_Type Tenure Household income Cost burden Facilities
# T8_est79 Subtotal Renter occupied less than or equal to 30% of HAMFI not computed (no/negative income) All
# Column Name
# Line_Type
# Tenure
# Household income
# Cost burden
# Facilities
"T8_est79",
# T8_est92 Subtotal Renter occupied greater than 30% but less than or equal to 50% of HAMFI not computed (no/negative income) All
# Subtotal
# Renter occupied less than or equal to 30% of HAMFI
# not computed (no/negative income)
# All
"T8_est92",
# T8_est105 Subtotal Renter occupied greater than 50% but less than or equal to 80% of HAMFI not computed (no/negative income) All
# Subtotal
# Renter occupied greater than 30% but less than or equal to 50% of HAMFI
# not computed (no/negative income)
# All
"T8_est105",
# T8_est118 Subtotal Renter occupied greater than 80% but less than or equal to 100% of HAMFI not computed (no/negative income) All
# Subtotal
# Renter occupied
# greater than 50% but less than or equal to 80% of HAMFI
# not computed (no/negative income)
# All
"T8_est118",
# T8_est131 Subtotal Renter occupied greater than 100% of HAMFI not computed (no/negative income) All
# Subtotal
# Renter occupied greater than 80% but less than or equal to 100% of HAMFI
# not computed (no/negative income)
# All
"T8_est131",
# Subtotal
# Renter occupied
# greater than 100% of HAMFI
# not computed (no/negative income)
# All
]
# T8_est68 Subtotal Renter occupied All All All
@@ -165,14 +274,12 @@ class HudHousingETL(ExtractTransformLoad):
# TODO: add small sample size checks
self.df[self.HOUSING_BURDEN_FIELD_NAME] = self.df[
self.HOUSING_BURDEN_NUMERATOR_FIELD_NAME
].astype(float) / self.df[
self.HOUSING_BURDEN_DENOMINATOR_FIELD_NAME
].astype(
].astype(float) / self.df[self.HOUSING_BURDEN_DENOMINATOR_FIELD_NAME].astype(
float
)
def load(self) -> None:
logger.info(f"Saving HUD Housing Data")
logger.info("Saving HUD Housing Data")
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)

--- next file ---

@@ -9,7 +9,8 @@ logger = get_module_logger(__name__)
class HudRecapETL(ExtractTransformLoad):
def __init__(self):
self.HUD_RECAP_CSV_URL = "https://opendata.arcgis.com/api/v3/datasets/56de4edea8264fe5a344da9811ef5d6e_0/downloads/data?format=csv&spatialRefId=4326"
# pylint: disable=line-too-long
self.HUD_RECAP_CSV_URL = "https://opendata.arcgis.com/api/v3/datasets/56de4edea8264fe5a344da9811ef5d6e_0/downloads/data?format=csv&spatialRefId=4326" # noqa: E501
self.HUD_RECAP_CSV = (
self.TMP_PATH
/ "Racially_or_Ethnically_Concentrated_Areas_of_Poverty__R_ECAPs_.csv"
@@ -22,7 +23,7 @@ class HudRecapETL(ExtractTransformLoad):
self.df: pd.DataFrame
def extract(self) -> None:
logger.info(f"Downloading HUD Recap Data")
logger.info("Downloading HUD Recap Data")
download = requests.get(self.HUD_RECAP_CSV_URL, verify=None)
file_contents = download.content
csv_file = open(self.HUD_RECAP_CSV, "wb")
@@ -30,7 +31,7 @@ class HudRecapETL(ExtractTransformLoad):
csv_file.close()
def transform(self) -> None:
logger.info(f"Transforming HUD Recap Data")
logger.info("Transforming HUD Recap Data")
# Load comparison index (CalEnviroScreen 4)
self.df = pd.read_csv(self.HUD_RECAP_CSV, dtype={"GEOID": "string"})
@@ -57,7 +58,7 @@ class HudRecapETL(ExtractTransformLoad):
self.df.sort_values(by=self.GEOID_TRACT_FIELD_NAME, inplace=True)
def load(self) -> None:
logger.info(f"Saving HUD Recap CSV")
logger.info("Saving HUD Recap CSV")
# write nationwide csv
self.CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_csv(self.CSV_PATH / f"usa.csv", index=False)
self.df.to_csv(self.CSV_PATH / "usa.csv", index=False)

--- next file ---

@@ -3,25 +3,72 @@ import geopandas as gpd
from etl.base import ExtractTransformLoad
from utils import get_module_logger
import os
logger = get_module_logger(__name__)
class TreeEquityScoreETL(ExtractTransformLoad):
def __init__(self):
self.TES_URL = "https://national-tes-data-share.s3.amazonaws.com/national_tes_share/"
self.TES_URL = (
"https://national-tes-data-share.s3.amazonaws.com/national_tes_share/"
)
self.TES_CSV = self.TMP_PATH / "tes_2021_data.csv"
self.CSV_PATH = self.DATA_PATH / "dataset" / "tree_equity_score"
self.df: gpd.GeoDataFrame
self.states = ["al", "az", "ar", "ca", "co", "ct", "de", "dc", "fl",
"ga", "id", "il", "in", "ia", "ks", "ky", "la", "me",
"md", "ma", "mi", "mn", "ms", "mo", "mt", "ne", "nv", "nh",
"nj", "nm", "ny", "nc", "nd", "oh", "ok", "or", "pa",
"ri", "sc", "sd", "tn", "tx", "ut", "vt", "va", "wa", "wv", "wi", "wy"]
self.states = [
"al",
"az",
"ar",
"ca",
"co",
"ct",
"de",
"dc",
"fl",
"ga",
"id",
"il",
"in",
"ia",
"ks",
"ky",
"la",
"me",
"md",
"ma",
"mi",
"mn",
"ms",
"mo",
"mt",
"ne",
"nv",
"nh",
"nj",
"nm",
"ny",
"nc",
"nd",
"oh",
"ok",
"or",
"pa",
"ri",
"sc",
"sd",
"tn",
"tx",
"ut",
"vt",
"va",
"wa",
"wv",
"wi",
"wy",
]
def extract(self) -> None:
logger.info(f"Downloading Tree Equity Score Data")
logger.info("Downloading Tree Equity Score Data")
for state in self.states:
super().extract(
f"{self.TES_URL}{state}.zip.zip",
@@ -29,14 +76,14 @@ class TreeEquityScoreETL(ExtractTransformLoad):
)
def transform(self) -> None:
logger.info(f"Transforming Tree Equity Score Data")
logger.info("Transforming Tree Equity Score Data")
tes_state_dfs = []
for state in self.states:
tes_state_dfs.append(gpd.read_file(f"{self.TMP_PATH}/{state}/{state}.shp"))
self.df = gpd.GeoDataFrame(pd.concat(tes_state_dfs), crs=tes_state_dfs[0].crs)
def load(self) -> None:
logger.info(f"Saving Tree Equity Score GeoJSON")
logger.info("Saving Tree Equity Score GeoJSON")
# write nationwide csv
self.CSV_PATH.mkdir(parents=True, exist_ok=True)
self.df.to_file(self.CSV_PATH / "tes_conus.geojson", driver='GeoJSON')
self.df.to_file(self.CSV_PATH / "tes_conus.geojson", driver="GeoJSON")