Mirror of https://github.com/DOI-DO/j40-cejst-2.git, synced 2025-08-09 07:24:19 -07:00
Issue 308 python linting (#443)
* Adds flake8, pylint, and liccheck to dependencies for data-pipeline
* Sets up and runs black autoformatting
* Adds flake8 to tox linting
* Fixes flake8 error F541 f-string missing placeholders
* Fixes flake8 E501 line too long
* Fixes flake8 F401 imported but not used
* Adds pylint to tox and disables the following pylint errors:
  - C0114: module docstrings
  - R0201: method could have been a function
  - R0903: too few public methods
  - C0103: name case styling
  - W0511: fix me
  - W1203: f-string interpolation in logging
* Adds utils.py to tox.ini linting, runs black on utils.py
* Fixes import-related pylint errors: C0411 and C0412
* Fixes or ignores remaining pylint errors (for discussion later)
* Adds safety and liccheck to tox.ini
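For orientation, a minimal sketch of the kind of tox lint environment the bullets above describe. It is assembled from the commit message, not copied from the commit itself; the env name, paths, and exact commands are assumptions.

    [testenv:lint]
    deps =
        black
        flake8
        pylint
        safety
        liccheck
    commands =
        black etl utils.py    # the autoformatting pass described above
        flake8 etl utils.py   # catches F541, E501, F401, etc.
        pylint etl utils.py   # with the six checks above disabled
        safety check          # scans dependencies for known vulnerabilities

The pylint disables would typically live in a pylintrc or pyproject section, e.g. disable=C0114,R0201,R0903,C0103,W0511,W1203.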
This commit is contained in:
parent 51f7666062
commit 5504528fdf

22 changed files with 709 additions and 228 deletions
@@ -4,7 +4,6 @@ import pandas as pd
 
 from etl.base import ExtractTransformLoad
 from utils import get_module_logger
-from etl.sources.census.etl_utils import get_state_fips_codes
 
 logger = get_module_logger(__name__)
 
@@ -28,10 +27,10 @@ class ScoreETL(ExtractTransformLoad):
         self.UNEMPLOYED_FIELD_NAME = "Unemployed civilians (percent)"
         self.LINGUISTIC_ISOLATION_FIELD_NAME = "Linguistic isolation (percent)"
         self.HOUSING_BURDEN_FIELD_NAME = "Housing burden (percent)"
-        self.POVERTY_FIELD_NAME = (
-            "Poverty (Less than 200% of federal poverty line)"
+        self.POVERTY_FIELD_NAME = "Poverty (Less than 200% of federal poverty line)"
+        self.HIGH_SCHOOL_FIELD_NAME = (
+            "Percent individuals age 25 or over with less than high school degree"
         )
-        self.HIGH_SCHOOL_FIELD_NAME = "Percent individuals age 25 or over with less than high school degree"
 
         # There's another aggregation level (a second level of "buckets").
         self.AGGREGATION_POLLUTION = "Pollution Burden"
@@ -55,9 +54,7 @@ class ScoreETL(ExtractTransformLoad):
         self.ejscreen_df = pd.read_csv(
             ejscreen_csv, dtype={"ID": "string"}, low_memory=False
         )
-        self.ejscreen_df.rename(
-            columns={"ID": self.GEOID_FIELD_NAME}, inplace=True
-        )
+        self.ejscreen_df.rename(columns={"ID": self.GEOID_FIELD_NAME}, inplace=True)
 
         # Load census data
         census_csv = self.DATA_PATH / "dataset" / "census_acs_2019" / "usa.csv"
@@ -69,10 +66,7 @@ class ScoreETL(ExtractTransformLoad):
 
         # Load housing and transportation data
         housing_and_transportation_index_csv = (
-            self.DATA_PATH
-            / "dataset"
-            / "housing_and_transportation_index"
-            / "usa.csv"
+            self.DATA_PATH / "dataset" / "housing_and_transportation_index" / "usa.csv"
         )
         self.housing_and_transportation_df = pd.read_csv(
             housing_and_transportation_index_csv,
@@ -89,7 +83,7 @@ class ScoreETL(ExtractTransformLoad):
         )
 
     def transform(self) -> None:
-        logger.info(f"Transforming Score Data")
+        logger.info("Transforming Score Data")
 
         # Join all the data sources that use census block groups
         census_block_group_dfs = [
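The f-string change in this hunk (and the similar ones below) fixes flake8's F541: an f-string with no placeholders is just a plain string with extra overhead. The related pylint check W1203 (f-string interpolation in logging) is disabled rather than fixed, per the commit message. A small illustration, with a hypothetical row count:

    import logging

    logger = logging.getLogger(__name__)

    logger.info(f"Transforming Score Data")  # F541: no {placeholders} in the f-string
    logger.info("Transforming Score Data")   # fixed: plain string literal

    # W1203 would flag the f-string form below because the string is built even
    # when INFO logging is off; lazy %-formatting defers the work to emit time.
    row_count = 100  # hypothetical value
    logger.info("Loaded %s rows", row_count)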
@@ -106,10 +100,7 @@ class ScoreETL(ExtractTransformLoad):
         )
 
         # Sanity check the join.
-        if (
-            len(census_block_group_df[self.GEOID_FIELD_NAME].str.len().unique())
-            != 1
-        ):
+        if len(census_block_group_df[self.GEOID_FIELD_NAME].str.len().unique()) != 1:
             raise ValueError(
                 f"One of the input CSVs uses {self.GEOID_FIELD_NAME} with a different length."
             )
@@ -119,9 +110,9 @@ class ScoreETL(ExtractTransformLoad):
         census_tract_df = self.hud_housing_df
 
         # Calculate the tract for the CBG data.
-        census_block_group_df[
-            self.GEOID_TRACT_FIELD_NAME
-        ] = census_block_group_df[self.GEOID_FIELD_NAME].str[0:11]
+        census_block_group_df[self.GEOID_TRACT_FIELD_NAME] = census_block_group_df[
+            self.GEOID_FIELD_NAME
+        ].str[0:11]
 
         self.df = census_block_group_df.merge(
             census_tract_df, on=self.GEOID_TRACT_FIELD_NAME
@@ -254,8 +245,7 @@ class ScoreETL(ExtractTransformLoad):
 
         # Rename columns:
         renaming_dict = {
-            data_set.input_field: data_set.renamed_field
-            for data_set in data_sets
+            data_set.input_field: data_set.renamed_field for data_set in data_sets
         }
 
         self.df.rename(
@@ -310,7 +300,7 @@ class ScoreETL(ExtractTransformLoad):
         ) / (max_value - min_value)
 
         # Graph distributions and correlations.
-        min_max_fields = [
+        min_max_fields = [  # noqa: F841
             f"{data_set.renamed_field}{self.MIN_MAX_FIELD_SUFFIX}"
             for data_set in data_sets
             if data_set.renamed_field != self.GEOID_FIELD_NAME
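The trailing "# noqa: F841" added in this hunk suppresses exactly one flake8 check (F841, local variable assigned but never used) on that line, keeping the variable around for the later discussion the commit message mentions instead of deleting it. A minimal illustration; compute_intermediate is a hypothetical helper:

    def transform():
        # flake8 would normally report F841 here; the trailing comment
        # silences only that check, only on this line.
        debug_snapshot = compute_intermediate()  # noqa: F841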
@@ -324,9 +314,7 @@ class ScoreETL(ExtractTransformLoad):
             ]
         ].mean(axis=1)
         self.df["Score B"] = (
-            self.df[
-                "Poverty (Less than 200% of federal poverty line) (percentile)"
-            ]
+            self.df["Poverty (Less than 200% of federal poverty line) (percentile)"]
             * self.df[
                 "Percent individuals age 25 or over with less than high school degree (percentile)"
             ]
@@ -342,21 +330,26 @@ class ScoreETL(ExtractTransformLoad):
             ]
             self.df[f"{bucket}"] = self.df[fields_in_bucket].mean(axis=1)
 
-        # Combine the score from the two Exposures and Environmental Effects buckets into a single score called "Pollution Burden". The math for this score is: (1.0 * Exposures Score + 0.5 * Environment Effects score) / 1.5.
+        # Combine the score from the two Exposures and Environmental Effects buckets
+        # into a single score called "Pollution Burden".
+        # The math for this score is:
+        # (1.0 * Exposures Score + 0.5 * Environment Effects score) / 1.5.
         self.df[self.AGGREGATION_POLLUTION] = (
             1.0 * self.df[f"{self.BUCKET_EXPOSURES}"]
             + 0.5 * self.df[f"{self.BUCKET_ENVIRONMENTAL}"]
         ) / 1.5
 
-        # Average the score from the two Sensitive populations and Socioeconomic factors buckets into a single score called "Population Characteristics".
+        # Average the score from the two Sensitive populations and
+        # Socioeconomic factors buckets into a single score called
+        # "Population Characteristics".
         self.df[self.AGGREGATION_POPULATION] = self.df[
             [f"{self.BUCKET_SENSITIVE}", f"{self.BUCKET_SOCIOECONOMIC}"]
         ].mean(axis=1)
 
-        # Multiply the "Pollution Burden" score and the "Population Characteristics" together to produce the cumulative impact score.
+        # Multiply the "Pollution Burden" score and the "Population Characteristics"
+        # together to produce the cumulative impact score.
         self.df["Score C"] = (
-            self.df[self.AGGREGATION_POLLUTION]
-            * self.df[self.AGGREGATION_POPULATION]
+            self.df[self.AGGREGATION_POLLUTION] * self.df[self.AGGREGATION_POPULATION]
         )
 
         if len(census_block_group_df) > 220333:
@@ -371,12 +364,10 @@ class ScoreETL(ExtractTransformLoad):
         ]
 
         fields_min_max = [
-            f"{field}{self.MIN_MAX_FIELD_SUFFIX}"
-            for field in fields_to_use_in_score
+            f"{field}{self.MIN_MAX_FIELD_SUFFIX}" for field in fields_to_use_in_score
         ]
         fields_percentile = [
-            f"{field}{self.PERCENTILE_FIELD_SUFFIX}"
-            for field in fields_to_use_in_score
+            f"{field}{self.PERCENTILE_FIELD_SUFFIX}" for field in fields_to_use_in_score
         ]
 
         # Calculate "Score D", which uses min-max normalization
@@ -396,17 +387,22 @@ class ScoreETL(ExtractTransformLoad):
             "Score E",
             "Poverty (Less than 200% of federal poverty line)",
         ]:
-            self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] = self.df[score_field].rank(pct=True)
+            self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] = self.df[
+                score_field
+            ].rank(pct=True)
 
             for threshold in [0.25, 0.3, 0.35, 0.4]:
                 fraction_converted_to_percent = int(100 * threshold)
-                self.df[f"{score_field} (top {fraction_converted_to_percent}th percentile)"] = (
-                    self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"] >= 1 - threshold
+                self.df[
+                    f"{score_field} (top {fraction_converted_to_percent}th percentile)"
+                ] = (
+                    self.df[f"{score_field}{self.PERCENTILE_FIELD_SUFFIX}"]
+                    >= 1 - threshold
                 )
 
     def load(self) -> None:
-        logger.info(f"Saving Score CSV")
+        logger.info("Saving Score CSV")
 
         # write nationwide csv
         self.SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
-        self.df.to_csv(self.SCORE_CSV_PATH / f"usa.csv", index=False)
+        self.df.to_csv(self.SCORE_CSV_PATH / "usa.csv", index=False)
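The percentile logic reformatted in the hunk above follows a standard pandas pattern: Series.rank(pct=True) maps each value to a percentile in (0, 1], and comparing against 1 - threshold marks the top slice. A toy sketch with made-up values:

    import pandas as pd

    scores = pd.Series([10, 20, 30, 40])
    percentiles = scores.rank(pct=True)       # 0.25, 0.50, 0.75, 1.00
    top_25_percent = percentiles >= 1 - 0.25  # False, False, True, True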
@@ -1,6 +1,7 @@
+import math
+
 import pandas as pd
 import geopandas as gpd
-import math
 
 from etl.base import ExtractTransformLoad
 from utils import get_module_logger
@@ -21,9 +22,7 @@ class GeoScoreETL(ExtractTransformLoad):
         self.SCORE_CSV_PATH = self.DATA_PATH / "score" / "csv"
         self.TILE_SCORE_CSV = self.SCORE_CSV_PATH / "tiles" / "usa.csv"
 
-        self.CENSUS_USA_GEOJSON = (
-            self.DATA_PATH / "census" / "geojson" / "us.json"
-        )
+        self.CENSUS_USA_GEOJSON = self.DATA_PATH / "census" / "geojson" / "us.json"
 
         self.TARGET_SCORE_NAME = "Score E (percentile)"
         self.TARGET_SCORE_RENAME_TO = "E_SCORE"
@@ -36,7 +35,7 @@ class GeoScoreETL(ExtractTransformLoad):
         self.geojson_score_usa_low: gpd.GeoDataFrame
 
     def extract(self) -> None:
-        logger.info(f"Reading US GeoJSON (~6 minutes)")
+        logger.info("Reading US GeoJSON (~6 minutes)")
         self.geojson_usa_df = gpd.read_file(
             self.CENSUS_USA_GEOJSON,
             dtype={"GEOID10": "string"},
@@ -45,7 +44,7 @@ class GeoScoreETL(ExtractTransformLoad):
         )
         self.geojson_usa_df.head()
 
-        logger.info(f"Reading score CSV")
+        logger.info("Reading score CSV")
         self.score_usa_df = pd.read_csv(
             self.TILE_SCORE_CSV,
             dtype={"GEOID10": "string"},
@@ -53,11 +52,11 @@ class GeoScoreETL(ExtractTransformLoad):
         )
 
     def transform(self) -> None:
-        logger.info(f"Pruning Census GeoJSON")
+        logger.info("Pruning Census GeoJSON")
         fields = ["GEOID10", "geometry"]
         self.geojson_usa_df = self.geojson_usa_df[fields]
 
-        logger.info(f"Merging and compressing score CSV with USA GeoJSON")
+        logger.info("Merging and compressing score CSV with USA GeoJSON")
         self.geojson_score_usa_high = self.score_usa_df.merge(
             self.geojson_usa_df, on="GEOID10", how="left"
         )
@@ -75,7 +74,7 @@ class GeoScoreETL(ExtractTransformLoad):
             inplace=True,
         )
 
-        logger.info(f"Aggregating into tracts (~5 minutes)")
+        logger.info("Aggregating into tracts (~5 minutes)")
         usa_tracts = self._aggregate_to_tracts(usa_simplified)
 
         usa_tracts = gpd.GeoDataFrame(
|
|||
crs="EPSG:4326",
|
||||
)
|
||||
|
||||
logger.info(f"Creating buckets from tracts")
|
||||
logger.info("Creating buckets from tracts")
|
||||
usa_bucketed = self._create_buckets_from_tracts(
|
||||
usa_tracts, self.NUMBER_OF_BUCKETS
|
||||
)
|
||||
|
||||
logger.info(f"Aggregating buckets")
|
||||
logger.info("Aggregating buckets")
|
||||
usa_aggregated = self._aggregate_buckets(usa_bucketed, agg_func="mean")
|
||||
|
||||
compressed = self._breakup_multipolygons(
|
||||
usa_aggregated, self.NUMBER_OF_BUCKETS
|
||||
)
|
||||
compressed = self._breakup_multipolygons(usa_aggregated, self.NUMBER_OF_BUCKETS)
|
||||
|
||||
self.geojson_score_usa_low = gpd.GeoDataFrame(
|
||||
compressed,
|
||||
|
@@ -118,9 +115,7 @@ class GeoScoreETL(ExtractTransformLoad):
         # assign tracts to buckets by D_SCORE
         state_tracts.sort_values(self.TARGET_SCORE_RENAME_TO, inplace=True)
         SCORE_bucket = []
-        bucket_size = math.ceil(
-            len(state_tracts.index) / self.NUMBER_OF_BUCKETS
-        )
+        bucket_size = math.ceil(len(state_tracts.index) / self.NUMBER_OF_BUCKETS)
         for i in range(len(state_tracts.index)):
             SCORE_bucket.extend([math.floor(i / bucket_size)])
         state_tracts[f"{self.TARGET_SCORE_RENAME_TO}_bucket"] = SCORE_bucket
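The collapsed math.ceil call above is the heart of the bucketing: tracts are sorted by score, the bucket size is the ceiling of count / NUMBER_OF_BUCKETS, and each row's bucket is its index floor-divided by that size. A small sketch with made-up sizes:

    import math

    number_of_items = 10   # e.g. tracts in one state (made-up count)
    number_of_buckets = 3
    bucket_size = math.ceil(number_of_items / number_of_buckets)  # 4
    buckets = [math.floor(i / bucket_size) for i in range(number_of_items)]
    # -> [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]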
@@ -155,14 +150,10 @@ class GeoScoreETL(ExtractTransformLoad):
         return compressed
 
     def load(self) -> None:
-        logger.info(f"Writing usa-high (~9 minutes)")
-        self.geojson_score_usa_high.to_file(
-            self.SCORE_HIGH_GEOJSON, driver="GeoJSON"
-        )
-        logger.info(f"Completed writing usa-high")
+        logger.info("Writing usa-high (~9 minutes)")
+        self.geojson_score_usa_high.to_file(self.SCORE_HIGH_GEOJSON, driver="GeoJSON")
+        logger.info("Completed writing usa-high")
 
-        logger.info(f"Writing usa-low (~9 minutes)")
-        self.geojson_score_usa_low.to_file(
-            self.SCORE_LOW_GEOJSON, driver="GeoJSON"
-        )
-        logger.info(f"Completed writing usa-low")
+        logger.info("Writing usa-low (~9 minutes)")
+        self.geojson_score_usa_low.to_file(self.SCORE_LOW_GEOJSON, driver="GeoJSON")
+        logger.info("Completed writing usa-low")
@@ -19,9 +19,7 @@ class PostScoreETL(ExtractTransformLoad):
         self.CENSUS_USA_CSV = self.DATA_PATH / "census" / "csv" / "us.csv"
         self.SCORE_CSV_PATH = self.DATA_PATH / "score" / "csv"
 
-        self.STATE_CSV = (
-            self.DATA_PATH / "census" / "csv" / "fips_states_2010.csv"
-        )
+        self.STATE_CSV = self.DATA_PATH / "census" / "csv" / "fips_states_2010.csv"
 
         self.FULL_SCORE_CSV = self.SCORE_CSV_PATH / "full" / "usa.csv"
         self.TILR_SCORE_CSV = self.SCORE_CSV_PATH / "tile" / "usa.csv"
@@ -49,7 +47,7 @@ class PostScoreETL(ExtractTransformLoad):
             self.TMP_PATH,
         )
 
-        logger.info(f"Reading Counties CSV")
+        logger.info("Reading Counties CSV")
         self.counties_df = pd.read_csv(
             self.CENSUS_COUNTIES_TXT,
             sep="\t",
@@ -58,16 +56,14 @@ class PostScoreETL(ExtractTransformLoad):
             encoding="latin-1",
         )
 
-        logger.info(f"Reading States CSV")
+        logger.info("Reading States CSV")
         self.states_df = pd.read_csv(
             self.STATE_CSV, dtype={"fips": "string", "state_code": "string"}
         )
-        self.score_df = pd.read_csv(
-            self.FULL_SCORE_CSV, dtype={"GEOID10": "string"}
-        )
+        self.score_df = pd.read_csv(self.FULL_SCORE_CSV, dtype={"GEOID10": "string"})
 
     def transform(self) -> None:
-        logger.info(f"Transforming data sources for Score + County CSV")
+        logger.info("Transforming data sources for Score + County CSV")
 
         # rename some of the columns to prepare for merge
         self.counties_df = self.counties_df[["USPS", "GEOID", "NAME"]]
|
|||
)
|
||||
|
||||
# check if there are census cbgs without score
|
||||
logger.info(f"Removing CBG rows without score")
|
||||
logger.info("Removing CBG rows without score")
|
||||
|
||||
## load cbgs
|
||||
cbg_usa_df = pd.read_csv(
|
||||
|
@@ -121,19 +117,19 @@ class PostScoreETL(ExtractTransformLoad):
         null_cbg_df = merged_df[merged_df["Score E (percentile)"].isnull()]
 
         # subsctract data sets
-        removed_df = pd.concat(
-            [merged_df, null_cbg_df, null_cbg_df]
-        ).drop_duplicates(keep=False)
+        removed_df = pd.concat([merged_df, null_cbg_df, null_cbg_df]).drop_duplicates(
+            keep=False
+        )
 
         # set the score to the new df
         self.score_county_state_merged = removed_df
 
     def load(self) -> None:
-        logger.info(f"Saving Full Score CSV with County Information")
+        logger.info("Saving Full Score CSV with County Information")
         self.SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
         self.score_county_state_merged.to_csv(self.FULL_SCORE_CSV, index=False)
 
-        logger.info(f"Saving Tile Score CSV")
+        logger.info("Saving Tile Score CSV")
         # TODO: check which are the columns we'll use
         # Related to: https://github.com/usds/justice40-tool/issues/302
         score_tiles = self.score_county_state_merged[self.TILES_SCORE_COLUMNS]