Big ole score refactor (#815)

* WIP

* Create ScoreCalculator

This calculates all the factors for Score L for now (with placeholder
formulae, because this is a WIP). I think ideally we'll want to refactor
all the score code to be extracted into this or similar classes.

* Add factor logic for score L

Updated the factor logic to match the Score L factor methodology.
Still need to get the Score L field itself working.

Cleanup needed: pull field names into a constants file, extract all score
calculation into the score calculator.

* Update thresholds and get score L calc working

* Update header name for consistency and update comparison tool

* Initial move of score to score calculator

* WIP big refactor

* Continued WIP on score refactor

* WIP score refactor

* Get to a working score-run

* Refactor to pass df to score init

This makes it easier to pass df around within a class with multiple
methods that require df.
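
A rough sketch of the pattern (HypotheticalScore and its column names are
illustrative only; the real base class and subclasses are in the diff below):
the DataFrame is handed to __init__ once, and every helper reads self.df
instead of taking df as an argument.

import pandas as pd
from data_pipeline.score.score import Score

class HypotheticalScore(Score):
    def add_columns(self) -> pd.DataFrame:
        # df was stored on the instance by Score.__init__, so no df parameter
        # needs to be threaded through the helper methods.
        self.df["Hypothetical factor"] = self._hypothetical_factor()
        return self.df

    def _hypothetical_factor(self) -> pd.Series:
        return self.df["Hypothetical burden (percentile)"] > 0.90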

* Updates from Black

* Updates from linting

* Use named imports instead of wildcard; log more

* Additional refactors

* move more field names to the field_names constants file
* import constants without a relative path (a relative path would break Docker)
* run linting
* raise an error if add_columns is not implemented in a child class (see the sketch below)
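
A compressed sketch of those last points (ScoreX is a stand-in name; the base
class matches score.py in the diff below):

import pandas as pd

import data_pipeline.score.field_names as field_names  # named import, not a wildcard
from data_pipeline.score.score import Score


class ScoreX(Score):  # hypothetical subclass that forgets to override add_columns
    pass


try:
    ScoreX(df=pd.DataFrame()).add_columns()
except NotImplementedError:
    print(f"add_columns must be implemented before {field_names.SCORE_A} can be built")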

* Refactor dict to namedtuple in Score C
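
Roughly the shape of that change (the dict form shown here is an assumption
for illustration; the actual Bucket namedtuple is in score_c.py below):

from collections import namedtuple

# Before (assumed): bucket = {"name": "Exposures", "fields": [...]}
# After: a fixed-shape tuple with attribute access.
Bucket = namedtuple("Bucket", ["name", "fields"])
exposures = Bucket(name="Exposures", fields=["Ozone", "Traffic proximity and volume"])
print(exposures.name, len(exposures.fields))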

* Update L to use all percentile field

* Change high school ed field in L back

Co-authored-by: Shelby Switzer <shelby.switzer@cms.hhs.gov>
Shelby Switzer 2021-11-02 14:12:53 -04:00 committed by GitHub
commit 7bd1a9e59e
15 changed files with 795 additions and 763 deletions

data_pipeline/score/field_names.py
@@ -0,0 +1,155 @@
# Suffixes
PERCENTILE_FIELD_SUFFIX = " (percentile)"
MIN_MAX_FIELD_SUFFIX = " (min-max normalized)"

# Score file field names
SCORE_A = "Score A"
SCORE_B = "Score B"
SCORE_C = "Score C"
C_SOCIOECONOMIC = "Socioeconomic Factors"
C_SENSITIVE = "Sensitive populations"
C_ENVIRONMENTAL = "Environmental effects"
C_EXPOSURES = "Exposures"
SCORE_D = "Score D"
SCORE_E = "Score E"
SCORE_F_COMMUNITIES = "Score F (communities)"
SCORE_G = "Score G"
SCORE_G_COMMUNITIES = "Score G (communities)"
SCORE_H = "Score H"
SCORE_H_COMMUNITIES = "Score H (communities)"
SCORE_I = "Score I"
SCORE_I_COMMUNITIES = "Score I (communities)"
SCORE_K = "NMTC (communities)"
SCORE_K_COMMUNITIES = "Score K (communities)"
SCORE_L_COMMUNITIES = "Definition L (communities)"
L_CLIMATE = "Climate Factor (Definition L)"
L_ENERGY = "Energy Factor (Definition L)"
L_TRANSPORTATION = "Transportation Factor (Definition L)"
L_HOUSING = "Housing Factor (Definition L)"
L_POLLUTION = "Pollution Factor (Definition L)"
L_WATER = "Water Factor (Definition L)"
L_HEALTH = "Health Factor (Definition L)"
L_WORKFORCE = "Workforce Factor (Definition L)"
L_NON_WORKFORCE = "Any Non-Workforce Factor (Definition L)"

# Poverty / Income
POVERTY_FIELD = "Poverty (Less than 200% of federal poverty line)"
POVERTY_PERCENTILE_FIELD = (
    "Poverty (Less than 200% of federal poverty line) (percentile)"
)
POVERTY_LESS_THAN_200_FPL_FIELD = (
    "Percent of individuals < 200% Federal Poverty Line"
)
POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD = (
    "Percent of individuals < 200% Federal Poverty Line (percentile)"
)
POVERTY_LESS_THAN_150_FPL_FIELD = (
    "Percent of individuals < 150% Federal Poverty Line"
)
POVERTY_LESS_THAN_150_FPL_PERCENTILE_FIELD = (
    "Percent of individuals < 150% Federal Poverty Line (percentile)"
)
POVERTY_LESS_THAN_100_FPL_FIELD = (
    "Percent of individuals < 100% Federal Poverty Line"
)
POVERTY_LESS_THAN_100_FPL_PERCENTILE_FIELD = (
    "Percent of individuals < 100% Federal Poverty Line (percentile)"
)
MEDIAN_INCOME_PERCENT_AMI_FIELD = "Median household income (% of AMI)"
MEDIAN_INCOME_PERCENT_AMI_PERCENTILE_FIELD = "Median household income (% of AMI) (percentile)"
STATE_MEDIAN_INCOME_FIELD = (
    "Median household income (State; 2019 inflation-adjusted dollars)"
)
MEDIAN_INCOME_FIELD = "Median household income in the past 12 months"
MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD = (
    "Median household income (% of state median household income)"
)
MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = "Median household income (% of AMI)"
PERSISTENT_POVERTY_FIELD = "Persistent Poverty Census Tract"
AMI_FIELD = "Area Median Income (State or metropolitan)"

# Climate
FEMA_RISK_FIELD = "FEMA Risk Index Expected Annual Loss Score"
FEMA_RISK_PERCENTILE_FIELD = (
    "FEMA Risk Index Expected Annual Loss Score (percentile)"
)

# Environment
DIESEL_FIELD = "Diesel particulate matter"
DIESEL_PERCENTILE_FIELD = "Diesel particulate matter (percentile)"
PM25_FIELD = "Particulate matter (PM2.5)"
PM25_PERCENTILE_FIELD = "Particulate matter (PM2.5) (percentile)"
OZONE_FIELD = "Ozone"
TRAFFIC_FIELD = "Traffic proximity and volume"
TRAFFIC_PERCENTILE_FIELD = "Traffic proximity and volume (percentile)"
LEAD_PAINT_FIELD = "Percent pre-1960s housing (lead paint indicator)"
LEAD_PAINT_PERCENTILE_FIELD = (
    "Percent pre-1960s housing (lead paint indicator) (percentile)"
)
WASTEWATER_FIELD = "Wastewater discharge"
WASTEWATER_PERCENTILE_FIELD = "Wastewater discharge (percentile)"
AGGREGATION_POLLUTION_FIELD = "Pollution Burden"
RMP_FIELD = "Proximity to RMP sites (percentile)"
RMP_PERCENTILE_FIELD = "Proximity to RMP sites (percentile)"
TSDF_FIELD = "Proximity to TSDF sites"
NPL_FIELD = "Proximity to NPL sites"
AIR_TOXICS_CANCER_RISK_FIELD = "Air toxics cancer risk"

# Housing
HOUSING_BURDEN_FIELD = "Housing burden (percent)"
HOUSING_BURDEN_PERCENTILE_FIELD = "Housing burden (percent) (percentile)"
HT_INDEX_FIELD = (
    "Housing + Transportation Costs % Income for the Regional Typical Household"
)

# Energy
ENERGY_BURDEN_FIELD = "Energy burden"
ENERGY_BURDEN_PERCENTILE_FIELD = "Energy burden (percentile)"

# Health
DIABETES_FIELD = "Diagnosed diabetes among adults aged >=18 years"
DIABETES_PERCENTILE_FIELD = (
    "Diagnosed diabetes among adults aged >=18 years (percentile)"
)
ASTHMA_FIELD = "Current asthma among adults aged >=18 years"
ASTHMA_PERCENTILE_FIELD = (
    "Current asthma among adults aged >=18 years (percentile)"
)
HEART_DISEASE_FIELD = "Coronary heart disease among adults aged >=18 years"
HEART_DISEASE_PERCENTILE_FIELD = (
    "Coronary heart disease among adults aged >=18 years (percentile)"
)
LIFE_EXPECTANCY_FIELD = "Life expectancy (years)"
LIFE_EXPECTANCY_PERCENTILE_FIELD = "Life expectancy (years) (percentile)"
RESPITORY_HAZARD_FIELD = "Respiratory hazard index"
RESPITORY_HAZARD_PERCENTILE_FIELD = "Respiratory hazard index (percentile)"
CANCER_FIELD = "Cancer (excluding skin cancer) among adults aged >=18 years"
CANCER_PERCENTILE_FIELD = (
    "Cancer (excluding skin cancer) among adults aged >=18 years (percentile)"
)
HEALTH_INSURANCE_FIELD = (
    "Current lack of health insurance among adults aged 18-64 years"
)
PHYS_HEALTH_NOT_GOOD_FIELD = (
    "Physical health not good for >=14 days among adults aged >=18 years"
)

# Other Demographics
TOTAL_POP_FIELD = "Total population"
UNEMPLOYMENT_FIELD = "Unemployed civilians (percent)"
UNEMPLOYMENT_PERCENTILE_FIELD = "Unemployed civilians (percent) (percentile)"
LINGUISTIC_ISO_FIELD = "Linguistic isolation (percent)"
LINGUISTIC_ISO_PERCENTILE_FIELD = "Linguistic isolation (percent) (percentile)"
HOUSEHOLDS_LINGUISTIC_ISO_FIELD = (
    "Percent of households in linguistic isolation"
)
HIGH_SCHOOL_ED_FIELD = (
    "Percent individuals age 25 or over with less than high school degree"
)
HIGH_SCHOOL_ED_PERCENTILE_FIELD = "Percent individuals age 25 or over with less than high school degree (percentile)"
AGGREGATION_POPULATION_FIELD = "Population Characteristics"
UNDER_5_FIELD = "Individuals under 5 years old"
OVER_64_FIELD = "Individuals over 64 years old"

# Urban Rural Map
URBAN_HERUISTIC_FIELD = "Urban Heuristic Flag"

data_pipeline/score/score.py
@@ -0,0 +1,9 @@
import pandas as pd


class Score:
    def __init__(self, df: pd.DataFrame) -> None:
        self.df = df

    def add_columns(self) -> pd.DataFrame:
        raise NotImplementedError

data_pipeline/score/score_a.py
@@ -0,0 +1,19 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreA(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score A")
        self.df[field_names.SCORE_A] = self.df[
            [
                field_names.POVERTY_PERCENTILE_FIELD,
                field_names.HIGH_SCHOOL_ED_PERCENTILE_FIELD,
            ]
        ].mean(axis=1)
        return self.df

data_pipeline/score/score_b.py
@@ -0,0 +1,17 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreB(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score B")
        self.df[field_names.SCORE_B] = (
            self.df[field_names.POVERTY_PERCENTILE_FIELD]
            * self.df[field_names.HIGH_SCHOOL_ED_PERCENTILE_FIELD]
        )
        return self.df

data_pipeline/score/score_c.py
@@ -0,0 +1,99 @@
from collections import namedtuple

import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreC(Score):
    def __init__(self, df: pd.DataFrame) -> None:
        Bucket = namedtuple('Bucket', ['name', 'fields'])
        self.BUCKET_SOCIOECONOMIC = Bucket(
            field_names.C_SOCIOECONOMIC,
            [
                field_names.HOUSEHOLDS_LINGUISTIC_ISO_FIELD,
                field_names.POVERTY_FIELD,
                field_names.HIGH_SCHOOL_ED_FIELD,
                field_names.UNEMPLOYMENT_FIELD,
                field_names.HT_INDEX_FIELD,
            ],
        )
        self.BUCKET_SENSITIVE = Bucket(
            field_names.C_SENSITIVE,
            [
                field_names.UNDER_5_FIELD,
                field_names.OVER_64_FIELD,
                field_names.LINGUISTIC_ISO_FIELD,
            ],
        )
        self.BUCKET_ENVIRONMENTAL = Bucket(
            field_names.C_ENVIRONMENTAL,
            [
                field_names.RMP_FIELD,
                field_names.TSDF_FIELD,
                field_names.NPL_FIELD,
                field_names.WASTEWATER_FIELD,
                field_names.LEAD_PAINT_FIELD,
            ],
        )
        self.BUCKET_EXPOSURES = Bucket(
            field_names.C_EXPOSURES,
            [
                field_names.AIR_TOXICS_CANCER_RISK_FIELD,
                field_names.RESPITORY_HAZARD_FIELD,
                field_names.DIESEL_FIELD,
                field_names.PM25_FIELD,
                field_names.OZONE_FIELD,
                field_names.TRAFFIC_FIELD,
            ],
        )
        self.BUCKETS = [
            self.BUCKET_SOCIOECONOMIC,
            self.BUCKET_SENSITIVE,
            self.BUCKET_ENVIRONMENTAL,
            self.BUCKET_EXPOSURES,
        ]
        super().__init__(df)

    # "CalEnviroScreen for the US" score
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score C")

        # Average all the percentile values in each bucket into a single score
        # for each of the four buckets.
        # TODO: just use the percentile fields in the list instead
        for bucket in self.BUCKETS:
            fields_to_average = []
            for field in bucket.fields:
                fields_to_average.append(
                    f"{field}{field_names.PERCENTILE_FIELD_SUFFIX}"
                )
            self.df[f"{bucket.name}"] = self.df[fields_to_average].mean(axis=1)

        # Combine the scores from the Exposures and Environmental Effects buckets
        # into a single score called "Pollution Burden".
        # The math for this score is:
        # (1.0 * Exposures Score + 0.5 * Environmental Effects score) / 1.5.
        self.df[field_names.AGGREGATION_POLLUTION_FIELD] = (
            1.0 * self.df[self.BUCKET_EXPOSURES.name]
            + 0.5 * self.df[self.BUCKET_ENVIRONMENTAL.name]
        ) / 1.5

        # Average the scores from the Sensitive populations and
        # Socioeconomic factors buckets into a single score called
        # "Population Characteristics".
        self.df[field_names.AGGREGATION_POPULATION_FIELD] = self.df[
            [self.BUCKET_SENSITIVE.name, self.BUCKET_SOCIOECONOMIC.name]
        ].mean(axis=1)

        # Multiply the "Pollution Burden" score and the "Population Characteristics"
        # score together to produce the cumulative impact score.
        self.df[field_names.SCORE_C] = (
            self.df[field_names.AGGREGATION_POLLUTION_FIELD]
            * self.df[field_names.AGGREGATION_POPULATION_FIELD]
        )
        return self.df

data_pipeline/score/score_d.py
@@ -0,0 +1,35 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreD(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Scores D and E")

        fields_to_use_in_score = [
            field_names.UNEMPLOYMENT_FIELD,
            field_names.LINGUISTIC_ISO_FIELD,
            field_names.HOUSING_BURDEN_FIELD,
            field_names.POVERTY_FIELD,
            field_names.HIGH_SCHOOL_ED_FIELD,
        ]

        fields_min_max = [
            f"{field}{field_names.MIN_MAX_FIELD_SUFFIX}"
            for field in fields_to_use_in_score
        ]
        fields_percentile = [
            f"{field}{field_names.PERCENTILE_FIELD_SUFFIX}"
            for field in fields_to_use_in_score
        ]

        # Calculate "Score D", which uses min-max normalization,
        # and "Score E", which uses percentile normalization for the same fields.
        self.df[field_names.SCORE_D] = self.df[fields_min_max].mean(axis=1)
        self.df[field_names.SCORE_E] = self.df[fields_percentile].mean(axis=1)
        return self.df

data_pipeline/score/score_f.py
@@ -0,0 +1,46 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreF(Score):
    # TODO Make variables and constants clearer (meaning and type)
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score F")
        ami_and_high_school_field = "Low AMI, Low HS graduation"
        meets_socio_field = "Meets socioeconomic criteria"
        meets_burden_field = "Meets burden criteria"

        self.df[ami_and_high_school_field] = (
            self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD] < 0.80
        ) & (self.df[field_names.HIGH_SCHOOL_ED_FIELD] > 0.2)

        self.df[meets_socio_field] = (
            self.df[ami_and_high_school_field]
            | (self.df[field_names.POVERTY_FIELD] > 0.40)
            | (self.df[field_names.LINGUISTIC_ISO_FIELD] > 0.10)
            | (self.df[field_names.HIGH_SCHOOL_ED_FIELD] > 0.4)
        )

        self.df[meets_burden_field] = (
            (self.df[field_names.PM25_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.RESPITORY_HAZARD_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.TRAFFIC_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.LEAD_PAINT_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.RMP_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.ASTHMA_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.HEART_DISEASE_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.CANCER_PERCENTILE_FIELD] > 0.9)
            | (self.df[field_names.DIABETES_PERCENTILE_FIELD] > 0.9)
        )

        self.df[field_names.SCORE_F_COMMUNITIES] = (
            self.df[meets_socio_field] & self.df[meets_burden_field]
        )
        return self.df

data_pipeline/score/score_g.py
@@ -0,0 +1,35 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreG(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score G")

        high_school_cutoff_threshold = 0.05

        # Score G is now modified NMTC
        self.df[field_names.SCORE_G_COMMUNITIES] = (
            (self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD] < 0.8)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        ) | (
            (self.df[field_names.POVERTY_LESS_THAN_100_FPL_FIELD] > 0.20)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        )
        self.df[field_names.SCORE_G] = self.df[
            field_names.SCORE_G_COMMUNITIES
        ].astype(int)
        self.df["Score G (percentile)"] = self.df[field_names.SCORE_G]
        return self.df

data_pipeline/score/score_h.py
@@ -0,0 +1,33 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreH(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score H")

        high_school_cutoff_threshold = 0.06

        self.df[field_names.SCORE_H_COMMUNITIES] = (
            (self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD] < 0.8)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        ) | (
            (self.df[field_names.POVERTY_LESS_THAN_200_FPL_FIELD] > 0.40)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        )
        self.df[field_names.SCORE_H] = self.df[
            field_names.SCORE_H_COMMUNITIES
        ].astype(int)
        return self.df

data_pipeline/score/score_i.py
@@ -0,0 +1,34 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreI(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score I")

        high_school_cutoff_threshold = 0.05

        self.df[field_names.SCORE_I_COMMUNITIES] = (
            (self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD] < 0.7)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        ) | (
            (self.df[field_names.POVERTY_LESS_THAN_200_FPL_FIELD] > 0.50)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        )
        self.df[field_names.SCORE_I] = self.df[
            field_names.SCORE_I_COMMUNITIES
        ].astype(int)
        self.df["Score I (percentile)"] = self.df[field_names.SCORE_I]
        return self.df

data_pipeline/score/score_k.py
@@ -0,0 +1,34 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreK(Score):
    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score K")

        high_school_cutoff_threshold = 0.06

        self.df[field_names.SCORE_K] = (
            (self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD] < 0.8)
        ) | (self.df[field_names.POVERTY_LESS_THAN_100_FPL_FIELD] > 0.20)

        self.df[field_names.SCORE_K_COMMUNITIES] = (
            (self.df[field_names.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD] < 0.8)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        ) | (
            (self.df[field_names.POVERTY_LESS_THAN_100_FPL_FIELD] > 0.20)
            & (
                self.df[field_names.HIGH_SCHOOL_ED_FIELD]
                > high_school_cutoff_threshold
            )
        )
        return self.df

data_pipeline/score/score_l.py
@@ -0,0 +1,228 @@
import pandas as pd

from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreL(Score):
    def __init__(self, df: pd.DataFrame) -> None:
        self.LOW_INCOME_THRESHOLD: float = 0.60
        self.ENVIRONMENTAL_BURDEN_THRESHOLD: float = 0.90
        super().__init__(df)

    def add_columns(self) -> pd.DataFrame:
        logger.info("Adding Score L")

        self.df[field_names.L_CLIMATE] = self._climate_factor()
        self.df[field_names.L_ENERGY] = self._energy_factor()
        self.df[field_names.L_TRANSPORTATION] = self._transportation_factor()
        self.df[field_names.L_HOUSING] = self._housing_factor()
        self.df[field_names.L_POLLUTION] = self._pollution_factor()
        self.df[field_names.L_WATER] = self._water_factor()
        self.df[field_names.L_HEALTH] = self._health_factor()
        self.df[field_names.L_WORKFORCE] = self._workforce_factor()

        factors = [
            field_names.L_CLIMATE,
            field_names.L_ENERGY,
            field_names.L_TRANSPORTATION,
            field_names.L_HOUSING,
            field_names.L_POLLUTION,
            field_names.L_WATER,
            field_names.L_HEALTH,
            field_names.L_WORKFORCE,
        ]
        self.df[field_names.SCORE_L_COMMUNITIES] = self.df[factors].any(axis=1)

        # Note: this is purely used for comparison tool analysis, and can be
        # removed at a later date. - LMB.
        non_workforce_factors = [
            field_names.L_CLIMATE,
            field_names.L_ENERGY,
            field_names.L_TRANSPORTATION,
            field_names.L_HOUSING,
            field_names.L_POLLUTION,
            field_names.L_WATER,
            field_names.L_HEALTH,
        ]
        self.df[field_names.L_NON_WORKFORCE] = self.df[
            non_workforce_factors
        ].any(axis=1)

        return self.df

    def _climate_factor(self) -> bool:
        # In Xth percentile or above for FEMA's Risk Index (Source: FEMA)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & (
            self.df[field_names.FEMA_RISK_PERCENTILE_FIELD]
            > self.ENVIRONMENTAL_BURDEN_THRESHOLD
        )

    def _energy_factor(self) -> bool:
        # In Xth percentile or above for DOE's energy cost burden score
        # (Source: LEAD Score)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & (
            self.df[field_names.ENERGY_BURDEN_PERCENTILE_FIELD]
            > self.ENVIRONMENTAL_BURDEN_THRESHOLD
        )

    def _transportation_factor(self) -> bool:
        # In Xth percentile or above for diesel particulate matter
        # (Source: EPA National Air Toxics Assessment (NATA))
        # or
        # In Xth percentile or above for PM 2.5 (Source: EPA, Office of Air
        # and Radiation (OAR) fusion of model and monitor data)
        # or
        # In Xth percentile or above for traffic proximity and volume
        # (Source: 2017 U.S. Department of Transportation (DOT) traffic data)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        transportation_criteria = (
            (
                self.df[field_names.DIESEL_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.PM25_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.TRAFFIC_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
        )

        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & transportation_criteria

    def _housing_factor(self) -> bool:
        # In Xth percentile or above for lead paint (Source: Census's American
        # Community Survey's percent of housing units built pre-1960, used as
        # an indicator of potential lead paint exposure in homes)
        # or
        # In Xth percentile or above for housing cost burden (Source: HUD's
        # Comprehensive Housing Affordability Strategy dataset)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        housing_criteria = (
            self.df[field_names.LEAD_PAINT_PERCENTILE_FIELD]
            > self.ENVIRONMENTAL_BURDEN_THRESHOLD
        ) | (
            self.df[field_names.HOUSING_BURDEN_PERCENTILE_FIELD]
            > self.ENVIRONMENTAL_BURDEN_THRESHOLD
        )
        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & housing_criteria

    def _pollution_factor(self) -> bool:
        # TBD
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        return False

    def _water_factor(self) -> bool:
        # In Xth percentile or above for wastewater discharge (Source: EPA
        # Risk-Screening Environmental Indicators (RSEI) Model)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & (
            self.df[field_names.WASTEWATER_PERCENTILE_FIELD]
            > self.ENVIRONMENTAL_BURDEN_THRESHOLD
        )

    def _health_factor(self) -> bool:
        # In Xth percentile or above for diabetes (Source: CDC Places)
        # or
        # In Xth percentile or above for asthma (Source: CDC Places)
        # or
        # In Xth percentile or above for heart disease
        # or
        # In Xth percentile or above for low life expectancy (Source: CDC Places)
        # AND
        # Low income: In 60th percentile or above for percent of block group
        # population of households where household income is less than or equal
        # to twice the federal poverty level. (Source: Census's American
        # Community Survey)
        health_criteria = (
            (
                self.df[field_names.DIABETES_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.ASTHMA_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.HEART_DISEASE_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.LIFE_EXPECTANCY_PERCENTILE_FIELD]
                # Note: a high life expectancy is good, so take 1 minus the
                # threshold to invert it, and then look for life expectancies
                # lower than that (not greater than).
                < 1 - self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
        )
        return (
            self.df[field_names.POVERTY_LESS_THAN_200_FPL_PERCENTILE_FIELD]
            > self.LOW_INCOME_THRESHOLD
        ) & health_criteria

    def _workforce_factor(self) -> bool:
        # Where unemployment is above X%
        # or
        # Where median income is less than Y% of the area median income
        # or
        # Where the percent of households at or below 100% of the federal
        # poverty level is greater than Z%
        # or
        # Where linguistic isolation is greater than Y%
        # AND
        # Where the high school degree achievement rate for adults 25 years and
        # older is less than 95% (necessary to screen out university block groups)
        workforce_criteria = (
            (
                self.df[field_names.UNEMPLOYMENT_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.MEDIAN_INCOME_PERCENT_AMI_PERCENTILE_FIELD]
                # Note: a high median income as a % of AMI is good, so take
                # 1 minus the threshold to invert it, and then look for median
                # income lower than that (not greater than).
                < 1 - self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.POVERTY_LESS_THAN_100_FPL_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
            | (
                self.df[field_names.LINGUISTIC_ISO_PERCENTILE_FIELD]
                > self.ENVIRONMENTAL_BURDEN_THRESHOLD
            )
        )
        return (
            self.df[field_names.HIGH_SCHOOL_ED_FIELD] > 0.05
        ) & workforce_criteria

data_pipeline/score/score_runner.py
@@ -0,0 +1,66 @@
import pandas as pd

from data_pipeline.score.score_a import ScoreA
from data_pipeline.score.score_b import ScoreB
from data_pipeline.score.score_c import ScoreC
from data_pipeline.score.score_d import ScoreD
from data_pipeline.score.score_f import ScoreF
from data_pipeline.score.score_g import ScoreG
from data_pipeline.score.score_h import ScoreH
from data_pipeline.score.score_i import ScoreI
from data_pipeline.score.score_k import ScoreK
from data_pipeline.score.score_l import ScoreL
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger

logger = get_module_logger(__name__)


class ScoreRunner:
    def __init__(self, df: pd.DataFrame):
        # Define some global parameters
        self.df = df

    def calculate_scores(self) -> pd.DataFrame:
        # Index scores
        self.df = ScoreA(df=self.df).add_columns()
        self.df = ScoreB(df=self.df).add_columns()
        self.df = ScoreC(df=self.df).add_columns()
        self.df = ScoreD(df=self.df).add_columns()
        self.df = ScoreF(df=self.df).add_columns()
        self.df = ScoreG(df=self.df).add_columns()
        self.df = ScoreH(df=self.df).add_columns()
        self.df = ScoreI(df=self.df).add_columns()
        self.df = ScoreK(df=self.df).add_columns()
        self.df = ScoreL(df=self.df).add_columns()

        # TODO do this with each score instead of in a bundle
        # Create percentiles for these index scores
        self.df = self._add_score_percentiles()

        return self.df

    def _add_score_percentiles(self) -> pd.DataFrame:
        logger.info("Adding Score Percentiles")
        for score_field in [
            field_names.SCORE_A,
            field_names.SCORE_B,
            field_names.SCORE_C,
            field_names.SCORE_D,
            field_names.SCORE_E,
        ]:
            self.df[
                f"{score_field}{field_names.PERCENTILE_FIELD_SUFFIX}"
            ] = self.df[score_field].rank(pct=True)

            for threshold in [0.25, 0.3, 0.35, 0.4]:
                fraction_converted_to_percent = int(100 * threshold)
                self.df[
                    f"{score_field} (top {fraction_converted_to_percent}th percentile)"
                ] = (
                    self.df[
                        f"{score_field}{field_names.PERCENTILE_FIELD_SUFFIX}"
                    ]
                    >= 1 - threshold
                )
        return self.df
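
For reference, a minimal usage sketch of the new runner (the module path,
input file, and GEOID column are assumptions for illustration; in the pipeline
the runner receives a DataFrame that already contains the percentile and
min-max normalized fields the Score classes read):

import pandas as pd
from data_pipeline.score.score_runner import ScoreRunner

# Hypothetical input: one row per census tract / block group with the
# percentile columns the Score classes expect already computed.
df = pd.read_csv("score_input.csv", dtype={"GEOID10": str})
df = ScoreRunner(df=df).calculate_scores()
df.to_csv("score_output.csv", index=False)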