Mirror of https://github.com/DOI-DO/j40-cejst-2.git (synced 2025-08-21 15:01:40 -07:00)

Scores D & E (#266)

* running black throughout
* adding housing
* hud housing etl working
* got score d and e working
* updating scoring comparison
* minor fixes
* small changes
* small comments

parent 67c73dde2a
commit 41e394972c

6 changed files with 625 additions and 74 deletions
@@ -11,6 +11,7 @@
"# 1. `ejscreen_etl.ipynb`\n",
"# 2. `census_etl.ipynb`\n",
"# 3. `housing_and_transportation_etl.ipynb`\n",
"# 4. `hud_housing_etl.ipynb`\n",
"\n",
"import collections\n",
"import functools\n",
@@ -20,7 +21,7 @@
"import os\n",
"import sys\n",
"\n",
"module_path = os.path.abspath(os.path.join('..'))\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
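The only change in the hunk above is black's quote style, but the cell itself exists so the notebook can import modules from the repository root. A minimal sketch of that pattern (the `etl` package name in the comment is illustrative, not confirmed by this diff):

    import os
    import sys

    # Make the parent directory of the notebook's working directory importable.
    module_path = os.path.abspath(os.path.join(".."))
    if module_path not in sys.path:
        sys.path.append(module_path)

    # After this, a hypothetical sibling package such as `etl` could be imported
    # with a plain `import etl`, assuming it exists at that parent directory.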
@@ -28,6 +29,7 @@
"\n",
"# Define some global parameters\n",
"GEOID_FIELD_NAME = \"GEOID10\"\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"BUCKET_SOCIOECONOMIC = \"Socioeconomic Factors\"\n",
"BUCKET_SENSITIVE = \"Sensitive populations\"\n",
"BUCKET_ENVIRONMENTAL = \"Environmental effects\"\n",
@@ -39,11 +41,22 @@
" BUCKET_EXPOSURES,\n",
"]\n",
"\n",
"# A few specific field names\n",
"# TODO: clean this up, I name some fields but not others.\n",
"UNEMPLOYED_FIELD_NAME = \"Unemployed civilians (percent)\"\n",
"LINGUISTIC_ISOLATION_FIELD_NAME = \"Linguistic isolation (percent)\"\n",
"HOUSING_BURDEN_FIELD_NAME = \"Housing burden (percent)\"\n",
"POVERTY_FIELD_NAME = \"Poverty (Less than 200% of federal poverty line)\"\n",
"HIGH_SCHOOL_FIELD_NAME = (\n",
" \"Percent individuals age 25 or over with less than high school degree\"\n",
")\n",
"\n",
"# There's another aggregation level (a second level of \"buckets\").\n",
"AGGREGATION_POLLUTION = \"Pollution Burden\"\n",
"AGGREGATION_POPULATION = \"Population Characteristics\"\n",
"\n",
"PERCENTILE_FIELD_SUFFIX = \" (percentile)\"\n",
"MIN_MAX_FIELD_SUFFIX = \" (min-max normalized)\"\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"SCORE_CSV_PATH = DATA_PATH / \"score\" / \"csv\"\n",
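The new field-name constants and the two suffix constants are combined later to build derived column names. A small sketch of that composition, using only names that appear in the hunk above (the printed strings are just what the f-strings produce):

    HOUSING_BURDEN_FIELD_NAME = "Housing burden (percent)"
    PERCENTILE_FIELD_SUFFIX = " (percentile)"
    MIN_MAX_FIELD_SUFFIX = " (min-max normalized)"

    # Derived column names used by the scoring cells further down.
    print(f"{HOUSING_BURDEN_FIELD_NAME}{PERCENTILE_FIELD_SUFFIX}")  # Housing burden (percent) (percentile)
    print(f"{HOUSING_BURDEN_FIELD_NAME}{MIN_MAX_FIELD_SUFFIX}")     # Housing burden (percent) (min-max normalized)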
@@ -102,6 +115,23 @@
"housing_and_transportation_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9202e5d",
"metadata": {},
"outputs": [],
"source": [
"# Load HUD housing data\n",
"hud_housing_csv = DATA_PATH / \"dataset\" / \"hud_housing\" / \"usa.csv\"\n",
"hud_housing_df = pd.read_csv(\n",
" hud_housing_csv,\n",
" dtype={GEOID_TRACT_FIELD_NAME: \"string\"},\n",
" low_memory=False,\n",
")\n",
"hud_housing_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
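The new cell reads the HUD housing extract at the tract level and forces the tract GEOID column to a string dtype. That matters because tract GEOIDs are 11-character FIPS-based codes that can start with a zero; read as integers, the leading zero is lost and later joins fail. A minimal sketch of the difference (the CSV content below is fabricated; the real file is data/dataset/hud_housing/usa.csv):

    import io

    import pandas as pd

    GEOID_TRACT_FIELD_NAME = "GEOID10_TRACT"

    # Fabricated stand-in for the HUD housing CSV.
    csv_text = f"{GEOID_TRACT_FIELD_NAME},Housing burden (percent)\n01001020100,0.31\n"

    as_number = pd.read_csv(io.StringIO(csv_text))
    as_string = pd.read_csv(io.StringIO(csv_text), dtype={GEOID_TRACT_FIELD_NAME: "string"})

    print(as_number[GEOID_TRACT_FIELD_NAME].iloc[0])  # 1001020100 -- leading zero dropped
    print(as_string[GEOID_TRACT_FIELD_NAME].iloc[0])  # 01001020100 -- preserved for the join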
@@ -110,15 +140,59 @@
"outputs": [],
"source": [
"# Join all the data sources that use census block groups\n",
"dfs = [ejscreen_df, census_df, housing_and_transportation_df]\n",
"census_block_group_dfs = [ejscreen_df, census_df, housing_and_transportation_df]\n",
"\n",
"df = functools.reduce(\n",
"census_block_group_df = functools.reduce(\n",
" lambda left, right: pd.merge(\n",
" left=left, right=right, on=GEOID_FIELD_NAME, how=\"outer\"\n",
" ),\n",
" dfs,\n",
" census_block_group_dfs,\n",
")\n",
"\n",
"\n",
"if len(census_block_group_df) > 220333:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"census_block_group_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e79ec27a",
"metadata": {},
"outputs": [],
"source": [
"# Sanity check the join.\n",
"if len(census_block_group_df[GEOID_FIELD_NAME].str.len().unique()) != 1:\n",
" raise ValueError(\n",
" f\"One of the input CSVs uses {GEOID_FIELD_NAME} with a different length.\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d0d2915",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Join all the data sources that use census tracts\n",
"# TODO: when there's more than one data source using census tract, reduce/merge them here.\n",
"census_tract_df = hud_housing_df\n",
"\n",
"# Calculate the tract for the CBG data.\n",
"census_block_group_df[GEOID_TRACT_FIELD_NAME] = census_block_group_df[\n",
" GEOID_FIELD_NAME\n",
"].str[0:11]\n",
"\n",
"df = census_block_group_df.merge(census_tract_df, on=GEOID_TRACT_FIELD_NAME)\n",
"\n",
"if len(census_block_group_df) > 220333:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"df.head()"
]
},
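Two ideas in the cells above are worth spelling out: `functools.reduce` folds a pairwise outer merge over the list of block-group dataframes, and the tract GEOID is derived by truncating the 12-character block-group GEOID to its first 11 characters. A minimal sketch with toy dataframes (the GEOIDs and values are fabricated; the real frames are the ones named in the diff):

    import functools

    import pandas as pd

    GEOID_FIELD_NAME = "GEOID10"
    GEOID_TRACT_FIELD_NAME = "GEOID10_TRACT"

    # Toy stand-ins for ejscreen_df, census_df, housing_and_transportation_df.
    a = pd.DataFrame({GEOID_FIELD_NAME: ["010010201001", "010010201002"], "x": [1, 2]})
    b = pd.DataFrame({GEOID_FIELD_NAME: ["010010201001", "010010201002"], "y": [3, 4]})
    census_block_group_dfs = [a, b]

    # Fold a pairwise outer merge over the list, keying on the block-group GEOID.
    census_block_group_df = functools.reduce(
        lambda left, right: pd.merge(
            left=left, right=right, on=GEOID_FIELD_NAME, how="outer"
        ),
        census_block_group_dfs,
    )

    # A block-group GEOID is 12 characters; its first 11 characters are the tract GEOID.
    census_block_group_df[GEOID_TRACT_FIELD_NAME] = census_block_group_df[
        GEOID_FIELD_NAME
    ].str[0:11]

    print(census_block_group_df)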
@@ -135,13 +209,18 @@
")\n",
"\n",
"data_sets = [\n",
" # The following data sets have `bucket=None`, because it's not used in the score.\n",
" # The following data sets have `bucket=None`, because it's not used in the bucket based score (\"Score C\").\n",
" DataSet(\n",
" input_field=GEOID_FIELD_NAME,\n",
" # Use the name `GEOID10` to enable geoplatform.gov's workflow.\n",
" renamed_field=GEOID_FIELD_NAME,\n",
" bucket=None,\n",
" ),\n",
" DataSet(\n",
" input_field=HOUSING_BURDEN_FIELD_NAME,\n",
" renamed_field=HOUSING_BURDEN_FIELD_NAME,\n",
" bucket=None,\n",
" ),\n",
" DataSet(input_field=\"ACSTOTPOP\", renamed_field=\"Total population\", bucket=None),\n",
" # The following data sets have buckets, because they're used in the score\n",
" DataSet(\n",
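`DataSet` is defined above this hunk and is not shown in the diff; it behaves like a small record with `input_field`, `renamed_field`, and `bucket`. The sketch below reconstructs that shape with a namedtuple purely for illustration and shows how `bucket=None` entries are carried along without feeding the bucketed "Score C":

    import collections

    # Hypothetical reconstruction of DataSet; the real definition lives outside this hunk.
    DataSet = collections.namedtuple("DataSet", ["input_field", "renamed_field", "bucket"])

    data_sets = [
        # Kept for joining and export, but excluded from the bucketed score.
        DataSet(input_field="GEOID10", renamed_field="GEOID10", bucket=None),
        # Contributes to the bucketed score.
        DataSet(
            input_field="LOWINCPCT",
            renamed_field="Poverty (Less than 200% of federal poverty line)",
            bucket="Socioeconomic Factors",
        ),
    ]

    bucketed = [d.renamed_field for d in data_sets if d.bucket is not None]
    print(bucketed)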
@@ -206,24 +285,28 @@
" bucket=BUCKET_SENSITIVE,\n",
" ),\n",
" DataSet(\n",
" input_field=LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" renamed_field=LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" bucket=BUCKET_SENSITIVE,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LINGISOPCT\",\n",
" renamed_field=\"Percent of households in linguistic isolation\",\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LOWINCPCT\",\n",
" renamed_field=\"Poverty (Less than 200% of federal poverty line)\",\n",
" renamed_field=POVERTY_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LESSHSPCT\",\n",
" renamed_field=\"Percent individuals age 25 or over with less than high school degree\",\n",
" renamed_field=HIGH_SCHOOL_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"Unemployed Civilians (fraction)\",\n",
" # Following EJSCREEN conventions, where fractional data is named as a percent.\n",
" renamed_field=\"Unemployed Civilians (percent)\",\n",
" input_field=UNEMPLOYED_FIELD_NAME,\n",
" renamed_field=UNEMPLOYED_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
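Beyond the constant substitutions shown, the `input_field`/`renamed_field` pairs effectively define a rename map from raw EJSCREEN column names to human-readable ones. A hedged sketch of how such a mapping could be applied (the rename call itself is not part of the hunk shown, and the values are fabricated):

    import pandas as pd

    # Mapping implied by the input_field/renamed_field pairs above.
    rename_map = {
        "LOWINCPCT": "Poverty (Less than 200% of federal poverty line)",
        "LESSHSPCT": "Percent individuals age 25 or over with less than high school degree",
    }

    raw = pd.DataFrame({"LOWINCPCT": [0.42], "LESSHSPCT": [0.18]})  # fabricated values
    renamed = raw.rename(columns=rename_map)
    print(renamed.columns.tolist())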
@@ -256,6 +339,21 @@
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1280cbd4",
"metadata": {},
"outputs": [],
"source": [
"# Convert all columns to numeric.\n",
"for data_set in data_sets:\n",
" # Skip GEOID_FIELD_NAME, because it's a string.\n",
" if data_set.renamed_field == GEOID_FIELD_NAME:\n",
" continue\n",
" df[f\"{data_set.renamed_field}\"] = pd.to_numeric(df[data_set.renamed_field])"
]
},
{
"cell_type": "code",
"execution_count": null,
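The new cell coerces every renamed column except the GEOID to a numeric dtype so the normalization and averaging below never operate on strings. A minimal standalone version of the same loop over a toy frame (the values are fabricated):

    import pandas as pd

    GEOID_FIELD_NAME = "GEOID10"

    # Toy frame: the percent column arrived as strings; the GEOID must stay a string.
    df = pd.DataFrame(
        {
            GEOID_FIELD_NAME: ["010010201001"],
            "Housing burden (percent)": ["0.31"],
        }
    )

    for column in df.columns:
        if column == GEOID_FIELD_NAME:
            continue  # skip the identifier, which is intentionally a string
        df[column] = pd.to_numeric(df[column])

    print(df.dtypes)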
@@ -274,6 +372,44 @@
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2088013",
"metadata": {},
"outputs": [],
"source": [
"# calculate min max\n",
"# Math:\n",
"# (\n",
"# Observed value\n",
"# - minimum of all values\n",
"# )\n",
"# divided by\n",
"# (\n",
"# Maximum of all values\n",
"# - minimum of all values\n",
"# )\n",
"for data_set in data_sets:\n",
" # Skip GEOID_FIELD_NAME, because it's a string.\n",
" if data_set.renamed_field == GEOID_FIELD_NAME:\n",
" continue\n",
"\n",
" min_value = df[data_set.renamed_field].min(skipna=True)\n",
"\n",
" max_value = df[data_set.renamed_field].max(skipna=True)\n",
"\n",
" print(\n",
" f\"For data set {data_set.renamed_field}, the min value is {min_value} and the max value is {max_value}.\"\n",
" )\n",
"\n",
" df[f\"{data_set.renamed_field}{MIN_MAX_FIELD_SUFFIX}\"] = (\n",
" df[data_set.renamed_field] - min_value\n",
" ) / (max_value - min_value)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
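The new min-max cell rescales each field to the [0, 1] range with (observed - min) / (max - min). A worked sketch on fabricated values, showing that NaNs are skipped when computing the min and max and simply propagate through the arithmetic:

    import pandas as pd

    MIN_MAX_FIELD_SUFFIX = " (min-max normalized)"
    field = "Housing burden (percent)"

    df = pd.DataFrame({field: [0.10, 0.25, 0.40, None]})  # fabricated values

    min_value = df[field].min(skipna=True)
    max_value = df[field].max(skipna=True)

    # (observed - min) / (max - min): 0.10 -> 0.0, 0.25 -> 0.5, 0.40 -> 1.0, NaN -> NaN
    df[f"{field}{MIN_MAX_FIELD_SUFFIX}"] = (df[field] - min_value) / (max_value - min_value)
    print(df)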
@@ -333,6 +469,35 @@
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f70106f5",
"metadata": {},
"outputs": [],
"source": [
"fields_to_use_in_score = [\n",
" UNEMPLOYED_FIELD_NAME,\n",
" LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" HOUSING_BURDEN_FIELD_NAME,\n",
" POVERTY_FIELD_NAME,\n",
" HIGH_SCHOOL_FIELD_NAME,\n",
"]\n",
"\n",
"fields_min_max = [f\"{field}{MIN_MAX_FIELD_SUFFIX}\" for field in fields_to_use_in_score]\n",
"fields_percentile = [\n",
" f\"{field}{PERCENTILE_FIELD_SUFFIX}\" for field in fields_to_use_in_score\n",
"]\n",
"\n",
"# Calculate \"Score D\", which uses min-max normalization\n",
"# and calculate \"Score E\", which uses percentile normalization for the same fields\n",
"df[\"Score D\"] = df[fields_min_max].mean(axis=1)\n",
"df[\"Score E\"] = df[fields_percentile].mean(axis=1)\n",
"\n",
"print(df[\"Score D\"].describe())\n",
"print(df[\"Score E\"].describe())"
]
},
{
"cell_type": "code",
"execution_count": null,
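Score D is the row-wise mean of the min-max-normalized versions of the five fields, and Score E is the row-wise mean of their percentile versions; `mean(axis=1)` averages across columns and ignores NaNs by default, so a block group missing one input still gets a score from the rest. A small sketch with two fabricated fields standing in for the five:

    import pandas as pd

    # Fabricated normalized columns standing in for the five score inputs.
    df = pd.DataFrame(
        {
            "Unemployed civilians (percent) (min-max normalized)": [0.2, 0.8],
            "Housing burden (percent) (min-max normalized)": [0.4, 0.6],
            "Unemployed civilians (percent) (percentile)": [0.5, 1.0],
            "Housing burden (percent) (percentile)": [0.5, 1.0],
        }
    )

    fields_min_max = [c for c in df.columns if c.endswith("(min-max normalized)")]
    fields_percentile = [c for c in df.columns if c.endswith("(percentile)")]

    df["Score D"] = df[fields_min_max].mean(axis=1)     # 0.3 and 0.7
    df["Score E"] = df[fields_percentile].mean(axis=1)  # 0.5 and 1.0
    print(df[["Score D", "Score E"]])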
@@ -343,7 +508,7 @@
"outputs": [],
"source": [
"# Create percentiles for the scores\n",
"for score_field in [\"Score A\", \"Score B\", \"Score C\"]:\n",
"for score_field in [\"Score A\", \"Score B\", \"Score C\", \"Score D\", \"Score E\"]:\n",
" df[f\"{score_field}{PERCENTILE_FIELD_SUFFIX}\"] = df[score_field].rank(pct=True)\n",
" df[f\"{score_field} (top 25th percentile)\"] = (\n",
" df[f\"{score_field}{PERCENTILE_FIELD_SUFFIX}\"] >= 0.75\n",
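Extending the loop to Scores D and E gives each score a percentile-rank column and a boolean "top 25th percentile" flag: `rank(pct=True)` maps values to percentile ranks in (0, 1], and the `>= 0.75` comparison marks roughly the top quartile. A minimal sketch on four fabricated Score D values:

    import pandas as pd

    PERCENTILE_FIELD_SUFFIX = " (percentile)"

    df = pd.DataFrame({"Score D": [0.1, 0.4, 0.7, 0.9]})  # fabricated scores

    for score_field in ["Score D"]:
        df[f"{score_field}{PERCENTILE_FIELD_SUFFIX}"] = df[score_field].rank(pct=True)
        df[f"{score_field} (top 25th percentile)"] = (
            df[f"{score_field}{PERCENTILE_FIELD_SUFFIX}"] >= 0.75
        )

    # Ranks come out as 0.25, 0.50, 0.75, 1.00, so the last two rows are flagged True.
    print(df)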
@@ -376,14 +541,6 @@
" # we need to name the file data01.csv for ogr2ogr csv merge to work\n",
" df1.to_csv(SCORE_CSV_PATH / f\"data{states_fips}.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "167ebba3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
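The export loop above (shown only in part) writes one CSV per state, named data{fips}.csv, because the downstream ogr2ogr merge expects that naming. A hedged sketch of the idea, assuming the state FIPS code is taken from the first two characters of the block-group GEOID (the grouping here is illustrative, not the notebook's exact loop):

    from pathlib import Path

    import pandas as pd

    GEOID_FIELD_NAME = "GEOID10"
    SCORE_CSV_PATH = Path("data") / "score" / "csv"

    # Fabricated rows: one Alabama (01...) and one California (06...) block group.
    df = pd.DataFrame(
        {GEOID_FIELD_NAME: ["010010201001", "060014001001"], "Score D": [0.3, 0.7]}
    )

    SCORE_CSV_PATH.mkdir(parents=True, exist_ok=True)
    for states_fips, df1 in df.groupby(df[GEOID_FIELD_NAME].str[0:2]):
        # ogr2ogr's CSV merge expects files named data01.csv, data06.csv, ...
        df1.to_csv(SCORE_CSV_PATH / f"data{states_fips}.csv", index=False)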
@@ -402,7 +559,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.0"
"version": "3.7.1"
}
},
"nbformat": 4,