ETL Classes for Data Sets (#260)

* first commit

* checkpoint

* checkpoint

* first extract module 🎉

* completed census acs etl class

* completed ejscreen etl

* completed etl

* score generation ready

* improving census load and separation

* score generation working 🎉

* completed etls

* new score generation

* PR reviews

* run specific etl; starting docstrings

* docstrings work

* more docstrings

* completed docstrings

* adding pyenv version

* more reasonable poetry req for python

* PR comments
Jorge Escobar 2021-07-12 15:50:44 -04:00 committed by GitHub
commit 842312f69f
33 changed files with 2628 additions and 2872 deletions


@@ -1,141 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "20aa3891",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"import numpy as np\n",
"import pandas as pd\n",
"import csv\n",
"import sys\n",
"import os\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"from utils import unzip_file_from_url, remove_all_from_dir\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"TMP_PATH = DATA_PATH / \"tmp\"\n",
"CALENVIROSCREEN_FTP_URL = \"https://justice40-data.s3.amazonaws.com/CalEnviroScreen/CalEnviroScreen_4.0_2021.zip\"\n",
"CSV_PATH = DATA_PATH / \"dataset\" / \"calenviroscreen4\"\n",
"\n",
"# Definining some variable names\n",
"CALENVIROSCREEN_SCORE_FIELD_NAME = \"calenviroscreen_score\"\n",
"CALENVIROSCREEN_PERCENTILE_FIELD_NAME = \"calenviroscreen_percentile\"\n",
"CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD_NAME = \"calenviroscreen_priority_community\"\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"\n",
"# Choosing constants.\n",
"# None of these numbers are final, but just for the purposes of comparison.\n",
"CALENVIROSCREEN_PRIORITY_COMMUNITY_THRESHOLD = 75\n",
"\n",
"print(DATA_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc3fb9ec",
"metadata": {},
"outputs": [],
"source": [
"# download file from ejscreen ftp\n",
"unzip_file_from_url(CALENVIROSCREEN_FTP_URL, TMP_PATH, TMP_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15f66756",
"metadata": {},
"outputs": [],
"source": [
"# Data from https://calenviroscreen-oehha.hub.arcgis.com/#Data, specifically:\n",
"# https://oehha.ca.gov/media/downloads/calenviroscreen/document/calenviroscreen40resultsdatadictionaryd12021.zip\n",
"calenviroscreen_4_csv_name = \"CalEnviroScreen_4.0_2021.csv\"\n",
"calenviroscreen_data_path = TMP_PATH.joinpath(calenviroscreen_4_csv_name)\n",
"\n",
"# Load comparison index (CalEnviroScreen 4)\n",
"calenviroscreen_df = pd.read_csv(\n",
" calenviroscreen_data_path, dtype={\"Census Tract\": \"string\"}\n",
")\n",
"\n",
"calenviroscreen_df.rename(\n",
" columns={\n",
" \"Census Tract\": GEOID_TRACT_FIELD_NAME,\n",
" \"DRAFT CES 4.0 Score\": CALENVIROSCREEN_SCORE_FIELD_NAME,\n",
" \"DRAFT CES 4.0 Percentile\": CALENVIROSCREEN_PERCENTILE_FIELD_NAME,\n",
" },\n",
" inplace=True,\n",
")\n",
"\n",
"# Add a leading \"0\" to the Census Tract to match our format in other data frames.\n",
"\n",
"calenviroscreen_df[GEOID_TRACT_FIELD_NAME] = (\n",
" \"0\" + calenviroscreen_df[GEOID_TRACT_FIELD_NAME]\n",
")\n",
"\n",
"# Calculate the top K% of prioritized communities\n",
"calenviroscreen_df[CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD_NAME] = (\n",
" calenviroscreen_df[CALENVIROSCREEN_PERCENTILE_FIELD_NAME]\n",
" >= CALENVIROSCREEN_PRIORITY_COMMUNITY_THRESHOLD\n",
")\n",
"\n",
"calenviroscreen_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9fa2077a",
"metadata": {},
"outputs": [],
"source": [
"# write csv\n",
"CSV_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
"# Matching other conventions in the ETL scripts, write only for the state (FIPS code 06).\n",
"calenviroscreen_df.to_csv(CSV_PATH / \"data06.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81b977f8",
"metadata": {},
"outputs": [],
"source": [
"# cleanup\n",
"remove_all_from_dir(TMP_PATH)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
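Note: the deleted notebooks all lean on the project helper unzip_file_from_url(url, download_path, unzip_path). A minimal standard-library sketch of what such a helper could look like, with the signature assumed from the call sites above (not the project's actual implementation):

import urllib.request
import zipfile
from pathlib import Path

def unzip_file_from_url(file_url: str, download_path: Path, unzipped_file_path: Path) -> None:
    # Download the archive into the temporary directory ...
    download_path.mkdir(parents=True, exist_ok=True)
    zip_file_path = download_path / "downloaded.zip"
    urllib.request.urlretrieve(file_url, zip_file_path)
    # ... then extract everything into the destination directory.
    unzipped_file_path.mkdir(parents=True, exist_ok=True)
    with zipfile.ZipFile(zip_file_path) as zip_file:
        zip_file.extractall(path=unzipped_file_path)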


@@ -1,203 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "0491828b",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import censusdata\n",
"import csv\n",
"from pathlib import Path\n",
"import os\n",
"import sys\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"\n",
"ACS_YEAR = 2019\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"OUTPUT_PATH = DATA_PATH / \"dataset\" / f\"census_acs_{ACS_YEAR}\"\n",
"\n",
"GEOID_FIELD_NAME = \"GEOID10\"\n",
"UNEMPLOYED_FIELD_NAME = \"Unemployed civilians (percent)\"\n",
"LINGUISTIC_ISOLATION_FIELD_NAME = \"Linguistic isolation (percent)\"\n",
"LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME = \"Linguistic isolation (total)\"\n",
"\n",
"LINGUISTIC_ISOLATION_FIELDS = [\n",
" \"C16002_001E\",\n",
" \"C16002_004E\",\n",
" \"C16002_007E\",\n",
" \"C16002_010E\",\n",
" \"C16002_013E\",\n",
"]\n",
"\n",
"# Some display settings to make pandas outputs more readable.\n",
"pd.set_option(\"display.expand_frame_repr\", False)\n",
"pd.set_option(\"display.precision\", 2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64df0b63",
"metadata": {},
"outputs": [],
"source": [
"# For variable discovery, if necessary.\n",
"# censusdata.search(\n",
"# \"acs5\", 2019, \"label\", \"Limited English speaking\"\n",
"# )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "654f25a1",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Following the tutorial at https://jtleider.github.io/censusdata/example1.html.\n",
"# Full list of fields is at https://www2.census.gov/programs-surveys/acs/summary_file/2019/documentation/user_tools/ACS2019_Table_Shells.xlsx\n",
"censusdata.printtable(censusdata.censustable(src=\"acs5\", year=ACS_YEAR, table=\"B23025\"))\n",
"censusdata.printtable(censusdata.censustable(src=\"acs5\", year=ACS_YEAR, table=\"C16002\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8999cea4",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"def fips_from_censusdata_censusgeo(censusgeo: censusdata.censusgeo) -> str:\n",
" \"\"\"Create a FIPS code from the proprietary censusgeo index.\"\"\"\n",
" fips = \"\".join([value for (key, value) in censusgeo.params()])\n",
" return fips\n",
"\n",
"\n",
"dfs = []\n",
"for fips in get_state_fips_codes(DATA_PATH):\n",
" print(f\"Downloading data for state/territory with FIPS code {fips}\")\n",
"\n",
" dfs.append(\n",
" censusdata.download(\n",
" src=\"acs5\",\n",
" year=ACS_YEAR,\n",
" geo=censusdata.censusgeo(\n",
" [(\"state\", fips), (\"county\", \"*\"), (\"block group\", \"*\")]\n",
" ),\n",
" var=[\n",
" # Emploment fields\n",
" \"B23025_005E\",\n",
" \"B23025_003E\",\n",
" ]\n",
" + LINGUISTIC_ISOLATION_FIELDS,\n",
" )\n",
" )\n",
"\n",
"\n",
"df = pd.concat(dfs)\n",
"\n",
"df[GEOID_FIELD_NAME] = df.index.to_series().apply(func=fips_from_censusdata_censusgeo)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "803cce31",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Calculate percent unemployment.\n",
"# TODO: remove small-sample data that should be `None` instead of a high-variance fraction.\n",
"df[UNEMPLOYED_FIELD_NAME] = df.B23025_005E / df.B23025_003E\n",
"\n",
"df[UNEMPLOYED_FIELD_NAME].describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e475472c",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Calculate linguistic isolation.\n",
"individual_limited_english_fields = [\n",
" \"C16002_004E\",\n",
" \"C16002_007E\",\n",
" \"C16002_010E\",\n",
" \"C16002_013E\",\n",
"]\n",
"\n",
"df[LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME] = df[individual_limited_english_fields].sum(\n",
" axis=1, skipna=True\n",
")\n",
"df[LINGUISTIC_ISOLATION_FIELD_NAME] = (\n",
" df[LINGUISTIC_ISOLATION_TOTAL_FIELD_NAME].astype(float) / df[\"C16002_001E\"]\n",
")\n",
"\n",
"df[LINGUISTIC_ISOLATION_FIELD_NAME].describe()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2a269bb1",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# mkdir census\n",
"OUTPUT_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
"columns_to_include = [\n",
" GEOID_FIELD_NAME,\n",
" UNEMPLOYED_FIELD_NAME,\n",
" LINGUISTIC_ISOLATION_FIELD_NAME,\n",
"]\n",
"\n",
"df[columns_to_include].to_csv(path_or_buf=OUTPUT_PATH / \"usa.csv\", index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
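The TODO in the unemployment cell (removing small-sample data that should be None) could be handled with a masked rate. A sketch, where the cutoff of 30 is an arbitrary illustrative choice, not a project decision:

import numpy as np
import pandas as pd

def safe_rate(numerator: pd.Series, denominator: pd.Series, min_denominator: int = 30) -> pd.Series:
    # Return NaN where the denominator is too small to yield a stable fraction.
    rate = numerator.astype(float) / denominator
    return rate.mask(denominator < min_denominator, np.nan)

# e.g. df[UNEMPLOYED_FIELD_NAME] = safe_rate(df.B23025_005E, df.B23025_003E)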


@@ -1,123 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "20aa3891",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"import numpy as np\n",
"import pandas as pd\n",
"import csv\n",
"import sys\n",
"import os\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"from utils import unzip_file_from_url, remove_all_from_dir\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"TMP_PATH = DATA_PATH / \"tmp\"\n",
"EJSCREEN_FTP_URL = (\n",
" \"https://gaftp.epa.gov/EJSCREEN/2020/EJSCREEN_2020_StatePctile.csv.zip\"\n",
")\n",
"EJSCREEN_CSV = TMP_PATH / \"EJSCREEN_2020_StatePctile.csv\"\n",
"CSV_PATH = DATA_PATH / \"dataset\" / \"ejscreen_2020\"\n",
"print(DATA_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc3fb9ec",
"metadata": {},
"outputs": [],
"source": [
"# download file from ejscreen ftp\n",
"unzip_file_from_url(EJSCREEN_FTP_URL, TMP_PATH, TMP_PATH)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b25738bb",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"df = pd.read_csv(\n",
" EJSCREEN_CSV,\n",
" dtype={\"ID\": \"string\"},\n",
" # EJSCREEN writes the word \"None\" for NA data.\n",
" na_values=[\"None\"],\n",
" low_memory=False,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9fa2077a",
"metadata": {},
"outputs": [],
"source": [
"# write nationwide csv\n",
"CSV_PATH.mkdir(parents=True, exist_ok=True)\n",
"df.to_csv(CSV_PATH / f\"usa.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5e5cc12a",
"metadata": {},
"outputs": [],
"source": [
"# write per state csvs\n",
"for fips in get_state_fips_codes(DATA_PATH):\n",
" print(f\"Generating data{fips} csv\")\n",
" df1 = df[df.ID.str[:2] == fips]\n",
" # we need to name the file data01.csv for ogr2ogr csv merge to work\n",
" df1.to_csv(CSV_PATH / f\"data{fips}.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81b977f8",
"metadata": {},
"outputs": [],
"source": [
"# cleanup\n",
"remove_all_from_dir(TMP_PATH)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
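get_state_fips_codes is another project helper used throughout this diff. A hypothetical stand-in that yields the standard two-digit state and DC FIPS codes (the real helper presumably derives these from the census download, and may also cover territories):

def get_state_fips_codes(data_path=None):
    # 01-56, skipping the codes never assigned to a state (03, 07, 14, 43, 52).
    unassigned = {3, 7, 14, 43, 52}
    return [f"{code:02d}" for code in range(1, 57) if code not in unassigned]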


@@ -1,116 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c21b63a3",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import censusdata\n",
"import csv\n",
"from pathlib import Path\n",
"import os\n",
"import sys\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"from utils import unzip_file_from_url, remove_all_from_dir\n",
"\n",
"ACS_YEAR = 2019\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"TMP_PATH = DATA_PATH / \"tmp\"\n",
"HOUSING_FTP_URL = \"https://htaindex.cnt.org/download/download.php?focus=blkgrp&geoid=\"\n",
"OUTPUT_PATH = DATA_PATH / \"dataset\" / \"housing_and_transportation_index\"\n",
"\n",
"GEOID_FIELD_NAME = \"GEOID10\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6696bc66",
"metadata": {},
"outputs": [],
"source": [
"# Download each state / territory individually\n",
"dfs = []\n",
"zip_file_dir = TMP_PATH / \"housing_and_transportation_index\"\n",
"for fips in get_state_fips_codes(DATA_PATH):\n",
" print(f\"Downloading housing data for state/territory with FIPS code {fips}\")\n",
" unzip_file_from_url(f\"{HOUSING_FTP_URL}{fips}\", TMP_PATH, zip_file_dir)\n",
"\n",
" # New file name:\n",
" tmp_csv_file_path = zip_file_dir / f\"htaindex_data_blkgrps_{fips}.csv\"\n",
" tmp_df = pd.read_csv(filepath_or_buffer=tmp_csv_file_path)\n",
"\n",
" dfs.append(tmp_df)\n",
"\n",
"df = pd.concat(dfs)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "244e0d03",
"metadata": {},
"outputs": [],
"source": [
"# Rename and reformat block group ID\n",
"df.rename(columns={\"blkgrp\": GEOID_FIELD_NAME}, inplace=True)\n",
"df[GEOID_FIELD_NAME] = df[GEOID_FIELD_NAME].str.replace('\"', \"\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8275c1ef",
"metadata": {},
"outputs": [],
"source": [
"OUTPUT_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
"df.to_csv(path_or_buf=OUTPUT_PATH / \"usa.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef5bb862",
"metadata": {},
"outputs": [],
"source": [
"# cleanup\n",
"remove_all_from_dir(TMP_PATH)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
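After stripping the quotes from blkgrp above, a cheap validation step might confirm that every ID is a well-formed 2010 block group GEOID (12 digits: 2 state + 3 county + 6 tract + 1 block group). A sketch, assuming df and GEOID_FIELD_NAME from the cell above:

bad_ids = df[df[GEOID_FIELD_NAME].str.len() != 12]
if not bad_ids.empty:
    raise ValueError(f"{len(bad_ids)} rows have malformed block group IDs")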


@@ -1,274 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "c21b63a3",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import censusdata\n",
"import csv\n",
"from pathlib import Path\n",
"import os\n",
"import re\n",
"import sys\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"from utils import unzip_file_from_url, remove_all_from_dir\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"TMP_PATH = DATA_PATH / \"tmp\"\n",
"OUTPUT_PATH = DATA_PATH / \"dataset\" / \"hud_housing\"\n",
"\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"\n",
"# We measure households earning less than 80% of HUD Area Median Family Income by county\n",
"# and paying greater than 30% of their income to housing costs.\n",
"HOUSING_BURDEN_FIELD_NAME = \"Housing burden (percent)\"\n",
"HOUSING_BURDEN_NUMERATOR_FIELD_NAME = \"HOUSING_BURDEN_NUMERATOR\"\n",
"HOUSING_BURDEN_DENOMINATOR_FIELD_NAME = \"HOUSING_BURDEN_DENOMINATOR\"\n",
"\n",
"# Note: some variable definitions.\n",
"# HUD-adjusted median family income (HAMFI).\n",
"# The four housing problems are: incomplete kitchen facilities, incomplete plumbing facilities, more than 1 person per room, and cost burden greater than 30%.\n",
"# Table 8 is the desired table."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6696bc66",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Download the data.\n",
"dfs = []\n",
"zip_file_dir = TMP_PATH / \"hud_housing\"\n",
"\n",
"print(f\"Downloading 225MB housing data\")\n",
"unzip_file_from_url(\n",
" \"https://www.huduser.gov/portal/datasets/cp/2012thru2016-140-csv.zip\",\n",
" TMP_PATH,\n",
" zip_file_dir,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3e954589",
"metadata": {},
"outputs": [],
"source": [
"# New file name:\n",
"tmp_csv_file_path = (\n",
" zip_file_dir\n",
" / \"2012thru2016-140-csv\"\n",
" / \"2012thru2016-140-csv\"\n",
" / \"140\"\n",
" / \"Table8.csv\"\n",
")\n",
"df = pd.read_csv(filepath_or_buffer=tmp_csv_file_path)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "244e0d03",
"metadata": {},
"outputs": [],
"source": [
"# Rename and reformat block group ID\n",
"df.rename(columns={\"geoid\": GEOID_TRACT_FIELD_NAME}, inplace=True)\n",
"\n",
"# The CHAS data has census tract ids such as `14000US01001020100`\n",
"# Whereas the rest of our data uses, for the same tract, `01001020100`.\n",
"# the characters before `US`:\n",
"df[GEOID_TRACT_FIELD_NAME] = df[GEOID_TRACT_FIELD_NAME].str.replace(\n",
" r\"^.*?US\", \"\", regex=True\n",
")\n",
"\n",
"df[GEOID_TRACT_FIELD_NAME].head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "03250026",
"metadata": {},
"outputs": [],
"source": [
"# Calculate housing burden\n",
"# This is quite a number of steps. It does not appear to be accessible nationally in a simpler format, though.\n",
"# See \"CHAS data dictionary 12-16.xlsx\"\n",
"\n",
"# Owner occupied numerator fields\n",
"OWNER_OCCUPIED_NUMERATOR_FIELDS = [\n",
" # Key: Column Name\tLine_Type\tTenure\tHousehold income\tCost burden\tFacilities\n",
" # T8_est7\tSubtotal\tOwner occupied\tless than or equal to 30% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est7\",\n",
" # T8_est10\tSubtotal\tOwner occupied\tless than or equal to 30% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est10\",\n",
" # T8_est20\tSubtotal\tOwner occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est20\",\n",
" # T8_est23\tSubtotal\tOwner occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est23\",\n",
" # T8_est33\tSubtotal\tOwner occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est33\",\n",
" # T8_est36\tSubtotal\tOwner occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est36\",\n",
"]\n",
"\n",
"# These rows have the values where HAMFI was not computed, b/c of no or negative income.\n",
"OWNER_OCCUPIED_NOT_COMPUTED_FIELDS = [\n",
" # Key: Column Name\tLine_Type\tTenure\tHousehold income\tCost burden\tFacilities\n",
" # T8_est13\tSubtotal\tOwner occupied\tless than or equal to 30% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est13\",\n",
" # T8_est26\tSubtotal\tOwner occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est26\",\n",
" # T8_est39\tSubtotal\tOwner occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est39\",\n",
" # T8_est52\tSubtotal\tOwner occupied\tgreater than 80% but less than or equal to 100% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est52\",\n",
" # T8_est65\tSubtotal\tOwner occupied\tgreater than 100% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est65\",\n",
"]\n",
"\n",
"# T8_est2\tSubtotal\tOwner occupied\tAll\tAll\tAll\n",
"OWNER_OCCUPIED_POPULATION_FIELD = \"T8_est2\"\n",
"\n",
"# Renter occupied numerator fields\n",
"RENTER_OCCUPIED_NUMERATOR_FIELDS = [\n",
" # Key: Column Name\tLine_Type\tTenure\tHousehold income\tCost burden\tFacilities\n",
" # T8_est73\tSubtotal\tRenter occupied\tless than or equal to 30% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est73\",\n",
" # T8_est76\tSubtotal\tRenter occupied\tless than or equal to 30% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est76\",\n",
" # T8_est86\tSubtotal\tRenter occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est86\",\n",
" # T8_est89\tSubtotal\tRenter occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est89\",\n",
" # T8_est99\tSubtotal\tRenter occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tgreater than 30% but less than or equal to 50%\tAll\n",
" \"T8_est99\",\n",
" # T8_est102\tSubtotal\tRenter occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tgreater than 50%\tAll\n",
" \"T8_est102\",\n",
"]\n",
"\n",
"# These rows have the values where HAMFI was not computed, b/c of no or negative income.\n",
"RENTER_OCCUPIED_NOT_COMPUTED_FIELDS = [\n",
" # Key: Column Name\tLine_Type\tTenure\tHousehold income\tCost burden\tFacilities\n",
" # T8_est79\tSubtotal\tRenter occupied\tless than or equal to 30% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est79\",\n",
" # T8_est92\tSubtotal\tRenter occupied\tgreater than 30% but less than or equal to 50% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est92\",\n",
" # T8_est105\tSubtotal\tRenter occupied\tgreater than 50% but less than or equal to 80% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est105\",\n",
" # T8_est118\tSubtotal\tRenter occupied\tgreater than 80% but less than or equal to 100% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est118\",\n",
" # T8_est131\tSubtotal\tRenter occupied\tgreater than 100% of HAMFI\tnot computed (no/negative income)\tAll\n",
" \"T8_est131\",\n",
"]\n",
"\n",
"\n",
"# T8_est68\tSubtotal\tRenter occupied\tAll\tAll\tAll\n",
"RENTER_OCCUPIED_POPULATION_FIELD = \"T8_est68\"\n",
"\n",
"\n",
"# Math:\n",
"# (\n",
"# # of Owner Occupied Units Meeting Criteria\n",
"# + # of Renter Occupied Units Meeting Criteria\n",
"# )\n",
"# divided by\n",
"# (\n",
"# Total # of Owner Occupied Units\n",
"# + Total # of Renter Occupied Units\n",
"# - # of Owner Occupied Units with HAMFI Not Computed\n",
"# - # of Renter Occupied Units with HAMFI Not Computed\n",
"# )\n",
"\n",
"df[HOUSING_BURDEN_NUMERATOR_FIELD_NAME] = df[OWNER_OCCUPIED_NUMERATOR_FIELDS].sum(\n",
" axis=1\n",
") + df[RENTER_OCCUPIED_NUMERATOR_FIELDS].sum(axis=1)\n",
"\n",
"df[HOUSING_BURDEN_DENOMINATOR_FIELD_NAME] = (\n",
" df[OWNER_OCCUPIED_POPULATION_FIELD]\n",
" + df[RENTER_OCCUPIED_POPULATION_FIELD]\n",
" - df[OWNER_OCCUPIED_NOT_COMPUTED_FIELDS].sum(axis=1)\n",
" - df[RENTER_OCCUPIED_NOT_COMPUTED_FIELDS].sum(axis=1)\n",
")\n",
"\n",
"# TODO: add small sample size checks\n",
"df[HOUSING_BURDEN_FIELD_NAME] = df[HOUSING_BURDEN_NUMERATOR_FIELD_NAME].astype(\n",
" float\n",
") / df[HOUSING_BURDEN_DENOMINATOR_FIELD_NAME].astype(float)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8275c1ef",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"OUTPUT_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
"# Drop unnecessary fields\n",
"df[\n",
" [\n",
" GEOID_TRACT_FIELD_NAME,\n",
" HOUSING_BURDEN_NUMERATOR_FIELD_NAME,\n",
" HOUSING_BURDEN_DENOMINATOR_FIELD_NAME,\n",
" HOUSING_BURDEN_FIELD_NAME,\n",
" ]\n",
"].to_csv(path_or_buf=OUTPUT_PATH / \"usa.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef5bb862",
"metadata": {},
"outputs": [],
"source": [
"# cleanup\n",
"remove_all_from_dir(TMP_PATH)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
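For the CHAS ID reformatting above, the non-greedy regex strips everything through the literal US. A worked check:

import re

assert re.sub(r"^.*?US", "", "14000US01001020100") == "01001020100"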


@@ -1,115 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "20aa3891",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"import numpy as np\n",
"import pandas as pd\n",
"import csv\n",
"import sys\n",
"import os\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"from utils import unzip_file_from_url, remove_all_from_dir\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"TMP_PATH = DATA_PATH / \"tmp\"\n",
"HUD_RECAP_CSV_URL = \"https://opendata.arcgis.com/api/v3/datasets/56de4edea8264fe5a344da9811ef5d6e_0/downloads/data?format=csv&spatialRefId=4326\"\n",
"CSV_PATH = DATA_PATH / \"dataset\" / \"hud_recap\"\n",
"\n",
"# Definining some variable names\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME = \"hud_recap_priority_community\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b9455da5",
"metadata": {},
"outputs": [],
"source": [
"# Data from https://hudgis-hud.opendata.arcgis.com/datasets/HUD::racially-or-ethnically-concentrated-areas-of-poverty-r-ecaps/about\n",
"df = pd.read_csv(HUD_RECAP_CSV_URL, dtype={\"GEOID\": \"string\"})\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ca63e66c",
"metadata": {},
"outputs": [],
"source": [
"# Rename some fields\n",
"df.rename(\n",
" columns={\n",
" \"GEOID\": GEOID_TRACT_FIELD_NAME,\n",
" # Interestingly, there's no data dictionary for the RECAP data that I could find.\n",
" # However, this site (http://www.schousing.com/library/Tax%20Credit/2020/QAP%20Instructions%20(2).pdf)\n",
" # suggests:\n",
" # \"If RCAP_Current for the tract in which the site is located is 1, the tract is an R/ECAP. If RCAP_Current is 0, it is not.\"\n",
" \"RCAP_Current\": HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME,\n",
" },\n",
" inplace=True,\n",
")\n",
"\n",
"# Convert to boolean\n",
"df[HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME] = df[\n",
" HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME\n",
"].astype(\"bool\")\n",
"\n",
"df[HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME].value_counts()\n",
"\n",
"df.sort_values(by=GEOID_TRACT_FIELD_NAME, inplace=True)\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9fa2077a",
"metadata": {},
"outputs": [],
"source": [
"# write csv\n",
"CSV_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
"# Drop unnecessary columns.\n",
"df[[GEOID_TRACT_FIELD_NAME, HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME]].to_csv(\n",
" CSV_PATH / \"usa.csv\", index=False\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
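One caveat with the boolean conversion above: astype("bool") maps NaN to True, so any tract missing RCAP_Current would silently be flagged as a priority community. A small demonstration, with .map shown as one way to preserve missingness (illustrative, not what the notebook does):

import pandas as pd

s = pd.Series([1.0, 0.0, float("nan")])
print(s.astype("bool").tolist())                # [True, False, True] -- NaN becomes True
print(s.map({1.0: True, 0.0: False}).tolist())  # [True, False, nan]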


@@ -1,619 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "a664f981",
"metadata": {},
"outputs": [],
"source": [
"# Before running this notebook, you must run the following notebooks (in any order):\n",
"# 1. `ejscreen_etl.ipynb`\n",
"# 2. `census_etl.ipynb`\n",
"# 3. `housing_and_transportation_etl.ipynb`\n",
"# 4. `hud_housing_etl.ipynb`\n",
"\n",
"import collections\n",
"import functools\n",
"from pathlib import Path\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"import csv\n",
"import os\n",
"import sys\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from etl.sources.census.etl_utils import get_state_fips_codes\n",
"\n",
"# Define some global parameters\n",
"GEOID_FIELD_NAME = \"GEOID10\"\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"BUCKET_SOCIOECONOMIC = \"Socioeconomic Factors\"\n",
"BUCKET_SENSITIVE = \"Sensitive populations\"\n",
"BUCKET_ENVIRONMENTAL = \"Environmental effects\"\n",
"BUCKET_EXPOSURES = \"Exposures\"\n",
"BUCKETS = [\n",
" BUCKET_SOCIOECONOMIC,\n",
" BUCKET_SENSITIVE,\n",
" BUCKET_ENVIRONMENTAL,\n",
" BUCKET_EXPOSURES,\n",
"]\n",
"\n",
"# A few specific field names\n",
"# TODO: clean this up, I name some fields but not others.\n",
"UNEMPLOYED_FIELD_NAME = \"Unemployed civilians (percent)\"\n",
"LINGUISTIC_ISOLATION_FIELD_NAME = \"Linguistic isolation (percent)\"\n",
"HOUSING_BURDEN_FIELD_NAME = \"Housing burden (percent)\"\n",
"POVERTY_FIELD_NAME = \"Poverty (Less than 200% of federal poverty line)\"\n",
"HIGH_SCHOOL_FIELD_NAME = (\n",
" \"Percent individuals age 25 or over with less than high school degree\"\n",
")\n",
"\n",
"# There's another aggregation level (a second level of \"buckets\").\n",
"AGGREGATION_POLLUTION = \"Pollution Burden\"\n",
"AGGREGATION_POPULATION = \"Population Characteristics\"\n",
"\n",
"PERCENTILE_FIELD_SUFFIX = \" (percentile)\"\n",
"MIN_MAX_FIELD_SUFFIX = \" (min-max normalized)\"\n",
"\n",
"DATA_PATH = Path.cwd().parent / \"data\"\n",
"SCORE_CSV_PATH = DATA_PATH / \"score\" / \"csv\"\n",
"\n",
"# Tell pandas to display all columns\n",
"pd.set_option(\"display.max_columns\", None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7df430cb",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# EJSCreen csv Load\n",
"ejscreen_csv = DATA_PATH / \"dataset\" / \"ejscreen_2020\" / \"usa.csv\"\n",
"ejscreen_df = pd.read_csv(ejscreen_csv, dtype={\"ID\": \"string\"}, low_memory=False)\n",
"ejscreen_df.rename(columns={\"ID\": GEOID_FIELD_NAME}, inplace=True)\n",
"ejscreen_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "daba69fb",
"metadata": {},
"outputs": [],
"source": [
"# Load census data\n",
"census_csv = DATA_PATH / \"dataset\" / \"census_acs_2019\" / \"usa.csv\"\n",
"census_df = pd.read_csv(\n",
" census_csv, dtype={GEOID_FIELD_NAME: \"string\"}, low_memory=False\n",
")\n",
"census_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "144bdde2",
"metadata": {},
"outputs": [],
"source": [
"# Load housing and transportation data\n",
"housing_and_transportation_index_csv = (\n",
" DATA_PATH / \"dataset\" / \"housing_and_transportation_index\" / \"usa.csv\"\n",
")\n",
"housing_and_transportation_df = pd.read_csv(\n",
" housing_and_transportation_index_csv,\n",
" dtype={GEOID_FIELD_NAME: \"string\"},\n",
" low_memory=False,\n",
")\n",
"housing_and_transportation_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9202e5d",
"metadata": {},
"outputs": [],
"source": [
"# Load HUD housing data\n",
"hud_housing_csv = DATA_PATH / \"dataset\" / \"hud_housing\" / \"usa.csv\"\n",
"hud_housing_df = pd.read_csv(\n",
" hud_housing_csv,\n",
" dtype={GEOID_TRACT_FIELD_NAME: \"string\"},\n",
" low_memory=False,\n",
")\n",
"hud_housing_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf89efd8",
"metadata": {},
"outputs": [],
"source": [
"# Join all the data sources that use census block groups\n",
"census_block_group_dfs = [ejscreen_df, census_df, housing_and_transportation_df]\n",
"\n",
"census_block_group_df = functools.reduce(\n",
" lambda left, right: pd.merge(\n",
" left=left, right=right, on=GEOID_FIELD_NAME, how=\"outer\"\n",
" ),\n",
" census_block_group_dfs,\n",
")\n",
"\n",
"\n",
"if len(census_block_group_df) > 220333:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"census_block_group_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e79ec27a",
"metadata": {},
"outputs": [],
"source": [
"# Sanity check the join.\n",
"if len(census_block_group_df[GEOID_FIELD_NAME].str.len().unique()) != 1:\n",
" raise ValueError(\n",
" f\"One of the input CSVs uses {GEOID_FIELD_NAME} with a different length.\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d0d2915",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Join all the data sources that use census tracts\n",
"# TODO: when there's more than one data source using census tract, reduce/merge them here.\n",
"census_tract_df = hud_housing_df\n",
"\n",
"# Calculate the tract for the CBG data.\n",
"census_block_group_df[GEOID_TRACT_FIELD_NAME] = census_block_group_df[\n",
" GEOID_FIELD_NAME\n",
"].str[0:11]\n",
"\n",
"df = census_block_group_df.merge(census_tract_df, on=GEOID_TRACT_FIELD_NAME)\n",
"\n",
"if len(census_block_group_df) > 220333:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8567900",
"metadata": {},
"outputs": [],
"source": [
"# Define a named tuple that will be used for each data set input.\n",
"DataSet = collections.namedtuple(\n",
" typename=\"DataSet\", field_names=[\"input_field\", \"renamed_field\", \"bucket\"]\n",
")\n",
"\n",
"data_sets = [\n",
" # The following data sets have `bucket=None`, because it's not used in the bucket based score (\"Score C\").\n",
" DataSet(\n",
" input_field=GEOID_FIELD_NAME,\n",
" # Use the name `GEOID10` to enable geoplatform.gov's workflow.\n",
" renamed_field=GEOID_FIELD_NAME,\n",
" bucket=None,\n",
" ),\n",
" DataSet(\n",
" input_field=HOUSING_BURDEN_FIELD_NAME,\n",
" renamed_field=HOUSING_BURDEN_FIELD_NAME,\n",
" bucket=None,\n",
" ),\n",
" DataSet(input_field=\"ACSTOTPOP\", renamed_field=\"Total population\", bucket=None),\n",
" # The following data sets have buckets, because they're used in the score\n",
" DataSet(\n",
" input_field=\"CANCER\",\n",
" renamed_field=\"Air toxics cancer risk\",\n",
" bucket=BUCKET_EXPOSURES,\n",
" ),\n",
" DataSet(\n",
" input_field=\"RESP\",\n",
" renamed_field=\"Respiratory hazard index\",\n",
" bucket=BUCKET_EXPOSURES,\n",
" ),\n",
" DataSet(\n",
" input_field=\"DSLPM\",\n",
" renamed_field=\"Diesel particulate matter\",\n",
" bucket=BUCKET_EXPOSURES,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PM25\",\n",
" renamed_field=\"Particulate matter (PM2.5)\",\n",
" bucket=BUCKET_EXPOSURES,\n",
" ),\n",
" DataSet(input_field=\"OZONE\", renamed_field=\"Ozone\", bucket=BUCKET_EXPOSURES),\n",
" DataSet(\n",
" input_field=\"PTRAF\",\n",
" renamed_field=\"Traffic proximity and volume\",\n",
" bucket=BUCKET_EXPOSURES,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PRMP\",\n",
" renamed_field=\"Proximity to RMP sites\",\n",
" bucket=BUCKET_ENVIRONMENTAL,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PTSDF\",\n",
" renamed_field=\"Proximity to TSDF sites\",\n",
" bucket=BUCKET_ENVIRONMENTAL,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PNPL\",\n",
" renamed_field=\"Proximity to NPL sites\",\n",
" bucket=BUCKET_ENVIRONMENTAL,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PWDIS\",\n",
" renamed_field=\"Wastewater discharge\",\n",
" bucket=BUCKET_ENVIRONMENTAL,\n",
" ),\n",
" DataSet(\n",
" input_field=\"PRE1960PCT\",\n",
" renamed_field=\"Percent pre-1960s housing (lead paint indicator)\",\n",
" bucket=BUCKET_ENVIRONMENTAL,\n",
" ),\n",
" DataSet(\n",
" input_field=\"UNDER5PCT\",\n",
" renamed_field=\"Individuals under 5 years old\",\n",
" bucket=BUCKET_SENSITIVE,\n",
" ),\n",
" DataSet(\n",
" input_field=\"OVER64PCT\",\n",
" renamed_field=\"Individuals over 64 years old\",\n",
" bucket=BUCKET_SENSITIVE,\n",
" ),\n",
" DataSet(\n",
" input_field=LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" renamed_field=LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" bucket=BUCKET_SENSITIVE,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LINGISOPCT\",\n",
" renamed_field=\"Percent of households in linguistic isolation\",\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LOWINCPCT\",\n",
" renamed_field=POVERTY_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"LESSHSPCT\",\n",
" renamed_field=HIGH_SCHOOL_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=UNEMPLOYED_FIELD_NAME,\n",
" renamed_field=UNEMPLOYED_FIELD_NAME,\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
" DataSet(\n",
" input_field=\"ht_ami\",\n",
" renamed_field=\"Housing + Transportation Costs % Income for the Regional Typical Household\",\n",
" bucket=BUCKET_SOCIOECONOMIC,\n",
" ),\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e152a655",
"metadata": {},
"outputs": [],
"source": [
"# Rename columns:\n",
"renaming_dict = {data_set.input_field: data_set.renamed_field for data_set in data_sets}\n",
"\n",
"df.rename(\n",
" columns=renaming_dict,\n",
" inplace=True,\n",
" errors=\"raise\",\n",
")\n",
"\n",
"columns_to_keep = [data_set.renamed_field for data_set in data_sets]\n",
"df = df[columns_to_keep]\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1280cbd4",
"metadata": {},
"outputs": [],
"source": [
"# Convert all columns to numeric.\n",
"for data_set in data_sets:\n",
" # Skip GEOID_FIELD_NAME, because it's a string.\n",
" if data_set.renamed_field == GEOID_FIELD_NAME:\n",
" continue\n",
" df[f\"{data_set.renamed_field}\"] = pd.to_numeric(df[data_set.renamed_field])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "27677132",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Calculate percentiles for each data set.\n",
"for data_set in data_sets:\n",
" df[f\"{data_set.renamed_field}{PERCENTILE_FIELD_SUFFIX}\"] = df[\n",
" data_set.renamed_field\n",
" ].rank(pct=True)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2088013",
"metadata": {},
"outputs": [],
"source": [
"# Calculate min-max for each data set.\n",
"# Math:\n",
"# (\n",
"# Observed value\n",
"# - minimum of all values\n",
"# )\n",
"# divided by\n",
"# (\n",
"# Maximum of all values\n",
"# - minimum of all values\n",
"# )\n",
"for data_set in data_sets:\n",
" # Skip GEOID_FIELD_NAME, because it's a string.\n",
" if data_set.renamed_field == GEOID_FIELD_NAME:\n",
" continue\n",
"\n",
" min_value = df[data_set.renamed_field].min(skipna=True)\n",
"\n",
" max_value = df[data_set.renamed_field].max(skipna=True)\n",
"\n",
" print(\n",
" f\"For data set {data_set.renamed_field}, the min value is {min_value} and the max value is {max_value}.\"\n",
" )\n",
"\n",
" df[f\"{data_set.renamed_field}{MIN_MAX_FIELD_SUFFIX}\"] = (\n",
" df[data_set.renamed_field] - min_value\n",
" ) / (max_value - min_value)\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4eec326",
"metadata": {},
"outputs": [],
"source": [
"# Graph distributions and correlations.\n",
"min_max_fields = [\n",
" f\"{data_set.renamed_field}{MIN_MAX_FIELD_SUFFIX}\"\n",
" for data_set in data_sets\n",
" if data_set.renamed_field != GEOID_FIELD_NAME\n",
"]\n",
"df.hist(\n",
" column=min_max_fields, layout=(len(min_max_fields), 1), figsize=(10, 30), bins=30\n",
")\n",
"\n",
"plt.tight_layout()\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f7b864f",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Calculate score \"A\" and score \"B\"\n",
"df[\"Score A\"] = df[\n",
" [\n",
" \"Poverty (Less than 200% of federal poverty line) (percentile)\",\n",
" \"Percent individuals age 25 or over with less than high school degree (percentile)\",\n",
" ]\n",
"].mean(axis=1)\n",
"df[\"Score B\"] = (\n",
" df[\"Poverty (Less than 200% of federal poverty line) (percentile)\"]\n",
" * df[\n",
" \"Percent individuals age 25 or over with less than high school degree (percentile)\"\n",
" ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0c107baf",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Calculate \"CalEnviroScreen for the US\" score\n",
"# Average all the percentile values in each bucket into a single score for each of the four buckets.\n",
"for bucket in BUCKETS:\n",
" fields_in_bucket = [\n",
" f\"{data_set.renamed_field}{PERCENTILE_FIELD_SUFFIX}\"\n",
" for data_set in data_sets\n",
" if data_set.bucket == bucket\n",
" ]\n",
" df[f\"{bucket}\"] = df[fields_in_bucket].mean(axis=1)\n",
"\n",
"# Combine the score from the two Exposures and Environmental Effects buckets into a single score called \"Pollution Burden\". The math for this score is: (1.0 * Exposures Score + 0.5 * Environment Effects score) / 1.5.\n",
"df[AGGREGATION_POLLUTION] = (\n",
" 1.0 * df[f\"{BUCKET_EXPOSURES}\"] + 0.5 * df[f\"{BUCKET_ENVIRONMENTAL}\"]\n",
") / 1.5\n",
"\n",
"# Average the score from the two Sensitive populations and Socioeconomic factors buckets into a single score called \"Population Characteristics\".\n",
"df[AGGREGATION_POPULATION] = df[\n",
" [f\"{BUCKET_SENSITIVE}\", f\"{BUCKET_SOCIOECONOMIC}\"]\n",
"].mean(axis=1)\n",
"\n",
"# Multiply the \"Pollution Burden\" score and the \"Population Characteristics\" together to produce the cumulative impact score.\n",
"df[\"Score C\"] = df[AGGREGATION_POLLUTION] * df[AGGREGATION_POPULATION]\n",
"\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f70106f5",
"metadata": {},
"outputs": [],
"source": [
"# Calculate scores D and E.\n",
"fields_to_use_in_score_d_and_e = [\n",
" UNEMPLOYED_FIELD_NAME,\n",
" LINGUISTIC_ISOLATION_FIELD_NAME,\n",
" HOUSING_BURDEN_FIELD_NAME,\n",
" POVERTY_FIELD_NAME,\n",
" HIGH_SCHOOL_FIELD_NAME,\n",
"]\n",
"\n",
"fields_min_max = [\n",
" f\"{field}{MIN_MAX_FIELD_SUFFIX}\" for field in fields_to_use_in_score_d_and_e\n",
"]\n",
"fields_percentile = [\n",
" f\"{field}{PERCENTILE_FIELD_SUFFIX}\" for field in fields_to_use_in_score_d_and_e\n",
"]\n",
"\n",
"# Calculate \"Score D\", which uses min-max normalization\n",
"# and calculate \"Score E\", which uses percentile normalization for the same fields\n",
"df[\"Score D\"] = df[fields_min_max].mean(axis=1)\n",
"df[\"Score E\"] = df[fields_percentile].mean(axis=1)\n",
"\n",
"print(df[\"Score D\"].describe())\n",
"print(df[\"Score E\"].describe())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a02e5bac",
"metadata": {},
"outputs": [],
"source": [
"# Graph distributions\n",
"df.hist(\n",
" column=fields_min_max, layout=(len(fields_min_max), 1), figsize=(10, 30), bins=30\n",
")\n",
"plt.tight_layout()\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0e608c8",
"metadata": {},
"outputs": [],
"source": [
"# Calculate correlations\n",
"df[fields_min_max].corr()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "729aed12",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Create percentiles for the scores\n",
"for score_field in [\"Score A\", \"Score B\", \"Score C\", \"Score D\", \"Score E\"]:\n",
" df[f\"{score_field}{PERCENTILE_FIELD_SUFFIX}\"] = df[score_field].rank(pct=True)\n",
" df[f\"{score_field} (top 25th percentile)\"] = (\n",
" df[f\"{score_field}{PERCENTILE_FIELD_SUFFIX}\"] >= 0.75\n",
" )\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b3a65af4",
"metadata": {},
"outputs": [],
"source": [
"# write nationwide csv\n",
"df.to_csv(SCORE_CSV_PATH / f\"usa.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58ddd8b3",
"metadata": {},
"outputs": [],
"source": [
"# write per state csvs\n",
"for states_fips in get_state_fips_codes(DATA_PATH):\n",
" print(f\"Generating data{states_fips} csv\")\n",
" df1 = df[df[\"GEOID10\"].str[:2] == states_fips]\n",
" # we need to name the file data01.csv for ogr2ogr csv merge to work\n",
" df1.to_csv(SCORE_CSV_PATH / f\"data{states_fips}.csv\", index=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
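For reference, the bucket math in the deleted score notebook, condensed into a toy example (bucket values invented purely for illustration):

exposures, environmental = 0.8, 0.6
sensitive, socioeconomic = 0.7, 0.9

# "Pollution Burden": full weight on Exposures, half weight on Environmental effects.
pollution_burden = (1.0 * exposures + 0.5 * environmental) / 1.5  # 0.733...
# "Population Characteristics": plain average of the two remaining buckets.
population_characteristics = (sensitive + socioeconomic) / 2      # 0.8
# "Score C": the product of the two aggregates.
score_c = pollution_burden * population_characteristics          # ~0.587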