Modularization + Poetry + Docker (#213)

* reorg

* added configuration management; initial click commands (see the CLI sketch after this list)

* reset dirs completed

* major modularization effort

* prepping mbtiles

* first round of PR review updates

* round 2 of feedback review

* checkpoint

* habemus dockerfile 🎉

* updated docker-compose with long-running container

* census generation works

* logging working

* updated README

* updated README

* last small update to README

* added instructions for log visualization

* census etl update for reusable fips module

* ejscreen etl updated

* further modularization

* score modularization

* tmp cleanup
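The commit list above mentions initial click commands for the new pipeline. As a minimal sketch of what such an entry point could look like — the command names (`census-data-download`, `score-run`) and the `--data-path` option are illustrative assumptions, not the actual CLI added in this PR:

```python
# application.py -- hypothetical sketch of a click-based ETL entry point;
# command and option names are assumptions, not the actual CLI in this PR.
import click


@click.group()
def cli():
    """Run data pipeline steps for the scoring tool."""


@cli.command()
@click.option("--data-path", default="data", help="Base data directory.")
def census_data_download(data_path):
    """Download and prepare the raw census data."""
    click.echo(f"Downloading census data into {data_path}")


@cli.command()
def score_run():
    """Generate the nationwide and per-state score CSVs."""
    click.echo("Generating score CSVs")


if __name__ == "__main__":
    cli()
```

With click's default naming, the functions above would be invoked as `python application.py census-data-download` and `python application.py score-run`.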
Authored by Jorge Escobar on 2021-06-28 16:16:14 -04:00; committed by GitHub
commit 67c73dde2a
29 changed files with 2383 additions and 433 deletions


@@ -17,6 +17,14 @@
 "from pathlib import Path\n",
 "import pandas as pd\n",
 "import csv\n",
+"import os\n",
+"import sys\n",
+"\n",
+"module_path = os.path.abspath(os.path.join('..'))\n",
+"if module_path not in sys.path:\n",
+"    sys.path.append(module_path)\n",
+"\n",
+"from etl.sources.census.etl_utils import get_state_fips_codes\n",
 "\n",
 "# Define some global parameters\n",
 "GEOID_FIELD_NAME = \"GEOID10\"\n",
@@ -37,9 +45,8 @@
 "\n",
 "PERCENTILE_FIELD_SUFFIX = \" (percentile)\"\n",
 "\n",
-"data_path = Path.cwd().parent / \"data\"\n",
-"fips_csv_path = data_path / \"fips_states_2010.csv\"\n",
-"score_csv_path = data_path / \"score\" / \"csv\"\n",
+"DATA_PATH = Path.cwd().parent / \"data\"\n",
+"SCORE_CSV_PATH = DATA_PATH / \"score\" / \"csv\"\n",
 "\n",
 "# Tell pandas to display all columns\n",
 "pd.set_option(\"display.max_columns\", None)"
@@ -55,7 +62,7 @@
 "outputs": [],
 "source": [
 "# EJSCreen csv Load\n",
-"ejscreen_csv = data_path / \"dataset\" / \"ejscreen_2020\" / \"usa.csv\"\n",
+"ejscreen_csv = DATA_PATH / \"dataset\" / \"ejscreen_2020\" / \"usa.csv\"\n",
 "ejscreen_df = pd.read_csv(ejscreen_csv, dtype={\"ID\": \"string\"}, low_memory=False)\n",
 "ejscreen_df.rename(columns={\"ID\": GEOID_FIELD_NAME}, inplace=True)\n",
 "ejscreen_df.head()"
@@ -69,7 +76,7 @@
 "outputs": [],
 "source": [
 "# Load census data\n",
-"census_csv = data_path / \"dataset\" / \"census_acs_2019\" / \"usa.csv\"\n",
+"census_csv = DATA_PATH / \"dataset\" / \"census_acs_2019\" / \"usa.csv\"\n",
 "census_df = pd.read_csv(\n",
 "    census_csv, dtype={GEOID_FIELD_NAME: \"string\"}, low_memory=False\n",
 ")\n",
@@ -85,7 +92,7 @@
 "source": [
 "# Load housing and transportation data\n",
 "housing_and_transportation_index_csv = (\n",
-"    data_path / \"dataset\" / \"housing_and_transportation_index\" / \"usa.csv\"\n",
+"    DATA_PATH / \"dataset\" / \"housing_and_transportation_index\" / \"usa.csv\"\n",
 ")\n",
 "housing_and_transportation_df = pd.read_csv(\n",
 "    housing_and_transportation_index_csv,\n",
@@ -352,7 +359,7 @@
 "outputs": [],
 "source": [
 "# write nationwide csv\n",
-"df.to_csv(score_csv_path / f\"usa.csv\", index=False)"
+"df.to_csv(SCORE_CSV_PATH / f\"usa.csv\", index=False)"
 ]
 },
 {
@@ -363,19 +370,11 @@
 "outputs": [],
 "source": [
 "# write per state csvs\n",
-"with open(fips_csv_path) as csv_file:\n",
-"    csv_reader = csv.reader(csv_file, delimiter=\",\")\n",
-"    line_count = 0\n",
-"\n",
-"    for row in csv_reader:\n",
-"        if line_count == 0:\n",
-"            line_count += 1\n",
-"        else:\n",
-"            states_fips = row[0].strip()\n",
-"            print(f\"Generating data{states_fips} csv\")\n",
-"            df1 = df[df[\"GEOID10\"].str[:2] == states_fips]\n",
-"            # we need to name the file data01.csv for ogr2ogr csv merge to work\n",
-"            df1.to_csv(score_csv_path / f\"data{states_fips}.csv\", index=False)"
+"for states_fips in get_state_fips_codes(DATA_PATH):\n",
+"    print(f\"Generating data{states_fips} csv\")\n",
+"    df1 = df[df[\"GEOID10\"].str[:2] == states_fips]\n",
+"    # we need to name the file data01.csv for ogr2ogr csv merge to work\n",
+"    df1.to_csv(SCORE_CSV_PATH / f\"data{states_fips}.csv\", index=False)"
 ]
 },
 {
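The hunk above delegates FIPS parsing to `get_state_fips_codes`, whose implementation is not part of this excerpt. A plausible sketch reconstructed from its call site and the inline notebook code it replaces — the filename `fips_states_2010.csv` comes from the removed lines, but the exact module contents are an assumption:

```python
# etl/sources/census/etl_utils.py -- hypothetical reconstruction; only the
# call site appears in this diff, so the body below is an assumption.
import csv
from pathlib import Path


def get_state_fips_codes(data_path: Path) -> list:
    """Return the two-digit state FIPS codes from fips_states_2010.csv."""
    fips_csv_path = data_path / "fips_states_2010.csv"
    fips_codes = []
    with open(fips_csv_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=",")
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            fips_codes.append(row[0].strip())
    return fips_codes
```

Factoring the parser out this way preserves the notebook's behavior (the `line_count` header-skipping logic becomes a single `next()` call) while letting the census ETL and score steps share one FIPS reader, per the "census etl update for reusable fips module" commit.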