import csv
import json
import os
import subprocess
from pathlib import Path

import geopandas as gpd
import pandas as pd

from data_pipeline.utils import get_module_logger, unzip_file_from_url

from .etl_utils import get_state_fips_codes
# Module-level logger for this ETL step (project convention: one logger per module).
logger = get_module_logger(__name__)
def download_census_csvs(data_path: Path) -> None:
    """Download all census shape files from the Census FTP and extract the geojson
    to generate national and by state Census Block Group CSVs and GeoJSONs

    Args:
        data_path (pathlib.Path): Name of the directory where the files and directories will
        be created

    Returns:
        None
    """
    # the fips_states_2010.csv is generated from data here
    # https://www.census.gov/geographies/reference-files/time-series/geo/tallies.html
    state_fips_codes = get_state_fips_codes(data_path)

    geojson_dir_path = data_path / "census" / "geojson"
    # ogr2ogr will not create the output directory itself; make sure it exists.
    geojson_dir_path.mkdir(parents=True, exist_ok=True)

    for fips in state_fips_codes:
        # check if the extracted shapefile for this state already exists
        shp_dir_path = data_path / "census" / "shp" / fips
        shp_file_path = shp_dir_path / f"tl_2010_{fips}_bg10.shp"

        logger.info(f"Checking if {fips} file exists")
        if not os.path.isfile(shp_file_path):
            logger.info(f"Downloading and extracting {fips} shape file")
            # 2020 tiger data is here: https://www2.census.gov/geo/tiger/TIGER2020/BG/
            # But using 2010 for now
            cbg_state_url = f"https://www2.census.gov/geo/tiger/TIGER2010/BG/2010/tl_2010_{fips}_bg10.zip"
            unzip_file_from_url(
                cbg_state_url,
                data_path / "tmp",
                shp_dir_path,
            )

            # Convert the shapefile to GeoJSON.
            # BUG FIX: this command previously hard-coded relative "data/..."
            # paths, silently ignoring data_path and breaking when run from a
            # different working directory. Build both paths from data_path.
            # An argument list with shell=False also keeps paths containing
            # spaces intact and avoids shell injection.
            subprocess.run(
                [
                    "ogr2ogr",
                    "-f",
                    "GeoJSON",
                    str(geojson_dir_path / f"{fips}.json"),
                    str(shp_file_path),
                ],
                check=False,  # match original os.system behavior: exit code ignored
            )

    # generate CBG CSV table for pandas
    ## load in memory
    cbg_national = []  # in-memory global list
    cbg_per_state: dict = {}  # in-memory dict per state
    for file in os.listdir(geojson_dir_path):
        # BUG FIX: skip the national rollup ("us.json") written at the end of a
        # previous run, otherwise a re-run double-counts every block group.
        if file.endswith(".json") and file != "us.json":
            logger.info(f"Ingesting geoid10 for file {file}")
            with open(geojson_dir_path / file) as f:
                geojson = json.load(f)
                for feature in geojson["features"]:
                    geoid10 = feature["properties"]["GEOID10"]
                    cbg_national.append(str(geoid10))
                    # The first two digits of a GEOID10 are the state FIPS code.
                    geoid10_state_id = geoid10[:2]
                    cbg_per_state.setdefault(geoid10_state_id, []).append(geoid10)

    csv_dir_path = data_path / "census" / "csv"
    csv_dir_path.mkdir(parents=True, exist_ok=True)

    ## write to individual state csv
    for state_id, geoid10_list in cbg_per_state.items():
        _write_geoid10_csv(csv_dir_path / f"{state_id}.csv", geoid10_list)

    ## write US csv
    _write_geoid10_csv(csv_dir_path / "us.csv", cbg_national)

    ## create national geojson
    logger.info("Generating national geojson file")
    state_gdfs = []
    for file_name in geojson_dir_path.rglob("*.json"):
        # As above: never fold a previous run's national file back in.
        if file_name.name == "us.json":
            continue
        logger.info(f"Ingesting {file_name}")
        state_gdfs.append(gpd.read_file(file_name))

    # GeoDataFrame.append was removed in pandas 2.0 / geopandas 0.13;
    # concatenate every state in a single pass instead of repeated appends
    # (which was also quadratic in the number of states).
    usa_df = (
        gpd.GeoDataFrame(pd.concat(state_gdfs, ignore_index=True))
        if state_gdfs
        else gpd.GeoDataFrame()
    )

    usa_df = usa_df.to_crs("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
    logger.info("Writing national geojson file")
    usa_df.to_file(geojson_dir_path / "us.json", driver="GeoJSON")

    logger.info("Census block groups downloading complete")


def _write_geoid10_csv(csv_path: Path, geoid10_list: list) -> None:
    """Write one GEOID10 per row to csv_path as a single-column, headerless CSV."""
    with open(csv_path, mode="w", newline="") as cbg_csv_file:
        cbg_csv_file_writer = csv.writer(
            cbg_csv_file,
            delimiter=",",
            quotechar='"',
            quoting=csv.QUOTE_MINIMAL,
        )
        for geoid10 in geoid10_list:
            cbg_csv_file_writer.writerow(
                [
                    geoid10,
                ]
            )