j40-cejst-2/data/data-pipeline/ipython/scoring_comparison.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "93c7b73b",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Before running this script as it currently stands, you'll need to run these notebooks (in any order):\n",
"# * score_calc.ipynb\n",
"# * calenviroscreen_etl.ipynb\n",
"# * hud_recap_etl.ipynb\n",
"\n",
"import collections\n",
"import functools\n",
"import IPython\n",
"import itertools\n",
"import numpy as np\n",
"import os\n",
"import pandas as pd\n",
"import pathlib\n",
"import pypandoc\n",
"import requests\n",
"import string\n",
"import sys\n",
"import typing\n",
"import us\n",
"import zipfile\n",
"\n",
"from datetime import datetime\n",
"from tqdm.notebook import tqdm_notebook\n",
"\n",
"module_path = os.path.abspath(os.path.join(\"..\"))\n",
"if module_path not in sys.path:\n",
" sys.path.append(module_path)\n",
"\n",
"from utils import remove_all_from_dir, get_excel_column_name\n",
"from etl.sources.census.etl_utils import get_state_information\n",
"\n",
"\n",
"# Turn on TQDM for pandas so that we can have progress bars when running `apply`.\n",
"tqdm_notebook.pandas()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "881424fd",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Suppress scientific notation in pandas (this shows up for census tract IDs)\n",
"pd.options.display.float_format = \"{:.2f}\".format\n",
"\n",
"# Set some global parameters\n",
"DATA_DIR = pathlib.Path.cwd().parent / \"data\"\n",
"TEMP_DATA_DIR = DATA_DIR / \"tmp\"\n",
"COMPARISON_OUTPUTS_DIR = DATA_DIR / \"comparison_outputs\"\n",
"\n",
"# Make the dirs if they don't exist\n",
"TEMP_DATA_DIR.mkdir(parents=True, exist_ok=True)\n",
"COMPARISON_OUTPUTS_DIR.mkdir(parents=True, exist_ok=True)\n",
"\n",
"CEJST_PRIORITY_COMMUNITY_THRESHOLD = 0.75\n",
"\n",
"# Name fields using variables. (This makes it easy to reference the same fields frequently without using strings\n",
"# and introducing the risk of misspelling the field name.)\n",
"\n",
"GEOID_FIELD_NAME = \"GEOID10\"\n",
"GEOID_TRACT_FIELD_NAME = \"GEOID10_TRACT\"\n",
"GEOID_STATE_FIELD_NAME = \"GEOID10_STATE\"\n",
"COUNTRY_FIELD_NAME = \"Country\"\n",
"CENSUS_BLOCK_GROUP_POPULATION_FIELD = \"Total population\"\n",
"\n",
"CEJST_SCORE_FIELD = \"cejst_score\"\n",
"CEJST_PERCENTILE_FIELD = \"cejst_percentile\"\n",
"CEJST_PRIORITY_COMMUNITY_FIELD = \"cejst_priority_community\"\n",
"\n",
"# Define some suffixes\n",
"POPULATION_SUFFIX = \" (priority population)\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5f3eaa5",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Load CEJST score data\n",
"cejst_data_path = DATA_DIR / \"score\" / \"csv\" / \"full\" / \"usa.csv\"\n",
"cejst_df = pd.read_csv(cejst_data_path, dtype={GEOID_FIELD_NAME: \"string\"})\n",
"\n",
"# Create the CBG's Census Tract ID by dropping the last number from the FIPS CODE of the CBG.\n",
"# The CBG ID is the last one character.\n",
"# For more information, see https://www.census.gov/programs-surveys/geography/guidance/geo-identifiers.html.\n",
"cejst_df.loc[:, GEOID_TRACT_FIELD_NAME] = (\n",
" cejst_df.loc[:, GEOID_FIELD_NAME].astype(str).str[:-1]\n",
")\n",
"\n",
"cejst_df.loc[:, GEOID_STATE_FIELD_NAME] = (\n",
" cejst_df.loc[:, GEOID_FIELD_NAME].astype(str).str[0:2]\n",
")\n",
"\n",
"cejst_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a2448dcd",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Load CalEnviroScreen 4.0\n",
"CALENVIROSCREEN_SCORE_FIELD = \"calenviroscreen_score\"\n",
"CALENVIROSCREEN_PERCENTILE_FIELD = \"calenviroscreen_percentile\"\n",
"CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD = \"calenviroscreen_priority_community\"\n",
"\n",
"calenviroscreen_data_path = DATA_DIR / \"dataset\" / \"calenviroscreen4\" / \"data06.csv\"\n",
"calenviroscreen_df = pd.read_csv(\n",
" calenviroscreen_data_path, dtype={GEOID_TRACT_FIELD_NAME: \"string\"}\n",
")\n",
"\n",
"# Convert priority community field to a bool.\n",
"calenviroscreen_df[CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD] = calenviroscreen_df[\n",
" CALENVIROSCREEN_PRIORITY_COMMUNITY_FIELD\n",
"].astype(bool)\n",
"\n",
"calenviroscreen_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f612a86a",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Load HUD data\n",
"hud_recap_data_path = DATA_DIR / \"dataset\" / \"hud_recap\" / \"usa.csv\"\n",
"hud_recap_df = pd.read_csv(\n",
" hud_recap_data_path, dtype={GEOID_TRACT_FIELD_NAME: \"string\"}\n",
")\n",
"\n",
"hud_recap_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ee6e6ee",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Join all dataframes that use tracts\n",
"census_tract_dfs = [calenviroscreen_df, hud_recap_df]\n",
"\n",
"census_tract_df = functools.reduce(\n",
" lambda left, right: pd.merge(\n",
" left=left, right=right, on=GEOID_TRACT_FIELD_NAME, how=\"outer\"\n",
" ),\n",
" census_tract_dfs,\n",
")\n",
"\n",
"if census_tract_df[GEOID_TRACT_FIELD_NAME].str.len().unique() != [11]:\n",
" raise ValueError(\"Some of the census tract data has the wrong length.\")\n",
"\n",
"if len(census_tract_df) > 74134:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"census_tract_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70d76fbc",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Join tract indices and CEJST data.\n",
"# Note: we're joining on the census *tract*, so there will be multiple CBG entries joined to the same census tract row from CES,\n",
"# creating multiple rows of the same CES data.\n",
"merged_df = cejst_df.merge(\n",
" census_tract_df,\n",
" how=\"left\",\n",
" on=GEOID_TRACT_FIELD_NAME,\n",
")\n",
"\n",
"\n",
"if len(merged_df) > 220333:\n",
" raise ValueError(\"Too many rows in the join.\")\n",
"\n",
"merged_df.head()\n",
"\n",
"\n",
"# merged_df.to_csv(\n",
"# path_or_buf=COMPARISON_OUTPUTS_DIR / \"merged.csv\", na_rep=\"\", index=False\n",
"# )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "558a2cc1",
"metadata": {},
"outputs": [],
"source": [
"# Define a namedtuple for indices.\n",
"Index = collections.namedtuple(\n",
" typename=\"Index\",\n",
" field_names=[\n",
" \"method_name\",\n",
" \"priority_communities_field\",\n",
" # Note: this field only used by indices defined at the census tract level.\n",
" \"other_census_tract_fields_to_keep\",\n",
" ],\n",
")\n",
"\n",
"# Define the indices used for CEJST scoring (`census_block_group_indices`) as well as comparison\n",
"# (`census_tract_indices`).\n",
"census_block_group_indices = [\n",
" Index(\n",
" method_name=\"Score A\",\n",
" priority_communities_field=\"Score A (top 25th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score B\",\n",
" priority_communities_field=\"Score B (top 25th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score C\",\n",
" priority_communities_field=\"Score C (top 25th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score D (25th percentile)\",\n",
" priority_communities_field=\"Score D (top 25th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score D (30th percentile)\",\n",
" priority_communities_field=\"Score D (top 30th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score D (35th percentile)\",\n",
" priority_communities_field=\"Score D (top 35th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Score D (40th percentile)\",\n",
" priority_communities_field=\"Score D (top 40th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
" Index(\n",
" method_name=\"Poverty\",\n",
" priority_communities_field=\"Poverty (Less than 200% of federal poverty line) (top 25th percentile)\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
"]\n",
"\n",
"census_tract_indices = [\n",
" Index(\n",
" method_name=\"CalEnviroScreen 4.0\",\n",
" priority_communities_field=\"calenviroscreen_priority_community\",\n",
" other_census_tract_fields_to_keep=[\n",
" CALENVIROSCREEN_SCORE_FIELD,\n",
" CALENVIROSCREEN_PERCENTILE_FIELD,\n",
" ],\n",
" ),\n",
" Index(\n",
" method_name=\"HUD RECAP\",\n",
" priority_communities_field=\"hud_recap_priority_community\",\n",
" other_census_tract_fields_to_keep=[],\n",
" ),\n",
"]"
]
},
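{
"cell_type": "code",
"execution_count": null,
"id": "3f0c8a21",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (a minimal sketch): confirm that every `priority_communities_field` defined above\n",
"# actually exists in the joined data before running the distribution and comparison steps below.\n",
"# Assumes `merged_df` from the earlier join cell is in scope.\n",
"missing_priority_fields = [\n",
" index.priority_communities_field\n",
" for index in census_block_group_indices + census_tract_indices\n",
" if index.priority_communities_field not in merged_df.columns\n",
"]\n",
"\n",
"if missing_priority_fields:\n",
" raise ValueError(f\"Priority community fields missing from merged data: {missing_priority_fields}\")"
]
},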
{
"cell_type": "code",
"execution_count": null,
"id": "5b71b2ab",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"def get_state_distributions(\n",
" df: pd.DataFrame, priority_communities_fields: typing.List[str]\n",
") -> pd.DataFrame:\n",
" \"\"\"For each boolean field of priority communities, calculate distribution across states and territories.\"\"\"\n",
"\n",
" # Ensure each field is boolean.\n",
" for priority_communities_field in priority_communities_fields:\n",
" if df[priority_communities_field].dtype != bool:\n",
" print(f\"Converting {priority_communities_field} to boolean.\")\n",
"\n",
" # Calculate the population included as priority communities per CBG. Will either be 0 or the population.\n",
" df[f\"{priority_communities_field}{POPULATION_SUFFIX}\"] = (\n",
" df[priority_communities_field] * df[CENSUS_BLOCK_GROUP_POPULATION_FIELD]\n",
" )\n",
"\n",
" def calculate_state_comparison(\n",
" frame: pd.DataFrame, geography_field: str\n",
" ) -> pd.DataFrame:\n",
" \"\"\"\n",
" This method will be applied to a `group_by` object. Inherits some parameters from outer scope.\n",
"\n",
" \"\"\"\n",
" summary_dict = {}\n",
" summary_dict[COUNTRY_FIELD_NAME] = frame[COUNTRY_FIELD_NAME].unique()[0]\n",
"\n",
" if geography_field == COUNTRY_FIELD_NAME:\n",
" summary_dict[GEOID_STATE_FIELD_NAME] = \"00\"\n",
" summary_dict[\"Geography name\"] = \"(Entire USA)\"\n",
"\n",
" if geography_field == GEOID_STATE_FIELD_NAME:\n",
" state_id = frame[GEOID_STATE_FIELD_NAME].unique()[0]\n",
" summary_dict[GEOID_STATE_FIELD_NAME] = state_id\n",
" summary_dict[\"Geography name\"] = us.states.lookup(state_id).name\n",
"\n",
" # Also add region information\n",
" region_id = frame[\"region\"].unique()[0]\n",
" summary_dict[\"region\"] = region_id\n",
"\n",
" if geography_field == \"region\":\n",
" region_id = frame[\"region\"].unique()[0]\n",
" summary_dict[\"region\"] = region_id\n",
" summary_dict[\"Geography name\"] = region_id\n",
"\n",
" if geography_field == \"division\":\n",
" division_id = frame[\"division\"].unique()[0]\n",
" summary_dict[\"division\"] = division_id\n",
" summary_dict[\"Geography name\"] = division_id\n",
"\n",
" summary_dict[\"Total CBGs in geography\"] = len(frame)\n",
" summary_dict[\"Total population in geography\"] = frame[\n",
" CENSUS_BLOCK_GROUP_POPULATION_FIELD\n",
" ].sum()\n",
"\n",
" for priority_communities_field in priority_communities_fields:\n",
" summary_dict[f\"{priority_communities_field}{POPULATION_SUFFIX}\"] = frame[\n",
" f\"{priority_communities_field}{POPULATION_SUFFIX}\"\n",
" ].sum()\n",
"\n",
" summary_dict[f\"{priority_communities_field} (total CBGs)\"] = frame[\n",
" f\"{priority_communities_field}\"\n",
" ].sum()\n",
"\n",
" # Calculate some combinations of other variables.\n",
" summary_dict[f\"{priority_communities_field} (percent CBGs)\"] = (\n",
" summary_dict[f\"{priority_communities_field} (total CBGs)\"]\n",
" / summary_dict[\"Total CBGs in geography\"]\n",
" )\n",
"\n",
" summary_dict[f\"{priority_communities_field} (percent population)\"] = (\n",
" summary_dict[f\"{priority_communities_field}{POPULATION_SUFFIX}\"]\n",
" / summary_dict[\"Total population in geography\"]\n",
" )\n",
"\n",
" df = pd.DataFrame(summary_dict, index=[0])\n",
"\n",
" return df\n",
"\n",
" # Add a field for country so we can do aggregations across the entire country.\n",
" df[COUNTRY_FIELD_NAME] = \"USA\"\n",
"\n",
" # First, run the comparison by the whole country\n",
" usa_grouped_df = df.groupby(COUNTRY_FIELD_NAME)\n",
"\n",
" # Run the comparison function on the groups.\n",
" usa_distribution_df = usa_grouped_df.progress_apply(\n",
" lambda frame: calculate_state_comparison(\n",
" frame, geography_field=COUNTRY_FIELD_NAME\n",
" )\n",
" )\n",
"\n",
" # Next, run the comparison by state\n",
" state_grouped_df = df.groupby(GEOID_STATE_FIELD_NAME)\n",
"\n",
" # Run the comparison function on the groups.\n",
" state_distribution_df = state_grouped_df.progress_apply(\n",
" lambda frame: calculate_state_comparison(\n",
" frame, geography_field=GEOID_STATE_FIELD_NAME\n",
" )\n",
" )\n",
"\n",
" # Next, run the comparison by region\n",
" region_grouped_df = df.groupby(\"region\")\n",
"\n",
" # Run the comparison function on the groups.\n",
" region_distribution_df = region_grouped_df.progress_apply(\n",
" lambda frame: calculate_state_comparison(frame, geography_field=\"region\")\n",
" )\n",
"\n",
" # Next, run the comparison by division\n",
" division_grouped_df = df.groupby(\"division\")\n",
"\n",
" # Run the comparison function on the groups.\n",
" division_distribution_df = division_grouped_df.progress_apply(\n",
" lambda frame: calculate_state_comparison(frame, geography_field=\"division\")\n",
" )\n",
"\n",
" # Combine the three\n",
" combined_df = pd.concat(\n",
" [\n",
" usa_distribution_df,\n",
" state_distribution_df,\n",
" region_distribution_df,\n",
" division_distribution_df,\n",
" ]\n",
" )\n",
"\n",
" return combined_df\n",
"\n",
"\n",
"def write_state_distribution_excel(\n",
" state_distribution_df: pd.DataFrame, file_path: pathlib.PosixPath\n",
") -> None:\n",
" \"\"\"Write the dataframe to excel with special formatting.\"\"\"\n",
" # Create a Pandas Excel writer using XlsxWriter as the engine.\n",
" writer = pd.ExcelWriter(file_path, engine=\"xlsxwriter\")\n",
"\n",
" # Convert the dataframe to an XlsxWriter Excel object. We also turn off the\n",
" # index column at the left of the output dataframe.\n",
" state_distribution_df.to_excel(writer, sheet_name=\"Sheet1\", index=False)\n",
"\n",
" # Get the xlsxwriter workbook and worksheet objects.\n",
" workbook = writer.book\n",
" worksheet = writer.sheets[\"Sheet1\"]\n",
" worksheet.autofilter(\n",
" 0, 0, state_distribution_df.shape[0], state_distribution_df.shape[1]\n",
" )\n",
"\n",
" # Set a width parameter for all columns\n",
" # Note: this is parameterized because every call to `set_column` requires setting the width.\n",
" column_width = 15\n",
"\n",
" for column in state_distribution_df.columns:\n",
" # Turn the column index into excel ranges (e.g., column #95 is \"CR\" and the range may be \"CR2:CR53\").\n",
" column_index = state_distribution_df.columns.get_loc(column)\n",
" column_character = get_excel_column_name(column_index)\n",
"\n",
" # Set all columns to larger width\n",
" worksheet.set_column(f\"{column_character}:{column_character}\", column_width)\n",
"\n",
" # Special formatting for all percent columns\n",
" # Note: we can't just search for `percent`, because that's included in the word `percentile`.\n",
" if \"percent \" in column or \"(percent)\" in column:\n",
" # Make these columns percentages.\n",
" percentage_format = workbook.add_format({\"num_format\": \"0%\"})\n",
" worksheet.set_column(\n",
" f\"{column_character}:{column_character}\",\n",
" column_width,\n",
" percentage_format,\n",
" )\n",
"\n",
" # Special formatting for columns that capture the percent of population considered priority.\n",
" if \"(percent population)\" in column:\n",
" column_ranges = (\n",
" f\"{column_character}2:{column_character}{len(state_distribution_df)+1}\"\n",
" )\n",
"\n",
" # Add green to red conditional formatting.\n",
" worksheet.conditional_format(\n",
" column_ranges,\n",
" # Min: green, max: red.\n",
" {\n",
" \"type\": \"2_color_scale\",\n",
" \"min_color\": \"#00FF7F\",\n",
" \"max_color\": \"#C82538\",\n",
" },\n",
" )\n",
"\n",
" header_format = workbook.add_format(\n",
" {\"bold\": True, \"text_wrap\": True, \"valign\": \"bottom\"}\n",
" )\n",
"\n",
" # Overwrite both the value and the format of each header cell\n",
" # This is because xlsxwriter / pandas has a known bug where it can't wrap text for a dataframe.\n",
" # See https://stackoverflow.com/questions/42562977/xlsxwriter-text-wrap-not-working.\n",
" for col_num, value in enumerate(state_distribution_df.columns.values):\n",
" worksheet.write(0, col_num, value, header_format)\n",
"\n",
" writer.save()\n",
"\n",
"\n",
"fields_to_analyze = [\n",
" index.priority_communities_field\n",
" for index in census_block_group_indices + census_tract_indices\n",
"]\n",
"\n",
"state_fips_codes = get_state_information(DATA_DIR)\n",
"\n",
"merged_with_state_information_df = merged_df.merge(\n",
" right=state_fips_codes, left_on=GEOID_STATE_FIELD_NAME, right_on=\"fips\"\n",
")\n",
"\n",
"state_distribution_df = get_state_distributions(\n",
" df=merged_with_state_information_df,\n",
" priority_communities_fields=fields_to_analyze,\n",
")\n",
"\n",
"state_distribution_df.to_csv(\n",
" path_or_buf=COMPARISON_OUTPUTS_DIR / \"Priority CBGs by state.csv\",\n",
" na_rep=\"\",\n",
" index=False,\n",
")\n",
"\n",
"write_state_distribution_excel(\n",
" state_distribution_df=state_distribution_df,\n",
" file_path=COMPARISON_OUTPUTS_DIR / \"Priority CBGs by state.xlsx\",\n",
")\n",
"\n",
"state_distribution_df.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9b9a329",
"metadata": {},
"outputs": [],
"source": [
"def write_markdown_and_docx_content(\n",
" markdown_content: str, file_dir: pathlib.PosixPath, file_name_without_extension: str\n",
") -> pathlib.PosixPath:\n",
" \"\"\"Write Markdown content to both .md and .docx files.\"\"\"\n",
" # Set the file paths for both files.\n",
" markdown_file_path = file_dir / f\"{file_name_without_extension}.md\"\n",
" docx_file_path = file_dir / f\"{file_name_without_extension}.docx\"\n",
"\n",
" # Write the markdown content to file.\n",
" with open(markdown_file_path, \"w\") as text_file:\n",
" text_file.write(markdown_content)\n",
"\n",
" # Convert markdown file to Word doc.\n",
" pypandoc.convert_file(\n",
" source_file=str(markdown_file_path),\n",
" to=\"docx\",\n",
" outputfile=str(docx_file_path),\n",
" extra_args=[],\n",
" )\n",
"\n",
" return docx_file_path\n",
"\n",
"\n",
"def get_markdown_comparing_census_block_group_indices(\n",
" census_block_group_indices=typing.List[Index],\n",
" df=pd.DataFrame,\n",
" state_field=GEOID_STATE_FIELD_NAME,\n",
") -> str:\n",
" \"\"\"Generate a Markdown string of analysis of multiple CBG indices.\"\"\"\n",
" count_field_name = \"Count of CBGs\"\n",
"\n",
" # List of all states/territories in their FIPS codes:\n",
" state_ids = sorted(df[state_field].unique())\n",
" state_names = \", \".join([us.states.lookup(state_id).name for state_id in state_ids])\n",
"\n",
" # Create markdown content for comparisons.\n",
" markdown_content = f\"\"\"\n",
"# Comparing multiple indices at the census block group level\n",
" \n",
"(This report was calculated on {datetime.today().strftime('%Y-%m-%d')}.)\n",
"\n",
"This report compares the following indices: {\", \".join([index.method_name for index in census_block_group_indices])}.\n",
"\n",
"This report analyzes the following US states and territories: {state_names}.\n",
"\n",
"\"\"\"\n",
"\n",
" for (index1, index2) in itertools.combinations(census_block_group_indices, 2):\n",
" # Group all data by their different values on Priority Communities Field for Index1 vs Priority Communities Field for Index2.\n",
" count_df = (\n",
" df.groupby(\n",
" [index1.priority_communities_field, index2.priority_communities_field]\n",
" )[GEOID_FIELD_NAME]\n",
" .count()\n",
" .reset_index(name=count_field_name)\n",
" )\n",
"\n",
" total_cbgs = count_df[count_field_name].sum()\n",
"\n",
" # Returns a series\n",
" true_true_cbgs_series = count_df.loc[\n",
" count_df[index1.priority_communities_field]\n",
" & count_df[index2.priority_communities_field],\n",
" count_field_name,\n",
" ]\n",
" true_false_cbgs_series = count_df.loc[\n",
" count_df[index1.priority_communities_field]\n",
" & ~count_df[index2.priority_communities_field],\n",
" count_field_name,\n",
" ]\n",
" false_true_cbgs_series = count_df.loc[\n",
" ~count_df[index1.priority_communities_field]\n",
" & count_df[index2.priority_communities_field],\n",
" count_field_name,\n",
" ]\n",
" false_false_cbgs_series = count_df.loc[\n",
" ~count_df[index1.priority_communities_field]\n",
" & ~count_df[index2.priority_communities_field],\n",
" count_field_name,\n",
" ]\n",
"\n",
" # Convert from series to a scalar value, including accounting for if no data exists for that pairing.\n",
" true_true_cbgs = (\n",
" true_true_cbgs_series.iloc[0] if len(true_true_cbgs_series) > 0 else 0\n",
" )\n",
" true_false_cbgs = (\n",
" true_false_cbgs_series.iloc[0] if len(true_false_cbgs_series) > 0 else 0\n",
" )\n",
" false_true_cbgs = (\n",
" false_true_cbgs_series.iloc[0] if len(false_true_cbgs_series) > 0 else 0\n",
" )\n",
" false_false_cbgs = (\n",
" false_false_cbgs_series.iloc[0] if len(false_false_cbgs_series) > 0 else 0\n",
" )\n",
"\n",
" markdown_content += (\n",
" \"*** \\n\\n\"\n",
" \"There are \"\n",
" f\"{true_true_cbgs} ({true_true_cbgs / total_cbgs:.0%}) \"\n",
" f\"census block groups that are both {index1.method_name} priority communities and {index2.method_name} priority communities.\\n\\n\"\n",
" \"There are \"\n",
" f\"{true_false_cbgs} ({true_false_cbgs / total_cbgs:.0%}) \"\n",
" f\"census block groups that are {index1.method_name} priority communities but not {index2.method_name} priority communities.\\n\\n\"\n",
" \"There are \"\n",
" f\"{false_true_cbgs} ({false_true_cbgs / total_cbgs:.0%}) \"\n",
" f\"census block groups that are not {index1.method_name} priority communities but are {index2.method_name} priority communities.\\n\\n\"\n",
" \"There are \"\n",
" f\"{false_false_cbgs} ({false_false_cbgs / total_cbgs:.0%}) \"\n",
" f\"census block groups that are neither {index1.method_name} priority communities nor {index2.method_name} priority communities.\\n\\n\"\n",
" \"\\n\\n\"\n",
" )\n",
"\n",
" return markdown_content\n",
"\n",
"\n",
"def get_comparison_census_block_group_indices(\n",
" census_block_group_indices=typing.List[Index],\n",
" df=pd.DataFrame,\n",
" state_field=GEOID_STATE_FIELD_NAME,\n",
") -> pathlib.PosixPath:\n",
" markdown_content = get_markdown_comparing_census_block_group_indices(\n",
" census_block_group_indices=census_block_group_indices,\n",
" df=merged_with_state_information_df,\n",
" )\n",
"\n",
" comparison_docx_file_path = write_markdown_and_docx_content(\n",
" markdown_content=markdown_content,\n",
" file_dir=COMPARISON_OUTPUTS_DIR,\n",
" file_name_without_extension=f\"Comparison report - All CBG indices\",\n",
" )\n",
"\n",
" return comparison_docx_file_path\n",
"\n",
"\n",
"# Compare multiple scores at the CBG level\n",
"get_comparison_census_block_group_indices(\n",
" census_block_group_indices=census_block_group_indices,\n",
" df=merged_with_state_information_df,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "25a10027",
"metadata": {},
"outputs": [],
"source": [
"# This cell defines a variety of comparison functions. It does not run them.\n",
"\n",
"# Define a namedtuple for column names, which need to be shared between multiple parts of this comparison pipeline.\n",
"# Named tuples are useful here because they provide guarantees that for each instance, all properties are defined and\n",
"# can be accessed as properties (rather than as strings).\n",
"\n",
"# Note: if you'd like to add a field used throughout the comparison process, add it in three places.\n",
"# For an example `new_field`,\n",
"# 1. in this namedtuple, add the field as a string in `field_names` (e.g., `field_names=[..., \"new_field\"])`)\n",
"# 2. in the function `get_comparison_field_names`, define how the field name should be created from input data\n",
"# (e.g., `...new_field=f\"New field compares {method_a_name} to {method_b_name}\")\n",
"# 3. In the function `get_comparison_markdown_content`, add some reporting on the new field to the markdown content.\n",
"# (e.g., `The statistics indicate that {calculation_based_on_new_field} percent of census tracts are different between scores.`)\n",
"ComparisonFieldNames = collections.namedtuple(\n",
" typename=\"ComparisonFieldNames\",\n",
" field_names=[\n",
" \"any_tract_has_at_least_one_method_a_cbg\",\n",
" \"method_b_tract_has_at_least_one_method_a_cbg\",\n",
" \"method_b_tract_has_100_percent_method_a_cbg\",\n",
" \"method_b_non_priority_tract_has_at_least_one_method_a_cbg\",\n",
" \"method_b_non_priority_tract_has_100_percent_method_a_cbg\",\n",
" ],\n",
")\n",
"\n",
"\n",
"def get_comparison_field_names(\n",
" method_a_name: str,\n",
" method_b_name: str,\n",
") -> ComparisonFieldNames:\n",
" comparison_field_names = ComparisonFieldNames(\n",
" any_tract_has_at_least_one_method_a_cbg=(\n",
" f\"Any tract has at least one {method_a_name} Priority CBG?\"\n",
" ),\n",
" method_b_tract_has_at_least_one_method_a_cbg=(\n",
" f\"{method_b_name} priority tract has at least one {method_a_name} CBG?\"\n",
" ),\n",
" method_b_tract_has_100_percent_method_a_cbg=(\n",
" f\"{method_b_name} tract has 100% {method_a_name} priority CBGs?\"\n",
" ),\n",
" method_b_non_priority_tract_has_at_least_one_method_a_cbg=(\n",
" f\"Non-priority {method_b_name} tract has at least one {method_a_name} priority CBG?\"\n",
" ),\n",
" method_b_non_priority_tract_has_100_percent_method_a_cbg=(\n",
" f\"Non-priority {method_b_name} tract has 100% {method_a_name} priority CBGs?\"\n",
" ),\n",
" )\n",
" return comparison_field_names\n",
"\n",
"\n",
"def get_df_with_only_shared_states(\n",
" df: pd.DataFrame,\n",
" field_a: str,\n",
" field_b: str,\n",
" state_field=GEOID_STATE_FIELD_NAME,\n",
") -> pd.DataFrame:\n",
" \"\"\"\n",
" Useful for looking at shared geographies across two fields.\n",
"\n",
" For a data frame and two fields, return a data frame only for states where there are non-null\n",
" values for both fields in that state (or territory).\n",
"\n",
" This is useful, for example, when running a comparison of CalEnviroScreen (only in California) against\n",
" a draft score that's national, and returning only the data for California for the entire data frame.\n",
" \"\"\"\n",
" field_a_states = df.loc[df[field_a].notnull(), state_field].unique()\n",
" field_b_states = df.loc[df[field_b].notnull(), state_field].unique()\n",
"\n",
" shared_states = list(set(field_a_states) & set(field_b_states))\n",
"\n",
" df = df.loc[df[state_field].isin(shared_states), :]\n",
"\n",
" return df\n",
"\n",
"\n",
"def get_comparison_df(\n",
" df: pd.DataFrame,\n",
" method_a_priority_census_block_groups_field: str,\n",
" method_b_priority_census_tracts_field: str,\n",
" other_census_tract_fields_to_keep: typing.Optional[typing.List[str]],\n",
" comparison_field_names: ComparisonFieldNames,\n",
" output_dir: pathlib.PosixPath,\n",
") -> None:\n",
" \"\"\"Produces a comparison report for any two given boolean columns representing priority fields.\n",
"\n",
" Args:\n",
" df: a pandas dataframe including the data for this comparison.\n",
" method_a_priority_census_block_groups_field: the name of a boolean column in `df`, such as the CEJST priority\n",
" community field that defines communities at the level of census block groups (CBGs).\n",
" method_b_priority_census_tracts_field: the name of a boolean column in `df`, such as the CalEnviroScreen priority\n",
" community field that defines communities at the level of census tracts.\n",
" other_census_tract_fields_to_keep (optional): a list of field names to preserve at the census tract level\n",
"\n",
" Returns:\n",
" df: a pandas dataframe with one row with the results of this comparison\n",
" \"\"\"\n",
"\n",
" def calculate_comparison(frame: pd.DataFrame) -> pd.DataFrame:\n",
" \"\"\"\n",
" This method will be applied to a `group_by` object.\n",
"\n",
" Note: It inherits from outer scope `method_a_priority_census_block_groups_field`, `method_b_priority_census_tracts_field`,\n",
" and `other_census_tract_fields_to_keep`.\n",
" \"\"\"\n",
" # Keep all the tract values at the Census Tract Level\n",
" for field in other_census_tract_fields_to_keep:\n",
" if len(frame[field].unique()) != 1:\n",
" raise ValueError(\n",
" f\"There are different values per CBG for field {field}.\"\n",
" \"`other_census_tract_fields_to_keep` can only be used for fields at the census tract level.\"\n",
" )\n",
"\n",
" df = frame.loc[\n",
" frame.index[0],\n",
" [\n",
" GEOID_TRACT_FIELD_NAME,\n",
" method_b_priority_census_tracts_field,\n",
" ]\n",
" + other_census_tract_fields_to_keep,\n",
" ]\n",
"\n",
" # Convenience constant for whether the tract is or is not a method B priority community.\n",
" is_a_method_b_priority_tract = frame.loc[\n",
" frame.index[0], [method_b_priority_census_tracts_field]\n",
" ][0]\n",
"\n",
" # Recall that NaN values are not falsy, so we need to check if `is_a_method_b_priority_tract` is True.\n",
" is_a_method_b_priority_tract = is_a_method_b_priority_tract is True\n",
"\n",
" # Calculate whether the tract (whether or not it is a comparison priority tract) includes CBGs that are priority\n",
" # according to the current CBG score.\n",
" df[comparison_field_names.any_tract_has_at_least_one_method_a_cbg] = (\n",
" frame.loc[:, method_a_priority_census_block_groups_field].sum() > 0\n",
" )\n",
"\n",
" # Calculate comparison\n",
" # A comparison priority tract has at least one CBG that is a priority CBG.\n",
" df[comparison_field_names.method_b_tract_has_at_least_one_method_a_cbg] = (\n",
" frame.loc[:, method_a_priority_census_block_groups_field].sum() > 0\n",
" if is_a_method_b_priority_tract\n",
" else None\n",
" )\n",
"\n",
" # A comparison priority tract has all of its contained CBGs as CBG priority CBGs.\n",
" df[comparison_field_names.method_b_tract_has_100_percent_method_a_cbg] = (\n",
" frame.loc[:, method_a_priority_census_block_groups_field].mean() == 1\n",
" if is_a_method_b_priority_tract\n",
" else None\n",
" )\n",
"\n",
" # Calculate the inverse\n",
" # A tract that is _not_ a comparison priority has at least one CBG priority CBG.\n",
" df[\n",
" comparison_field_names.method_b_non_priority_tract_has_at_least_one_method_a_cbg\n",
" ] = (\n",
" frame.loc[:, method_a_priority_census_block_groups_field].sum() > 0\n",
" if not is_a_method_b_priority_tract\n",
" else None\n",
" )\n",
"\n",
" # A tract that is _not_ a comparison priority has all of its contained CBGs as CBG priority CBGs.\n",
" df[\n",
" comparison_field_names.method_b_non_priority_tract_has_100_percent_method_a_cbg\n",
" ] = (\n",
" frame.loc[:, method_a_priority_census_block_groups_field].mean() == 1\n",
" if not is_a_method_b_priority_tract\n",
" else None\n",
" )\n",
"\n",
" # For all remaining fields, calculate the average\n",
" # TODO: refactor to vectorize to make faster.\n",
" for field in [\n",
" \"Poverty (Less than 200% of federal poverty line)\",\n",
" \"Percent of households in linguistic isolation\",\n",
" \"Percent individuals age 25 or over with less than high school degree\",\n",
" \"Unemployed civilians (percent)\",\n",
" ]:\n",
" df[f\"{field} (average of CBGs)\"] = frame.loc[:, field].mean()\n",
"\n",
" return df\n",
"\n",
" # Group all data by the census tract.\n",
" grouped_df = df.groupby(GEOID_TRACT_FIELD_NAME)\n",
"\n",
" # Run the comparison function on the groups.\n",
" comparison_df = grouped_df.progress_apply(calculate_comparison)\n",
"\n",
" return comparison_df\n",
"\n",
"\n",
"def get_comparison_markdown_content(\n",
" original_df: pd.DataFrame,\n",
" comparison_df: pd.DataFrame,\n",
" comparison_field_names: ComparisonFieldNames,\n",
" method_a_name: str,\n",
" method_b_name: str,\n",
" method_a_priority_census_block_groups_field: str,\n",
" method_b_priority_census_tracts_field: str,\n",
" state_field: str = GEOID_STATE_FIELD_NAME,\n",
") -> str:\n",
" # Prepare some constants for use in the following Markdown content.\n",
" total_cbgs = len(original_df)\n",
"\n",
" # List of all states/territories in their FIPS codes:\n",
" state_ids = sorted(original_df[state_field].unique())\n",
" state_names = \", \".join([us.states.lookup(state_id).name for state_id in state_ids])\n",
"\n",
" # Note: using squeeze throughout do reduce result of `sum()` to a scalar.\n",
" # TODO: investigate why sums are sometimes series and sometimes scalar.\n",
" method_a_priority_cbgs = (\n",
" original_df.loc[:, method_a_priority_census_block_groups_field].sum().squeeze()\n",
" )\n",
" method_a_priority_cbgs_percent = f\"{method_a_priority_cbgs / total_cbgs:.0%}\"\n",
"\n",
" total_tracts_count = len(comparison_df)\n",
"\n",
" method_b_priority_tracts_count = comparison_df.loc[\n",
" :, method_b_priority_census_tracts_field\n",
" ].sum()\n",
"\n",
" method_b_priority_tracts_count_percent = (\n",
" f\"{method_b_priority_tracts_count / total_tracts_count:.0%}\"\n",
" )\n",
" method_b_non_priority_tracts_count = (\n",
" total_tracts_count - method_b_priority_tracts_count\n",
" )\n",
"\n",
" method_a_tracts_count = (\n",
" comparison_df.loc[\n",
" :, comparison_field_names.any_tract_has_at_least_one_method_a_cbg\n",
" ]\n",
" .sum()\n",
" .squeeze()\n",
" )\n",
" method_a_tracts_count_percent = f\"{method_a_tracts_count / total_tracts_count:.0%}\"\n",
"\n",
" # Method A priority community stats\n",
" method_b_tracts_with_at_least_one_method_a_cbg = comparison_df.loc[\n",
" :, comparison_field_names.method_b_tract_has_at_least_one_method_a_cbg\n",
" ].sum()\n",
" method_b_tracts_with_at_least_one_method_a_cbg_percent = f\"{method_b_tracts_with_at_least_one_method_a_cbg / method_b_priority_tracts_count:.0%}\"\n",
"\n",
" method_b_tracts_with_at_100_percent_method_a_cbg = comparison_df.loc[\n",
" :, comparison_field_names.method_b_tract_has_100_percent_method_a_cbg\n",
" ].sum()\n",
" method_b_tracts_with_at_100_percent_method_a_cbg_percent = f\"{method_b_tracts_with_at_100_percent_method_a_cbg / method_b_priority_tracts_count:.0%}\"\n",
"\n",
" # Method A non-priority community stats\n",
" method_b_non_priority_tracts_with_at_least_one_method_a_cbg = comparison_df.loc[\n",
" :,\n",
" comparison_field_names.method_b_non_priority_tract_has_at_least_one_method_a_cbg,\n",
" ].sum()\n",
"\n",
" method_b_non_priority_tracts_with_at_least_one_method_a_cbg_percent = f\"{method_b_non_priority_tracts_with_at_least_one_method_a_cbg / method_b_non_priority_tracts_count:.0%}\"\n",
"\n",
" method_b_non_priority_tracts_with_100_percent_method_a_cbg = comparison_df.loc[\n",
" :,\n",
" comparison_field_names.method_b_non_priority_tract_has_100_percent_method_a_cbg,\n",
" ].sum()\n",
" method_b_non_priority_tracts_with_100_percent_method_a_cbg_percent = f\"{method_b_non_priority_tracts_with_100_percent_method_a_cbg / method_b_non_priority_tracts_count:.0%}\"\n",
"\n",
" # Create markdown content for comparisons.\n",
" markdown_content = f\"\"\"\n",
"# {method_a_name} compared to {method_b_name}\n",
"\n",
"(This report was calculated on {datetime.today().strftime('%Y-%m-%d')}.)\n",
"\n",
"This report analyzes the following US states and territories: {state_names}.\n",
"\n",
"Recall that census tracts contain one or more census block groups, with up to nine census block groups per tract.\n",
"\n",
"Within the geographic area analyzed, there are {method_b_priority_tracts_count} census tracts designated as priority communities by {method_b_name}, out of {total_tracts_count} total tracts ({method_b_priority_tracts_count_percent}). \n",
"\n",
"Within the geographic region analyzed, there are {method_a_priority_cbgs} census block groups considered as priority communities by {method_a_name}, out of {total_cbgs} CBGs ({method_a_priority_cbgs_percent}). They occupy {method_a_tracts_count} census tracts ({method_a_tracts_count_percent}) of the geographic area analyzed.\n",
"\n",
"Out of every {method_b_name} priority census tract, {method_b_tracts_with_at_least_one_method_a_cbg} ({method_b_tracts_with_at_least_one_method_a_cbg_percent}) of these census tracts have at least one census block group within them that is considered a priority community by {method_a_name}.\n",
"\n",
"Out of every {method_b_name} priority census tract, {method_b_tracts_with_at_100_percent_method_a_cbg} ({method_b_tracts_with_at_100_percent_method_a_cbg_percent}) of these census tracts have 100% of the included census block groups within them considered priority communities by {method_a_name}.\n",
"\n",
"Out of every census tract that is __not__ marked as a priority community by {method_b_name}, {method_b_non_priority_tracts_with_at_least_one_method_a_cbg} ({method_b_non_priority_tracts_with_at_least_one_method_a_cbg_percent}) of these census tracts have at least one census block group within them that is considered a priority community by the current version of the CEJST score.\n",
"\n",
"Out of every census tract that is __not__ marked as a priority community by {method_b_name}, {method_b_non_priority_tracts_with_100_percent_method_a_cbg} ({method_b_non_priority_tracts_with_100_percent_method_a_cbg_percent}) of these census tracts have 100% of the included census block groups within them considered priority communities by the current version of the CEJST score.\n",
"\"\"\"\n",
"\n",
" return markdown_content\n",
"\n",
"\n",
"def get_secondary_comparison_df(\n",
" comparison_df: pd.DataFrame,\n",
" comparison_field_names: ComparisonFieldNames,\n",
" method_b_priority_census_tracts_field: str,\n",
") -> pd.DataFrame:\n",
" \"\"\"A secondary level of comparison.\n",
"\n",
" The first level of comparison identifies census tracts prioritized by Method A,\n",
" compared to whether or not they're prioritized by Method B.\n",
"\n",
" This comparison method analyzes characteristics of those census tracts, based on whether or not they are prioritized\n",
" or not by Method A and/or Method B.\n",
"\n",
"\n",
" E.g., it might show that tracts prioritized by A but not B have a higher average income,\n",
" or that tracts prioritized by B but not A have a lower percent of unemployed people.\"\"\"\n",
" grouped_df = comparison_df.groupby(\n",
" [\n",
" method_b_priority_census_tracts_field,\n",
" comparison_field_names.method_b_tract_has_at_least_one_method_a_cbg,\n",
" comparison_field_names.method_b_non_priority_tract_has_at_least_one_method_a_cbg,\n",
" ],\n",
" dropna=False,\n",
" )\n",
"\n",
" # Run the comparison function on the groups.\n",
" secondary_comparison_df = grouped_df.mean().reset_index()\n",
"\n",
" return secondary_comparison_df\n",
"\n",
"\n",
"def execute_comparison(\n",
" df: pd.DataFrame,\n",
" method_a_name: str,\n",
" method_b_name: str,\n",
" method_a_priority_census_block_groups_field: str,\n",
" method_b_priority_census_tracts_field: str,\n",
" other_census_tract_fields_to_keep: typing.Optional[typing.List[str]],\n",
") -> pathlib.PosixPath:\n",
" \"\"\"Execute an individual comparison by creating the data frame and writing the report.\n",
"\n",
" Args:\n",
" df: a pandas dataframe including the data for this comparison.\n",
" method_a_priority_census_block_groups_field: the name of a boolean column in `df`, such as the CEJST priority\n",
" community field that defines communities at the level of census block groups (CBGs).\n",
" method_b_priority_census_tracts_field: the name of a boolean column in `df`, such as the CalEnviroScreen priority\n",
" community field that defines communities at the level of census tracts.\n",
" other_census_tract_fields_to_keep (optional): a list of field names to preserve at the census tract level\n",
"\n",
" Returns:\n",
" df: a pandas dataframe with one row with the results of this comparison\n",
"\n",
" \"\"\"\n",
" comparison_field_names = get_comparison_field_names(\n",
" method_a_name=method_a_name, method_b_name=method_b_name\n",
" )\n",
"\n",
" # Create or use a directory for outputs grouped by Method A.\n",
" output_dir = COMPARISON_OUTPUTS_DIR / method_a_name\n",
" output_dir.mkdir(parents=True, exist_ok=True)\n",
"\n",
" df_with_only_shared_states = get_df_with_only_shared_states(\n",
" df=df,\n",
" field_a=method_a_priority_census_block_groups_field,\n",
" field_b=method_b_priority_census_tracts_field,\n",
" )\n",
"\n",
" comparison_df = get_comparison_df(\n",
" df=df_with_only_shared_states,\n",
" method_a_priority_census_block_groups_field=method_a_priority_census_block_groups_field,\n",
" method_b_priority_census_tracts_field=method_b_priority_census_tracts_field,\n",
" comparison_field_names=comparison_field_names,\n",
" other_census_tract_fields_to_keep=other_census_tract_fields_to_keep,\n",
" output_dir=output_dir,\n",
" )\n",
"\n",
" # Write comparison to CSV.\n",
" file_path = (\n",
" output_dir / f\"Comparison Output - {method_a_name} and {method_b_name}.csv\"\n",
" )\n",
" comparison_df.to_csv(\n",
" path_or_buf=file_path,\n",
" na_rep=\"\",\n",
" index=False,\n",
" )\n",
"\n",
" # Secondary comparison DF\n",
" secondary_comparison_df = get_secondary_comparison_df(\n",
" comparison_df=comparison_df,\n",
" comparison_field_names=comparison_field_names,\n",
" method_b_priority_census_tracts_field=method_b_priority_census_tracts_field,\n",
" )\n",
"\n",
" # Write secondary comparison to CSV.\n",
" file_path = (\n",
" output_dir\n",
" / f\"Secondary Comparison Output - {method_a_name} and {method_b_name}.csv\"\n",
" )\n",
" secondary_comparison_df.to_csv(\n",
" path_or_buf=file_path,\n",
" na_rep=\"\",\n",
" index=False,\n",
" )\n",
"\n",
" markdown_content = get_comparison_markdown_content(\n",
" original_df=df_with_only_shared_states,\n",
" comparison_df=comparison_df,\n",
" comparison_field_names=comparison_field_names,\n",
" method_a_name=method_a_name,\n",
" method_b_name=method_b_name,\n",
" method_a_priority_census_block_groups_field=method_a_priority_census_block_groups_field,\n",
" method_b_priority_census_tracts_field=method_b_priority_census_tracts_field,\n",
" )\n",
"\n",
" comparison_docx_file_path = write_markdown_and_docx_content(\n",
" markdown_content=markdown_content,\n",
" file_dir=output_dir,\n",
" file_name_without_extension=f\"Comparison report - {method_a_name} and {method_b_name}\",\n",
" )\n",
"\n",
" return comparison_docx_file_path\n",
"\n",
"\n",
"def execute_comparisons(\n",
" df: pd.DataFrame,\n",
" census_block_group_indices: typing.List[Index],\n",
" census_tract_indices: typing.List[Index],\n",
"):\n",
" \"\"\"Create multiple comparison reports.\"\"\"\n",
" comparison_docx_file_paths = []\n",
" for cbg_index in census_block_group_indices:\n",
" for census_tract_index in census_tract_indices:\n",
" print(\n",
" f\"Running comparisons for {cbg_index.method_name} against {census_tract_index.method_name}...\"\n",
" )\n",
"\n",
" comparison_docx_file_path = execute_comparison(\n",
" df=df,\n",
" method_a_name=cbg_index.method_name,\n",
" method_b_name=census_tract_index.method_name,\n",
" method_a_priority_census_block_groups_field=cbg_index.priority_communities_field,\n",
" method_b_priority_census_tracts_field=census_tract_index.priority_communities_field,\n",
" other_census_tract_fields_to_keep=census_tract_index.other_census_tract_fields_to_keep,\n",
" )\n",
"\n",
" comparison_docx_file_paths.append(comparison_docx_file_path)\n",
"\n",
" return comparison_docx_file_paths"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9b8b6d1e",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# Actually execute the functions\n",
"file_paths = execute_comparisons(\n",
" df=merged_df,\n",
" census_block_group_indices=census_block_group_indices,\n",
" census_tract_indices=census_tract_indices,\n",
")\n",
"\n",
"print(file_paths)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}