diff --git a/data/data-pipeline/data_pipeline/etl/score/etl_score.py b/data/data-pipeline/data_pipeline/etl/score/etl_score.py
index 14f181b7..4fc20a42 100644
--- a/data/data-pipeline/data_pipeline/etl/score/etl_score.py
+++ b/data/data-pipeline/data_pipeline/etl/score/etl_score.py
@@ -161,6 +161,11 @@ class ScoreETL(ExtractTransformLoad):
                 renamed_field=self.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD_NAME,
                 bucket=None,
             ),
+            DataSet(
+                input_field=self.MEDIAN_INCOME_FIELD_NAME,
+                renamed_field=self.MEDIAN_INCOME_FIELD_NAME,
+                bucket=None,
+            ),
             # The following data sets have buckets, because they're used in Score C
             DataSet(
                 input_field="CANCER",
@@ -540,6 +545,7 @@ class ScoreETL(ExtractTransformLoad):
         logger.info("Adding Score G")
 
         high_school_cutoff_threshold = 0.05
+        high_school_cutoff_threshold_2 = 0.06
 
         df["Score G (communities)"] = (
             (df[self.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD_NAME] < 0.7)
@@ -551,6 +557,25 @@ class ScoreETL(ExtractTransformLoad):
         df["Score G"] = df["Score G (communities)"].astype(int)
         df["Score G (percentile)"] = df["Score G"]
 
+        df["Score H (communities)"] = (
+            (df[self.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD_NAME] < 0.8)
+            & (df[self.HIGH_SCHOOL_FIELD_NAME] > high_school_cutoff_threshold_2)
+        ) | (
+            (df[self.POVERTY_LESS_THAN_200_FPL_FIELD_NAME] > 0.40)
+            & (df[self.HIGH_SCHOOL_FIELD_NAME] > high_school_cutoff_threshold_2)
+        )
+        df["Score H"] = df["Score H (communities)"].astype(int)
+
+        # df["80% AMI & 6% high school (communities)"] = (
+        #     (df[self.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD_NAME] < 0.8)
+        #     & (df[self.HIGH_SCHOOL_FIELD_NAME] > high_school_cutoff_threshold_2)
+        # )
+        #
+        # df["FPL200>40% & 6% high school (communities)"] = (
+        #     (df[self.POVERTY_LESS_THAN_200_FPL_FIELD_NAME] > 0.40)
+        #     & (df[self.HIGH_SCHOOL_FIELD_NAME] > high_school_cutoff_threshold_2)
+        # )
+
         df["NMTC (communities)"] = (
             (df[self.MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD_NAME] < 0.8)
         ) | (
@@ -637,7 +662,8 @@ class ScoreETL(ExtractTransformLoad):
             # Skip GEOID_FIELD_NAME, because it's a string.
             if data_set.renamed_field == self.GEOID_FIELD_NAME:
                 continue
-            df[f"{data_set.renamed_field}"] = pd.to_numeric(
+
+            df[data_set.renamed_field] = pd.to_numeric(
                 df[data_set.renamed_field]
             )
 
diff --git a/data/data-pipeline/data_pipeline/ipython/scoring_comparison.ipynb b/data/data-pipeline/data_pipeline/ipython/scoring_comparison.ipynb
index 01538091..398aa72a 100644
--- a/data/data-pipeline/data_pipeline/ipython/scoring_comparison.ipynb
+++ b/data/data-pipeline/data_pipeline/ipython/scoring_comparison.ipynb
@@ -242,14 +242,18 @@
     "        \"priority_communities_field\",\n",
     "        # Note: this field only used by indices defined at the census tract level.\n",
     "        \"other_census_tract_fields_to_keep\",\n",
-    "    ],\n",
+    "    ]\n",
     ")\n",
     "\n",
     "# Define the indices used for CEJST scoring (`census_block_group_indices`) as well as comparison\n",
     "# (`census_tract_indices`).\n",
-    "\n",
     "census_block_group_indices = [\n",
     "    Index(\n",
+    "        method_name=\"Score H\",\n",
+    "        priority_communities_field=\"Score H (communities)\",\n",
+    "        other_census_tract_fields_to_keep=[],\n",
+    "    ),\n",
+    "    Index(\n",
     "        method_name=\"Score G\",\n",
     "        priority_communities_field=\"Score G (communities)\",\n",
     "        other_census_tract_fields_to_keep=[],\n",
@@ -264,16 +268,6 @@
     "        priority_communities_field=\"Score F (communities)\",\n",
     "        other_census_tract_fields_to_keep=[],\n",
     "    ),\n",
-    "#     Index(\n",
-    "#         method_name=\"Score F (socioeconomic only)\",\n",
-    "#         priority_communities_field=\"Meets socioeconomic criteria\",\n",
-    "#         other_census_tract_fields_to_keep=[],\n",
-    "#     ),\n",
-    "#     Index(\n",
-    "#         method_name=\"Score F (burden only)\",\n",
-    "#         priority_communities_field=\"Meets burden criteria\",\n",
-    "#         other_census_tract_fields_to_keep=[],\n",
-    "#     ),\n",
     "    Index(\n",
     "        method_name=\"Score A\",\n",
     "        priority_communities_field=\"Score A (top 25th percentile)\",\n",
     "        other_census_tract_fields_to_keep=[],\n",
@@ -293,27 +287,11 @@
     "        method_name=\"Score D (25th percentile)\",\n",
     "        priority_communities_field=\"Score D (top 25th percentile)\",\n",
     "        other_census_tract_fields_to_keep=[],\n",
-    "    ),\n",
-    "#     Index(\n",
-    "#         method_name=\"Score D (30th percentile)\",\n",
-    "#         priority_communities_field=\"Score D (top 30th percentile)\",\n",
-    "#         other_census_tract_fields_to_keep=[],\n",
-    "#     ),\n",
-    "#     Index(\n",
-    "#         method_name=\"Score D (35th percentile)\",\n",
-    "#         priority_communities_field=\"Score D (top 35th percentile)\",\n",
-    "#         other_census_tract_fields_to_keep=[],\n",
-    "#     ),\n",
-    "#     Index(\n",
-    "#         method_name=\"Score D (40th percentile)\",\n",
-    "#         priority_communities_field=\"Score D (top 40th percentile)\",\n",
-    "#         other_census_tract_fields_to_keep=[],\n",
-    "#     ),\n",
     "    Index(\n",
     "        method_name=\"Poverty\",\n",
     "        priority_communities_field=\"Poverty (Less than 200% of federal poverty line) (top 25th percentile)\",\n",
     "        other_census_tract_fields_to_keep=[],\n",
-    "    ),\n",
+    "    )\n",
     "]\n",
     "\n",
     "census_tract_indices = [\n",
@@ -572,6 +550,185 @@
     "state_distribution_df.head()"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8790cd64",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "# Compare CBG scores to each other, running secondary analysis on\n",
+    "# characteristics of CBGs prioritized by one but not the other.\n",
+    "def get_cbg_score_comparison_df(\n",
+    "    df: pd.DataFrame,\n",
+    "    method_a_priority_census_block_groups_field: str,\n",
+    "    method_b_priority_census_block_groups_field: str,\n",
+    "    comparison_fields: typing.List[str],\n",
+    ") -> pd.DataFrame:\n",
+    "    \"\"\"Compare CBG scores to each other.\n",
+    "\n",
+    "    This comparison method analyzes characteristics of those census block groups, based on whether or not they are prioritized\n",
+    "    by Method A and/or Method B.\n",
+    "\n",
+    "    E.g., it might show that CBGs prioritized by A but not B have a higher average income,\n",
+    "    or that CBGs prioritized by B but not A have a lower percent of unemployed people.\n",
+    "    \"\"\"\n",
+    "    df_subset = df[\n",
+    "        [\n",
+    "            method_a_priority_census_block_groups_field,\n",
+    "            method_b_priority_census_block_groups_field,\n",
+    "        ]\n",
+    "        + comparison_fields\n",
+    "    ]\n",
+    "\n",
+    "    grouped_df = df_subset.groupby(\n",
+    "        [\n",
+    "            method_a_priority_census_block_groups_field,\n",
+    "            method_b_priority_census_block_groups_field,\n",
+    "        ],\n",
+    "        dropna=False,\n",
+    "    )\n",
+    "\n",
+    "    # Run the comparison function on the groups.\n",
+    "    comparison_df = grouped_df.mean().reset_index()\n",
+    "\n",
+    "    # Rename fields to reflect the mean aggregation\n",
+    "    comparison_df.rename(\n",
+    "        mapper={\n",
+    "            comparison_field: f\"{comparison_field} (mean of CBGs)\"\n",
+    "            for comparison_field in comparison_fields\n",
+    "        },\n",
+    "        axis=1,\n",
+    "        inplace=True,\n",
+    "    )\n",
+    "\n",
+    "    return comparison_df\n",
+    "\n",
+    "\n",
+    "def write_cbg_score_comparison_excel(\n",
+    "    cbg_score_comparison_df: pd.DataFrame, file_path: pathlib.PosixPath\n",
+    ") -> None:\n",
+    "    \"\"\"Write the dataframe to excel with special formatting.\"\"\"\n",
+    "    # Create a Pandas Excel writer using XlsxWriter as the engine.\n",
+    "    writer = pd.ExcelWriter(file_path, engine=\"xlsxwriter\")\n",
+    "\n",
+    "    # Convert the dataframe to an XlsxWriter Excel object. We also turn off the\n",
+    "    # index column at the left of the output dataframe.\n",
+    "    cbg_score_comparison_df.to_excel(writer, sheet_name=\"Sheet1\", index=False)\n",
+    "\n",
+    "    # Get the xlsxwriter workbook and worksheet objects.\n",
+    "    workbook = writer.book\n",
+    "    worksheet = writer.sheets[\"Sheet1\"]\n",
+    "    worksheet.autofilter(\n",
+    "        0, 0, cbg_score_comparison_df.shape[0], cbg_score_comparison_df.shape[1]\n",
+    "    )\n",
+    "\n",
+    "    # Set a width parameter for all columns\n",
+    "    # Note: this is parameterized because every call to `set_column` requires setting the width.\n",
+    "    column_width = 15\n",
+    "\n",
+    "    for column in cbg_score_comparison_df.columns:\n",
+    "        # Turn the column index into excel ranges (e.g., column #95 is \"CR\" and the range may be \"CR2:CR53\").\n",
+    "        column_index = cbg_score_comparison_df.columns.get_loc(column)\n",
+    "        column_character = get_excel_column_name(column_index)\n",
+    "\n",
+    "        # Set all columns to larger width\n",
+    "        worksheet.set_column(f\"{column_character}:{column_character}\", column_width)\n",
+    "\n",
+    "        # Add green to red conditional formatting.\n",
+    "        column_ranges = f\"{column_character}2:{column_character}{len(cbg_score_comparison_df)+1}\"\n",
+    "        worksheet.conditional_format(\n",
+    "            column_ranges,\n",
+    "            # Min: green, max: red.\n",
+    "            {\n",
+    "                \"type\": \"2_color_scale\",\n",
+    "                \"min_color\": \"#00FF7F\",\n",
+    "                \"max_color\": \"#C82538\",\n",
+    "            },\n",
+    "        )\n",
+    "\n",
+    "        # Special formatting for all percent columns\n",
+    "        # Note: we can't just search for `percent`, because that's included in the word `percentile`.\n",
+    "        if \"percent \" in column or \"(percent)\" in column or \"Percent \" in column:\n",
+    "            # Make these columns percentages.\n",
+    "            percentage_format = workbook.add_format({\"num_format\": \"0%\"})\n",
+    "            worksheet.set_column(\n",
f\"{column_character}:{column_character}\",\n", + " column_width,\n", + " percentage_format,\n", + " )\n", + "\n", + " header_format = workbook.add_format(\n", + " {\"bold\": True, \"text_wrap\": True, \"valign\": \"bottom\"}\n", + " )\n", + "\n", + " # Overwrite both the value and the format of each header cell\n", + " # This is because xlsxwriter / pandas has a known bug where it can't wrap text for a dataframe.\n", + " # See https://stackoverflow.com/questions/42562977/xlsxwriter-text-wrap-not-working.\n", + " for col_num, value in enumerate(cbg_score_comparison_df.columns.values):\n", + " worksheet.write(0, col_num, value, header_format)\n", + "\n", + " writer.save()\n", + "\n", + "\n", + "def compare_cbg_scores(\n", + " df: pd.DataFrame,\n", + " index_a: Index,\n", + " index_b: Index,\n", + " output_dir: pathlib.PosixPath,\n", + " comparison_fields: typing.List[str],\n", + "):\n", + " # Secondary comparison DF\n", + " cbg_score_comparison_df = get_cbg_score_comparison_df(\n", + " df=df,\n", + " method_a_priority_census_block_groups_field=index_a.priority_communities_field,\n", + " method_b_priority_census_block_groups_field=index_b.priority_communities_field,\n", + " comparison_fields=comparison_fields,\n", + " )\n", + "\n", + " # Write secondary comparison to CSV.\n", + " file_name_part = (\n", + " f\"CBG Comparison Output - {index_a.method_name} and {index_b.method_name}\"\n", + " )\n", + " output_dir.mkdir(parents=True, exist_ok=True)\n", + " file_path = output_dir / (file_name_part + \".csv\")\n", + " file_path_xlsx = output_dir / (file_name_part + \".xlsx\")\n", + " \n", + " cbg_score_comparison_df.to_csv(\n", + " path_or_buf=file_path,\n", + " na_rep=\"\",\n", + " index=False,\n", + " )\n", + "\n", + " write_cbg_score_comparison_excel(\n", + " cbg_score_comparison_df=cbg_score_comparison_df, file_path=file_path_xlsx\n", + " )\n", + "\n", + "\n", + "comparison_fields = [\n", + " \"Percent of individuals < 100% Federal Poverty Line\",\n", + " \"Percent of individuals < 200% Federal Poverty Line\",\n", + " \"Median household income (% of AMI)\",\n", + " \"Percent of households in linguistic isolation\",\n", + " \"Percent individuals age 25 or over with less than high school degree\",\n", + " \"Linguistic isolation (percent)\",\n", + " \"Unemployed civilians (percent)\",\n", + " \"Median household income in the past 12 months\",\n", + "]\n", + "\n", + "for (index_a, index_b) in itertools.combinations(census_block_group_indices, 2):\n", + " print(f\"Comparing {index_a} and {index_b}.\")\n", + " compare_cbg_scores(\n", + " df=merged_df,\n", + " index_a=index_a,\n", + " index_b=index_b,\n", + " comparison_fields=comparison_fields,\n", + " output_dir=COMPARISON_OUTPUTS_DIR / \"cbg_score_comparisons\",\n", + " )" + ] + }, { "cell_type": "code", "execution_count": null,