diff --git a/client/src/components/AreaDetail/AreaDetail.tsx b/client/src/components/AreaDetail/AreaDetail.tsx index 2f3656b8..3d6d2667 100644 --- a/client/src/components/AreaDetail/AreaDetail.tsx +++ b/client/src/components/AreaDetail/AreaDetail.tsx @@ -1163,6 +1163,9 @@ const AreaDetail = ({properties}: IAreaDetailProps) => { null } percentTractTribal={percentTractTribal} + isGrandfathered={ + properties[constants.IS_GRANDFATHERED] + } /> { - {/* Only show the DonutCopy if Adjacency index is true and the total number of disadv ind == 0 */} + {/* Only show the DonutCopy if Adjacency index is true, the total number of disadv ind == 0, + and not grandfathered. */} {properties[constants.ADJACENCY_EXCEEDS_THRESH] && - properties[constants.TOTAL_NUMBER_OF_DISADVANTAGE_INDICATORS] === 0 && ( + properties[constants.TOTAL_NUMBER_OF_DISADVANTAGE_INDICATORS] === 0 && + !properties[constants.IS_GRANDFATHERED] && ( , + PRIO_GRANDFATHERED_LI: , }; export const getPrioNBurdenCopy = (burdens:string) => { diff --git a/client/src/intl/en.json b/client/src/intl/en.json index faec8fff..7a0b7a38 100644 --- a/client/src/intl/en.json +++ b/client/src/intl/en.json @@ -1427,6 +1427,10 @@ "defaultMessage": "The {numPoints} that are Federally Recognized Tribes in this tract are are {also} considered disadvantaged.", "description": "Navigate to the explore the map page. Click on tract, The {numPoints} that are Federally Recognized Tribes in this tract ares are {also} considered disadvantaged." }, + "explore.map.page.side.panel.prio.copy.prio.grandfathered": { + "defaultMessage": "This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool.", + "description": "Navigate to the explore the map page. Click on tract, The side panel will show This tract is considered disadvantaged. This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool." 
+ }, "explore.map.page.side.panel.prio.copy.prio.island.li": { "defaultMessage": "This tract is considered disadvantaged because it meets the low income threshold AND is located in a U.S. Territory.", "description": "Navigate to the explore the map page. Click on tract, The side panel will show This tract is considered disadvantaged. It is an island territory that meets an adjusted low income threshold." diff --git a/client/src/intl/es.json b/client/src/intl/es.json index 7bd2b14d..06ccbe5b 100644 --- a/client/src/intl/es.json +++ b/client/src/intl/es.json @@ -356,6 +356,7 @@ "explore.map.page.side.panel.prio.copy.prio.akus": "Los {numAKpoints} pueblos nativos de Alaska y las {numUSpoints} tribus de esta zona que están reconocidas a nivel federal también se consideran desfavorecidos.", "explore.map.page.side.panel.prio.copy.prio.anv": "Los {numAKpoints} pueblos nativos de Alaska y las tribus de esta zona que están reconocidas a nivel federal {also} se consideran desfavorecidos.", "explore.map.page.side.panel.prio.copy.prio.donut": "Este distrito censal se considera desfavorecido. Está rodeado de distritos censales desfavorecidos Y cumple con el umbral ajustado de bajos ingresos. 
El ajuste no corresponde a ninguna de las categorías.", + "explore.map.page.side.panel.prio.copy.prio.grandfathered": "Este distrito censal se considera desfavorecido porque fue identificado como desfavorecido en la versión 1.0 de esta herramienta.", "explore.map.page.side.panel.prio.copy.prio.frt": "Las tierras de las tribus reconocidas a nivel federal que cubren {amount} de esta extensión se consideran {also} desfavorecidas.", "explore.map.page.side.panel.prio.copy.prio.frt.n.points": "Los {numPoints} que son tribus reconocidas a nivel federal en este distrito censal se consideran {also} desfavorecidos.", "explore.map.page.side.panel.prio.copy.prio.n.burden": "Este distrito censal se considera desfavorecido porque cumple con el umbral de carga Y con el umbral socioeconómico asociado.", diff --git a/data/data-pipeline/data_pipeline/etl/score/constants.py b/data/data-pipeline/data_pipeline/etl/score/constants.py index 964a76f3..acbee81c 100644 --- a/data/data-pipeline/data_pipeline/etl/score/constants.py +++ b/data/data-pipeline/data_pipeline/etl/score/constants.py @@ -282,6 +282,7 @@ TILES_SCORE_COLUMNS = { # The NEW final score value INCLUDES the adjacency index. field_names.FINAL_SCORE_N_BOOLEAN: "SN_C", field_names.FINAL_SCORE_N_BOOLEAN_V1_0: "SN_C_V10", + field_names.GRANDFATHERED_N_COMMUNITIES_V1_0: "SN_GRAND", field_names.IS_TRIBAL_DAC: "SN_T", field_names.DIABETES_LOW_INCOME_FIELD: "DLI", field_names.ASTHMA_LOW_INCOME_FIELD: "ALI", diff --git a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/README.md b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/README.md index 01ae8488..4c2693ee 100644 --- a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/README.md +++ b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/README.md @@ -1,23 +1,25 @@ -These files are used as inputs to unit tests. Some notes in their creation is below. 
+# How to generate the sample data in this folder + +The sample data in this folder can be easily generated by debugging the `data_pipeline/etl/score/etl_score_post.py` file +and exporting data using the debugger console. Examples of this exporting are below. + +## Why in pickle format? + +Exporting as a Pickle file keeps all the metadata about the columns including the data types. If we were to export as CSV then we would need +to code the data types in the test fixtures for all the columns for the comparison to be correct. + +## Exporting the test data + +First, verify the code works as expected before exporting the data. You will not be able to inspect the data exports as they are in binary. +You will be using the debugger to export the data. Note that it is best to export a small subset of the data for faster test execution. + +### create_tile_data test +1. Place a breakpoint in `data_pipeline/etl/score/etl_score_post.py` in the `transform` method right after the call to +`_create_tile_data` and start the debugger running the Generate Post Score command (`generate-score-post`). +1. Partially export the `output_score_county_state_merged_df` and `self.output_score_tiles_df` data to a pickle file once the debugger pauses +at the breakpoint. Use these sample commands in the debugger console. Note that we are using head and tail to have territories in the sample data. -### create_tile_data_expected.pkl -1. Set a breakpoint in the `test_create_tile_data` method in `data_pipeline/etl/score/tests/test_score_post.py` -after the call to `_create_tile_data` and debug the test. -2. Extract a subset of the `output_tiles_df_actual` dataframe. Do not extract the whole score as the file -will be too big and the test will run slow. Also, you need to extract the same tracts that are in -the `create_tile_score_data_input.pkl` input data. For example, use the following command once the breakpoint is reached -to extract a few rows at the top and bottom of the score. 
This will some capture states and territories. ```python -import pandas as pd -pd.concat([output_tiles_df_actual.head(3), output_tiles_df_actual.tail(3)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl') +pd.concat([output_score_county_state_merged_df.head(3), output_score_county_state_merged_df.tail(4)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl') +pd.concat([self.output_score_tiles_df.head(3), self.output_score_tiles_df.tail(4)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl') ``` - -### create_tile_score_data_input.pkl -1. Set a breakpoint in the transform method in `data_pipeline/etl/score/etl_score_post.py` before the call to -`_create_tile_data` and run the post scoring. -2. Extract a subset of the `output_score_county_state_merged_df` dataframe. Do not extract the whole score as the file -will be too big and the test will run slow. For example, use the following command once the breakpoint is reached -to extract a few rows at the top and bottom of the score. This will some capture states and territories. 
-```python -pd.concat([output_score_county_state_merged_df.head(3), output_score_county_state_merged_df.tail(3)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl') -``` \ No newline at end of file diff --git a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl index 3257e33c..387230d7 100644 Binary files a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl and b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl differ diff --git a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl index 8d21a5b9..3458d992 100644 Binary files a/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl and b/data/data-pipeline/data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl differ