Added tract grandfathering language to UI

Carlos Felix 2024-12-05 16:31:53 -05:00 committed by Carlos Felix
parent d22c348504
commit a58edbc724
10 changed files with 49 additions and 23 deletions

View file

@@ -1163,6 +1163,9 @@ const AreaDetail = ({properties}: IAreaDetailProps) => {
null
}
percentTractTribal={percentTractTribal}
isGrandfathered={
properties[constants.IS_GRANDFATHERED]
}
/>
<PrioritizationCopy2
totalCategoriesPrioritized={
@@ -1189,9 +1192,11 @@ const AreaDetail = ({properties}: IAreaDetailProps) => {
</div>
</div>
{/* Only show the DonutCopy if Adjacency index is true and the total number of disadv ind == 0 */}
{/* Only show the DonutCopy if Adjacency index is true, the total number of disadv ind == 0,
and not grandfathered. */}
{properties[constants.ADJACENCY_EXCEEDS_THRESH] &&
properties[constants.TOTAL_NUMBER_OF_DISADVANTAGE_INDICATORS] === 0 && (
properties[constants.TOTAL_NUMBER_OF_DISADVANTAGE_INDICATORS] === 0 &&
!properties[constants.IS_GRANDFATHERED] && (
<DonutCopy
isAdjacent={properties[constants.ADJACENCY_EXCEEDS_THRESH]}
povertyBelow200Percentile={

View file

@@ -10,6 +10,7 @@ interface IPrioritizationCopy {
isAdjacencyThreshMet: boolean,
isAdjacencyLowIncome: boolean,
isIslandLowIncome: boolean,
isGrandfathered: boolean,
tribalCountAK: number | null,
tribalCountUS: null, // when this signal is supported add number type
percentTractTribal: number | null
@@ -38,6 +39,7 @@ const PrioritizationCopy =
totalBurdensPrioritized,
isAdjacencyThreshMet,
isAdjacencyLowIncome,
isGrandfathered,
isIslandLowIncome,
tribalCountAK,
tribalCountUS,
@@ -57,8 +59,10 @@ const PrioritizationCopy =
} else if (isAdjacencyThreshMet && !isAdjacencyLowIncome) {
// if 1-2-1
if ( tribalCountAK === null && tribalCountUS === null) {
if (isGrandfathered) {
prioCopyRendered = EXPLORE_COPY.PRIORITIZATION_COPY.PRIO_GRANDFATHERED_LI;
// if 1-2-1-1
if (percentTractTribal === null) {
} else if (percentTractTribal === null) {
prioCopyRendered = EXPLORE_COPY.PRIORITIZATION_COPY.NOT_PRIO_SURR_LI;
// if 1-2-1-2
} else if (percentTractTribal === 0) {

View file

@@ -71,6 +71,7 @@ export const TOTAL_NUMBER_OF_INDICATORS = "THRHLD";
export const COUNT_OF_CATEGORIES_DISADV = "CC";
export const SCORE_N_COMMUNITIES = "SN_C";
export const SCORE_N_TRIBAL = "SN_T";
export const IS_GRANDFATHERED = "SN_GRAND";
export const SIDE_PANEL_STATE = "UI_EXP";
export const SIDE_PANEL_STATE_VALUES = {

View file

@@ -660,6 +660,14 @@ export const PRIORITIZATION_COPY = {
bold: boldFn,
}}
/>,
PRIO_GRANDFATHERED_LI: <FormattedMessage
id={'explore.map.page.side.panel.prio.copy.prio.grandfathered'}
defaultMessage={'This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool.'}
description={`Navigate to the explore the map page. Click on tract, The side panel will show This tract is considered disadvantaged. This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool.`}
values={{
bold: boldFn,
}}
/>,
};
export const getPrioNBurdenCopy = (burdens:string) => {

View file

@@ -1427,6 +1427,10 @@
"defaultMessage": "The {numPoints} that are Federally Recognized Tribes in this tract are are {also} considered disadvantaged.",
"description": "Navigate to the explore the map page. Click on tract, The {numPoints} that are Federally Recognized Tribes in this tract ares are {also} considered disadvantaged."
},
"explore.map.page.side.panel.prio.copy.prio.grandfathered": {
"defaultMessage": "This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool.",
"description": "Navigate to the explore the map page. Click on tract, The side panel will show This tract is considered disadvantaged. This tract is considered disadvantaged because it was identified as disadvantaged in version 1.0 of the tool."
},
"explore.map.page.side.panel.prio.copy.prio.island.li": {
"defaultMessage": "This tract is considered disadvantaged because it meets the low income threshold <bold>AND</bold> is located in a U.S. Territory.",
"description": "Navigate to the explore the map page. Click on tract, The side panel will show This tract is considered disadvantaged. It is an island territory that meets an adjusted low income threshold."

View file

@@ -356,6 +356,7 @@
"explore.map.page.side.panel.prio.copy.prio.akus": "Los {numAKpoints} pueblos nativos de Alaska y las {numUSpoints} tribus de esta zona que están reconocidas a nivel federal también se consideran desfavorecidos.",
"explore.map.page.side.panel.prio.copy.prio.anv": "Los {numAKpoints} pueblos nativos de Alaska y las tribus de esta zona que están reconocidas a nivel federal {also} se consideran desfavorecidos.",
"explore.map.page.side.panel.prio.copy.prio.donut": "Este distrito censal se considera desfavorecido. Está rodeado de distritos censales desfavorecidos <bold>Y</bold> cumple con el umbral ajustado de bajos ingresos. El ajuste no corresponde a ninguna de las categorías.",
"explore.map.page.side.panel.prio.copy.prio.grandfathered": "Este distrito censal se considera desfavorecido porque fue identificado como desfavorecido en la versión 1.0 de esta herramienta.",
"explore.map.page.side.panel.prio.copy.prio.frt": "Las tierras de las tribus reconocidas a nivel federal que cubren {amount} de esta extensión se consideran {also} desfavorecidas.",
"explore.map.page.side.panel.prio.copy.prio.frt.n.points": "Los {numPoints} que son tribus reconocidas a nivel federal en este distrito censal se consideran {also} desfavorecidos.",
"explore.map.page.side.panel.prio.copy.prio.n.burden": "Este distrito censal se considera desfavorecido porque cumple con el umbral de carga <bold>Y</bold> con el umbral socioeconómico asociado.",

View file

@@ -282,6 +282,7 @@ TILES_SCORE_COLUMNS = {
# The NEW final score value INCLUDES the adjacency index.
field_names.FINAL_SCORE_N_BOOLEAN: "SN_C",
field_names.FINAL_SCORE_N_BOOLEAN_V1_0: "SN_C_V10",
field_names.GRANDFATHERED_N_COMMUNITIES_V1_0: "SN_GRAND",
field_names.IS_TRIBAL_DAC: "SN_T",
field_names.DIABETES_LOW_INCOME_FIELD: "DLI",
field_names.ASTHMA_LOW_INCOME_FIELD: "ALI",

View file

@@ -1,23 +1,25 @@
These files are used as inputs to unit tests. Some notes on their creation are below.
# How to generate the sample data in this folder
The sample data in this folder can be easily generated by debugging the `data_pipeline/etl/score/etl_score_post.py` file
and exporting data using the debugger console. Examples of such exports are shown below.
## Why in pickle format?
Exporting as a pickle file keeps all the metadata about the columns, including the data types. If we were to export as CSV, we would need
to encode the data types in the test fixtures for all the columns for the comparison to be correct.
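As a quick illustration of that difference, here is a minimal, self-contained sketch; the dataframe, its columns, and the file paths are made up for the example rather than taken from the real score data:
```python
import pandas as pd

# A tiny stand-in dataframe with mixed dtypes, similar in spirit to a score subset.
df = pd.DataFrame(
    {"GEOID10": ["01001020100"], "SN_C": [True], "SN_GRAND": [False], "TPF": [3103.0]}
)

df.to_pickle("/tmp/sample.pkl")
df.to_csv("/tmp/sample.csv", index=False)

# The pickle round trip preserves every dtype (object, bool, bool, float64) ...
print(pd.read_pickle("/tmp/sample.pkl").dtypes)

# ... while the CSV round trip re-infers them: GEOID10 comes back as int64 and
# loses its leading zero, which is exactly the bookkeeping the pickle avoids.
print(pd.read_csv("/tmp/sample.csv").dtypes)
```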
## Exporting the test data
First, verify the code works as expected before exporting the data. You will not be able to inspect the data exports as they are in binary.
You will be using the debugger to export the data. Note that it is best to export a small subset of the data for faster test execution.
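Although the exports are binary, you can still spot-check one after the fact by reading it back in a Python shell. A minimal sketch, assuming it is run from the same working directory used in the export commands below so the relative path resolves:
```python
import pandas as pd

# Read an exported snapshot back for a quick sanity check.
snapshot = pd.read_pickle(
    "data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl"
)
print(snapshot.shape)   # expect only a handful of rows (head + tail subset)
print(snapshot.dtypes)  # column dtypes survive the pickle round trip
print(snapshot.head())
```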
### create_tile_data test
1. Place a breakpoint in `data_pipeline/etl/score/etl_score_post.py` in the `transform` method right after the call to
`_create_tile_data` and start the debugger by running the Generate Post Score command (`generate-score-post`).
2. Partially export the `output_score_county_state_merged_df` and `self.output_score_tiles_df` data to pickle files once the debugger pauses
at the breakpoint. Use these sample commands in the debugger console. Note that we are using head and tail to include territories in the sample data.
```python
import pandas as pd
pd.concat([output_score_county_state_merged_df.head(3), output_score_county_state_merged_df.tail(4)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl')
pd.concat([self.output_score_tiles_df.head(3), self.output_score_tiles_df.tail(4)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl')
```
### create_tile_data_expected.pkl
1. Set a breakpoint in the `test_create_tile_data` method in `data_pipeline/etl/score/tests/test_score_post.py`
after the call to `_create_tile_data` and debug the test.
2. Extract a subset of the `output_tiles_df_actual` dataframe. Do not extract the whole score, as the file
will be too big and the test will run slowly. Also, you need to extract the same tracts that are in
the `create_tile_score_data_input.pkl` input data. For example, use the following command once the breakpoint is reached
to extract a few rows at the top and bottom of the score. This will capture some states and territories.
```python
pd.concat([output_tiles_df_actual.head(3), output_tiles_df_actual.tail(3)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_data_expected.pkl')
```
### create_tile_score_data_input.pkl
1. Set a breakpoint in the transform method in `data_pipeline/etl/score/etl_score_post.py` before the call to
`_create_tile_data` and run the post scoring.
2. Extract a subset of the `output_score_county_state_merged_df` dataframe. Do not extract the whole score, as the file
will be too big and the test will run slowly. For example, use the following command once the breakpoint is reached
to extract a few rows at the top and bottom of the score. This will capture some states and territories.
```python
pd.concat([output_score_county_state_merged_df.head(3), output_score_county_state_merged_df.tail(3)], ignore_index=True).to_pickle('data_pipeline/etl/score/tests/snapshots/create_tile_score_data_input.pkl')
```