Mirror of https://github.com/DOI-DO/j40-cejst-2.git, synced 2025-07-30 12:01:16 -07:00

fixing merge conflicts

This commit is contained in:
parent 3b150b5761
commit 07c4c030d3

266 changed files with 1868 additions and 1811 deletions
@@ -5,18 +5,15 @@ import typing
 from typing import Optional
 
 import pandas as pd
 
 from data_pipeline.config import settings
 from data_pipeline.etl.score.etl_utils import (
     compare_to_list_of_expected_state_fips_codes,
 )
 from data_pipeline.etl.score.schemas.datasets import DatasetsConfig
-from data_pipeline.utils import (
-    load_yaml_dict_from_file,
-    unzip_file_from_url,
-    remove_all_from_dir,
-    get_module_logger,
-)
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import load_yaml_dict_from_file
+from data_pipeline.utils import remove_all_from_dir
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
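Almost every Python hunk in this commit applies the same normalization: grouped or comma-combined imports become one `from module import name` statement per name, sorted alphabetically. A before/after sketch using the names from the hunk above (the style matches a one-import-per-line formatter such as reorder-python-imports; the specific tool is an inference, not named by the diff):

# Before: several names in one grouped statement; any edit touches the
# shared statement and invites merge conflicts like the ones fixed here.
from data_pipeline.utils import (
    load_yaml_dict_from_file,
    unzip_file_from_url,
    remove_all_from_dir,
    get_module_logger,
)

# After: one name per line, sorted alphabetically; concurrent branches
# usually edit different lines and merge cleanly.
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import load_yaml_dict_from_file
from data_pipeline.utils import remove_all_from_dir
from data_pipeline.utils import unzip_file_from_url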
@@ -1,5 +1,5 @@
-import importlib
 import concurrent.futures
+import importlib
 import typing
 
 from data_pipeline.etl.score.etl_score import ScoreETL
@@ -81,7 +81,7 @@ datasets:
     load_fields:
       - short_name: "he_heat"
        df_field_name: "EXTREME_HEAT_FIELD"
-        long_name: "Summer days above 90F"
+        long_name: "Summer days above 90F"
        field_type: float
        include_in_downloadable_files: true
        include_in_tiles: true
@@ -92,7 +92,7 @@ datasets:
        include_in_downloadable_files: true
        include_in_tiles: true
      - short_name: "he_green"
-        long_name: "Percent impenetrable surface areas"
+        long_name: "Percent impenetrable surface areas"
        df_field_name: "IMPENETRABLE_SURFACES_FIELD"
        field_type: float
        include_in_downloadable_files: true
@@ -110,7 +110,7 @@ datasets:
     load_fields:
       - short_name: "EBP_PFS"
        df_field_name: "REVISED_ENERGY_BURDEN_FIELD_NAME"
-        long_name: "Energy burden"
+        long_name: "Energy burden"
        field_type: float
        include_in_downloadable_files: true
        include_in_tiles: true
@@ -121,7 +121,7 @@ datasets:
      - short_name: "fuds_count"
        df_field_name: "ELIGIBLE_FUDS_COUNT_FIELD_NAME"
        long_name: "Count of eligible Formerly Used Defense Site (FUDS) properties centroids"
-        description_short:
+        description_short:
          "The number of FUDS marked as Eligible and Has Project in the tract."
        field_type: int64
        include_in_tiles: false
@@ -129,7 +129,7 @@ datasets:
      - short_name: "not_fuds_ct"
        df_field_name: "INELIGIBLE_FUDS_COUNT_FIELD_NAME"
        long_name: "Count of ineligible Formerly Used Defense Site (FUDS) properties centroids"
-        description_short:
+        description_short:
          "The number of FUDS marked as Ineligible or Project in the tract."
        field_type: int64
        include_in_tiles: false
@@ -137,7 +137,7 @@ datasets:
      - short_name: "has_fuds"
        df_field_name: "ELIGIBLE_FUDS_BINARY_FIELD_NAME"
        long_name: "Is there at least one Formerly Used Defense Site (FUDS) in the tract?"
-        description_short:
+        description_short:
          "Whether the tract has a FUDS"
        field_type: bool
        include_in_tiles: false
@@ -149,7 +149,7 @@ datasets:
      - short_name: "has_aml"
        df_field_name: "AML_BOOLEAN"
        long_name: "Is there at least one abandoned mine in this census tract?"
-        description_short:
+        description_short:
          "Whether the tract has an abandoned mine"
        field_type: bool
        include_in_tiles: true
@@ -161,7 +161,7 @@ datasets:
     load_fields:
       - short_name: "EXAMPLE_FIELD"
        df_field_name: "Input Field 1"
-        long_name: "Example Field 1"
+        long_name: "Example Field 1"
        field_type: float
        include_in_tiles: true
        include_in_downloadable_files: true
@@ -172,35 +172,35 @@ datasets:
     load_fields:
       - short_name: "flood_eligible_properties"
        df_field_name: "COUNT_PROPERTIES"
-        long_name: "Count of properties eligible for flood risk calculation within tract (floor of 250)"
+        long_name: "Count of properties eligible for flood risk calculation within tract (floor of 250)"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "flood_risk_properties_today"
        df_field_name: "PROPERTIES_AT_RISK_FROM_FLOODING_TODAY"
-        long_name: "Count of properties at risk of flood today"
+        long_name: "Count of properties at risk of flood today"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "flood_risk_properties_30yrs"
        df_field_name: "PROPERTIES_AT_RISK_FROM_FLOODING_IN_30_YEARS"
-        long_name: "Count of properties at risk of flood in 30 years"
+        long_name: "Count of properties at risk of flood in 30 years"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "flood_risk_share_today"
        df_field_name: "SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_TODAY"
-        long_name: "Share of properties at risk of flood today"
+        long_name: "Share of properties at risk of flood today"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: true
      - short_name: "flood_risk_share_30yrs"
        df_field_name: "SHARE_OF_PROPERTIES_AT_RISK_FROM_FLOODING_IN_30_YEARS"
-        long_name: "Share of properties at risk of flood in 30 years"
+        long_name: "Share of properties at risk of flood in 30 years"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
@@ -212,35 +212,35 @@ datasets:
     load_fields:
       - short_name: "fire_eligible_properties"
        df_field_name: "COUNT_PROPERTIES"
-        long_name: "Count of properties eligible for wildfire risk calculation within tract (floor of 250)"
+        long_name: "Count of properties eligible for wildfire risk calculation within tract (floor of 250)"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "fire_risk_properties_today"
        df_field_name: "PROPERTIES_AT_RISK_FROM_FIRE_TODAY"
-        long_name: "Count of properties at risk of wildfire today"
+        long_name: "Count of properties at risk of wildfire today"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "fire_risk_properties_30yrs"
        df_field_name: "PROPERTIES_AT_RISK_FROM_FIRE_IN_30_YEARS"
-        long_name: "Count of properties at risk of wildfire in 30 years"
+        long_name: "Count of properties at risk of wildfire in 30 years"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "fire_risk_share_today"
        df_field_name: "SHARE_OF_PROPERTIES_AT_RISK_FROM_FIRE_TODAY"
-        long_name: "Share of properties at risk of fire today"
+        long_name: "Share of properties at risk of fire today"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: true
      - short_name: "fire_risk_share_30yrs"
        df_field_name: "SHARE_OF_PROPERTIES_AT_RISK_FROM_FIRE_IN_30_YEARS"
-        long_name: "Share of properties at risk of fire in 30 years"
+        long_name: "Share of properties at risk of fire in 30 years"
        field_type: float
        include_in_tiles: false
        include_in_downloadable_files: true
@@ -252,7 +252,7 @@ datasets:
     load_fields:
       - short_name: "travel_burden"
        df_field_name: "TRAVEL_BURDEN_FIELD_NAME"
-        long_name: "DOT Travel Barriers Score"
+        long_name: "DOT Travel Barriers Score"
        field_type: float
        include_in_tiles: true
        include_in_downloadable_files: true
@@ -264,28 +264,28 @@ datasets:
     load_fields:
       - short_name: "ncld_eligible"
        df_field_name: "ELIGIBLE_FOR_NATURE_DEPRIVED_FIELD_NAME"
-        long_name: "Does the tract have at least 35 acres in it?"
+        long_name: "Does the tract have at least 35 acres in it?"
        field_type: bool
        include_in_tiles: true
        include_in_downloadable_files: true
        create_percentile: false
      - short_name: "percent_impervious"
        df_field_name: "TRACT_PERCENT_IMPERVIOUS_FIELD_NAME"
-        long_name: "Share of the tract's land area that is covered by impervious surface as a percent"
+        long_name: "Share of the tract's land area that is covered by impervious surface as a percent"
        field_type: percentage
        include_in_tiles: true
        include_in_downloadable_files: true
        create_percentile: true
      - short_name: "percent_nonnatural"
        df_field_name: "TRACT_PERCENT_NON_NATURAL_FIELD_NAME"
-        long_name: "Share of the tract's land area that is covered by impervious surface or cropland as a percent"
+        long_name: "Share of the tract's land area that is covered by impervious surface or cropland as a percent"
        field_type: percentage
        include_in_tiles: true
        include_in_downloadable_files: true
        create_percentile: true
      - short_name: "percent_cropland"
        df_field_name: "TRACT_PERCENT_CROPLAND_FIELD_NAME"
-        long_name: "Share of the tract's land area that is covered by cropland as a percent"
+        long_name: "Share of the tract's land area that is covered by cropland as a percent"
        field_type: percentage
        include_in_tiles: true
        include_in_downloadable_files: true
@@ -328,4 +328,4 @@ datasets:
        include_in_tiles: false
        include_in_downloadable_files: true
        create_percentile: false
-        create_reverse_percentile: true
+        create_reverse_percentile: true
@@ -1,8 +1,7 @@
-from pathlib import Path
 import datetime
+from pathlib import Path
 
 from data_pipeline.config import settings
-
 from data_pipeline.score import field_names
 
 ## note: to keep map porting "right" fields, keeping descriptors the same.
@@ -1,31 +1,28 @@
 import functools
-from typing import List
-
 from dataclasses import dataclass
+from typing import List
 
 import numpy as np
 import pandas as pd
 
 from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.score import constants
 from data_pipeline.etl.sources.census_acs.etl import CensusACSETL
-from data_pipeline.etl.sources.national_risk_index.etl import (
-    NationalRiskIndexETL,
-)
 from data_pipeline.etl.sources.dot_travel_composite.etl import (
     TravelCompositeETL,
 )
+from data_pipeline.etl.sources.eamlis.etl import AbandonedMineETL
 from data_pipeline.etl.sources.fsf_flood_risk.etl import (
    FloodRiskETL,
 )
-from data_pipeline.etl.sources.eamlis.etl import AbandonedMineETL
+from data_pipeline.etl.sources.fsf_wildfire_risk.etl import WildfireRiskETL
+from data_pipeline.etl.sources.national_risk_index.etl import (
+    NationalRiskIndexETL,
+)
+from data_pipeline.etl.sources.nlcd_nature_deprived.etl import NatureDeprivedETL
 from data_pipeline.etl.sources.tribal_overlap.etl import TribalOverlapETL
 from data_pipeline.etl.sources.us_army_fuds.etl import USArmyFUDS
-from data_pipeline.etl.sources.nlcd_nature_deprived.etl import NatureDeprivedETL
-from data_pipeline.etl.sources.fsf_wildfire_risk.etl import WildfireRiskETL
-from data_pipeline.score.score_runner import ScoreRunner
 from data_pipeline.score import field_names
-from data_pipeline.etl.score import constants
-
+from data_pipeline.score.score_runner import ScoreRunner
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,24 +1,22 @@
 import concurrent.futures
 import math
 import os
 
+import geopandas as gpd
 import numpy as np
 import pandas as pd
-import geopandas as gpd
 
+from data_pipeline.content.schemas.download_schemas import CSVConfig
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.etl.score import constants
+from data_pipeline.etl.score.etl_utils import check_score_data_source
 from data_pipeline.etl.sources.census.etl_utils import (
     check_census_data_source,
 )
-from data_pipeline.etl.score.etl_utils import check_score_data_source
 from data_pipeline.score import field_names
-from data_pipeline.content.schemas.download_schemas import CSVConfig
-from data_pipeline.utils import (
-    get_module_logger,
-    zip_files,
-    load_yaml_dict_from_file,
-    load_dict_from_yaml_object_fields,
-)
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import load_dict_from_yaml_object_fields
+from data_pipeline.utils import load_yaml_dict_from_file
+from data_pipeline.utils import zip_files
 
 logger = get_module_logger(__name__)
@@ -1,29 +1,25 @@
-from pathlib import Path
 import json
-from numpy import float64
+from pathlib import Path
 
 import numpy as np
 import pandas as pd
-from data_pipeline.content.schemas.download_schemas import (
-    CSVConfig,
-    CodebookConfig,
-    ExcelConfig,
-)
-
+from data_pipeline.content.schemas.download_schemas import CodebookConfig
+from data_pipeline.content.schemas.download_schemas import CSVConfig
+from data_pipeline.content.schemas.download_schemas import ExcelConfig
 from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.etl.score.etl_utils import floor_series, create_codebook
-from data_pipeline.utils import (
-    get_module_logger,
-    zip_files,
-    load_yaml_dict_from_file,
-    column_list_from_yaml_object_fields,
-    load_dict_from_yaml_object_fields,
-)
-from data_pipeline.score import field_names
-
-
+from data_pipeline.etl.score.etl_utils import create_codebook
+from data_pipeline.etl.score.etl_utils import floor_series
 from data_pipeline.etl.sources.census.etl_utils import (
     check_census_data_source,
 )
+from data_pipeline.score import field_names
+from data_pipeline.utils import column_list_from_yaml_object_fields
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import load_dict_from_yaml_object_fields
+from data_pipeline.utils import load_yaml_dict_from_file
+from data_pipeline.utils import zip_files
+from numpy import float64
 
 from . import constants
 
 logger = get_module_logger(__name__)
@@ -1,24 +1,21 @@
 import os
 import sys
 import typing
-from pathlib import Path
 from collections import namedtuple
+from pathlib import Path
 
 import numpy as np
 import pandas as pd
 
 from data_pipeline.config import settings
-from data_pipeline.etl.score.constants import (
-    TILES_ISLAND_AREA_FIPS_CODES,
-    TILES_PUERTO_RICO_FIPS_CODE,
-    TILES_CONTINENTAL_US_FIPS_CODE,
-    TILES_ALASKA_AND_HAWAII_FIPS_CODE,
-)
+from data_pipeline.etl.score.constants import TILES_ALASKA_AND_HAWAII_FIPS_CODE
+from data_pipeline.etl.score.constants import TILES_CONTINENTAL_US_FIPS_CODE
+from data_pipeline.etl.score.constants import TILES_ISLAND_AREA_FIPS_CODES
+from data_pipeline.etl.score.constants import TILES_PUERTO_RICO_FIPS_CODE
 from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
-from data_pipeline.utils import (
-    download_file_from_url,
-    get_module_logger,
-)
+from data_pipeline.score import field_names
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
 
 from . import constants
 
 logger = get_module_logger(__name__)
@@ -99,7 +96,7 @@ def floor_series(series: pd.Series, number_of_decimals: int) -> pd.Series:
     if series.isin(unacceptable_values).any():
         series.replace(mapping, regex=False, inplace=True)
 
-    multiplication_factor = 10 ** number_of_decimals
+    multiplication_factor = 10**number_of_decimals
 
     # In order to safely cast NaNs
     # First coerce series to float type: series.astype(float)
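For context, `floor_series` floors values to a fixed number of decimals via that multiplication factor; a minimal sketch of the idea (the helper name and the exact NaN handling are illustrative, not the repo's implementation):

import numpy as np
import pandas as pd


def floor_to_decimals(series: pd.Series, number_of_decimals: int) -> pd.Series:
    # floor(x * 10**n) / 10**n keeps exactly n decimal places.
    multiplication_factor = 10**number_of_decimals
    # Coerce to float first so NaN values pass through np.floor safely.
    return np.floor(series.astype(float) * multiplication_factor) / multiplication_factor


# floor_to_decimals(pd.Series([0.129, np.nan]), 2) -> [0.12, NaN]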
@@ -1,6 +1,8 @@
-from dataclasses import dataclass, field
+from dataclasses import dataclass
+from dataclasses import field
 from enum import Enum
-from typing import List, Optional
+from typing import List
+from typing import Optional
 
 
 class FieldType(Enum):
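This schema module is what the `datasets:` YAML edited above deserializes into; a minimal sketch of how such a schema could look as dataclasses (the class and attribute layout is an assumption drawn from the YAML keys in this diff, not the repo's exact definitions):

from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List
from typing import Optional


class FieldType(Enum):
    BOOL = "bool"
    FLOAT = "float"
    INT64 = "int64"
    PERCENTAGE = "percentage"


@dataclass
class LoadField:
    # Attribute names mirror the YAML keys above, e.g. short_name: "he_heat".
    short_name: str
    df_field_name: str
    long_name: str
    field_type: FieldType
    description_short: Optional[str] = None
    include_in_tiles: bool = False
    include_in_downloadable_files: bool = False
    create_percentile: bool = False


@dataclass
class Dataset:
    load_fields: List[LoadField] = field(default_factory=list)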
@@ -5,7 +5,8 @@ from pathlib import Path
 import pandas as pd
 import pytest
 from data_pipeline import config
-from data_pipeline.etl.score import etl_score_post, tests
+from data_pipeline.etl.score import etl_score_post
+from data_pipeline.etl.score import tests
 from data_pipeline.etl.score.etl_score_post import PostScoreETL
 
 
@@ -1,4 +1,4 @@
 fips,state_name,state_abbreviation,region,division
 01,Alabama,AL,South,East South Central
 02,Alaska,AK,West,Pacific
-04,Arizona,AZ,West,Mountain
+04,Arizona,AZ,West,Mountain
@@ -1,11 +1,10 @@
-import pandas as pd
 import numpy as np
+import pandas as pd
 import pytest
 
 from data_pipeline.etl.score.etl_utils import (
-    floor_series,
     compare_to_list_of_expected_state_fips_codes,
 )
+from data_pipeline.etl.score.etl_utils import floor_series
-
 
 def test_floor_series():
@@ -1,14 +1,13 @@
 # pylint: disable=W0212
 ## Above disables warning about access to underscore-prefixed methods
 
 from importlib import reload
 from pathlib import Path
 
 import pandas.api.types as ptypes
 import pandas.testing as pdt
 from data_pipeline.content.schemas.download_schemas import (
     CSVConfig,
 )
-
 from data_pipeline.etl.score import constants
 from data_pipeline.utils import load_yaml_dict_from_file
 
 
@@ -1,8 +1,7 @@
 import pandas as pd
 
+from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.utils import get_module_logger
-from data_pipeline.config import settings
 
 logger = get_module_logger(__name__)
@@ -1,13 +1,15 @@
 import pathlib
 from pathlib import Path
-import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+import pandas as pd
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.etl.score.etl_utils import (
     compare_to_list_of_expected_state_fips_codes,
 )
 from data_pipeline.score import field_names
-from data_pipeline.utils import get_module_logger, download_file_from_url
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,9 +1,11 @@
 import typing
-import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.utils import get_module_logger, download_file_from_url
+import pandas as pd
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.score import field_names
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -53,7 +53,7 @@ For SVI 2018, the authors also included two adjunct variables, 1) 2014-2018 ACS
 
 **Important Notes**
 
-1. Tracts with zero estimates for the total population (N = 645 for the U.S.) were removed during the ranking process. These tracts were added back to the SVI databases after ranking.
+1. Tracts with zero estimates for the total population (N = 645 for the U.S.) were removed during the ranking process. These tracts were added back to the SVI databases after ranking.
 
 2. The TOTPOP field value is 0, but the percentile ranking fields (RPL_THEME1, RPL_THEME2, RPL_THEME3, RPL_THEME4, and RPL_THEMES) were set to -999.
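For those tracts, -999 in the RPL_* fields is a sentinel, not a real percentile, so a consumer would typically convert it to a missing value before ranking or averaging. A minimal sketch of that cleanup (the helper name and the replace step are illustrative assumptions about downstream handling, not part of this diff):

import numpy as np
import pandas as pd

RANKING_FIELDS = ["RPL_THEME1", "RPL_THEME2", "RPL_THEME3", "RPL_THEME4", "RPL_THEMES"]


def clean_svi_sentinels(df: pd.DataFrame) -> pd.DataFrame:
    # -999 marks tracts with zero total population, not a percentile of 0.
    df[RANKING_FIELDS] = df[RANKING_FIELDS].replace(-999, np.nan)
    return df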
@@ -66,4 +66,4 @@ here: https://www.census.gov/programs-surveys/acs/data/variance-tables.html.
 
 For selected ACS 5-year Detailed Tables, “Users can calculate margins of error for aggregated data by using the variance replicates. Unlike available approximation formulas, this method results in an exact margin of error by using the covariance term.”
 
-MOEs are _not_ included nor considered during this data processing nor for the scoring comparison tool.
+MOEs are _not_ included nor considered during this data processing nor for the scoring comparison tool.
@@ -1,9 +1,8 @@
-import pandas as pd
 import numpy as np
-
+import pandas as pd
 from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
 from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -3,12 +3,12 @@ import json
 import subprocess
 from enum import Enum
 from pathlib import Path
 
 import geopandas as gpd
 
 from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
-
 from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -5,13 +5,11 @@ from pathlib import Path
 
 import pandas as pd
 from data_pipeline.config import settings
-from data_pipeline.utils import (
-    get_module_logger,
-    remove_all_dirs_from_dir,
-    remove_files_from_dir,
-    unzip_file_from_url,
-    zip_directory,
-)
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import remove_all_dirs_from_dir
+from data_pipeline.utils import remove_files_from_dir
+from data_pipeline.utils import unzip_file_from_url
+from data_pipeline.utils import zip_directory
 
 logger = get_module_logger(__name__)
@@ -1,19 +1,19 @@
-from collections import namedtuple
 import os
-import pandas as pd
-import geopandas as gpd
+from collections import namedtuple
 
+import geopandas as gpd
+import pandas as pd
 from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.etl.sources.census_acs.etl_utils import (
-    retrieve_census_acs_data,
-)
 from data_pipeline.etl.sources.census_acs.etl_imputations import (
     calculate_income_measures,
 )
-
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
+from data_pipeline.etl.sources.census_acs.etl_utils import (
+    retrieve_census_acs_data,
+)
+from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,7 +1,10 @@
-from typing import Any, List, NamedTuple, Tuple
-import pandas as pd
-import geopandas as gpd
+from typing import Any
+from typing import List
+from typing import NamedTuple
+from typing import Tuple
 
+import geopandas as gpd
+import pandas as pd
 from data_pipeline.score import field_names
 from data_pipeline.utils import get_module_logger
 
@@ -1,10 +1,9 @@
 import os
 from pathlib import Path
 from typing import List
 
 import censusdata
 import pandas as pd
-
 
 from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
 from data_pipeline.utils import get_module_logger
@@ -1,11 +1,10 @@
 import pandas as pd
 
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.etl.sources.census_acs.etl_utils import (
     retrieve_census_acs_data,
 )
-from data_pipeline.utils import get_module_logger
 from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,13 +1,14 @@
 import json
 from pathlib import Path
 
 import numpy as np
 import pandas as pd
 import requests
 
-from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
 from data_pipeline.config import settings
-from data_pipeline.utils import unzip_file_from_url, download_file_from_url
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,14 +1,13 @@
 import json
 from typing import List
-import requests
 
 import numpy as np
 import pandas as pd
-
-from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
-from data_pipeline.score import field_names
+import requests
+from data_pipeline.config import settings
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 pd.options.mode.chained_assignment = "raise"
@@ -1,7 +1,8 @@
 from pathlib import Path
-import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+import pandas as pd
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,8 +1,9 @@
 from pathlib import Path
-import pandas as pd
 
+import pandas as pd
 from data_pipeline.config import settings
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,6 +1,6 @@
 # DOT travel barriers
 
-The below description is taken from DOT directly:
+The below description is taken from DOT directly:
 
 Consistent with OMB’s Interim Guidance for the Justice40 Initiative, DOT’s interim definition of DACs includes (a) certain qualifying census tracts, (b) any Tribal land, or (c) any territory or possession of the United States. DOT has provided a mapping tool to assist applicants in identifying whether a project is located in a Disadvantaged Community, available at Transportation Disadvantaged Census Tracts (arcgis.com). A shapefile of the geospatial data is available Transportation Disadvantaged Census Tracts shapefile (version 2 .0, posted 5/10/22).
@@ -13,4 +13,4 @@ The DOT interim definition for DACs was developed by an internal and external co
 Resilience disadvantage identifies communities vulnerable to hazards caused by climate change. (1)
 - Equity disadvantage identifies communities with a with a high percentile of persons (age 5+) who speak English "less than well." (1)
 
-The CEJST uses only Transportation Access Disadvantage.
+The CEJST uses only Transportation Access Disadvantage.
@@ -1,10 +1,9 @@
 # pylint: disable=unsubscriptable-object
 # pylint: disable=unsupported-assignment-operation
 
-import pandas as pd
 import geopandas as gpd
-
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+import pandas as pd
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,4 +1,4 @@
-The following is the description from eAMLIS as of August 16, 2022.
+The following is the description from eAMLIS as of August 16, 2022.
 ---
 
 e-AMLIS is not a comprehensive database of all AML features or all AML grant activities. e-AMLIS is a national inventory that provides information about known abandoned mine land (AML) features including polluted waters. The majority of the data in e-AMLIS provides information about known coal AML features for the 25 states and 3 tribal SMCRA-approved AML Programs. e-AMLIS also provides limited information on non-coal AML features, and, non-coal reclamation projects as well as AML features for states and tribes that do not have an approved AML Program. Additionally, e-AMLIS only accounts for the direct construction cost to reclaim each AML feature that has been identified by states and Tribes. Other project costs such as planning, design, permitting, and construction oversight are not tracked in e-AMLIS.
@@ -1,9 +1,10 @@
 from pathlib import Path
 
 import geopandas as gpd
 import pandas as pd
 from data_pipeline.config import settings
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
 from data_pipeline.utils import get_module_logger
 
@@ -1,6 +1,6 @@
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.score import field_names
 from data_pipeline.utils import get_module_logger
 
@@ -1,5 +1,4 @@
 import pandas as pd
 
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.utils import get_module_logger
-

@@ -58,7 +57,6 @@ class EJSCREENAreasOfConcernETL(ExtractTransformLoad):
 
         # TO DO: As a one off we did all the processing in a separate Notebook
         # Can add here later for a future PR
-        pass
 
     def load(self) -> None:
         if self.ejscreen_areas_of_concern_data_exists():
@@ -1,10 +1,11 @@
 from pathlib import Path
-import pandas as pd
 
+import pandas as pd
 from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.score import field_names
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,9 +1,10 @@
 from pathlib import Path
-import pandas as pd
 
+import pandas as pd
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.score import field_names
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,3 +1,3 @@
 # FSF flood risk data
 
-Flood risk computed as 1 in 100 year flood zone
+Flood risk computed as 1 in 100 year flood zone
@@ -1,10 +1,9 @@
 # pylint: disable=unsubscriptable-object
 # pylint: disable=unsupported-assignment-operation
 
 import pandas as pd
 from data_pipeline.config import settings
-
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,3 +1,3 @@
 # FSF wildfire risk data
 
-Fire risk computed as >= 0.003 burn risk probability
+Fire risk computed as >= 0.003 burn risk probability
@@ -1,10 +1,9 @@
 # pylint: disable=unsubscriptable-object
 # pylint: disable=unsupported-assignment-operation
 
 import pandas as pd
 from data_pipeline.config import settings
-
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,11 +1,12 @@
 """Utililities for turning geographies into tracts, using census data"""
 
+from functools import lru_cache
 from pathlib import Path
 from typing import Optional
-from functools import lru_cache
 
 import geopandas as gpd
 from data_pipeline.etl.sources.tribal.etl import TribalETL
 from data_pipeline.utils import get_module_logger
 
 from .census.etl import CensusETL
 
 logger = get_module_logger(__name__)
@@ -1,11 +1,9 @@
 import pandas as pd
 
 from data_pipeline.config import settings
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.utils import (
-    get_module_logger,
-    unzip_file_from_url,
-)
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,8 +1,8 @@
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.utils import get_module_logger
+from data_pipeline.config import settings
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,9 +1,9 @@
 import pandas as pd
-from pandas.errors import EmptyDataError
 
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
+from pandas.errors import EmptyDataError
 
 logger = get_module_logger(__name__)
@@ -1,5 +1,6 @@
 import pandas as pd
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,9 +1,8 @@
 import pandas as pd
 import requests
 
+from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.utils import get_module_logger
-from data_pipeline.config import settings
 
 logger = get_module_logger(__name__)
@@ -1,10 +1,9 @@
-import pandas as pd
 import geopandas as gpd
-
-from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
-from data_pipeline.score import field_names
+import pandas as pd
+from data_pipeline.config import settings
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)

@@ -96,4 +95,3 @@ class MappingForEJETL(ExtractTransformLoad):
 
     def validate(self) -> None:
         logger.info("Validating Mapping For EJ Data")
-        pass
@@ -37,4 +37,4 @@ Oklahoma City,90R,D
 Milwaukee Co.,S-D1,D
 Milwaukee Co.,S-D2,D
 Milwaukee Co.,S-D3,D
-Milwaukee Co.,S-D4,D
+Milwaukee Co.,S-D4,D
@@ -1,10 +1,11 @@
 import pathlib
 
 import numpy as np
 import pandas as pd
 
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.score import field_names
-from data_pipeline.utils import download_file_from_url, get_module_logger
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -8,7 +8,7 @@ According to the documentation:
 
 There exist two data categories: Population Burden and Population Characteristics.
 
-There are two indicators within Population Burden: Exposure, and Socioeconomic. Within Population Characteristics, there exist two indicators: Sensitive, Environmental Effects. Each respective indicator contains several relevant covariates, and an averaged score.
+There are two indicators within Population Burden: Exposure, and Socioeconomic. Within Population Characteristics, there exist two indicators: Sensitive, Environmental Effects. Each respective indicator contains several relevant covariates, and an averaged score.
 
 The two "Pollution Burden" average scores are then averaged together and the result is multiplied by the average of the "Population Characteristics" categories to get the total EJ Score for each tract.
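Taken literally, the quoted description combines the pieces as below (a sketch of the stated arithmetic only; the variable names are illustrative):

# Two indicator averages within Population Burden ("Pollution Burden" in
# the documentation's wording) are averaged together.
pollution_burden = (exposure_avg + socioeconomic_avg) / 2

# The two Population Characteristics indicator averages are averaged too.
population_characteristics = (sensitive_avg + environmental_effects_avg) / 2

# The product is the total EJ Score for the tract.
ej_score = pollution_burden * population_characteristics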
@@ -20,4 +20,4 @@ Furthermore, it was determined that Bladensburg residents are at a higher risk o
 
 Source:
 
-Driver, A.; Mehdizadeh, C.; Bara-Garcia, S.; Bodenreider, C.; Lewis, J.; Wilson, S. Utilization of the Maryland Environmental Justice Screening Tool: A Bladensburg, Maryland Case Study. Int. J. Environ. Res. Public Health 2019, 16, 348.
+Driver, A.; Mehdizadeh, C.; Bara-Garcia, S.; Bodenreider, C.; Lewis, J.; Wilson, S. Utilization of the Maryland Environmental Justice Screening Tool: A Bladensburg, Maryland Case Study. Int. J. Environ. Res. Public Health 2019, 16, 348.
@@ -1,11 +1,11 @@
 from glob import glob
 
 import geopandas as gpd
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
-from data_pipeline.score import field_names
+from data_pipeline.config import settings
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -29,4 +29,4 @@ Sources:
 * Minnesota Pollution Control Agency. (2015, December 15). Environmental Justice Framework Report.
   Retrieved from https://www.pca.state.mn.us/sites/default/files/p-gen5-05.pdf.
 
-* Faust, J., L. August, K. Bangia, V. Galaviz, J. Leichty, S. Prasad… and L. Zeise. (2017, January). Update to the California Communities Environmental Health Screening Tool CalEnviroScreen 3.0. Retrieved from OEHHA website: https://oehha.ca.gov/media/downloads/calenviroscreen/report/ces3report.pdf
+* Faust, J., L. August, K. Bangia, V. Galaviz, J. Leichty, S. Prasad… and L. Zeise. (2017, January). Update to the California Communities Environmental Health Screening Tool CalEnviroScreen 3.0. Retrieved from OEHHA website: https://oehha.ca.gov/media/downloads/calenviroscreen/report/ces3report.pdf
@@ -1,9 +1,8 @@
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad
-from data_pipeline.utils import get_module_logger
-from data_pipeline.score import field_names
+from data_pipeline.config import settings
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.score import field_names
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -2,10 +2,9 @@
 # but it may be a known bug. https://github.com/PyCQA/pylint/issues/1498
 # pylint: disable=unsubscriptable-object
 # pylint: disable=unsupported-assignment-operation
 
 import pandas as pd
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -2,7 +2,7 @@
 
 The following dataset was compiled by TPL (Trust for Public Lands) using NCLD data. We define as: AREA - [CROPLAND] - [IMPERVIOUS SURFACES].
 
-## Codebook
+## Codebook
 - GEOID10 – Census tract ID
 - SF – State Name
 - CF – County Name
@@ -13,7 +13,7 @@ The following dataset was compiled by TPL (Trust for Public Lands) using NCLD da
 - AcresCrops – Acres crops calculated by summing all cells in the NLCD Cropland Data Layer crop classes.
 - PctCrops – Formula: AcresCrops/TractAcres*100.
 - PctImperv – Mean imperviousness for each census tract.
-  - CAVEAT: Where tracts extend into open water, mean imperviousness may be underestimated.
+  - CAVEAT: Where tracts extend into open water, mean imperviousness may be underestimated.
 - __TO USE__ PctNatural – Formula: 100 – PctCrops – PctImperv.
 - PctNat90 – Tract in or below 10th percentile for PctNatural. 1 = True, 0 = False.
   - PctNatural 10th percentile = 28.6439%
@@ -24,7 +24,7 @@ The following dataset was compiled by TPL (Trust for Public Lands) using NCLD da
   - P200_PFS 65th percentile = 64.0%
 - NatureDep – ImpOrCrp = 1 AND LowInAndEd = 1.
 
-We added `GEOID10_TRACT` before converting shapefile to csv.
+We added `GEOID10_TRACT` before converting shapefile to csv.
 
 ## Instructions to recreate
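The codebook's formulas compose end to end; a short pandas sketch of the derived columns (column names come from the codebook above, while the helper itself and the dataframe it operates on are illustrative):

import pandas as pd


def add_derived_columns(df: pd.DataFrame) -> pd.DataFrame:
    # PctCrops – Formula: AcresCrops / TractAcres * 100.
    df["PctCrops"] = df["AcresCrops"] / df["TractAcres"] * 100
    # PctNatural – Formula: 100 - PctCrops - PctImperv.
    df["PctNatural"] = 100 - df["PctCrops"] - df["PctImperv"]
    # PctNat90 – tract at or below the 10th percentile (28.6439%) for PctNatural.
    df["PctNat90"] = (df["PctNatural"] <= 28.6439).astype(int)
    # NatureDep – ImpOrCrp = 1 AND LowInAndEd = 1.
    df["NatureDep"] = ((df["ImpOrCrp"] == 1) & (df["LowInAndEd"] == 1)).astype(int)
    return df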
@@ -1,10 +1,9 @@
 # pylint: disable=unsubscriptable-object
 # pylint: disable=unsupported-assignment-operation
 
 import pandas as pd
 from data_pipeline.config import settings
-
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
 from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)
@@ -1,12 +1,11 @@
 import functools
-import pandas as pd
 
+import pandas as pd
 from data_pipeline.config import settings
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.utils import (
-    get_module_logger,
-    unzip_file_from_url,
-)
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,11 +1,12 @@
 from pathlib import Path
 
 import geopandas as gpd
 import pandas as pd
 
 from data_pipeline.config import settings
 from data_pipeline.etl.base import ExtractTransformLoad
 from data_pipeline.score import field_names
-from data_pipeline.utils import get_module_logger, unzip_file_from_url
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import unzip_file_from_url
 
 logger = get_module_logger(__name__)
@@ -1,10 +1,8 @@
 from pathlib import Path
 
-from data_pipeline.utils import (
-    get_module_logger,
-    remove_all_from_dir,
-    remove_files_from_dir,
-)
+from data_pipeline.utils import get_module_logger
+from data_pipeline.utils import remove_all_from_dir
+from data_pipeline.utils import remove_files_from_dir
 
 
 logger = get_module_logger(__name__)
@@ -1,12 +1,11 @@
 import geopandas as gpd
 import numpy as np
 import pandas as pd
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.etl.sources.geo_utils import (
-    add_tracts_for_geometries,
-    get_tribal_geojson,
-    get_tract_geojson,
-)
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
+from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
+from data_pipeline.etl.sources.geo_utils import get_tract_geojson
+from data_pipeline.etl.sources.geo_utils import get_tribal_geojson
 from data_pipeline.score import field_names
 from data_pipeline.utils import get_module_logger
 
@@ -1,11 +1,13 @@
 from pathlib import Path
-import geopandas as gpd
-import pandas as pd
-import numpy as np
 
-from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
-from data_pipeline.utils import get_module_logger, download_file_from_url
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+from data_pipeline.etl.base import ExtractTransformLoad
+from data_pipeline.etl.base import ValidGeoLevel
+from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
+from data_pipeline.utils import download_file_from_url
+from data_pipeline.utils import get_module_logger
 
 logger = get_module_logger(__name__)