fixing merge conflicts

commit 07c4c030d3
Author: lucasmbrown-usds
Date: 2022-09-30 13:43:31 -04:00
266 changed files with 1,868 additions and 1,811 deletions

.github/CODEOWNERS
@@ -1,2 +1 @@
* @esfoobar-usds @vim-usds @emma-nechamkin @mattbowen-usds

@@ -13,4 +13,3 @@ Project maintainers have the right and responsibility to remove, ed
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or by contacting one or more of the project maintainers at justice40open@usds.gov.
This Code of Conduct is adapted from version 1.0.0 of the [Contributor Covenant](http://contributor-covenant.org), available at http://contributor-covenant.org/version/1/0/0/.

@@ -43,4 +43,3 @@ If you would like to contribute to any part of the codebase, fork the repository
* At least one authorized reviewer must approve the commit (see [CODEOWNERS](https://github.com/usds/justice40-tool/tree/main/.github/CODEOWNERS) for the most recent list of these reviewers).
* All required status checks must pass.
If there is significant disagreement among team members, a meeting will be held in order to determine the plan of action for the pull request.

@@ -28,4 +28,3 @@ For these and/or other purposes and motivations, and without any expectation of additional consi
c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.

@@ -62,4 +62,3 @@ describe('Does the map zoom and adjust to lat/long correctly?', () => {
// });
// });
});

@@ -11,4 +11,3 @@ Feature: All links on Public Eng page are functional
# When I look for the "Apr 15 Reg Link" CTA
# And I click on the "Apr 15 Reg Link" event
# Then the link should respond successfully

@@ -13,4 +13,3 @@
export const hyphenizeString = (string) => {
return string.split(/\.| /).join('-').toLowerCase();
};
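Note: the helper above splits on periods or spaces, joins with hyphens, and lowercases. A rough Python sketch of the same behavior (the sample input is hypothetical):

```python
import re

def hyphenize_string(s: str) -> str:
    # Mirror the JS helper: split on "." or a space, join with "-", lowercase.
    return "-".join(re.split(r"\.| ", s)).lower()

assert hyphenize_string("Not a Disadvantaged Community") == "not-a-disadvantaged-community"
```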

@@ -1,4 +1,3 @@
import AreaDetail from './AreaDetail';
export default AreaDetail;

@@ -65,4 +65,3 @@ describe('rendering of the AreaDetail', () => {
expect(asFragment()).toMatchSnapshot();
});
});

@@ -11,4 +11,3 @@ declare const DisadvantagedDotModule: DisadvantagedDotNamespace.IDisadvantagedDo
};
export = DisadvantagedDotModule;

@@ -10,4 +10,3 @@ declare const DownloadLinkModule: DownloadLinkNamespace.IDownloadLink & {
};
export = DownloadLinkModule;

@@ -35,4 +35,3 @@
display: none;
}
}

@@ -17,4 +17,3 @@
width: 90%;
z-index: 1;
}

@@ -10,4 +10,3 @@ declare const MapSearchModuleScssModule: MapSearchModuleScssNamespace.IMapSearch
};
export = MapSearchModuleScssModule;

@@ -11,4 +11,3 @@ declare const MapSearchMessageModuleScssModule: MapSearchMessageModuleScssNamesp
};
export = MapSearchMessageModuleScssModule;

@@ -18,6 +18,3 @@
top: 100px;
}

@@ -11,4 +11,3 @@ TerritoryFocusControlModuleScssNamespace.ITerritoryFocusControlModuleScss & {
};
export = TerritoryFocusControlModuleScssModule;

@@ -89,4 +89,3 @@ const TerritoryFocusControl = ({onClick}: ITerritoryFocusControl) => {
};
export default TerritoryFocusControl;

@@ -10,4 +10,3 @@ declare const ZoomWarningModuleScssModule: ZoomWarningModuleScssNamespace.IZoomW
};
export = ZoomWarningModuleScssModule;

@@ -198,4 +198,3 @@ export const GET_INVOLVED_COMMENTS = {
}}
/>,
};

@@ -261,5 +261,3 @@ export const CONSOLE_ERROR = defineMessages({
description: 'Navigate to the about page. This is console error staging URL',
},
});

@@ -97,5 +97,3 @@ export const CENSUS_TRACT_FEEDBACK = {
}}
/>,
};

@@ -176,4 +176,3 @@ export const getOSBaseMap = () : Style => {
],
};
};

@@ -252,4 +252,3 @@ export const makeMapStyle = () : Style => {
],
};
};

@@ -21,5 +21,3 @@ fs.writeFile('esNoNesting.json', fixedEsString, (err) => {
}
console.log('JSON data is saved.');
});

@@ -1,31 +1,27 @@
from subprocess import call
import sys
import click
from subprocess import call
import click
from data_pipeline.config import settings
from data_pipeline.etl.runner import (
etl_runner,
score_generate,
score_geo,
score_post,
)
from data_pipeline.etl.runner import etl_runner
from data_pipeline.etl.runner import score_generate
from data_pipeline.etl.runner import score_geo
from data_pipeline.etl.runner import score_post
from data_pipeline.etl.sources.census.etl_utils import check_census_data_source
from data_pipeline.etl.sources.census.etl_utils import (
check_census_data_source,
reset_data_directories as census_reset,
zip_census_data,
)
from data_pipeline.etl.sources.census.etl_utils import zip_census_data
from data_pipeline.etl.sources.tribal.etl_utils import (
reset_data_directories as tribal_reset,
)
from data_pipeline.tile.generate import generate_tiles
from data_pipeline.utils import (
data_folder_cleanup,
get_module_logger,
score_folder_cleanup,
downloadable_cleanup,
temp_folder_cleanup,
check_first_run,
)
from data_pipeline.utils import check_first_run
from data_pipeline.utils import data_folder_cleanup
from data_pipeline.utils import downloadable_cleanup
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import score_folder_cleanup
from data_pipeline.utils import temp_folder_cleanup
logger = get_module_logger(__name__)
@@ -36,8 +32,6 @@ dataset_cli_help = "Grab the data from either 'local' for local access or 'aws'
def cli():
"""Defines a click group for the commands below"""
pass
@cli.command(help="Clean up all census data folders")
def census_cleanup():
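Note: most of the Python hunks in this commit follow the pattern in the hunk above: parenthesized multi-name imports are rewritten as one import per line, sorted alphabetically. This matches the output of a single-line import enforcer such as reorder-python-imports or isort with force_single_line (an assumption; the commit itself does not name the tool). A minimal before/after sketch drawn from the diff:

```python
# Before: several names grouped in a single parenthesized statement.
from data_pipeline.utils import (
    data_folder_cleanup,
    get_module_logger,
)

# After: one import per line, the style enforced throughout this commit.
from data_pipeline.utils import data_folder_cleanup
from data_pipeline.utils import get_module_logger
```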

@@ -12,12 +12,12 @@ To see more: https://buildmedia.readthedocs.org/media/pdf/papermill/latest/paper
To run:
` $ python src/run_tract_comparison.py --template_notebook=TEMPLATE.ipynb --parameter_yaml=PARAMETERS.yaml`
"""
import os
import datetime
import argparse
import yaml
import datetime
import os
import papermill as pm
import yaml
def _read_param_file(param_file: str) -> dict:
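Note: the docstring above runs the comparison notebook through papermill. A minimal sketch of the underlying call (the output path and parameter names here are hypothetical, not the script's exact values):

```python
import papermill as pm

# Execute a parameterized template notebook, writing the executed run to a new file.
pm.execute_notebook(
    "TEMPLATE.ipynb",
    "runs/tract_comparison_output.ipynb",
    parameters={"tract_id_a": "01073001100", "tract_id_b": "01073001400"},
)
```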

@@ -1,9 +1,9 @@
import pathlib
import pandas as pd
import xlsxwriter
from data_pipeline.score import field_names
from data_pipeline.etl.sources.census.etl_utils import get_state_information
from data_pipeline.score import field_names
# Some excel parameters
DEFAULT_COLUMN_WIDTH = 18

@@ -1,8 +1,7 @@
import pathlib
from dynaconf import Dynaconf
import data_pipeline
from dynaconf import Dynaconf
settings = Dynaconf(
envvar_prefix="DYNACONF",

@@ -386,6 +386,72 @@ fields:
- score_name: Greater than or equal to the 90th percentile for low median household income as a percent of area median income and has low HS education in 2009 (island areas)?
label: Greater than or equal to the 90th percentile for low median household income as a percent of area median income and has low HS education in 2009 (island areas)?
format: bool
- score_name: Share of properties at risk of flood in 30 years
label: Share of properties at risk of flood in 30 years
format: percentage
- score_name: Share of properties at risk of fire in 30 years
label: Share of properties at risk of fire in 30 years
format: percentage
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years and is low income?
label: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years and is low income?
label: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years
label: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years
label: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years
format: bool
- score_name: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent and is low income?
label: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent
label: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent
format: bool
- score_name: Share of the tract's land area that is covered by impervious surface or cropland as a percent
label: Share of the tract's land area that is covered by impervious surface or cropland as a percent
format: percentage
- score_name: Share of the tract's land area that is covered by impervious surface or cropland as a percent (percentile)
label: Share of the tract's land area that is covered by impervious surface or cropland as a percent (percentile)
format: percentage
- score_name: Share of properties at risk of flood in 30 years (percentile)
label: Share of properties at risk of flood in 30 years (percentile)
format: percentage
- score_name: Share of properties at risk of fire in 30 years (percentile)
label: Share of properties at risk of fire in 30 years (percentile)
format: percentage
- score_name: Does the tract have at least 35 acres in it?
label: Does the tract have at least 35 acres in it?
format: bool
- score_name: Is there at least one Formerly Used Defense Site (FUDS) in the tract?
label: Is there at least one Formerly Used Defense Site (FUDS) in the tract?
format: bool
- score_name: Is there at least one abandoned mine in this census tract?
label: Is there at least one abandoned mine in this census tract?
format: bool
- score_name: Is there at least one Formerly Used Defense Site (FUDS) in the tract, where missing data is treated as False?
label: Is there at least one Formerly Used Defense Site (FUDS) in the tract, where missing data is treated as False?
format: bool
- score_name: Is there at least one abandoned mine in this census tract, where missing data is treated as False?
label: Is there at least one abandoned mine in this census tract, where missing data is treated as False?
format: bool
- score_name: There is at least one abandoned mine in this census tract and the tract is low income.
label: There is at least one abandoned mine in this census tract and the tract is low income.
format: bool
- score_name: There is at least one Formerly Used Defense Site (FUDS) in the tract and the tract is low income.
label: There is at least one Formerly Used Defense Site (FUDS) in the tract and the tract is low income.
format: bool
- score_name: Tract-level redlining score meets or exceeds 3.25 and is low income
label: Tract experienced historic underinvestment and remains low income
format: bool
- score_name: Tract-level redlining score meets or exceeds 3.25
label: Tract experienced historic underinvestment
format: bool
- score_name: Income data has been estimated based on neighbor income
label: Income data has been estimated based on geographic neighbor income
format: bool
- score_name: Number of Tribal areas within Census tract
label: Number of Tribal areas within Census tract
format: int64
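Note: each entry above pairs an internal score_name with a public-facing label and an output format. A sketch of how such a fields config might be consumed (the filename is hypothetical; the pipeline itself goes through data_pipeline.utils.load_yaml_dict_from_file and typed schema classes):

```python
import yaml

with open("csv.yml") as f:  # hypothetical path to the schema file
    config = yaml.safe_load(f)

for entry in config["fields"]:
    print(f"{entry['score_name']!r} -> {entry['label']!r} ({entry['format']})")
```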

@@ -390,6 +390,72 @@ sheets:
- score_name: Greater than or equal to the 90th percentile for low median household income as a percent of area median income and has low HS education in 2009 (island areas)?
label: Greater than or equal to the 90th percentile for low median household income as a percent of area median income and has low HS education in 2009 (island areas)?
format: bool
- score_name: Share of properties at risk of flood in 30 years
label: Share of properties at risk of flood in 30 years
format: percentage
- score_name: Share of properties at risk of fire in 30 years
label: Share of properties at risk of fire in 30 years
format: percentage
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years and is low income?
label: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years and is low income?
label: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years
label: Greater than or equal to the 90th percentile for share of properties at risk of flood in 30 years
format: bool
- score_name: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years
label: Greater than or equal to the 90th percentile for share of properties at risk of fire in 30 years
format: bool
- score_name: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent and is low income?
label: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent and is low income?
format: bool
- score_name: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent
label: Greater than or equal to the 90th percentile for share of the tract's land area that is covered by impervious surface or cropland as a percent
format: bool
- score_name: Share of the tract's land area that is covered by impervious surface or cropland as a percent
label: Share of the tract's land area that is covered by impervious surface or cropland as a percent
format: percentage
- score_name: Share of the tract's land area that is covered by impervious surface or cropland as a percent (percentile)
label: Share of the tract's land area that is covered by impervious surface or cropland as a percent (percentile)
format: percentage
- score_name: Share of properties at risk of flood in 30 years (percentile)
label: Share of properties at risk of flood in 30 years (percentile)
format: percentage
- score_name: Share of properties at risk of fire in 30 years (percentile)
label: Share of properties at risk of fire in 30 years (percentile)
format: percentage
- score_name: Does the tract have at least 35 acres in it?
label: Does the tract have at least 35 acres in it?
format: bool
- score_name: Is there at least one Formerly Used Defense Site (FUDS) in the tract?
label: Is there at least one Formerly Used Defense Site (FUDS) in the tract?
format: bool
- score_name: Is there at least one abandoned mine in this census tract?
label: Is there at least one abandoned mine in this census tract?
format: bool
- score_name: There is at least one abandoned mine in this census tract and the tract is low income.
label: There is at least one abandoned mine in this census tract and the tract is low income.
format: bool
- score_name: There is at least one Formerly Used Defense Site (FUDS) in the tract and the tract is low income.
label: There is at least one Formerly Used Defense Site (FUDS) in the tract and the tract is low income.
format: bool
- score_name: Is there at least one Formerly Used Defense Site (FUDS) in the tract, where missing data is treated as False?
label: Is there at least one Formerly Used Defense Site (FUDS) in the tract, where missing data is treated as False?
format: bool
- score_name: Is there at least one abandoned mine in this census tract, where missing data is treated as False?
label: Is there at least one abandoned mine in this census tract, where missing data is treated as False?
format: bool
- score_name: Tract-level redlining score meets or exceeds 3.25 and is low income
label: Tract experienced historic underinvestment and remains low income
format: bool
- score_name: Tract-level redlining score meets or exceeds 3.25
label: Tract experienced historic underinvestment
format: bool
- score_name: Income data has been estimated based on neighbor income
label: Income data has been estimated based on geographic neighbor income
format: bool
- score_name: Number of Tribal areas within Census tract
label: Number of Tribal areas within Census tract
format: int64

@@ -1,6 +1,8 @@
from dataclasses import dataclass, field
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List, Optional
from typing import List
from typing import Optional
class FieldType(Enum):

@@ -5,18 +5,15 @@ import typing
from typing import Optional
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.score.etl_utils import (
compare_to_list_of_expected_state_fips_codes,
)
from data_pipeline.etl.score.schemas.datasets import DatasetsConfig
from data_pipeline.utils import (
load_yaml_dict_from_file,
unzip_file_from_url,
remove_all_from_dir,
get_module_logger,
)
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import load_yaml_dict_from_file
from data_pipeline.utils import remove_all_from_dir
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,5 +1,5 @@
import importlib
import concurrent.futures
import importlib
import typing
from data_pipeline.etl.score.etl_score import ScoreETL

@@ -1,8 +1,7 @@
from pathlib import Path
import datetime
from pathlib import Path
from data_pipeline.config import settings
from data_pipeline.score import field_names
## note: to keep map porting "right" fields, keeping descriptors the same.

@@ -1,31 +1,28 @@
import functools
from typing import List
from dataclasses import dataclass
from typing import List
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.score import constants
from data_pipeline.etl.sources.census_acs.etl import CensusACSETL
from data_pipeline.etl.sources.national_risk_index.etl import (
NationalRiskIndexETL,
)
from data_pipeline.etl.sources.dot_travel_composite.etl import (
TravelCompositeETL,
)
from data_pipeline.etl.sources.eamlis.etl import AbandonedMineETL
from data_pipeline.etl.sources.fsf_flood_risk.etl import (
FloodRiskETL,
)
from data_pipeline.etl.sources.eamlis.etl import AbandonedMineETL
from data_pipeline.etl.sources.fsf_wildfire_risk.etl import WildfireRiskETL
from data_pipeline.etl.sources.national_risk_index.etl import (
NationalRiskIndexETL,
)
from data_pipeline.etl.sources.nlcd_nature_deprived.etl import NatureDeprivedETL
from data_pipeline.etl.sources.tribal_overlap.etl import TribalOverlapETL
from data_pipeline.etl.sources.us_army_fuds.etl import USArmyFUDS
from data_pipeline.etl.sources.nlcd_nature_deprived.etl import NatureDeprivedETL
from data_pipeline.etl.sources.fsf_wildfire_risk.etl import WildfireRiskETL
from data_pipeline.score.score_runner import ScoreRunner
from data_pipeline.score import field_names
from data_pipeline.etl.score import constants
from data_pipeline.score.score_runner import ScoreRunner
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,24 +1,22 @@
import concurrent.futures
import math
import os
import geopandas as gpd
import numpy as np
import pandas as pd
import geopandas as gpd
from data_pipeline.content.schemas.download_schemas import CSVConfig
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.score import constants
from data_pipeline.etl.score.etl_utils import check_score_data_source
from data_pipeline.etl.sources.census.etl_utils import (
check_census_data_source,
)
from data_pipeline.etl.score.etl_utils import check_score_data_source
from data_pipeline.score import field_names
from data_pipeline.content.schemas.download_schemas import CSVConfig
from data_pipeline.utils import (
get_module_logger,
zip_files,
load_yaml_dict_from_file,
load_dict_from_yaml_object_fields,
)
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import load_dict_from_yaml_object_fields
from data_pipeline.utils import load_yaml_dict_from_file
from data_pipeline.utils import zip_files
logger = get_module_logger(__name__)

@@ -1,29 +1,25 @@
from pathlib import Path
import json
from numpy import float64
from pathlib import Path
import numpy as np
import pandas as pd
from data_pipeline.content.schemas.download_schemas import (
CSVConfig,
CodebookConfig,
ExcelConfig,
)
from data_pipeline.content.schemas.download_schemas import CodebookConfig
from data_pipeline.content.schemas.download_schemas import CSVConfig
from data_pipeline.content.schemas.download_schemas import ExcelConfig
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.score.etl_utils import floor_series, create_codebook
from data_pipeline.utils import (
get_module_logger,
zip_files,
load_yaml_dict_from_file,
column_list_from_yaml_object_fields,
load_dict_from_yaml_object_fields,
)
from data_pipeline.score import field_names
from data_pipeline.etl.score.etl_utils import create_codebook
from data_pipeline.etl.score.etl_utils import floor_series
from data_pipeline.etl.sources.census.etl_utils import (
check_census_data_source,
)
from data_pipeline.score import field_names
from data_pipeline.utils import column_list_from_yaml_object_fields
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import load_dict_from_yaml_object_fields
from data_pipeline.utils import load_yaml_dict_from_file
from data_pipeline.utils import zip_files
from numpy import float64
from . import constants
logger = get_module_logger(__name__)

@@ -1,24 +1,21 @@
import os
import sys
import typing
from pathlib import Path
from collections import namedtuple
from pathlib import Path
import numpy as np
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.score.constants import (
TILES_ISLAND_AREA_FIPS_CODES,
TILES_PUERTO_RICO_FIPS_CODE,
TILES_CONTINENTAL_US_FIPS_CODE,
TILES_ALASKA_AND_HAWAII_FIPS_CODE,
)
from data_pipeline.etl.score.constants import TILES_ALASKA_AND_HAWAII_FIPS_CODE
from data_pipeline.etl.score.constants import TILES_CONTINENTAL_US_FIPS_CODE
from data_pipeline.etl.score.constants import TILES_ISLAND_AREA_FIPS_CODES
from data_pipeline.etl.score.constants import TILES_PUERTO_RICO_FIPS_CODE
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
from data_pipeline.utils import (
download_file_from_url,
get_module_logger,
)
from data_pipeline.score import field_names
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
from . import constants
logger = get_module_logger(__name__)
@@ -99,7 +96,7 @@ def floor_series(series: pd.Series, number_of_decimals: int) -> pd.Series:
if series.isin(unacceptable_values).any():
series.replace(mapping, regex=False, inplace=True)
multiplication_factor = 10 ** number_of_decimals
multiplication_factor = 10**number_of_decimals
# In order to safely cast NaNs
# First coerce series to float type: series.astype(float)
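Note: the only change in this hunk is formatting (10 ** number_of_decimals becomes 10**number_of_decimals, the spacing Black applies to simple power expressions). For context, a simplified sketch of what floor_series computes, not the pipeline's exact implementation:

```python
import numpy as np
import pandas as pd

def floor_to_decimals(series: pd.Series, number_of_decimals: int) -> pd.Series:
    # Scale up, floor, scale back down: 0.129 -> 0.12 at two decimals.
    multiplication_factor = 10**number_of_decimals
    return np.floor(series.astype(float) * multiplication_factor) / multiplication_factor

print(floor_to_decimals(pd.Series([0.129, 0.994]), 2))  # 0.12, 0.99
```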

@@ -1,6 +1,8 @@
from dataclasses import dataclass, field
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List, Optional
from typing import List
from typing import Optional
class FieldType(Enum):

@@ -5,7 +5,8 @@ from pathlib import Path
import pandas as pd
import pytest
from data_pipeline import config
from data_pipeline.etl.score import etl_score_post, tests
from data_pipeline.etl.score import etl_score_post
from data_pipeline.etl.score import tests
from data_pipeline.etl.score.etl_score_post import PostScoreETL

@@ -1,11 +1,10 @@
import pandas as pd
import numpy as np
import pandas as pd
import pytest
from data_pipeline.etl.score.etl_utils import (
floor_series,
compare_to_list_of_expected_state_fips_codes,
)
from data_pipeline.etl.score.etl_utils import floor_series
def test_floor_series():

@@ -1,14 +1,13 @@
# pylint: disable=W0212
## Above disables warning about access to underscore-prefixed methods
from importlib import reload
from pathlib import Path
import pandas.api.types as ptypes
import pandas.testing as pdt
from data_pipeline.content.schemas.download_schemas import (
CSVConfig,
)
from data_pipeline.etl.score import constants
from data_pipeline.utils import load_yaml_dict_from_file

@@ -1,8 +1,7 @@
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.config import settings
logger = get_module_logger(__name__)

@@ -1,13 +1,15 @@
import pathlib
from pathlib import Path
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.etl.score.etl_utils import (
compare_to_list_of_expected_state_fips_codes,
)
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, download_file_from_url
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,11 @@
import typing
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger, download_file_from_url
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.score import field_names
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,8 @@
import pandas as pd
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -3,12 +3,12 @@ import json
import subprocess
from enum import Enum
from pathlib import Path
import geopandas as gpd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -5,13 +5,11 @@ from pathlib import Path
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.utils import (
get_module_logger,
remove_all_dirs_from_dir,
remove_files_from_dir,
unzip_file_from_url,
zip_directory,
)
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import remove_all_dirs_from_dir
from data_pipeline.utils import remove_files_from_dir
from data_pipeline.utils import unzip_file_from_url
from data_pipeline.utils import zip_directory
logger = get_module_logger(__name__)

@@ -1,19 +1,19 @@
from collections import namedtuple
import os
import pandas as pd
import geopandas as gpd
from collections import namedtuple
import geopandas as gpd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.sources.census_acs.etl_utils import (
retrieve_census_acs_data,
)
from data_pipeline.etl.sources.census_acs.etl_imputations import (
calculate_income_measures,
)
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.etl.sources.census_acs.etl_utils import (
retrieve_census_acs_data,
)
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,7 +1,10 @@
from typing import Any, List, NamedTuple, Tuple
import pandas as pd
import geopandas as gpd
from typing import Any
from typing import List
from typing import NamedTuple
from typing import Tuple
import geopandas as gpd
import pandas as pd
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger

@@ -1,10 +1,9 @@
import os
from pathlib import Path
from typing import List
import censusdata
import pandas as pd
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
from data_pipeline.utils import get_module_logger

@@ -1,11 +1,10 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.sources.census_acs.etl_utils import (
retrieve_census_acs_data,
)
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,13 +1,14 @@
import json
from pathlib import Path
import numpy as np
import pandas as pd
import requests
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.config import settings
from data_pipeline.utils import unzip_file_from_url, download_file_from_url
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,14 +1,13 @@
import json
from typing import List
import requests
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
import requests
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
pd.options.mode.chained_assignment = "raise"

@@ -1,7 +1,8 @@
from pathlib import Path
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,8 +1,9 @@
from pathlib import Path
import pandas as pd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,10 +1,9 @@
# pylint: disable=unsubscriptable-object
# pylint: disable=unsupported-assignment-operation
import pandas as pd
import geopandas as gpd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,10 @@
from pathlib import Path
import geopandas as gpd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
from data_pipeline.utils import get_module_logger

@@ -1,6 +1,6 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger

@@ -1,5 +1,4 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
@@ -58,7 +57,6 @@ class EJSCREENAreasOfConcernETL(ExtractTransformLoad):
# TO DO: As a one off we did all the processing in a separate Notebook
# Can add here later for a future PR
pass
def load(self) -> None:
if self.ejscreen_areas_of_concern_data_exists():

@@ -1,10 +1,11 @@
from pathlib import Path
import pandas as pd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,9 +1,10 @@
from pathlib import Path
import pandas as pd
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,10 +1,9 @@
# pylint: disable=unsubscriptable-object
# pylint: disable=unsupported-assignment-operation
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,10 +1,9 @@
# pylint: disable=unsubscriptable-object
# pylint: disable=unsupported-assignment-operation
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,11 +1,12 @@
"""Utililities for turning geographies into tracts, using census data"""
from functools import lru_cache
from pathlib import Path
from typing import Optional
from functools import lru_cache
import geopandas as gpd
from data_pipeline.etl.sources.tribal.etl import TribalETL
from data_pipeline.utils import get_module_logger
from .census.etl import CensusETL
logger = get_module_logger(__name__)
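Note: the imports above pair functools.lru_cache with geopandas loaders; the pattern caches an expensive GeoJSON read so repeated lookups share a single load. A minimal sketch under assumed names and paths:

```python
from functools import lru_cache

import geopandas as gpd

@lru_cache()
def load_tracts(path: str = "usa_tracts.json") -> gpd.GeoDataFrame:
    # The first call reads the file; later calls with the same path reuse the result.
    return gpd.read_file(path)
```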

@@ -1,11 +1,9 @@
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import (
get_module_logger,
unzip_file_from_url,
)
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,8 +1,8 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,9 @@
import pandas as pd
from pandas.errors import EmptyDataError
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
from pandas.errors import EmptyDataError
logger = get_module_logger(__name__)

@@ -1,5 +1,6 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,8 @@
import pandas as pd
import requests
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.config import settings
logger = get_module_logger(__name__)

@@ -1,10 +1,9 @@
import pandas as pd
import geopandas as gpd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)
@@ -96,4 +95,3 @@ class MappingForEJETL(ExtractTransformLoad):
def validate(self) -> None:
logger.info("Validating Mapping For EJ Data")
pass

@@ -1,10 +1,11 @@
import pathlib
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import download_file_from_url, get_module_logger
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,11 +1,11 @@
from glob import glob
import geopandas as gpd
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,8 @@
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.utils import get_module_logger
from data_pipeline.score import field_names
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -2,10 +2,9 @@
# but it may be a known bug. https://github.com/PyCQA/pylint/issues/1498
# pylint: disable=unsubscriptable-object
# pylint: disable=unsupported-assignment-operation
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,10 +1,9 @@
# pylint: disable=unsubscriptable-object
# pylint: disable=unsupported-assignment-operation
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,12 +1,11 @@
import functools
import pandas as pd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import (
get_module_logger,
unzip_file_from_url,
)
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,11 +1,12 @@
from pathlib import Path
import geopandas as gpd
import pandas as pd
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, unzip_file_from_url
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import unzip_file_from_url
logger = get_module_logger(__name__)

@@ -1,10 +1,8 @@
from pathlib import Path
from data_pipeline.utils import (
get_module_logger,
remove_all_from_dir,
remove_files_from_dir,
)
from data_pipeline.utils import get_module_logger
from data_pipeline.utils import remove_all_from_dir
from data_pipeline.utils import remove_files_from_dir
logger = get_module_logger(__name__)

@@ -1,12 +1,11 @@
import geopandas as gpd
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.etl.sources.geo_utils import (
add_tracts_for_geometries,
get_tribal_geojson,
get_tract_geojson,
)
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
from data_pipeline.etl.sources.geo_utils import get_tract_geojson
from data_pipeline.etl.sources.geo_utils import get_tribal_geojson
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger

@@ -1,11 +1,13 @@
from pathlib import Path
import geopandas as gpd
import pandas as pd
import numpy as np
from data_pipeline.etl.base import ExtractTransformLoad, ValidGeoLevel
from data_pipeline.utils import get_module_logger, download_file_from_url
import geopandas as gpd
import numpy as np
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.etl.base import ValidGeoLevel
from data_pipeline.etl.sources.geo_utils import add_tracts_for_geometries
from data_pipeline.utils import download_file_from_url
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,9 +1,6 @@
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
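Note: the MinMaxScaler imported above rescales each column linearly onto [0, 1]; an illustrative call with made-up data:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
# The column minimum maps to 0, the maximum to 1, values in between linearly.
print(scaler.fit_transform(np.array([[1.0], [5.0], [9.0]])))  # [[0.], [0.5], [1.]]
```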

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,8 +1,8 @@
from collections import namedtuple
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,7 +1,6 @@
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
import pandas as pd
from data_pipeline.score.score import Score
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,8 +1,7 @@
import data_pipeline.score.field_names as field_names
import numpy as np
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,11 +1,11 @@
from typing import Tuple
import data_pipeline.etl.score.constants as constants
import data_pipeline.score.field_names as field_names
import numpy as np
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger
import data_pipeline.etl.score.constants as constants
logger = get_module_logger(__name__)

View file

@ -1,12 +1,12 @@
from typing import Tuple
import data_pipeline.etl.score.constants as constants
import data_pipeline.score.field_names as field_names
import numpy as np
import pandas as pd
from data_pipeline.score.score import Score
import data_pipeline.score.field_names as field_names
from data_pipeline.utils import get_module_logger
import data_pipeline.etl.score.constants as constants
from data_pipeline.score.utils import calculate_tract_adjacency_scores
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,6 +1,5 @@
import pandas as pd
from data_pipeline.score.score_narwhal import ScoreNarwhal
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

View file

@ -1,12 +1,12 @@
"""Utilities to help generate the score."""
import pandas as pd
import geopandas as gpd
import data_pipeline.score.field_names as field_names
import geopandas as gpd
import pandas as pd
from data_pipeline.etl.sources.geo_utils import get_tract_geojson
from data_pipeline.utils import get_module_logger
# XXX: @jorge I am torn about the coupling that importing from
# etl.sources creates vs keeping the code DRY. Thoughts?
from data_pipeline.etl.sources.geo_utils import get_tract_geojson
from data_pipeline.utils import get_module_logger
logger = get_module_logger(__name__)

@@ -3,7 +3,6 @@ from pathlib import Path
from shutil import copyfile
import pytest
from data_pipeline.config import settings
from data_pipeline.etl.base import ExtractTransformLoad

@@ -1,8 +1,8 @@
import pandas as pd
import pytest
from data_pipeline.config import settings
from data_pipeline.score.field_names import GEOID_TRACT_FIELD
from data_pipeline.etl.score import constants
from data_pipeline.score.field_names import GEOID_TRACT_FIELD
@pytest.fixture(scope="session")
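Note: this hunk reorders the imports feeding a session-scoped fixture. A sketch of the fixture shape the hunk implies (the CSV path and read options are assumptions, not the repository's exact code):

```python
import pandas as pd
import pytest

from data_pipeline.score.field_names import GEOID_TRACT_FIELD

@pytest.fixture(scope="session")
def final_score_df() -> pd.DataFrame:
    # Load the final score file once per test session and share it across tests.
    return pd.read_csv("usa.csv", dtype={GEOID_TRACT_FIELD: str})
```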

@@ -1,9 +1,11 @@
# flake8: noqa: W0613,W0611,F811
from dataclasses import dataclass
import pytest
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger
from data_pipeline.score.score_narwhal import ScoreNarwhal
from data_pipeline.utils import get_module_logger
from .fixtures import final_score_df # pylint: disable=unused-import
logger = get_module_logger(__name__)

Some files were not shown because too many files have changed in this diff.