mirror of
https://github.com/DOI-DO/j40-cejst-2.git
synced 2025-02-23 10:04:18 -08:00
Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
This commit is contained in:
parent
52b6d92820
commit
4ae5ec512f
16 changed files with 1416 additions and 228 deletions
Binary file not shown.
|
@ -40,7 +40,7 @@ def validate_new_data(
|
|||
assert (
|
||||
checking_df[score_col].nunique() <= 3
|
||||
), f"Error: there are too many values possible in {score_col}"
|
||||
assert (True in checking_df[score_col].unique()) & (
|
||||
assert (True in checking_df[score_col].unique()) | (
|
||||
False in checking_df[score_col].unique()
|
||||
), f"Error: {score_col} should be a boolean"
|
||||
|
||||
|
|
|
@ -26,6 +26,9 @@ fields:
|
|||
- score_name: Total population
|
||||
label: Total population
|
||||
format: float
|
||||
- score_name: Percent of individuals below 200% Federal Poverty Line, imputed and adjusted
|
||||
label: Adjusted percent of individuals below 200% Federal Poverty Line
|
||||
format: float
|
||||
- score_name: Is low income and has a low percent of higher ed students?
|
||||
label: Is low income and has a low percent of higher ed students?
|
||||
format: bool
|
||||
|
|
|
@ -30,6 +30,9 @@ sheets:
|
|||
- score_name: Total population
|
||||
label: Total population
|
||||
format: float
|
||||
- score_name: Percent of individuals below 200% Federal Poverty Line, imputed and adjusted
|
||||
label: Adjusted percent of individuals below 200% Federal Poverty Line
|
||||
format: float
|
||||
- score_name: Is low income and has a low percent of higher ed students?
|
||||
label: Is low income and has a low percent of higher ed students?
|
||||
format: bool
|
||||
|
|
|
@ -14,16 +14,6 @@ DATASET_LIST = [
|
|||
"module_dir": "tree_equity_score",
|
||||
"class_name": "TreeEquityScoreETL",
|
||||
},
|
||||
{
|
||||
"name": "census_acs",
|
||||
"module_dir": "census_acs",
|
||||
"class_name": "CensusACSETL",
|
||||
},
|
||||
{
|
||||
"name": "census_acs_2010",
|
||||
"module_dir": "census_acs_2010",
|
||||
"class_name": "CensusACS2010ETL",
|
||||
},
|
||||
{
|
||||
"name": "census_decennial",
|
||||
"module_dir": "census_decennial",
|
||||
|
@ -124,6 +114,17 @@ DATASET_LIST = [
|
|||
"module_dir": "maryland_ejscreen",
|
||||
"class_name": "MarylandEJScreenETL",
|
||||
},
|
||||
# This has to come after us.json exists
|
||||
{
|
||||
"name": "census_acs",
|
||||
"module_dir": "census_acs",
|
||||
"class_name": "CensusACSETL",
|
||||
},
|
||||
{
|
||||
"name": "census_acs_2010",
|
||||
"module_dir": "census_acs_2010",
|
||||
"class_name": "CensusACS2010ETL",
|
||||
},
|
||||
]
|
||||
CENSUS_INFO = {
|
||||
"name": "census",
|
||||
|
|
|
@ -5,6 +5,9 @@ from data_pipeline.config import settings
|
|||
|
||||
from data_pipeline.score import field_names
|
||||
|
||||
## note: to keep map porting "right" fields, keeping descriptors the same.
|
||||
|
||||
|
||||
# Base Paths
|
||||
DATA_PATH = Path(settings.APP_ROOT) / "data"
|
||||
TMP_PATH = DATA_PATH / "tmp"
|
||||
|
@ -179,6 +182,8 @@ TILES_SCORE_COLUMNS = {
|
|||
+ field_names.PERCENTILE_FIELD_SUFFIX: "P100_PFS",
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX: "P200_PFS",
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX: "P200_I_PFS",
|
||||
field_names.LEAD_PAINT_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX: "LPF_PFS",
|
||||
field_names.NPL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX: "NPL_PFS",
|
||||
|
@ -198,7 +203,8 @@ TILES_SCORE_COLUMNS = {
|
|||
field_names.M_HOUSING: "M_HSG",
|
||||
field_names.M_POLLUTION: "M_PLN",
|
||||
field_names.M_HEALTH: "M_HLTH",
|
||||
field_names.SCORE_M_COMMUNITIES: "SM_C",
|
||||
# temporarily update this so that it's the Narwhal score that gets visualized on the map
|
||||
field_names.SCORE_N_COMMUNITIES: "SM_C",
|
||||
field_names.SCORE_M + field_names.PERCENTILE_FIELD_SUFFIX: "SM_PFS",
|
||||
field_names.EXPECTED_POPULATION_LOSS_RATE_LOW_INCOME_LOW_HIGHER_ED_FIELD: "EPLRLI",
|
||||
field_names.EXPECTED_AGRICULTURE_LOSS_RATE_LOW_INCOME_LOW_HIGHER_ED_FIELD: "EALRLI",
|
||||
|
@ -246,7 +252,6 @@ TILES_SCORE_COLUMNS = {
|
|||
field_names.ISLAND_LOW_MEDIAN_INCOME_PCTILE_THRESHOLD: "IA_LMI_ET",
|
||||
field_names.ISLAND_UNEMPLOYMENT_PCTILE_THRESHOLD: "IA_UN_ET",
|
||||
field_names.ISLAND_POVERTY_PCTILE_THRESHOLD: "IA_POV_ET",
|
||||
field_names.FPL_200_SERIES: "FPL200S",
|
||||
field_names.THRESHOLD_COUNT: "TC",
|
||||
field_names.CATEGORY_COUNT: "CC",
|
||||
field_names.ISLAND_AREAS_UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD: "IAULHSE",
|
||||
|
@ -283,7 +288,7 @@ TILES_SCORE_COLUMNS = {
|
|||
## Low high school and low higher ed for t&wd
|
||||
field_names.WORKFORCE_SOCIO_INDICATORS_EXCEEDED: "M_WKFC_EBSI",
|
||||
## FPL 200 and low higher ed for all others
|
||||
field_names.FPL_200_AND_COLLEGE_ATTENDANCE_SERIES: "M_EBSI",
|
||||
field_names.FPL_200_SERIES: "M_EBSI",
|
||||
}
|
||||
|
||||
# columns to round floats to 2 decimals
|
||||
|
@ -311,6 +316,8 @@ TILES_SCORE_FLOAT_COLUMNS = [
|
|||
+ field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.LEAD_PAINT_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.NPL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.RMP_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
|
@ -332,7 +339,6 @@ TILES_SCORE_FLOAT_COLUMNS = [
|
|||
field_names.LOW_HS_EDUCATION_LOW_HIGHER_ED_FIELD,
|
||||
field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD,
|
||||
field_names.WASTEWATER_FIELD + field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.SCORE_M + field_names.PERCENTILE_FIELD_SUFFIX,
|
||||
field_names.COLLEGE_NON_ATTENDANCE_FIELD,
|
||||
field_names.COLLEGE_ATTENDANCE_FIELD,
|
||||
]
|
||||
|
|
|
@ -405,6 +405,7 @@ class ScoreETL(ExtractTransformLoad):
|
|||
df[field_names.MEDIAN_INCOME_FIELD] / df[field_names.AMI_FIELD]
|
||||
)
|
||||
|
||||
# QQ: why don't we just filter to the numeric columns by type?
|
||||
numeric_columns = [
|
||||
field_names.HOUSING_BURDEN_FIELD,
|
||||
field_names.TOTAL_POP_FIELD,
|
||||
|
@ -458,6 +459,7 @@ class ScoreETL(ExtractTransformLoad):
|
|||
field_names.IMPENETRABLE_SURFACES_FIELD,
|
||||
# We have to pass this boolean here in order to include it in ag value loss percentiles.
|
||||
field_names.AGRICULTURAL_VALUE_BOOL_FIELD,
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD,
|
||||
]
|
||||
|
||||
non_numeric_columns = [
|
||||
|
|
|
@ -29,7 +29,7 @@ from . import constants
|
|||
logger = get_module_logger(__name__)
|
||||
|
||||
# Define the DAC variable
|
||||
DISADVANTAGED_COMMUNITIES_FIELD = field_names.SCORE_M_COMMUNITIES
|
||||
DISADVANTAGED_COMMUNITIES_FIELD = field_names.SCORE_N_COMMUNITIES
|
||||
|
||||
|
||||
class PostScoreETL(ExtractTransformLoad):
|
||||
|
|
|
@ -1,14 +1,26 @@
|
|||
from collections import namedtuple
|
||||
import os
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
|
||||
from data_pipeline.config import settings
|
||||
from data_pipeline.etl.base import ExtractTransformLoad
|
||||
from data_pipeline.etl.sources.census_acs.etl_utils import (
|
||||
retrieve_census_acs_data,
|
||||
)
|
||||
from data_pipeline.utils import get_module_logger
|
||||
from data_pipeline.etl.sources.census_acs.etl_imputations import (
|
||||
calculate_income_measures,
|
||||
)
|
||||
|
||||
from data_pipeline.utils import get_module_logger, unzip_file_from_url
|
||||
from data_pipeline.score import field_names
|
||||
|
||||
logger = get_module_logger(__name__)
|
||||
|
||||
# because now there is a requirement for the us.json, this will port from
|
||||
# AWS when a local copy does not exist.
|
||||
CENSUS_DATA_S3_URL = settings.AWS_JUSTICE40_DATASOURCES_URL + "/census.zip"
|
||||
|
||||
|
||||
class CensusACSETL(ExtractTransformLoad):
|
||||
def __init__(self):
|
||||
|
@ -59,6 +71,23 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
|
||||
"Percent of individuals < 200% Federal Poverty Line"
|
||||
)
|
||||
self.IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
|
||||
"Percent of individuals < 200% Federal Poverty Line, imputed"
|
||||
)
|
||||
|
||||
self.ADJUSTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
|
||||
"Adjusted percent of individuals < 200% Federal Poverty Line"
|
||||
)
|
||||
|
||||
self.ADJUSTED_AND_IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME_PRELIMINARY = (
|
||||
"Preliminary adjusted percent of individuals < 200% Federal Poverty Line,"
|
||||
+ " imputed"
|
||||
)
|
||||
|
||||
self.ADJUSTED_AND_IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
|
||||
"Adjusted percent of individuals < 200% Federal Poverty Line,"
|
||||
+ " imputed"
|
||||
)
|
||||
|
||||
self.MEDIAN_HOUSE_VALUE_FIELD = "B25077_001E"
|
||||
self.MEDIAN_HOUSE_VALUE_FIELD_NAME = (
|
||||
|
@ -136,6 +165,10 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
"Percent enrollment in college or graduate school"
|
||||
)
|
||||
|
||||
self.IMPUTED_COLLEGE_ATTENDANCE_FIELD = (
|
||||
"Percent enrollment in college or graduate school, imputed"
|
||||
)
|
||||
|
||||
self.COLLEGE_NON_ATTENDANCE_FIELD = "Percent of population not currently enrolled in college or graduate school"
|
||||
|
||||
self.RE_FIELDS = [
|
||||
|
@ -188,18 +221,50 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
self.MEDIAN_INCOME_FIELD_NAME,
|
||||
self.POVERTY_LESS_THAN_100_PERCENT_FPL_FIELD_NAME,
|
||||
self.POVERTY_LESS_THAN_150_PERCENT_FPL_FIELD_NAME,
|
||||
self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME,
|
||||
self.IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME,
|
||||
self.MEDIAN_HOUSE_VALUE_FIELD_NAME,
|
||||
self.HIGH_SCHOOL_ED_FIELD,
|
||||
self.COLLEGE_ATTENDANCE_FIELD,
|
||||
self.COLLEGE_NON_ATTENDANCE_FIELD,
|
||||
self.IMPUTED_COLLEGE_ATTENDANCE_FIELD,
|
||||
]
|
||||
+ self.RE_OUTPUT_FIELDS
|
||||
+ [self.PERCENT_PREFIX + field for field in self.RE_OUTPUT_FIELDS]
|
||||
+ [
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_FIELD,
|
||||
field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD,
|
||||
]
|
||||
)
|
||||
|
||||
self.df: pd.DataFrame
|
||||
|
||||
def _merge_geojson(
|
||||
self,
|
||||
df: pd.DataFrame,
|
||||
usa_geo_df: gpd.GeoDataFrame,
|
||||
geoid_field: str = "GEOID10",
|
||||
geometry_field: str = "geometry",
|
||||
state_code_field: str = "STATEFP10",
|
||||
county_code_field: str = "COUNTYFP10",
|
||||
) -> gpd.GeoDataFrame:
|
||||
usa_geo_df[geoid_field] = (
|
||||
usa_geo_df[geoid_field].astype(str).str.zfill(11)
|
||||
)
|
||||
return gpd.GeoDataFrame(
|
||||
df.merge(
|
||||
usa_geo_df[
|
||||
[
|
||||
geoid_field,
|
||||
geometry_field,
|
||||
state_code_field,
|
||||
county_code_field,
|
||||
]
|
||||
],
|
||||
left_on=[self.GEOID_TRACT_FIELD_NAME],
|
||||
right_on=[geoid_field],
|
||||
)
|
||||
)
|
||||
|
||||
def extract(self) -> None:
|
||||
# Define the variables to retrieve
|
||||
variables = (
|
||||
|
@ -227,6 +292,27 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
|
||||
df = self.df
|
||||
|
||||
# Here we join the geometry of the US to the dataframe so that we can impute
|
||||
# The income of neighbors. first this looks locally; if there's no local
|
||||
# geojson file for all of the US, this will read it off of S3
|
||||
logger.info("Reading in geojson for the country")
|
||||
if not os.path.exists(
|
||||
self.DATA_PATH / "census" / "geojson" / "us.json"
|
||||
):
|
||||
logger.info("Fetching Census data from AWS S3")
|
||||
unzip_file_from_url(
|
||||
CENSUS_DATA_S3_URL,
|
||||
self.DATA_PATH / "tmp",
|
||||
self.DATA_PATH,
|
||||
)
|
||||
|
||||
geo_df = gpd.read_file(
|
||||
self.DATA_PATH / "census" / "geojson" / "us.json"
|
||||
)
|
||||
df = self._merge_geojson(
|
||||
df=df,
|
||||
usa_geo_df=geo_df,
|
||||
)
|
||||
# Rename two fields.
|
||||
df = df.rename(
|
||||
columns={
|
||||
|
@ -349,7 +435,7 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
df["B03003_003E"] / df["B03003_001E"]
|
||||
)
|
||||
|
||||
# Calculate college attendance:
|
||||
# Calculate college attendance and adjust low income
|
||||
df[self.COLLEGE_ATTENDANCE_FIELD] = (
|
||||
df[self.COLLEGE_ATTENDANCE_MALE_ENROLLED_PUBLIC]
|
||||
+ df[self.COLLEGE_ATTENDANCE_MALE_ENROLLED_PRIVATE]
|
||||
|
@ -361,22 +447,64 @@ class CensusACSETL(ExtractTransformLoad):
|
|||
1 - df[self.COLLEGE_ATTENDANCE_FIELD]
|
||||
)
|
||||
|
||||
# strip columns
|
||||
df = df[self.COLUMNS_TO_KEEP]
|
||||
|
||||
# Save results to self.
|
||||
self.df = df
|
||||
|
||||
# rename columns to be used in score
|
||||
rename_fields = {
|
||||
"Percent of individuals < 200% Federal Poverty Line": field_names.POVERTY_LESS_THAN_200_FPL_FIELD,
|
||||
}
|
||||
self.df.rename(
|
||||
columns=rename_fields,
|
||||
inplace=True,
|
||||
errors="raise",
|
||||
# we impute income for both income measures
|
||||
## TODO: Convert to pydantic for clarity
|
||||
logger.info("Imputing income information")
|
||||
ImputeVariables = namedtuple(
|
||||
"ImputeVariables", ["raw_field_name", "imputed_field_name"]
|
||||
)
|
||||
|
||||
df = calculate_income_measures(
|
||||
impute_var_named_tup_list=[
|
||||
ImputeVariables(
|
||||
raw_field_name=self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME,
|
||||
imputed_field_name=self.IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME,
|
||||
),
|
||||
ImputeVariables(
|
||||
raw_field_name=self.COLLEGE_ATTENDANCE_FIELD,
|
||||
imputed_field_name=self.IMPUTED_COLLEGE_ATTENDANCE_FIELD,
|
||||
),
|
||||
],
|
||||
geo_df=df,
|
||||
geoid_field=self.GEOID_TRACT_FIELD_NAME,
|
||||
)
|
||||
|
||||
logger.info("Calculating with imputed values")
|
||||
|
||||
df[
|
||||
self.ADJUSTED_AND_IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME
|
||||
] = (
|
||||
df[self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME].fillna(
|
||||
df[self.IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME]
|
||||
)
|
||||
- df[self.COLLEGE_ATTENDANCE_FIELD].fillna(
|
||||
df[self.IMPUTED_COLLEGE_ATTENDANCE_FIELD]
|
||||
)
|
||||
).clip(
|
||||
lower=0
|
||||
)
|
||||
|
||||
# All values should have a value at this point
|
||||
assert (
|
||||
df[
|
||||
self.ADJUSTED_AND_IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME
|
||||
]
|
||||
.isna()
|
||||
.sum()
|
||||
== 0
|
||||
), "Error: not all values were filled..."
|
||||
|
||||
logger.info("Renaming columns...")
|
||||
df = df.rename(
|
||||
columns={
|
||||
self.ADJUSTED_AND_IMPUTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME: field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD,
|
||||
self.POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME: field_names.POVERTY_LESS_THAN_200_FPL_FIELD,
|
||||
}
|
||||
)
|
||||
|
||||
# Strip columns and save results to self.
|
||||
self.df = df[self.COLUMNS_TO_KEEP]
|
||||
|
||||
def load(self) -> None:
|
||||
logger.info("Saving Census ACS Data")
|
||||
|
||||
|
|
|
@ -0,0 +1,127 @@
|
|||
from typing import List, NamedTuple
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
|
||||
from data_pipeline.utils import get_module_logger
|
||||
|
||||
logger = get_module_logger(__name__)
|
||||
|
||||
|
||||
def _get_fips_mask(
|
||||
geo_df: gpd.GeoDataFrame,
|
||||
row: gpd.GeoSeries,
|
||||
fips_digits: int,
|
||||
geoid_field: str = "GEOID10_TRACT",
|
||||
) -> pd.Series:
|
||||
return (
|
||||
geo_df[geoid_field].str[:fips_digits] == row[geoid_field][:fips_digits]
|
||||
)
|
||||
|
||||
|
||||
def _get_neighbor_mask(
|
||||
geo_df: gpd.GeoDataFrame, row: gpd.GeoSeries
|
||||
) -> pd.Series:
|
||||
return geo_df["geometry"].touches(row["geometry"])
|
||||
|
||||
|
||||
def _choose_best_mask(
|
||||
geo_df: gpd.GeoDataFrame,
|
||||
masks_in_priority_order: List[pd.Series],
|
||||
column_to_impute: str,
|
||||
) -> pd.Series:
|
||||
for mask in masks_in_priority_order:
|
||||
if any(geo_df[mask][column_to_impute].notna()):
|
||||
return mask
|
||||
raise Exception("No mask found")
|
||||
|
||||
|
||||
def _prepare_dataframe_for_imputation(
|
||||
impute_var_named_tup_list: List[NamedTuple],
|
||||
geo_df: gpd.GeoDataFrame,
|
||||
geoid_field: str = "GEOID10_TRACT",
|
||||
) -> tuple[list, gpd.GeoDataFrame]:
|
||||
imputing_cols = [
|
||||
impute_var_pair.raw_field_name
|
||||
for impute_var_pair in impute_var_named_tup_list
|
||||
]
|
||||
|
||||
# prime column to exist
|
||||
for impute_var_pair in impute_var_named_tup_list:
|
||||
geo_df[impute_var_pair.imputed_field_name] = geo_df[
|
||||
impute_var_pair.raw_field_name
|
||||
].copy()
|
||||
|
||||
# generate a list of tracts for which at least one of the imputation
|
||||
# columns is null
|
||||
tract_list = geo_df[geo_df[imputing_cols].isna().any(axis=1)][
|
||||
geoid_field
|
||||
].unique()
|
||||
|
||||
# Check that imputation is a valid choice for this set of fields
|
||||
logger.info(f"Imputing values for {len(tract_list)} unique tracts.")
|
||||
assert len(tract_list) > 0, "Error: No missing values to impute"
|
||||
|
||||
return tract_list, geo_df
|
||||
|
||||
|
||||
def calculate_income_measures(
|
||||
impute_var_named_tup_list: list,
|
||||
geo_df: gpd.GeoDataFrame,
|
||||
geoid_field: str,
|
||||
) -> pd.DataFrame:
|
||||
"""Impute values based on geographic neighbors
|
||||
|
||||
We only want to check neighbors a single time, so all variables
|
||||
that we impute get imputed here.
|
||||
|
||||
Takes in:
|
||||
required:
|
||||
impute_var_named_tup_list: list of named tuples (imputed field, raw field)
|
||||
geo_df: geo dataframe that already has the census shapefiles merged
|
||||
geoid field: tract level ID
|
||||
|
||||
Returns: non-geometry pd.DataFrame
|
||||
"""
|
||||
# Determine where to impute variables and fill a column with nulls
|
||||
tract_list, geo_df = _prepare_dataframe_for_imputation(
|
||||
impute_var_named_tup_list=impute_var_named_tup_list,
|
||||
geo_df=geo_df,
|
||||
geoid_field=geoid_field,
|
||||
)
|
||||
|
||||
# Iterate through the dataframe to impute in place
|
||||
for index, row in geo_df.iterrows():
|
||||
if row[geoid_field] in tract_list:
|
||||
neighbor_mask = _get_neighbor_mask(geo_df, row)
|
||||
county_mask = _get_fips_mask(
|
||||
geo_df=geo_df, row=row, fips_digits=5, geoid_field=geoid_field
|
||||
)
|
||||
state_mask = _get_fips_mask(
|
||||
geo_df=geo_df, row=row, fips_digits=2, geoid_field=geoid_field
|
||||
)
|
||||
|
||||
# Impute fields for every row missing at least one value using the best possible set of neighbors
|
||||
# Note that later, we will pull raw.fillna(imputed), so the mechanics of this step aren't critical
|
||||
for impute_var_pair in impute_var_named_tup_list:
|
||||
mask_to_use = _choose_best_mask(
|
||||
geo_df=geo_df,
|
||||
masks_in_priority_order=[
|
||||
neighbor_mask,
|
||||
county_mask,
|
||||
state_mask,
|
||||
],
|
||||
column_to_impute=impute_var_pair.raw_field_name,
|
||||
)
|
||||
geo_df.loc[index, impute_var_pair.imputed_field_name] = geo_df[
|
||||
mask_to_use
|
||||
][impute_var_pair.raw_field_name].mean()
|
||||
|
||||
logger.info("Casting geodataframe as a typical dataframe")
|
||||
# get rid of the geometry column and cast as a typical df
|
||||
df = pd.DataFrame(
|
||||
geo_df[[col for col in geo_df.columns if col != "geometry"]]
|
||||
)
|
||||
|
||||
# finally, return the df
|
||||
return df
|
|
@ -4,6 +4,7 @@ from typing import List
|
|||
import censusdata
|
||||
import pandas as pd
|
||||
|
||||
|
||||
from data_pipeline.etl.sources.census.etl_utils import get_state_fips_codes
|
||||
from data_pipeline.utils import get_module_logger
|
||||
|
||||
|
|
|
@ -56,6 +56,19 @@ M_HEALTH = "Health Factor (Definition M)"
|
|||
M_WORKFORCE = "Workforce Factor (Definition M)"
|
||||
M_NON_WORKFORCE = "Any Non-Workforce Factor (Definition M)"
|
||||
|
||||
# Definition Narwhal fields
|
||||
SCORE_N = "Definition N"
|
||||
SCORE_N_COMMUNITIES = "Definition N (communities)"
|
||||
N_CLIMATE = "Climate Factor (Definition N)"
|
||||
N_ENERGY = "Energy Factor (Definition N)"
|
||||
N_TRANSPORTATION = "Transportation Factor (Definition N)"
|
||||
N_HOUSING = "Housing Factor (Definition N)"
|
||||
N_POLLUTION = "Pollution Factor (Definition N)"
|
||||
N_WATER = "Water Factor (Definition N)"
|
||||
N_HEALTH = "Health Factor (Definition N)"
|
||||
N_WORKFORCE = "Workforce Factor (Definition N)"
|
||||
N_NON_WORKFORCE = "Any Non-Workforce Factor (Definition N)"
|
||||
|
||||
PERCENTILE = 90
|
||||
MEDIAN_HOUSE_VALUE_PERCENTILE = 90
|
||||
|
||||
|
@ -93,9 +106,19 @@ HEALTH_SOCIO_INDICATORS_EXCEEDED = (
|
|||
|
||||
# Poverty / Income
|
||||
POVERTY_FIELD = "Poverty (Less than 200% of federal poverty line)"
|
||||
|
||||
# this is the raw, unadjusted variable
|
||||
POVERTY_LESS_THAN_200_FPL_FIELD = (
|
||||
"Percent of individuals below 200% Federal Poverty Line"
|
||||
)
|
||||
|
||||
# this is for use in the donuts
|
||||
ADJUSTED_POVERTY_LESS_THAN_200_PERCENT_FPL_FIELD_NAME = (
|
||||
"Adjusted percent of individuals < 200% Federal Poverty Line"
|
||||
)
|
||||
|
||||
# this is what gets used in the score
|
||||
POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD = "Percent of individuals below 200% Federal Poverty Line, imputed and adjusted"
|
||||
POVERTY_LESS_THAN_150_FPL_FIELD = (
|
||||
"Percent of individuals < 150% Federal Poverty Line"
|
||||
)
|
||||
|
@ -412,6 +435,7 @@ SCORE_M_LOW_INCOME_SUFFIX = (
|
|||
", is low income, and has a low percent of higher ed students"
|
||||
)
|
||||
|
||||
|
||||
COLLEGE_ATTENDANCE_LESS_THAN_20_FIELD = (
|
||||
"Percent higher ed enrollment rate is less than 20%"
|
||||
)
|
||||
|
@ -651,6 +675,7 @@ THRESHOLD_COUNT = "Total threshold criteria exceeded"
|
|||
CATEGORY_COUNT = "Total categories exceeded"
|
||||
|
||||
FPL_200_SERIES = "Is low income?"
|
||||
FPL_200_SERIES_IMPUTED_AND_ADJUSTED = "Is low income (imputed and adjusted)?"
|
||||
FPL_200_AND_COLLEGE_ATTENDANCE_SERIES = (
|
||||
"Is low income and has a low percent of higher ed students?"
|
||||
)
|
||||
|
|
808
data/data-pipeline/data_pipeline/score/score_narwhal.py
Normal file
808
data/data-pipeline/data_pipeline/score/score_narwhal.py
Normal file
|
@ -0,0 +1,808 @@
|
|||
from typing import Tuple
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from data_pipeline.score.score import Score
|
||||
import data_pipeline.score.field_names as field_names
|
||||
from data_pipeline.utils import get_module_logger
|
||||
import data_pipeline.etl.score.constants as constants
|
||||
|
||||
logger = get_module_logger(__name__)
|
||||
|
||||
|
||||
class ScoreNarwhal(Score):
|
||||
"""Very similar to Score M, at present."""
|
||||
|
||||
def __init__(self, df: pd.DataFrame) -> None:
|
||||
self.LOW_INCOME_THRESHOLD: float = 0.65
|
||||
self.MAX_COLLEGE_ATTENDANCE_THRESHOLD: float = 0.20
|
||||
self.ENVIRONMENTAL_BURDEN_THRESHOLD: float = 0.90
|
||||
self.MEDIAN_HOUSE_VALUE_THRESHOLD: float = 0.90
|
||||
self.LACK_OF_HIGH_SCHOOL_MINIMUM_THRESHOLD: float = 0.10
|
||||
|
||||
super().__init__(df)
|
||||
|
||||
def _combine_island_areas_with_states_and_set_thresholds(
|
||||
self,
|
||||
df: pd.DataFrame,
|
||||
column_from_island_areas: str,
|
||||
column_from_decennial_census: str,
|
||||
combined_column_name: str,
|
||||
threshold_cutoff_for_island_areas: float,
|
||||
) -> Tuple[pd.DataFrame, str]:
|
||||
"""Steps to set thresholds for island areas.
|
||||
|
||||
This function is fairly logically complicated. It takes the following steps:
|
||||
|
||||
1. Combine the two different fields into a single field.
|
||||
2. Calculate the 90th percentile for the combined field.
|
||||
3. Create a boolean series that is true for any census tract in the island
|
||||
areas (and only the island areas) that exceeds this percentile.
|
||||
|
||||
For step one, it combines data that is either the island area's Decennial Census
|
||||
value in 2009 or the state's value in 5-year ACS ending in 2010.
|
||||
|
||||
This will be used to generate the percentile cutoff for the 90th percentile.
|
||||
|
||||
The stateside decennial census stopped asking economic comparisons,
|
||||
so this is as close to apples-to-apples as we get. We use 5-year ACS for data
|
||||
robustness over 1-year ACS.
|
||||
"""
|
||||
# Create the combined field.
|
||||
# TODO: move this combined field percentile calculation to `etl_score`,
|
||||
# since most other percentile logic is there.
|
||||
# There should only be one entry in either 2009 or 2019 fields, not one in both.
|
||||
# But just to be safe, we take the mean and ignore null values so if there
|
||||
# *were* entries in both, this result would make sense.
|
||||
df[combined_column_name] = df[
|
||||
[column_from_island_areas, column_from_decennial_census]
|
||||
].mean(axis=1, skipna=True)
|
||||
|
||||
# Create a percentile field for use in the Islands / PR visualization
|
||||
# TODO: move this code
|
||||
# In the code below, percentiles are constructed based on the combined column
|
||||
# of census and island data, but only reported for the island areas (where there
|
||||
# is no other comprehensive percentile information)
|
||||
return_series_name = (
|
||||
column_from_island_areas
|
||||
+ field_names.ISLAND_AREAS_PERCENTILE_ADJUSTMENT_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX
|
||||
)
|
||||
df[return_series_name] = np.where(
|
||||
df[column_from_decennial_census].isna(),
|
||||
df[combined_column_name].rank(pct=True),
|
||||
np.nan,
|
||||
)
|
||||
|
||||
threshold_column_name = (
|
||||
f"{column_from_island_areas} exceeds "
|
||||
f"{threshold_cutoff_for_island_areas*100:.0f}th percentile"
|
||||
)
|
||||
|
||||
df[threshold_column_name] = (
|
||||
df[return_series_name] >= threshold_cutoff_for_island_areas
|
||||
)
|
||||
|
||||
return df, threshold_column_name
|
||||
|
||||
def _increment_total_eligibility_exceeded(
|
||||
self, columns_for_subset: list, skip_fips: tuple = ()
|
||||
) -> None:
|
||||
"""
|
||||
Increments the total eligible factors for a given tract
|
||||
|
||||
The new skip_fips argument specifies which (if any) fips codes to
|
||||
skip over for incrementing.
|
||||
This allows us to essentially skip data we think is of limited veracity,
|
||||
without overriding any values in the data.
|
||||
THIS IS A TEMPORARY FIX.
|
||||
"""
|
||||
if skip_fips:
|
||||
self.df[field_names.THRESHOLD_COUNT] += np.where(
|
||||
self.df[field_names.GEOID_TRACT_FIELD].str.startswith(
|
||||
skip_fips
|
||||
),
|
||||
0,
|
||||
self.df[columns_for_subset].sum(axis=1, skipna=True),
|
||||
)
|
||||
else:
|
||||
self.df[field_names.THRESHOLD_COUNT] += self.df[
|
||||
columns_for_subset
|
||||
].sum(axis=1, skipna=True)
|
||||
|
||||
def _climate_factor(self) -> bool:
|
||||
# In Xth percentile or above for FEMA’s Risk Index (Source: FEMA
|
||||
# AND
|
||||
# Low income: In Nth percentile or above for percent of block group population
|
||||
# of households where household income is less than or equal to twice the federal
|
||||
# poverty level and there is low higher ed attendance
|
||||
# Source: Census's American Community Survey
|
||||
|
||||
climate_eligibility_columns = [
|
||||
field_names.EXPECTED_POPULATION_LOSS_RATE_LOW_INCOME_FIELD,
|
||||
field_names.EXPECTED_AGRICULTURE_LOSS_RATE_LOW_INCOME_FIELD,
|
||||
field_names.EXPECTED_BUILDING_LOSS_RATE_LOW_INCOME_FIELD,
|
||||
]
|
||||
|
||||
self.df[
|
||||
field_names.EXPECTED_POPULATION_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_POPULATION_LOSS_RATE_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX
|
||||
]
|
||||
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
|
||||
)
|
||||
|
||||
self.df[
|
||||
field_names.EXPECTED_AGRICULTURAL_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_AGRICULTURE_LOSS_RATE_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX
|
||||
]
|
||||
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
|
||||
)
|
||||
|
||||
self.df[field_names.EXPECTED_BUILDING_LOSS_EXCEEDS_PCTILE_THRESHOLD] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_BUILDING_LOSS_RATE_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX
|
||||
]
|
||||
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
|
||||
)
|
||||
|
||||
self.df[field_names.CLIMATE_THRESHOLD_EXCEEDED] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_POPULATION_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
]
|
||||
| self.df[
|
||||
field_names.EXPECTED_AGRICULTURAL_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
]
|
||||
| self.df[
|
||||
field_names.EXPECTED_BUILDING_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
]
|
||||
)
|
||||
|
||||
self.df[field_names.EXPECTED_POPULATION_LOSS_RATE_LOW_INCOME_FIELD] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_POPULATION_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
]
|
||||
& self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
|
||||
)
|
||||
|
||||
self.df[field_names.EXPECTED_AGRICULTURE_LOSS_RATE_LOW_INCOME_FIELD] = (
|
||||
self.df[
|
||||
field_names.EXPECTED_AGRICULTURAL_LOSS_EXCEEDS_PCTILE_THRESHOLD
|
||||
]
|
||||
& self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
|
||||
)
|
||||
|
||||
self.df[field_names.EXPECTED_BUILDING_LOSS_RATE_LOW_INCOME_FIELD] = (
|
||||
self.df[field_names.EXPECTED_BUILDING_LOSS_EXCEEDS_PCTILE_THRESHOLD]
|
||||
& self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
|
||||
)
|
||||
|
||||
self._increment_total_eligibility_exceeded(
|
||||
climate_eligibility_columns,
|
||||
skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
|
||||
)
|
||||
|
||||
return self.df[climate_eligibility_columns].any(axis="columns")
|
||||
|
||||
def _energy_factor(self) -> bool:
|
||||
# In Xth percentile or above for DOE’s energy cost burden score (Source: LEAD Score)
|
||||
# AND
|
||||
# Low income: In Nth percentile or above for percent of block group population
|
||||
# of households where household income is less than or equal to twice the federal
|
||||
# poverty level and has low higher ed attendance.
|
||||
# Source: Census's American Community Survey
|
||||
|
||||
energy_eligibility_columns = [
|
||||
field_names.PM25_EXPOSURE_LOW_INCOME_FIELD,
|
||||
field_names.ENERGY_BURDEN_LOW_INCOME_FIELD,
|
||||
]
|
||||
|
||||
self.df[field_names.ENERGY_BURDEN_EXCEEDS_PCTILE_THRESHOLD] = (
|
||||
self.df[
|
||||
field_names.ENERGY_BURDEN_FIELD
|
||||
+ field_names.PERCENTILE_FIELD_SUFFIX
|
||||
]
|
||||
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
|
||||
)
|
||||
|
||||
self.df[field_names.PM25_EXCEEDS_PCTILE_THRESHOLD] = (
|
||||
self.df[
|
||||
field_names.PM25_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
|
||||
]
|
||||
>= self.ENVIRONMENTAL_BURDEN_THRESHOLD
|
||||
)
|
||||
|
||||
self.df[field_names.ENERGY_THRESHOLD_EXCEEDED] = (
|
||||
self.df[field_names.ENERGY_BURDEN_EXCEEDS_PCTILE_THRESHOLD]
|
||||
| self.df[field_names.PM25_EXCEEDS_PCTILE_THRESHOLD]
|
||||
)
|
||||
|
||||
self.df[field_names.PM25_EXPOSURE_LOW_INCOME_FIELD] = (
|
||||
self.df[field_names.PM25_EXCEEDS_PCTILE_THRESHOLD]
|
||||
& self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
|
||||
)
|
||||
|
||||
self.df[field_names.ENERGY_BURDEN_LOW_INCOME_FIELD] = (
|
||||
self.df[field_names.ENERGY_BURDEN_EXCEEDS_PCTILE_THRESHOLD]
|
||||
& self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
|
||||
)
|
||||
|
||||
self._increment_total_eligibility_exceeded(
|
||||
energy_eligibility_columns,
|
||||
skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
|
||||
)
|
||||
|
||||
return self.df[energy_eligibility_columns].any(axis="columns")
|
||||
|
||||
def _transportation_factor(self) -> bool:
    """Flag tracts meeting the transportation burden factor.

    A tract qualifies when it exceeds the environmental-burden percentile
    threshold for diesel particulate matter OR traffic proximity/volume,
    AND is low income (imputed/adjusted 200% FPL series).

    Side effects: writes intermediate threshold and combined indicator
    columns onto ``self.df`` and increments the running eligibility count.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # In Xth percentile or above for diesel particulate matter (Source: EPA National Air Toxics Assessment (NATA)
    # or
    # In Xth percentile or above for PM 2.5 (Source: EPA, Office of Air and Radiation (OAR) fusion of model and monitor data)]
    # or
    # In Xth percentile or above traffic proximity and volume (Source: 2017 U.S. Department of Transportation (DOT) traffic data
    # AND
    # Low income: In Nth percentile or above for percent of block group population
    # of households where household income is less than or equal to twice the federal
    # poverty level and has a low percent of higher ed students.
    # Source: Census's American Community Survey

    # NOTE(review): "transportion" is a pre-existing typo in this local
    # variable name; retained to keep this a documentation-only change.
    transportion_eligibility_columns = [
        field_names.DIESEL_PARTICULATE_MATTER_LOW_INCOME_FIELD,
        field_names.TRAFFIC_PROXIMITY_LOW_INCOME_FIELD,
    ]

    # Individual percentile-threshold booleans.
    self.df[field_names.DIESEL_EXCEEDS_PCTILE_THRESHOLD] = (
        self.df[
            field_names.DIESEL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.TRAFFIC_PROXIMITY_PCTILE_THRESHOLD] = (
        self.df[
            field_names.TRAFFIC_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # Combined "any burden threshold exceeded" indicator (for the front end).
    self.df[field_names.TRAFFIC_THRESHOLD_EXCEEDED] = (
        self.df[field_names.TRAFFIC_PROXIMITY_PCTILE_THRESHOLD]
        | self.df[field_names.DIESEL_EXCEEDS_PCTILE_THRESHOLD]
    )

    # Burden AND low-income, per indicator.
    self.df[field_names.DIESEL_PARTICULATE_MATTER_LOW_INCOME_FIELD] = (
        self.df[field_names.DIESEL_EXCEEDS_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self.df[field_names.TRAFFIC_PROXIMITY_LOW_INCOME_FIELD] = (
        self.df[field_names.TRAFFIC_PROXIMITY_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self._increment_total_eligibility_exceeded(
        transportion_eligibility_columns,
        skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
    )

    return self.df[transportion_eligibility_columns].any(axis="columns")
def _housing_factor(self) -> bool:
    """Flag tracts meeting the housing burden factor.

    A tract qualifies when it meets the lead-paint proxy (high pre-1960
    housing percentile AND low median house value percentile) OR exceeds
    the housing cost burden percentile threshold, AND is low income
    (imputed/adjusted 200% FPL series).

    Side effects: writes intermediate threshold and combined indicator
    columns onto ``self.df`` and increments the running eligibility count.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # (
    # In Xth percentile or above for lead paint (Source: Census's American Community Survey’s
    # percent of housing units built pre-1960, used as an indicator of potential lead paint exposure in homes)
    # AND
    # In Yth percentile or below for Median House Value (Source: Census's American Community Survey)
    # )
    # or
    # In Xth percentile or above for housing cost burden (Source: HUD's Comprehensive Housing Affordability Strategy dataset
    # AND
    # Low income: In Nth percentile or above for percent of block group population
    # of households where household income is less than or equal to twice the federal
    # poverty level and has a low percent of higher ed students.
    # Source: Census's American Community Survey

    housing_eligibility_columns = [
        field_names.LEAD_PAINT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD,
        field_names.HOUSING_BURDEN_LOW_INCOME_FIELD,
    ]

    # Lead-paint proxy: high lead-paint percentile AND house value at or
    # BELOW the value cutoff (note the <= direction, unlike other thresholds).
    self.df[field_names.LEAD_PAINT_PROXY_PCTILE_THRESHOLD] = (
        self.df[
            field_names.LEAD_PAINT_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    ) & (
        self.df[
            field_names.MEDIAN_HOUSE_VALUE_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        <= self.MEDIAN_HOUSE_VALUE_THRESHOLD
    )

    self.df[field_names.HOUSING_BURDEN_PCTILE_THRESHOLD] = (
        self.df[
            field_names.HOUSING_BURDEN_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # NOTE(review): "THREHSOLD" is a pre-existing typo in the field-name
    # constant itself; it must match field_names and is retained here.
    self.df[field_names.HOUSING_THREHSOLD_EXCEEDED] = (
        self.df[field_names.LEAD_PAINT_PROXY_PCTILE_THRESHOLD]
        | self.df[field_names.HOUSING_BURDEN_PCTILE_THRESHOLD]
    )

    # series by series indicators
    self.df[field_names.LEAD_PAINT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD] = (
        self.df[field_names.LEAD_PAINT_PROXY_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self.df[field_names.HOUSING_BURDEN_LOW_INCOME_FIELD] = (
        self.df[field_names.HOUSING_BURDEN_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self._increment_total_eligibility_exceeded(
        housing_eligibility_columns,
        skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
    )

    return self.df[housing_eligibility_columns].any(axis="columns")
def _pollution_factor(self) -> bool:
    """Flag tracts meeting the legacy pollution burden factor.

    A tract qualifies when it exceeds the environmental-burden percentile
    threshold for RMP site proximity, NPL (Superfund) site proximity, or
    TSDF (hazardous waste) proximity, AND is low income (imputed/adjusted
    200% FPL series).

    Side effects: writes intermediate threshold and combined indicator
    columns onto ``self.df`` and increments the running eligibility count.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # Proximity to Risk Management Plan sites is > X
    # AND
    # Low income: In Nth percentile or above for percent of block group population
    # of households where household income is less than or equal to twice the federal
    # poverty level and has a low percent of higher ed students.
    # Source: Census's American Community Survey

    pollution_eligibility_columns = [
        field_names.RMP_LOW_INCOME_FIELD,
        field_names.SUPERFUND_LOW_INCOME_FIELD,
        field_names.HAZARDOUS_WASTE_LOW_INCOME_FIELD,
    ]

    # Individual percentile-threshold booleans.
    self.df[field_names.RMP_PCTILE_THRESHOLD] = (
        self.df[field_names.RMP_FIELD + field_names.PERCENTILE_FIELD_SUFFIX]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.NPL_PCTILE_THRESHOLD] = (
        self.df[field_names.NPL_FIELD + field_names.PERCENTILE_FIELD_SUFFIX]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.TSDF_PCTILE_THRESHOLD] = (
        self.df[
            field_names.TSDF_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # Combined "any burden threshold exceeded" indicator (for the front end).
    self.df[field_names.POLLUTION_THRESHOLD_EXCEEDED] = (
        self.df[field_names.RMP_PCTILE_THRESHOLD]
        | self.df[field_names.NPL_PCTILE_THRESHOLD]
    ) | self.df[field_names.TSDF_PCTILE_THRESHOLD]

    # individual series-by-series
    self.df[field_names.RMP_LOW_INCOME_FIELD] = (
        self.df[field_names.RMP_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )
    self.df[field_names.SUPERFUND_LOW_INCOME_FIELD] = (
        self.df[field_names.NPL_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )
    self.df[field_names.HAZARDOUS_WASTE_LOW_INCOME_FIELD] = (
        self.df[field_names.TSDF_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self._increment_total_eligibility_exceeded(
        pollution_eligibility_columns,
        skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
    )

    return self.df[pollution_eligibility_columns].any(axis="columns")
def _water_factor(self) -> bool:
    """Flag tracts meeting the water burden factor.

    A tract qualifies when it exceeds the environmental-burden percentile
    threshold for wastewater discharge AND is low income (imputed/adjusted
    200% FPL series). Unlike the sibling factors this has a single
    indicator, so the result column itself is returned directly rather
    than an ``.any(axis="columns")`` over a column list.

    Side effects: writes intermediate threshold and combined indicator
    columns onto ``self.df`` and increments the running eligibility count.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # In Xth percentile or above for wastewater discharge (Source: EPA Risk-Screening Environmental Indicators (RSEI) Model)
    # AND
    # Low income: In Nth percentile or above for percent of block group population
    # of households where household income is less than or equal to twice the federal
    # poverty level and has a low percent of higher ed students
    # Source: Census's American Community Survey

    self.df[field_names.WASTEWATER_PCTILE_THRESHOLD] = (
        self.df[
            field_names.WASTEWATER_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # Straight copy here in case we add additional water fields.
    self.df[field_names.WATER_THRESHOLD_EXCEEDED] = self.df[
        field_names.WASTEWATER_PCTILE_THRESHOLD
    ].copy()

    self.df[field_names.WASTEWATER_DISCHARGE_LOW_INCOME_FIELD] = (
        self.df[field_names.WASTEWATER_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self._increment_total_eligibility_exceeded(
        [field_names.WASTEWATER_DISCHARGE_LOW_INCOME_FIELD],
        skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
    )

    return self.df[field_names.WASTEWATER_DISCHARGE_LOW_INCOME_FIELD]
def _health_factor(self) -> bool:
    """Flag tracts meeting the health burden factor.

    A tract qualifies when it exceeds the environmental-burden percentile
    threshold for diabetes, asthma, heart disease, or low life expectancy,
    AND is low income (imputed/adjusted 200% FPL series).

    Side effects: writes intermediate threshold and combined indicator
    columns onto ``self.df`` and increments the running eligibility count.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # In Xth percentile or above for diabetes (Source: CDC Places)
    # or
    # In Xth percentile or above for asthma (Source: CDC Places)
    # or
    # In Xth percentile or above for heart disease
    # or
    # In Xth percentile or above for low life expectancy (Source: CDC Places)
    # AND
    # Low income: In Nth percentile or above for percent of block group population
    # of households where household income is less than or equal to twice the federal
    # poverty level and has a low percent of higher ed students
    # Source: Census's American Community Survey

    health_eligibility_columns = [
        field_names.DIABETES_LOW_INCOME_FIELD,
        field_names.ASTHMA_LOW_INCOME_FIELD,
        field_names.HEART_DISEASE_LOW_INCOME_FIELD,
        field_names.LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD,
    ]

    # Individual percentile-threshold booleans.
    self.df[field_names.DIABETES_PCTILE_THRESHOLD] = (
        self.df[
            field_names.DIABETES_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.ASTHMA_PCTILE_THRESHOLD] = (
        self.df[
            field_names.ASTHMA_FIELD + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.HEART_DISEASE_PCTILE_THRESHOLD] = (
        self.df[
            field_names.HEART_DISEASE_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.LOW_LIFE_EXPECTANCY_PCTILE_THRESHOLD] = (
        self.df[
            field_names.LOW_LIFE_EXPECTANCY_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # Combined "any burden threshold exceeded" indicator (for the front end).
    self.df[field_names.HEALTH_THRESHOLD_EXCEEDED] = (
        (
            self.df[field_names.DIABETES_PCTILE_THRESHOLD]
            | self.df[field_names.ASTHMA_PCTILE_THRESHOLD]
        )
        | self.df[field_names.HEART_DISEASE_PCTILE_THRESHOLD]
    ) | self.df[field_names.LOW_LIFE_EXPECTANCY_PCTILE_THRESHOLD]

    # Burden AND low-income, per indicator.
    self.df[field_names.DIABETES_LOW_INCOME_FIELD] = (
        self.df[field_names.DIABETES_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )
    self.df[field_names.ASTHMA_LOW_INCOME_FIELD] = (
        self.df[field_names.ASTHMA_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )
    self.df[field_names.HEART_DISEASE_LOW_INCOME_FIELD] = (
        self.df[field_names.HEART_DISEASE_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )
    self.df[field_names.LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD] = (
        self.df[field_names.LOW_LIFE_EXPECTANCY_PCTILE_THRESHOLD]
        & self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED]
    )

    self._increment_total_eligibility_exceeded(
        health_eligibility_columns,
        skip_fips=constants.DROP_FIPS_FROM_NON_WTD_THRESHOLDS,
    )

    return self.df[health_eligibility_columns].any(axis="columns")
def _workforce_factor(self) -> bool:
    """Flag tracts meeting the workforce development factor.

    Two parallel computations, because island territories use decennial
    census data instead of ACS:

    * States: unemployment, low median income, poverty (<100% FPL), or
      linguistic isolation above the burden percentile, AND low high-school
      attainment.
    * Island areas: unemployment, poverty, or low median income (decennial
      2009 data, combined with states' 2010 data for percentiles), AND low
      high-school attainment.

    A tract qualifies when it meets either the states criteria or the
    island-areas criteria.

    Side effects: writes many intermediate threshold/indicator columns onto
    ``self.df``, increments the running eligibility count for both column
    groups, and logs island-area coverage stats. Note that unlike other
    factors, ``_increment_total_eligibility_exceeded`` is called WITHOUT
    ``skip_fips`` here.

    Returns a boolean pandas Series (one value per tract).
    NOTE(review): the ``-> bool`` annotation is inaccurate — this returns a
    ``pd.Series`` of booleans; kept as-is for consistency with siblings.
    """
    # Where unemployment is above Xth percentile
    # or
    # Where median income as a percent of area median income is above Xth percentile
    # or
    # Where the percent of households at or below 100% of the federal poverty level
    # is above Xth percentile
    # or
    # Where linguistic isolation is above Xth percentile
    # AND
    # Where the high school degree achievement rates for adults 25 years and older
    # is less than Y%
    # AND the higher ed attendance rates are under Z%
    # (necessary to screen out university tracts)

    # Workforce criteria for states fields.
    workforce_eligibility_columns = [
        field_names.UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD,
        field_names.POVERTY_LOW_HS_EDUCATION_FIELD,
        field_names.LINGUISTIC_ISOLATION_LOW_HS_EDUCATION_FIELD,
        field_names.LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD,
    ]

    self.df[field_names.LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.HIGH_SCHOOL_ED_FIELD]
        >= self.LACK_OF_HIGH_SCHOOL_MINIMUM_THRESHOLD
    )
    self.df[field_names.UNEMPLOYMENT_PCTILE_THRESHOLD] = (
        self.df[
            field_names.UNEMPLOYMENT_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.LOW_MEDIAN_INCOME_PCTILE_THRESHOLD] = (
        self.df[
            field_names.LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.LINGUISTIC_ISOLATION_PCTILE_THRESHOLD] = (
        self.df[
            field_names.LINGUISTIC_ISO_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.POVERTY_PCTILE_THRESHOLD] = (
        self.df[
            field_names.POVERTY_LESS_THAN_100_FPL_FIELD
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    # Each socioeconomic burden AND low high-school attainment.
    self.df[field_names.LINGUISTIC_ISOLATION_LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.LINGUISTIC_ISOLATION_PCTILE_THRESHOLD]
        & self.df[field_names.LOW_HS_EDUCATION_FIELD]
    )

    self.df[field_names.POVERTY_LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.POVERTY_PCTILE_THRESHOLD]
        & self.df[field_names.LOW_HS_EDUCATION_FIELD]
    )

    self.df[field_names.LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.LOW_MEDIAN_INCOME_PCTILE_THRESHOLD]
        & self.df[field_names.LOW_HS_EDUCATION_FIELD]
    )

    self.df[field_names.UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.UNEMPLOYMENT_PCTILE_THRESHOLD]
        & self.df[field_names.LOW_HS_EDUCATION_FIELD]
    )

    workforce_combined_criteria_for_states = self.df[
        workforce_eligibility_columns
    ].any(axis="columns")

    self._increment_total_eligibility_exceeded(
        workforce_eligibility_columns
    )

    # Now, calculate workforce criteria for island territories.
    island_areas_workforce_eligibility_columns = [
        field_names.ISLAND_AREAS_UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD,
        field_names.ISLAND_AREAS_POVERTY_LOW_HS_EDUCATION_FIELD,
        field_names.ISLAND_AREAS_LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD,
    ]

    # First, combine unemployment.
    # This will include an adjusted percentile column for the island areas
    # to be used by the front end.
    (
        self.df,
        island_areas_unemployment_criteria_field_name,
    ) = self._combine_island_areas_with_states_and_set_thresholds(
        df=self.df,
        column_from_island_areas=field_names.CENSUS_DECENNIAL_UNEMPLOYMENT_FIELD_2009,
        column_from_decennial_census=field_names.CENSUS_UNEMPLOYMENT_FIELD_2010,
        combined_column_name=field_names.COMBINED_UNEMPLOYMENT_2010,
        threshold_cutoff_for_island_areas=self.ENVIRONMENTAL_BURDEN_THRESHOLD,
    )

    # TODO: Remove this, it's for checking only
    assert (
        island_areas_unemployment_criteria_field_name
        == field_names.ISLAND_UNEMPLOYMENT_PCTILE_THRESHOLD
    ), "Error combining island columns"

    # Next, combine poverty.
    # This will include an adjusted percentile column for the island areas
    # to be used by the front end.
    (
        self.df,
        island_areas_poverty_criteria_field_name,
    ) = self._combine_island_areas_with_states_and_set_thresholds(
        df=self.df,
        column_from_island_areas=field_names.CENSUS_DECENNIAL_POVERTY_LESS_THAN_100_FPL_FIELD_2009,
        column_from_decennial_census=field_names.CENSUS_POVERTY_LESS_THAN_100_FPL_FIELD_2010,
        combined_column_name=field_names.COMBINED_POVERTY_LESS_THAN_100_FPL_FIELD_2010,
        threshold_cutoff_for_island_areas=self.ENVIRONMENTAL_BURDEN_THRESHOLD,
    )

    # TODO: Remove this, it's for checking only
    assert (
        island_areas_poverty_criteria_field_name
        == field_names.ISLAND_POVERTY_PCTILE_THRESHOLD
    ), "Error combining island columns"

    # Also check whether low area median income is 90th percentile or higher
    # within the islands.

    # Note that because the field for low median does not have to be combined,
    # unlike the other fields, we do not need to create a new percentile
    # column. This code should probably be refactored when (TODO) we do the big
    # refactor.
    self.df[field_names.ISLAND_LOW_MEDIAN_INCOME_PCTILE_THRESHOLD] = (
        self.df[
            field_names.LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009
            + field_names.PERCENTILE_FIELD_SUFFIX
        ]
        >= self.ENVIRONMENTAL_BURDEN_THRESHOLD
    )

    self.df[field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD] = (
        self.df[field_names.CENSUS_DECENNIAL_HIGH_SCHOOL_ED_FIELD_2009]
        >= self.LACK_OF_HIGH_SCHOOL_MINIMUM_THRESHOLD
    )

    self.df[
        field_names.ISLAND_AREAS_UNEMPLOYMENT_LOW_HS_EDUCATION_FIELD
    ] = (
        self.df[island_areas_unemployment_criteria_field_name]
        & self.df[field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD]
    )

    self.df[field_names.ISLAND_AREAS_POVERTY_LOW_HS_EDUCATION_FIELD] = (
        self.df[island_areas_poverty_criteria_field_name]
        & self.df[field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD]
    )

    self.df[
        field_names.ISLAND_AREAS_LOW_MEDIAN_INCOME_LOW_HS_EDUCATION_FIELD
    ] = (
        self.df[field_names.ISLAND_LOW_MEDIAN_INCOME_PCTILE_THRESHOLD]
        & self.df[field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD]
    )

    workforce_combined_criteria_for_island_areas = self.df[
        island_areas_workforce_eligibility_columns
    ].any(axis="columns")

    self._increment_total_eligibility_exceeded(
        island_areas_workforce_eligibility_columns
    )

    percent_of_island_tracts_highlighted = (
        100
        * workforce_combined_criteria_for_island_areas.sum()
        # Choosing a random column from island areas to calculate the denominator.
        / self.df[field_names.CENSUS_DECENNIAL_UNEMPLOYMENT_FIELD_2009]
        .notnull()
        .sum()
    )

    logger.info(
        f"For workforce criteria in island areas, "
        f"{workforce_combined_criteria_for_island_areas.sum()} ("
        f"{percent_of_island_tracts_highlighted:.2f}% of tracts that have non-null data "
        f"in the column) have a value of TRUE."
    )

    # Because these criteria are calculated differently for the islands, we also calculate the
    # thresholds to pass to the FE slightly differently

    self.df[field_names.WORKFORCE_THRESHOLD_EXCEEDED] = (
        ## First we calculate for the non-island areas
        (
            (
                self.df[field_names.POVERTY_PCTILE_THRESHOLD]
                | self.df[field_names.LINGUISTIC_ISOLATION_PCTILE_THRESHOLD]
            )
            | self.df[field_names.LOW_MEDIAN_INCOME_PCTILE_THRESHOLD]
        )
        | self.df[field_names.UNEMPLOYMENT_PCTILE_THRESHOLD]
    ) | (
        ## then we calculate just for the island areas
        (
            self.df[field_names.ISLAND_UNEMPLOYMENT_PCTILE_THRESHOLD]
            | self.df[field_names.ISLAND_POVERTY_PCTILE_THRESHOLD]
        )
        | self.df[field_names.ISLAND_LOW_MEDIAN_INCOME_PCTILE_THRESHOLD]
    )

    # Because of the island complications, we also have to separately calculate the threshold for
    # socioeconomic thresholds
    self.df[field_names.WORKFORCE_SOCIO_INDICATORS_EXCEEDED] = (
        self.df[field_names.ISLAND_AREAS_LOW_HS_EDUCATION_FIELD]
        | self.df[field_names.LOW_HS_EDUCATION_FIELD]
    )

    # A tract is included if it meets either the states tract criteria or the
    # island areas tract criteria.
    return (
        workforce_combined_criteria_for_states
        | workforce_combined_criteria_for_island_areas
    )
def add_columns(self) -> pd.DataFrame:
    """Compute all Score M columns and return the enriched DataFrame.

    Builds the shared low-income indicator first (every factor method
    reads it), then evaluates each of the eight burden-category factor
    methods in order, and finally derives the category count and the
    overall community boolean.
    """
    logger.info("Adding Score M")

    # Running count of individual thresholds exceeded; the factor methods
    # increment this as a side effect.
    self.df[field_names.THRESHOLD_COUNT] = 0

    # TODO: move this inside of
    # `_create_low_income_and_low_college_attendance_threshold`
    # and change the return signature of that method.
    # Create a standalone field that captures the college attendance boolean
    # threshold.
    imputed_poverty_percentile = self.df[
        # UPDATE: Pull the imputed poverty statistic
        field_names.POVERTY_LESS_THAN_200_FPL_IMPUTED_FIELD
        + field_names.PERCENTILE_FIELD_SUFFIX
    ]
    self.df[field_names.FPL_200_SERIES_IMPUTED_AND_ADJUSTED] = (
        imputed_poverty_percentile >= self.LOW_INCOME_THRESHOLD
    )

    # Evaluate each burden category in order. Each factor method also
    # writes its intermediate threshold columns onto self.df, so the
    # evaluation order is preserved deliberately.
    factor_methods = [
        (field_names.N_CLIMATE, self._climate_factor),
        (field_names.N_ENERGY, self._energy_factor),
        (field_names.N_TRANSPORTATION, self._transportation_factor),
        (field_names.N_HOUSING, self._housing_factor),
        (field_names.N_POLLUTION, self._pollution_factor),
        (field_names.N_WATER, self._water_factor),
        (field_names.N_HEALTH, self._health_factor),
        (field_names.N_WORKFORCE, self._workforce_factor),
    ]
    for factor_column, factor_method in factor_methods:
        self.df[factor_column] = factor_method()

    factor_columns = [column for column, _ in factor_methods]

    # How many categories a tract qualifies under, and whether it
    # qualifies under at least one.
    self.df[field_names.CATEGORY_COUNT] = self.df[factor_columns].sum(
        axis=1
    )
    self.df[field_names.SCORE_N_COMMUNITIES] = self.df[factor_columns].any(
        axis=1
    )

    return self.df
|
@ -10,6 +10,7 @@ from data_pipeline.score.score_i import ScoreI
|
|||
from data_pipeline.score.score_k import ScoreK
|
||||
from data_pipeline.score.score_l import ScoreL
|
||||
from data_pipeline.score.score_m import ScoreM
|
||||
from data_pipeline.score.score_narwhal import ScoreNarwhal
|
||||
from data_pipeline.score import field_names
|
||||
|
||||
from data_pipeline.utils import get_module_logger
|
||||
|
@ -35,6 +36,7 @@ class ScoreRunner:
|
|||
self.df = ScoreK(df=self.df).add_columns()
|
||||
self.df = ScoreL(df=self.df).add_columns()
|
||||
self.df = ScoreM(df=self.df).add_columns()
|
||||
self.df = ScoreNarwhal(df=self.df).add_columns()
|
||||
|
||||
# TODO do this with each score instead of in a bundle
|
||||
# Create percentiles for these index scores
|
||||
|
|
471
data/data-pipeline/poetry.lock
generated
471
data/data-pipeline/poetry.lock
generated
|
@ -11,7 +11,7 @@ textwrap3 = ">=0.9.2"
|
|||
|
||||
[[package]]
|
||||
name = "appnope"
|
||||
version = "0.1.2"
|
||||
version = "0.1.3"
|
||||
description = "Disable App Nap on macOS >= 10.9"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -50,7 +50,7 @@ tests = ["pytest"]
|
|||
|
||||
[[package]]
|
||||
name = "astroid"
|
||||
version = "2.11.2"
|
||||
version = "2.11.3"
|
||||
description = "An abstract syntax tree for Python with inference support."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -93,11 +93,11 @@ python-versions = "*"
|
|||
|
||||
[[package]]
|
||||
name = "beautifulsoup4"
|
||||
version = "4.10.0"
|
||||
version = "4.11.1"
|
||||
description = "Screen-scraping library"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">3.0.0"
|
||||
python-versions = ">=3.6.0"
|
||||
|
||||
[package.dependencies]
|
||||
soupsieve = ">1.2"
|
||||
|
@ -134,17 +134,20 @@ uvloop = ["uvloop (>=0.15.2)"]
|
|||
|
||||
[[package]]
|
||||
name = "bleach"
|
||||
version = "4.1.0"
|
||||
version = "5.0.0"
|
||||
description = "An easy safelist-based HTML-sanitizing tool."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
packaging = "*"
|
||||
six = ">=1.9.0"
|
||||
webencodings = "*"
|
||||
|
||||
[package.extras]
|
||||
css = ["tinycss2 (>=1.1.0)"]
|
||||
dev = ["pip-tools (==6.5.1)", "pytest (==7.1.1)", "flake8 (==4.0.1)", "tox (==3.24.5)", "sphinx (==4.3.2)", "twine (==4.0.0)", "wheel (==0.37.1)", "hashin (==0.17.0)", "black (==22.3.0)", "mypy (==0.942)"]
|
||||
|
||||
[[package]]
|
||||
name = "censusdata"
|
||||
version = "1.15.post1"
|
||||
|
@ -315,7 +318,7 @@ pipenv = ["pipenv"]
|
|||
|
||||
[[package]]
|
||||
name = "dynaconf"
|
||||
version = "3.1.7"
|
||||
version = "3.1.8"
|
||||
description = "The dynamic configurator for your Python Project"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -326,6 +329,7 @@ all = ["redis", "ruamel.yaml", "configobj", "hvac"]
|
|||
configobj = ["configobj"]
|
||||
ini = ["configobj"]
|
||||
redis = ["redis"]
|
||||
test = ["pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "flake8", "pep8-naming", "flake8-debugger", "flake8-print", "flake8-todo", "radon", "flask (>=0.12)", "django", "python-dotenv", "toml", "codecov", "redis", "hvac", "configobj"]
|
||||
toml = ["toml"]
|
||||
vault = ["hvac"]
|
||||
yaml = ["ruamel.yaml"]
|
||||
|
@ -346,6 +350,17 @@ category = "dev"
|
|||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[[package]]
|
||||
name = "fastjsonschema"
|
||||
version = "2.15.3"
|
||||
description = "Fastest Python implementation of JSON schema"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.extras]
|
||||
devel = ["colorama", "jsonschema", "json-spec", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]
|
||||
|
||||
[[package]]
|
||||
name = "filelock"
|
||||
version = "3.6.0"
|
||||
|
@ -396,19 +411,20 @@ pyflakes = ">=2.3.0,<2.4.0"
|
|||
|
||||
[[package]]
|
||||
name = "fonttools"
|
||||
version = "4.31.2"
|
||||
version = "4.33.3"
|
||||
description = "Tools to manipulate font files"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.extras]
|
||||
all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=14.0.0)", "xattr"]
|
||||
all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "uharfbuzz (>=0.23.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=14.0.0)", "xattr"]
|
||||
graphite = ["lz4 (>=1.7.4.2)"]
|
||||
interpolatable = ["scipy", "munkres"]
|
||||
lxml = ["lxml (>=4.0,<5)"]
|
||||
pathops = ["skia-pathops (>=0.5.0)"]
|
||||
plot = ["matplotlib"]
|
||||
repacker = ["uharfbuzz (>=0.23.0)"]
|
||||
symfont = ["sympy"]
|
||||
type1 = ["xattr"]
|
||||
ufo = ["fs (>=2.2.0,<3)"]
|
||||
|
@ -439,7 +455,7 @@ python-versions = ">=3.5"
|
|||
|
||||
[[package]]
|
||||
name = "importlib-resources"
|
||||
version = "5.6.0"
|
||||
version = "5.7.1"
|
||||
description = "Read resources from Python packages"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -475,7 +491,7 @@ toml = {version = ">=0.10.2", markers = "python_version > \"3.6\""}
|
|||
|
||||
[[package]]
|
||||
name = "ipykernel"
|
||||
version = "6.10.0"
|
||||
version = "6.13.0"
|
||||
description = "IPython Kernel for Jupyter"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -483,17 +499,18 @@ python-versions = ">=3.7"
|
|||
|
||||
[package.dependencies]
|
||||
appnope = {version = "*", markers = "platform_system == \"Darwin\""}
|
||||
debugpy = ">=1.0.0,<2.0"
|
||||
debugpy = ">=1.0"
|
||||
ipython = ">=7.23.1"
|
||||
jupyter-client = "<8.0"
|
||||
matplotlib-inline = ">=0.1.0,<0.2.0"
|
||||
jupyter-client = ">=6.1.12"
|
||||
matplotlib-inline = ">=0.1"
|
||||
nest-asyncio = "*"
|
||||
packaging = "*"
|
||||
psutil = "*"
|
||||
tornado = ">=5.0,<7.0"
|
||||
traitlets = ">=5.1.0,<6.0"
|
||||
tornado = ">=6.1"
|
||||
traitlets = ">=5.1.0"
|
||||
|
||||
[package.extras]
|
||||
test = ["pytest (!=5.3.4)", "pytest-cov", "flaky", "ipyparallel"]
|
||||
test = ["pytest (>=6.0)", "pytest-cov", "flaky", "ipyparallel", "pre-commit", "pytest-timeout"]
|
||||
|
||||
[[package]]
|
||||
name = "ipython"
|
||||
|
@ -641,7 +658,7 @@ qtconsole = "*"
|
|||
|
||||
[[package]]
|
||||
name = "jupyter-client"
|
||||
version = "7.2.1"
|
||||
version = "7.3.0"
|
||||
description = "Jupyter protocol implementation and client libraries"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -722,16 +739,19 @@ test = ["nbformat", "nose", "pip", "requests", "mock"]
|
|||
|
||||
[[package]]
|
||||
name = "jupyter-core"
|
||||
version = "4.9.2"
|
||||
version = "4.10.0"
|
||||
description = "Jupyter core package. A base package on which Jupyter projects rely."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
pywin32 = {version = ">=1.0", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
|
||||
traitlets = "*"
|
||||
|
||||
[package.extras]
|
||||
test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"]
|
||||
|
||||
[[package]]
|
||||
name = "jupyter-highlight-selected-word"
|
||||
version = "0.2.0"
|
||||
|
@ -776,14 +796,11 @@ test = ["jupyter-contrib-core", "nose", "requests", "selenium", "mock"]
|
|||
|
||||
[[package]]
|
||||
name = "jupyterlab-pygments"
|
||||
version = "0.1.2"
|
||||
version = "0.2.2"
|
||||
description = "Pygments theme using JupyterLab CSS variables"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.dependencies]
|
||||
pygments = ">=2.4.1,<3"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[[package]]
|
||||
name = "jupyterlab-widgets"
|
||||
|
@ -863,7 +880,7 @@ tests = ["pytest", "pytz", "simplejson"]
|
|||
|
||||
[[package]]
|
||||
name = "marshmallow-dataclass"
|
||||
version = "8.5.3"
|
||||
version = "8.5.7"
|
||||
description = "Python library to convert dataclasses into marshmallow schemas."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -874,11 +891,11 @@ marshmallow = ">=3.13.0,<4.0"
|
|||
typing-inspect = ">=0.7.1"
|
||||
|
||||
[package.extras]
|
||||
dev = ["marshmallow-enum", "typeguard", "pre-commit (>=1.18,<2.0)", "sphinx", "pytest (>=5.4)", "pytest-mypy-plugins (>=1.2.0)", "typing-extensions (>=3.7.2,<3.8.0)"]
|
||||
dev = ["marshmallow-enum", "typeguard", "pre-commit (>=2.17,<3.0)", "sphinx", "pytest (>=5.4)", "pytest-mypy-plugins (>=1.2.0)", "typing-extensions (>=3.7.2)"]
|
||||
docs = ["sphinx"]
|
||||
enum = ["marshmallow-enum"]
|
||||
lint = ["pre-commit (>=1.18,<2.0)"]
|
||||
tests = ["pytest (>=5.4)", "pytest-mypy-plugins (>=1.2.0)", "typing-extensions (>=3.7.2,<3.8.0)"]
|
||||
lint = ["pre-commit (>=2.17,<3.0)"]
|
||||
tests = ["pytest (>=5.4)", "pytest-mypy-plugins (>=1.2.0)", "typing-extensions (>=3.7.2)"]
|
||||
union = ["typeguard"]
|
||||
|
||||
[[package]]
|
||||
|
@ -991,7 +1008,7 @@ ipython = "*"
|
|||
|
||||
[[package]]
|
||||
name = "nbclient"
|
||||
version = "0.5.13"
|
||||
version = "0.6.0"
|
||||
description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1004,12 +1021,12 @@ nest-asyncio = "*"
|
|||
traitlets = ">=5.0.0"
|
||||
|
||||
[package.extras]
|
||||
sphinx = ["Sphinx (>=1.7)", "sphinx-book-theme", "mock", "moto", "myst-parser"]
|
||||
test = ["ipython (<8.0.0)", "ipykernel", "ipywidgets (<8.0.0)", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "check-manifest", "flake8", "mypy", "xmltodict", "black", "pip (>=18.1)", "wheel (>=0.31.0)", "setuptools (>=38.6.0)", "twine (>=1.11.0)"]
|
||||
sphinx = ["mock", "moto", "myst-parser", "Sphinx (>=1.7)", "sphinx-book-theme"]
|
||||
test = ["black", "check-manifest", "flake8", "ipykernel", "ipython (<8.0.0)", "ipywidgets (<8.0.0)", "mypy", "pip (>=18.1)", "pre-commit", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "setuptools (>=60.0)", "testpath", "twine (>=1.11.0)", "xmltodict"]
|
||||
|
||||
[[package]]
|
||||
name = "nbconvert"
|
||||
version = "6.4.5"
|
||||
version = "6.5.0"
|
||||
description = "Converting Jupyter Notebooks"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1020,45 +1037,46 @@ beautifulsoup4 = "*"
|
|||
bleach = "*"
|
||||
defusedxml = "*"
|
||||
entrypoints = ">=0.2.2"
|
||||
jinja2 = ">=2.4"
|
||||
jupyter-core = "*"
|
||||
jinja2 = ">=3.0"
|
||||
jupyter-core = ">=4.7"
|
||||
jupyterlab-pygments = "*"
|
||||
MarkupSafe = ">=2.0"
|
||||
mistune = ">=0.8.1,<2"
|
||||
nbclient = ">=0.5.0,<0.6.0"
|
||||
nbformat = ">=4.4"
|
||||
nbclient = ">=0.5.0"
|
||||
nbformat = ">=5.1"
|
||||
packaging = "*"
|
||||
pandocfilters = ">=1.4.1"
|
||||
pygments = ">=2.4.1"
|
||||
testpath = "*"
|
||||
tinycss2 = "*"
|
||||
traitlets = ">=5.0"
|
||||
|
||||
[package.extras]
|
||||
all = ["pytest", "pytest-cov", "pytest-dependency", "ipykernel", "ipywidgets (>=7)", "pyppeteer (>=1,<1.1)", "tornado (>=4.0)", "sphinx (>=1.5.1)", "sphinx-rtd-theme", "nbsphinx (>=0.2.12)", "ipython"]
|
||||
all = ["pytest", "pytest-cov", "pytest-dependency", "ipykernel", "ipywidgets (>=7)", "pre-commit", "pyppeteer (>=1,<1.1)", "tornado (>=6.1)", "sphinx (>=1.5.1)", "sphinx-rtd-theme", "nbsphinx (>=0.2.12)", "ipython"]
|
||||
docs = ["sphinx (>=1.5.1)", "sphinx-rtd-theme", "nbsphinx (>=0.2.12)", "ipython"]
|
||||
serve = ["tornado (>=4.0)"]
|
||||
test = ["pytest", "pytest-cov", "pytest-dependency", "ipykernel", "ipywidgets (>=7)", "pyppeteer (>=1,<1.1)"]
|
||||
serve = ["tornado (>=6.1)"]
|
||||
test = ["pytest", "pytest-cov", "pytest-dependency", "ipykernel", "ipywidgets (>=7)", "pre-commit", "pyppeteer (>=1,<1.1)"]
|
||||
webpdf = ["pyppeteer (>=1,<1.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "nbformat"
|
||||
version = "5.2.0"
|
||||
version = "5.3.0"
|
||||
description = "The Jupyter Notebook format"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
jsonschema = ">=2.4,<2.5.0 || >2.5.0"
|
||||
fastjsonschema = "*"
|
||||
jsonschema = ">=2.6"
|
||||
jupyter-core = "*"
|
||||
traitlets = ">=4.1"
|
||||
|
||||
[package.extras]
|
||||
fast = ["fastjsonschema"]
|
||||
test = ["check-manifest", "fastjsonschema", "testpath", "pytest"]
|
||||
test = ["check-manifest", "testpath", "pytest", "pre-commit"]
|
||||
|
||||
[[package]]
|
||||
name = "nest-asyncio"
|
||||
version = "1.5.4"
|
||||
version = "1.5.5"
|
||||
description = "Patch asyncio to allow nested event loops"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1066,11 +1084,11 @@ python-versions = ">=3.5"
|
|||
|
||||
[[package]]
|
||||
name = "notebook"
|
||||
version = "6.4.10"
|
||||
version = "6.4.11"
|
||||
description = "A web-based notebook environment for interactive computing"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
argon2-cffi = "*"
|
||||
|
@ -1092,7 +1110,7 @@ traitlets = ">=4.2.1"
|
|||
[package.extras]
|
||||
docs = ["sphinx", "nbsphinx", "sphinxcontrib-github-alt", "sphinx-rtd-theme", "myst-parser"]
|
||||
json-logging = ["json-logging"]
|
||||
test = ["pytest", "coverage", "requests", "nbval", "selenium", "pytest-cov", "requests-unixsocket"]
|
||||
test = ["pytest", "coverage", "requests", "testpath", "nbval", "selenium", "pytest-cov", "requests-unixsocket"]
|
||||
|
||||
[[package]]
|
||||
name = "numpy"
|
||||
|
@ -1126,7 +1144,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
|
|||
|
||||
[[package]]
|
||||
name = "pandas"
|
||||
version = "1.4.1"
|
||||
version = "1.4.2"
|
||||
description = "Powerful data structures for data analysis, time series, and statistics"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1244,15 +1262,15 @@ python-versions = ">=3.7"
|
|||
|
||||
[[package]]
|
||||
name = "platformdirs"
|
||||
version = "2.5.1"
|
||||
version = "2.5.2"
|
||||
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.extras]
|
||||
docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"]
|
||||
test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"]
|
||||
docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)", "sphinx (>=4)"]
|
||||
test = ["appdirs (==1.4.4)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)", "pytest (>=6)"]
|
||||
|
||||
[[package]]
|
||||
name = "pluggy"
|
||||
|
@ -1268,7 +1286,7 @@ testing = ["pytest", "pytest-benchmark"]
|
|||
|
||||
[[package]]
|
||||
name = "prometheus-client"
|
||||
version = "0.13.1"
|
||||
version = "0.14.1"
|
||||
description = "Python client for the Prometheus monitoring system."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1279,7 +1297,7 @@ twisted = ["twisted"]
|
|||
|
||||
[[package]]
|
||||
name = "prompt-toolkit"
|
||||
version = "3.0.28"
|
||||
version = "3.0.29"
|
||||
description = "Library for building powerful interactive command lines in Python"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1331,6 +1349,21 @@ category = "main"
|
|||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "1.9.0"
|
||||
description = "Data validation and settings management using python 3.6 type hinting"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6.1"
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = ">=3.7.4.3"
|
||||
|
||||
[package.extras]
|
||||
dotenv = ["python-dotenv (>=0.10.4)"]
|
||||
email = ["email-validator (>=1.0.3)"]
|
||||
|
||||
[[package]]
|
||||
name = "pyflakes"
|
||||
version = "2.3.1"
|
||||
|
@ -1341,22 +1374,22 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
|||
|
||||
[[package]]
|
||||
name = "pygments"
|
||||
version = "2.11.2"
|
||||
version = "2.12.0"
|
||||
description = "Pygments is a syntax highlighting package written in Python."
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.5"
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[[package]]
|
||||
name = "pylint"
|
||||
version = "2.13.3"
|
||||
version = "2.13.7"
|
||||
description = "python code static checker"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6.2"
|
||||
|
||||
[package.dependencies]
|
||||
astroid = ">=2.11.2,<=2.12.0-dev0"
|
||||
astroid = ">=2.11.3,<=2.12.0-dev0"
|
||||
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||
dill = ">=0.2"
|
||||
isort = ">=4.2.5,<6"
|
||||
|
@ -1370,7 +1403,7 @@ testutil = ["gitpython (>3)"]
|
|||
|
||||
[[package]]
|
||||
name = "pypandoc"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Thin wrapper for pandoc."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1378,18 +1411,18 @@ python-versions = "^2.7 || ^3.6"
|
|||
|
||||
[[package]]
|
||||
name = "pyparsing"
|
||||
version = "3.0.7"
|
||||
description = "Python parsing module"
|
||||
version = "3.0.8"
|
||||
description = "pyparsing module - Classes and methods to define and execute parsing grammars"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.6.8"
|
||||
|
||||
[package.extras]
|
||||
diagrams = ["jinja2", "railroad-diagrams"]
|
||||
diagrams = ["railroad-diagrams", "jinja2"]
|
||||
|
||||
[[package]]
|
||||
name = "pyproj"
|
||||
version = "3.3.0"
|
||||
version = "3.3.1"
|
||||
description = "Python interface to PROJ (cartographic projections and coordinate transformations library)"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1664,7 +1697,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
|
|||
|
||||
[[package]]
|
||||
name = "soupsieve"
|
||||
version = "2.3.1"
|
||||
version = "2.3.2.post1"
|
||||
description = "A modern CSS selector implementation for Beautiful Soup."
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1697,17 +1730,6 @@ tornado = ">=4"
|
|||
[package.extras]
|
||||
test = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "testpath"
|
||||
version = "0.6.0"
|
||||
description = "Test utilities for code working with files and commands"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">= 3.5"
|
||||
|
||||
[package.extras]
|
||||
test = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "textwrap3"
|
||||
version = "0.9.2"
|
||||
|
@ -1716,6 +1738,21 @@ category = "dev"
|
|||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "tinycss2"
|
||||
version = "1.1.1"
|
||||
description = "A tiny CSS parser"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
webencodings = ">=0.4"
|
||||
|
||||
[package.extras]
|
||||
doc = ["sphinx", "sphinx-rtd-theme"]
|
||||
test = ["pytest", "pytest-cov", "pytest-flake8", "pytest-isort", "coverage"]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.10.2"
|
||||
|
@ -1742,7 +1779,7 @@ python-versions = ">= 3.5"
|
|||
|
||||
[[package]]
|
||||
name = "tox"
|
||||
version = "3.24.5"
|
||||
version = "3.25.0"
|
||||
description = "tox is a generic virtualenv management and test command line tool"
|
||||
category = "dev"
|
||||
optional = false
|
||||
|
@ -1807,7 +1844,7 @@ test = ["pytest"]
|
|||
|
||||
[[package]]
|
||||
name = "types-requests"
|
||||
version = "2.27.15"
|
||||
version = "2.27.22"
|
||||
description = "Typing stubs for requests"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1818,7 +1855,7 @@ types-urllib3 = "<1.27"
|
|||
|
||||
[[package]]
|
||||
name = "types-urllib3"
|
||||
version = "1.26.11"
|
||||
version = "1.26.13"
|
||||
description = "Typing stubs for urllib3"
|
||||
category = "main"
|
||||
optional = false
|
||||
|
@ -1826,11 +1863,11 @@ python-versions = "*"
|
|||
|
||||
[[package]]
|
||||
name = "typing-extensions"
|
||||
version = "4.1.1"
|
||||
description = "Backported and Experimental Type Hints for Python 3.6+"
|
||||
version = "4.2.0"
|
||||
description = "Backported and Experimental Type Hints for Python 3.7+"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[[package]]
|
||||
name = "typing-inspect"
|
||||
|
@ -1870,7 +1907,7 @@ jellyfish = "0.6.1"
|
|||
|
||||
[[package]]
|
||||
name = "virtualenv"
|
||||
version = "20.14.0"
|
||||
version = "20.14.1"
|
||||
description = "Virtual Python Environment builder"
|
||||
category = "dev"
|
||||
optional = false
|
||||
|
@ -1931,20 +1968,20 @@ python-versions = "*"
|
|||
|
||||
[[package]]
|
||||
name = "zipp"
|
||||
version = "3.7.0"
|
||||
version = "3.8.0"
|
||||
description = "Backport of pathlib-compatible object wrapper for zip files"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.extras]
|
||||
docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
|
||||
testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
|
||||
docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"]
|
||||
testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
|
||||
|
||||
[metadata]
|
||||
lock-version = "1.1"
|
||||
python-versions = "^3.8"
|
||||
content-hash = "120a7d23ab8c6bb5f17e226f844627d124e7e3a986d1b7fe72b41ce5b45bbb78"
|
||||
content-hash = "e4462f3e9a5d1cf2449ac9ad0d9ed250a5fda5d03d04e2845e4be3526d943b2b"
|
||||
|
||||
[metadata.files]
|
||||
ansiwrap = [
|
||||
|
@ -1952,8 +1989,8 @@ ansiwrap = [
|
|||
{file = "ansiwrap-0.8.4.zip", hash = "sha256:ca0c740734cde59bf919f8ff2c386f74f9a369818cdc60efe94893d01ea8d9b7"},
|
||||
]
|
||||
appnope = [
|
||||
{file = "appnope-0.1.2-py2.py3-none-any.whl", hash = "sha256:93aa393e9d6c54c5cd570ccadd8edad61ea0c4b9ea7a01409020c9aa019eb442"},
|
||||
{file = "appnope-0.1.2.tar.gz", hash = "sha256:dd83cd4b5b460958838f6eb3000c660b1f9caf2a5b1de4264e941512f603258a"},
|
||||
{file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
|
||||
{file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
|
||||
]
|
||||
argon2-cffi = [
|
||||
{file = "argon2-cffi-21.3.0.tar.gz", hash = "sha256:d384164d944190a7dd7ef22c6aa3ff197da12962bd04b17f64d4e93d934dba5b"},
|
||||
|
@ -1983,8 +2020,8 @@ argon2-cffi-bindings = [
|
|||
{file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"},
|
||||
]
|
||||
astroid = [
|
||||
{file = "astroid-2.11.2-py3-none-any.whl", hash = "sha256:cc8cc0d2d916c42d0a7c476c57550a4557a083081976bf42a73414322a6411d9"},
|
||||
{file = "astroid-2.11.2.tar.gz", hash = "sha256:8d0a30fe6481ce919f56690076eafbb2fb649142a89dc874f1ec0e7a011492d0"},
|
||||
{file = "astroid-2.11.3-py3-none-any.whl", hash = "sha256:f1af57483cd17e963b2eddce8361e00fc593d1520fe19948488e94ff6476bd71"},
|
||||
{file = "astroid-2.11.3.tar.gz", hash = "sha256:4e5ba10571e197785e312966ea5efb2f5783176d4c1a73fa922d474ae2be59f7"},
|
||||
]
|
||||
atomicwrites = [
|
||||
{file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"},
|
||||
|
@ -1999,16 +2036,16 @@ backcall = [
|
|||
{file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
|
||||
]
|
||||
beautifulsoup4 = [
|
||||
{file = "beautifulsoup4-4.10.0-py3-none-any.whl", hash = "sha256:9a315ce70049920ea4572a4055bc4bd700c940521d36fc858205ad4fcde149bf"},
|
||||
{file = "beautifulsoup4-4.10.0.tar.gz", hash = "sha256:c23ad23c521d818955a4151a67d81580319d4bf548d3d49f4223ae041ff98891"},
|
||||
{file = "beautifulsoup4-4.11.1-py3-none-any.whl", hash = "sha256:58d5c3d29f5a36ffeb94f02f0d786cd53014cf9b3b3951d42e0080d8a9498d30"},
|
||||
{file = "beautifulsoup4-4.11.1.tar.gz", hash = "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"},
|
||||
]
|
||||
black = [
|
||||
{file = "black-21.12b0-py3-none-any.whl", hash = "sha256:a615e69ae185e08fdd73e4715e260e2479c861b5740057fde6e8b4e3b7dd589f"},
|
||||
{file = "black-21.12b0.tar.gz", hash = "sha256:77b80f693a569e2e527958459634f18df9b0ba2625ba4e0c2d5da5be42e6f2b3"},
|
||||
]
|
||||
bleach = [
|
||||
{file = "bleach-4.1.0-py2.py3-none-any.whl", hash = "sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994"},
|
||||
{file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
|
||||
{file = "bleach-5.0.0-py3-none-any.whl", hash = "sha256:08a1fe86d253b5c88c92cc3d810fd8048a16d15762e1e5b74d502256e5926aa1"},
|
||||
{file = "bleach-5.0.0.tar.gz", hash = "sha256:c6d6cc054bdc9c83b48b8083e236e5f00f238428666d2ce2e083eaa5fd568565"},
|
||||
]
|
||||
censusdata = [
|
||||
{file = "CensusData-1.15.post1.tar.gz", hash = "sha256:408410b2942e0d2885a18a5b1cff85c283564fe0ae6c8bd65ddccee7e234d4fb"},
|
||||
|
@ -2138,8 +2175,8 @@ dparse = [
|
|||
{file = "dparse-0.5.1.tar.gz", hash = "sha256:a1b5f169102e1c894f9a7d5ccf6f9402a836a5d24be80a986c7ce9eaed78f367"},
|
||||
]
|
||||
dynaconf = [
|
||||
{file = "dynaconf-3.1.7-py2.py3-none-any.whl", hash = "sha256:f52fe5db7622da56a552275e8f64e4df46e3b4ae11158831b042e8ba2f6d1c96"},
|
||||
{file = "dynaconf-3.1.7.tar.gz", hash = "sha256:e9d80b46ba4d9372f2f40c812594c963f74178140c0b596e57f2881001fc4d35"},
|
||||
{file = "dynaconf-3.1.8-py2.py3-none-any.whl", hash = "sha256:dea41800cf4eef488f49d3b5d1ff6305b85c3c21538f4dcfc39ab34d29606d28"},
|
||||
{file = "dynaconf-3.1.8.tar.gz", hash = "sha256:d141a6664fca3648d2d8e84440966af9f58c4f4201ca78353a3f595a67c19ab4"},
|
||||
]
|
||||
entrypoints = [
|
||||
{file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"},
|
||||
|
@ -2149,6 +2186,10 @@ et-xmlfile = [
|
|||
{file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"},
|
||||
{file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"},
|
||||
]
|
||||
fastjsonschema = [
|
||||
{file = "fastjsonschema-2.15.3-py3-none-any.whl", hash = "sha256:ddb0b1d8243e6e3abb822bd14e447a89f4ab7439342912d590444831fa00b6a0"},
|
||||
{file = "fastjsonschema-2.15.3.tar.gz", hash = "sha256:0a572f0836962d844c1fc435e200b2e4f4677e4e6611a2e3bdd01ba697c275ec"},
|
||||
]
|
||||
filelock = [
|
||||
{file = "filelock-3.6.0-py3-none-any.whl", hash = "sha256:f8314284bfffbdcfa0ff3d7992b023d4c628ced6feb957351d4c48d059f56bc0"},
|
||||
{file = "filelock-3.6.0.tar.gz", hash = "sha256:9cd540a9352e432c7246a48fe4e8712b10acb1df2ad1f30e8c070b82ae1fed85"},
|
||||
|
@ -2171,8 +2212,8 @@ flake8 = [
|
|||
{file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"},
|
||||
]
|
||||
fonttools = [
|
||||
{file = "fonttools-4.31.2-py3-none-any.whl", hash = "sha256:2df636a3f402ef14593c6811dac0609563b8c374bd7850e76919eb51ea205426"},
|
||||
{file = "fonttools-4.31.2.zip", hash = "sha256:236b29aee6b113e8f7bee28779c1230a86ad2aac9a74a31b0aedf57e7dfb62a4"},
|
||||
{file = "fonttools-4.33.3-py3-none-any.whl", hash = "sha256:f829c579a8678fa939a1d9e9894d01941db869de44390adb49ce67055a06cc2a"},
|
||||
{file = "fonttools-4.33.3.zip", hash = "sha256:c0fdcfa8ceebd7c1b2021240bd46ef77aa8e7408cf10434be55df52384865f8e"},
|
||||
]
|
||||
geopandas = [
|
||||
{file = "geopandas-0.9.0-py2.py3-none-any.whl", hash = "sha256:79f6e557ba0dba76eec44f8351b1c6b42a17c38f5f08fef347e98fe4dae563c7"},
|
||||
|
@ -2183,8 +2224,8 @@ idna = [
|
|||
{file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
|
||||
]
|
||||
importlib-resources = [
|
||||
{file = "importlib_resources-5.6.0-py3-none-any.whl", hash = "sha256:a9dd72f6cc106aeb50f6e66b86b69b454766dd6e39b69ac68450253058706bcc"},
|
||||
{file = "importlib_resources-5.6.0.tar.gz", hash = "sha256:1b93238cbf23b4cde34240dd8321d99e9bf2eb4bc91c0c99b2886283e7baad85"},
|
||||
{file = "importlib_resources-5.7.1-py3-none-any.whl", hash = "sha256:e447dc01619b1e951286f3929be820029d48c75eb25d265c28b92a16548212b8"},
|
||||
{file = "importlib_resources-5.7.1.tar.gz", hash = "sha256:b6062987dfc51f0fcb809187cffbd60f35df7acb4589091f154214af6d0d49d3"},
|
||||
]
|
||||
iniconfig = [
|
||||
{file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"},
|
||||
|
@ -2194,8 +2235,8 @@ ipdb = [
|
|||
{file = "ipdb-0.13.9.tar.gz", hash = "sha256:951bd9a64731c444fd907a5ce268543020086a697f6be08f7cc2c9a752a278c5"},
|
||||
]
|
||||
ipykernel = [
|
||||
{file = "ipykernel-6.10.0-py3-none-any.whl", hash = "sha256:86ebe63f58cb68f299e06a2add4957df0eaebc7b0864de5711accd9c532d7810"},
|
||||
{file = "ipykernel-6.10.0.tar.gz", hash = "sha256:d1c7d92daea5d9b55a33e523d4d17c09ad38e0df17a4e0ed2fa5c97f07f200ba"},
|
||||
{file = "ipykernel-6.13.0-py3-none-any.whl", hash = "sha256:2b0987af43c0d4b62cecb13c592755f599f96f29aafe36c01731aaa96df30d39"},
|
||||
{file = "ipykernel-6.13.0.tar.gz", hash = "sha256:0e28273e290858393e86e152b104e5506a79c13d25b951ac6eca220051b4be60"},
|
||||
]
|
||||
ipython = [
|
||||
{file = "ipython-7.32.0-py3-none-any.whl", hash = "sha256:86df2cf291c6c70b5be6a7b608650420e89180c8ec74f376a34e2dc15c3400e7"},
|
||||
|
@ -2234,8 +2275,8 @@ jupyter = [
|
|||
{file = "jupyter-1.0.0.zip", hash = "sha256:3e1f86076bbb7c8c207829390305a2b1fe836d471ed54be66a3b8c41e7f46cc7"},
|
||||
]
|
||||
jupyter-client = [
|
||||
{file = "jupyter_client-7.2.1-py3-none-any.whl", hash = "sha256:d10e31ac4b8364d1cb30ebcee9e5cc7b7eb5d23b76912be9ef3d4c75167fbc68"},
|
||||
{file = "jupyter_client-7.2.1.tar.gz", hash = "sha256:aa177279e93205d0681ec0e2e210da01b22c5a1464a56abd455adcac64f0de91"},
|
||||
{file = "jupyter_client-7.3.0-py3-none-any.whl", hash = "sha256:671dd2d90d03f41716b09627a4eb06bb37875f92bf6563cc2ce4fe71c61c5cda"},
|
||||
{file = "jupyter_client-7.3.0.tar.gz", hash = "sha256:3bcc8e08a294d0fa9406e48cfe17e11ef0efdb7c504fe8cc335128e3ef8f3dac"},
|
||||
]
|
||||
jupyter-console = [
|
||||
{file = "jupyter_console-6.4.3-py3-none-any.whl", hash = "sha256:e630bcb682c0088dda45688ad7c2424d4a825c8acf494cb036ced03ed0424841"},
|
||||
|
@ -2250,8 +2291,8 @@ jupyter-contrib-nbextensions = [
|
|||
{file = "jupyter_contrib_nbextensions-0.5.1.tar.gz", hash = "sha256:eecd28ecc2fc410226c0a3d4932ed2fac4860ccf8d9e9b1b29548835a35b22ab"},
|
||||
]
|
||||
jupyter-core = [
|
||||
{file = "jupyter_core-4.9.2-py3-none-any.whl", hash = "sha256:f875e4d27e202590311d468fa55f90c575f201490bd0c18acabe4e318db4a46d"},
|
||||
{file = "jupyter_core-4.9.2.tar.gz", hash = "sha256:d69baeb9ffb128b8cd2657fcf2703f89c769d1673c851812119e3a2a0e93ad9a"},
|
||||
{file = "jupyter_core-4.10.0-py3-none-any.whl", hash = "sha256:e7f5212177af7ab34179690140f188aa9bf3d322d8155ed972cbded19f55b6f3"},
|
||||
{file = "jupyter_core-4.10.0.tar.gz", hash = "sha256:a6de44b16b7b31d7271130c71a6792c4040f077011961138afed5e5e73181aec"},
|
||||
]
|
||||
jupyter-highlight-selected-word = [
|
||||
{file = "jupyter_highlight_selected_word-0.2.0-py2.py3-none-any.whl", hash = "sha256:9545dfa9cb057eebe3a5795604dcd3a5294ea18637e553f61a0b67c1b5903c58"},
|
||||
|
@ -2264,8 +2305,8 @@ jupyter-nbextensions-configurator = [
|
|||
{file = "jupyter_nbextensions_configurator-0.4.1.tar.gz", hash = "sha256:e5e86b5d9d898e1ffb30ebb08e4ad8696999f798fef3ff3262d7b999076e4e83"},
|
||||
]
|
||||
jupyterlab-pygments = [
|
||||
{file = "jupyterlab_pygments-0.1.2-py2.py3-none-any.whl", hash = "sha256:abfb880fd1561987efaefcb2d2ac75145d2a5d0139b1876d5be806e32f630008"},
|
||||
{file = "jupyterlab_pygments-0.1.2.tar.gz", hash = "sha256:cfcda0873626150932f438eccf0f8bf22bfa92345b814890ab360d666b254146"},
|
||||
{file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"},
|
||||
{file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"},
|
||||
]
|
||||
jupyterlab-widgets = [
|
||||
{file = "jupyterlab_widgets-1.1.0-py3-none-any.whl", hash = "sha256:c2a9bd3789f120f64d73268c066ed3b000c56bc1dda217be5cdc43e7b4ebad3f"},
|
||||
|
@ -2469,8 +2510,8 @@ marshmallow = [
|
|||
{file = "marshmallow-3.15.0.tar.gz", hash = "sha256:2aaaab4f01ef4f5a011a21319af9fce17ab13bf28a026d1252adab0e035648d5"},
|
||||
]
|
||||
marshmallow-dataclass = [
|
||||
{file = "marshmallow_dataclass-8.5.3-py3-none-any.whl", hash = "sha256:eefeff62ee975c64d293d2db9370e7e748a2ff83dcb5109416b75e087a2ac02e"},
|
||||
{file = "marshmallow_dataclass-8.5.3.tar.gz", hash = "sha256:c0c5e1ea8d0e557b6fa00343799a9a9e60757b948fb096076beb6aa76bd68d30"},
|
||||
{file = "marshmallow_dataclass-8.5.7-py3-none-any.whl", hash = "sha256:da530f92f806673b9f40d8dc671ca18848b6cebded0eaecef720e256b5143e69"},
|
||||
{file = "marshmallow_dataclass-8.5.7.tar.gz", hash = "sha256:0bdb779939b4656a40430a6a8390af698676eef89c2e583deb06e3585bf81bba"},
|
||||
]
|
||||
marshmallow-enum = [
|
||||
{file = "marshmallow-enum-1.5.1.tar.gz", hash = "sha256:38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58"},
|
||||
|
@ -2562,24 +2603,24 @@ nb-black = [
|
|||
{file = "nb_black-1.0.7.tar.gz", hash = "sha256:1ca52e3a46675f6a0a6d79ac73a1f8f951bef60f919eced56173e76ab1b6d62b"},
|
||||
]
|
||||
nbclient = [
|
||||
{file = "nbclient-0.5.13-py3-none-any.whl", hash = "sha256:47ac905af59379913c1f8f541098d2550153cf8dc58553cbe18c702b181518b0"},
|
||||
{file = "nbclient-0.5.13.tar.gz", hash = "sha256:40c52c9b5e3c31faecaee69f202b3f53e38d7c1c563de0fadde9d7eda0fdafe8"},
|
||||
{file = "nbclient-0.6.0-py3-none-any.whl", hash = "sha256:2eed35fc954716cdf0a01ea8cbdd9f9316761479008570059e2f5de29e139423"},
|
||||
{file = "nbclient-0.6.0.tar.gz", hash = "sha256:3f89a403c6badf24d2855a455b69a80985b3b27e04111243fdb6a88a28d27031"},
|
||||
]
|
||||
nbconvert = [
|
||||
{file = "nbconvert-6.4.5-py3-none-any.whl", hash = "sha256:e01d219f55cc79f9701c834d605e8aa3acf35725345d3942e3983937f368ce14"},
|
||||
{file = "nbconvert-6.4.5.tar.gz", hash = "sha256:21163a8e2073c07109ca8f398836e45efdba2aacea68d6f75a8a545fef070d4e"},
|
||||
{file = "nbconvert-6.5.0-py3-none-any.whl", hash = "sha256:c56dd0b8978a1811a5654f74c727ff16ca87dd5a43abd435a1c49b840fcd8360"},
|
||||
{file = "nbconvert-6.5.0.tar.gz", hash = "sha256:223e46e27abe8596b8aed54301fadbba433b7ffea8196a68fd7b1ff509eee99d"},
|
||||
]
|
||||
nbformat = [
|
||||
{file = "nbformat-5.2.0-py3-none-any.whl", hash = "sha256:3e30424e8291b2188347f5c3ba5273ed3766f12f8c5137c2e456a0815f36e785"},
|
||||
{file = "nbformat-5.2.0.tar.gz", hash = "sha256:93df0b9c67221d38fb970c48f6d361819a6c388299a0ef3171bbb912edfe1324"},
|
||||
{file = "nbformat-5.3.0-py3-none-any.whl", hash = "sha256:38856d97de49e8292e2d5d8f595e9d26f02abfd87e075d450af4511870b40538"},
|
||||
{file = "nbformat-5.3.0.tar.gz", hash = "sha256:fcc5ab8cb74e20b19570b5be809e2dba9b82836fd2761a89066ad43394ba29f5"},
|
||||
]
|
||||
nest-asyncio = [
|
||||
{file = "nest_asyncio-1.5.4-py3-none-any.whl", hash = "sha256:3fdd0d6061a2bb16f21fe8a9c6a7945be83521d81a0d15cff52e9edee50101d6"},
|
||||
{file = "nest_asyncio-1.5.4.tar.gz", hash = "sha256:f969f6013a16fadb4adcf09d11a68a4f617c6049d7af7ac2c676110169a63abd"},
|
||||
{file = "nest_asyncio-1.5.5-py3-none-any.whl", hash = "sha256:b98e3ec1b246135e4642eceffa5a6c23a3ab12c82ff816a92c612d68205813b2"},
|
||||
{file = "nest_asyncio-1.5.5.tar.gz", hash = "sha256:e442291cd942698be619823a17a86a5759eabe1f8613084790de189fe9e16d65"},
|
||||
]
|
||||
notebook = [
|
||||
{file = "notebook-6.4.10-py3-none-any.whl", hash = "sha256:49cead814bff0945fcb2ee07579259418672ac175d3dc3d8102a4b0a656ed4df"},
|
||||
{file = "notebook-6.4.10.tar.gz", hash = "sha256:2408a76bc6289283a8eecfca67e298ec83c67db51a4c2e1b713dd180bb39e90e"},
|
||||
{file = "notebook-6.4.11-py3-none-any.whl", hash = "sha256:b4a6baf2eba21ce67a0ca11a793d1781b06b8078f34d06c710742e55f3eee505"},
|
||||
{file = "notebook-6.4.11.tar.gz", hash = "sha256:709b1856a564fe53054796c80e17a67262071c86bfbdfa6b96aaa346113c555a"},
|
||||
]
|
||||
numpy = [
|
||||
{file = "numpy-1.22.3-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:92bfa69cfbdf7dfc3040978ad09a48091143cffb778ec3b03fa170c494118d75"},
|
||||
|
@ -2612,27 +2653,27 @@ packaging = [
|
|||
{file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
|
||||
]
|
||||
pandas = [
|
||||
{file = "pandas-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3dfb32ed50122fe8c5e7f2b8d97387edd742cc78f9ec36f007ee126cd3720907"},
|
||||
{file = "pandas-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0259cd11e7e6125aaea3af823b80444f3adad6149ff4c97fef760093598b3e34"},
|
||||
{file = "pandas-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96e9ece5759f9b47ae43794b6359bbc54805d76e573b161ae770c1ea59393106"},
|
||||
{file = "pandas-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508c99debccd15790d526ce6b1624b97a5e1e4ca5b871319fb0ebfd46b8f4dad"},
|
||||
{file = "pandas-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6a7bbbb7950063bfc942f8794bc3e31697c020a14f1cd8905fc1d28ec674a01"},
|
||||
{file = "pandas-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:c614001129b2a5add5e3677c3a213a9e6fd376204cb8d17c04e84ff7dfc02a73"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4e1176f45981c8ccc8161bc036916c004ca51037a7ed73f2d2a9857e6dbe654f"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bbb15ad79050e8b8d39ec40dd96a30cd09b886a2ae8848d0df1abba4d5502a67"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6d6ad1da00c7cc7d8dd1559a6ba59ba3973be6b15722d49738b2be0977eb8a0c"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:358b0bc98a5ff067132d23bf7a2242ee95db9ea5b7bbc401cf79205f11502fd3"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6105af6533f8b63a43ea9f08a2ede04e8f43e49daef0209ab0d30352bcf08bee"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-win32.whl", hash = "sha256:04dd15d9db538470900c851498e532ef28d4e56bfe72c9523acb32042de43dfb"},
|
||||
{file = "pandas-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b384516dbb4e6aae30e3464c2e77c563da5980440fbdfbd0968e3942f8f9d70"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f02e85e6d832be37d7f16cf6ac8bb26b519ace3e5f3235564a91c7f658ab2a43"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0b1a13f647e4209ed7dbb5da3497891d0045da9785327530ab696417ef478f84"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:19f7c632436b1b4f84615c3b127bbd7bc603db95e3d4332ed259dc815c9aaa26"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ea47ba1d6f359680130bd29af497333be6110de8f4c35b9211eec5a5a9630fa"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e5a7a1e0ecaac652326af627a3eca84886da9e667d68286866d4e33f6547caf"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-win32.whl", hash = "sha256:1d85d5f6be66dfd6d1d8d13b9535e342a2214260f1852654b19fa4d7b8d1218b"},
|
||||
{file = "pandas-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:3129a35d9dad1d80c234dd78f8f03141b914395d23f97cf92a366dcd19f8f8bf"},
|
||||
{file = "pandas-1.4.1.tar.gz", hash = "sha256:8db93ec98ac7cb5f8ac1420c10f5e3c43533153f253fe7fb6d891cf5aa2b80d2"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be67c782c4f1b1f24c2f16a157e12c2693fd510f8df18e3287c77f33d124ed07"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5a206afa84ed20e07603f50d22b5f0db3fb556486d8c2462d8bc364831a4b417"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0010771bd9223f7afe5f051eb47c4a49534345dfa144f2f5470b27189a4dd3b5"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3228198333dd13c90b6434ddf61aa6d57deaca98cf7b654f4ad68a2db84f8cfe"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b79af3a69e5175c6fa7b4e046b21a646c8b74e92c6581a9d825687d92071b51"},
|
||||
{file = "pandas-1.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:5586cc95692564b441f4747c47c8a9746792e87b40a4680a2feb7794defb1ce3"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:061609334a8182ab500a90fe66d46f6f387de62d3a9cb9aa7e62e3146c712167"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b8134651258bce418cb79c71adeff0a44090c98d955f6953168ba16cc285d9f7"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:df82739e00bb6daf4bba4479a40f38c718b598a84654cbd8bb498fd6b0aa8c16"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:385c52e85aaa8ea6a4c600a9b2821181a51f8be0aee3af6f2dcb41dafc4fc1d0"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295872bf1a09758aba199992c3ecde455f01caf32266d50abc1a073e828a7b9d"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-win32.whl", hash = "sha256:95c1e422ced0199cf4a34385ff124b69412c4bc912011ce895582bee620dfcaa"},
|
||||
{file = "pandas-1.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:5c54ea4ef3823108cd4ec7fb27ccba4c3a775e0f83e39c5e17f5094cb17748bc"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c072c7f06b9242c855ed8021ff970c0e8f8b10b35e2640c657d2a541c5950f59"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f549097993744ff8c41b5e8f2f0d3cbfaabe89b4ae32c8c08ead6cc535b80139"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff08a14ef21d94cdf18eef7c569d66f2e24e0bc89350bcd7d243dd804e3b5eb2"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c5bf555b6b0075294b73965adaafb39cf71c312e38c5935c93d78f41c19828a"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51649ef604a945f781105a6d2ecf88db7da0f4868ac5d45c51cb66081c4d9c73"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-win32.whl", hash = "sha256:d0d4f13e4be7ce89d7057a786023c461dd9370040bdb5efa0a7fe76b556867a0"},
|
||||
{file = "pandas-1.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:09d8be7dd9e1c4c98224c4dfe8abd60d145d934e9fc1f5f411266308ae683e6a"},
|
||||
{file = "pandas-1.4.2.tar.gz", hash = "sha256:92bc1fc585f1463ca827b45535957815b7deb218c549b7c18402c322c7549a12"},
|
||||
]
|
||||
pandas-vet = [
|
||||
{file = "pandas-vet-0.2.3.tar.gz", hash = "sha256:58b64027a4c192b4b62272c1d8fdecc1733352452401282b697c1a32abe4656a"},
|
||||
|
@ -2700,20 +2741,20 @@ pillow = [
|
|||
{file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"},
|
||||
]
|
||||
platformdirs = [
|
||||
{file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"},
|
||||
{file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
|
||||
{file = "platformdirs-2.5.2-py3-none-any.whl", hash = "sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788"},
|
||||
{file = "platformdirs-2.5.2.tar.gz", hash = "sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19"},
|
||||
]
|
||||
pluggy = [
|
||||
{file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
|
||||
{file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
|
||||
]
|
||||
prometheus-client = [
|
||||
{file = "prometheus_client-0.13.1-py3-none-any.whl", hash = "sha256:357a447fd2359b0a1d2e9b311a0c5778c330cfbe186d880ad5a6b39884652316"},
|
||||
{file = "prometheus_client-0.13.1.tar.gz", hash = "sha256:ada41b891b79fca5638bd5cfe149efa86512eaa55987893becd2c6d8d0a5dfc5"},
|
||||
{file = "prometheus_client-0.14.1-py3-none-any.whl", hash = "sha256:522fded625282822a89e2773452f42df14b5a8e84a86433e3f8a189c1d54dc01"},
|
||||
{file = "prometheus_client-0.14.1.tar.gz", hash = "sha256:5459c427624961076277fdc6dc50540e2bacb98eebde99886e59ec55ed92093a"},
|
||||
]
|
||||
prompt-toolkit = [
|
||||
{file = "prompt_toolkit-3.0.28-py3-none-any.whl", hash = "sha256:30129d870dcb0b3b6a53efdc9d0a83ea96162ffd28ffe077e94215b233dc670c"},
|
||||
{file = "prompt_toolkit-3.0.28.tar.gz", hash = "sha256:9f1cd16b1e86c2968f2519d7fb31dd9d669916f515612c269d14e9ed52b51650"},
|
||||
{file = "prompt_toolkit-3.0.29-py3-none-any.whl", hash = "sha256:62291dad495e665fca0bda814e342c69952086afb0f4094d0893d357e5c78752"},
|
||||
{file = "prompt_toolkit-3.0.29.tar.gz", hash = "sha256:bd640f60e8cecd74f0dc249713d433ace2ddc62b65ee07f96d358e0b152b6ea7"},
|
||||
]
|
||||
psutil = [
|
||||
{file = "psutil-5.9.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:55ce319452e3d139e25d6c3f85a1acf12d1607ddedea5e35fb47a552c051161b"},
|
||||
|
@ -2765,46 +2806,86 @@ pycparser = [
|
|||
{file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
|
||||
{file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
|
||||
]
|
||||
pydantic = [
|
||||
{file = "pydantic-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb23bcc093697cdea2708baae4f9ba0e972960a835af22560f6ae4e7e47d33f5"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1d5278bd9f0eee04a44c712982343103bba63507480bfd2fc2790fa70cd64cf4"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab624700dc145aa809e6f3ec93fb8e7d0f99d9023b713f6a953637429b437d37"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8d7da6f1c1049eefb718d43d99ad73100c958a5367d30b9321b092771e96c25"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3c3b035103bd4e2e4a28da9da7ef2fa47b00ee4a9cf4f1a735214c1bcd05e0f6"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3011b975c973819883842c5ab925a4e4298dffccf7782c55ec3580ed17dc464c"},
|
||||
{file = "pydantic-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:086254884d10d3ba16da0588604ffdc5aab3f7f09557b998373e885c690dd398"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0fe476769acaa7fcddd17cadd172b156b53546ec3614a4d880e5d29ea5fbce65"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8e9dcf1ac499679aceedac7e7ca6d8641f0193c591a2d090282aaf8e9445a46"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1e4c28f30e767fd07f2ddc6f74f41f034d1dd6bc526cd59e63a82fe8bb9ef4c"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c86229333cabaaa8c51cf971496f10318c4734cf7b641f08af0a6fbf17ca3054"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:c0727bda6e38144d464daec31dff936a82917f431d9c39c39c60a26567eae3ed"},
|
||||
{file = "pydantic-1.9.0-cp36-cp36m-win_amd64.whl", hash = "sha256:dee5ef83a76ac31ab0c78c10bd7d5437bfdb6358c95b91f1ba7ff7b76f9996a1"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9c9bdb3af48e242838f9f6e6127de9be7063aad17b32215ccc36a09c5cf1070"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ee7e3209db1e468341ef41fe263eb655f67f5c5a76c924044314e139a1103a2"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b6037175234850ffd094ca77bf60fb54b08b5b22bc85865331dd3bda7a02fa1"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b2571db88c636d862b35090ccf92bf24004393f85c8870a37f42d9f23d13e032"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8b5ac0f1c83d31b324e57a273da59197c83d1bb18171e512908fe5dc7278a1d6"},
|
||||
{file = "pydantic-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bbbc94d0c94dd80b3340fc4f04fd4d701f4b038ebad72c39693c794fd3bc2d9d"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e0896200b6a40197405af18828da49f067c2fa1f821491bc8f5bde241ef3f7d7"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bdfdadb5994b44bd5579cfa7c9b0e1b0e540c952d56f627eb227851cda9db77"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:574936363cd4b9eed8acdd6b80d0143162f2eb654d96cb3a8ee91d3e64bf4cf9"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c556695b699f648c58373b542534308922c46a1cda06ea47bc9ca45ef5b39ae6"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f947352c3434e8b937e3aa8f96f47bdfe6d92779e44bb3f41e4c213ba6a32145"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5e48ef4a8b8c066c4a31409d91d7ca372a774d0212da2787c0d32f8045b1e034"},
|
||||
{file = "pydantic-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:96f240bce182ca7fe045c76bcebfa0b0534a1bf402ed05914a6f1dadff91877f"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:815ddebb2792efd4bba5488bc8fde09c29e8ca3227d27cf1c6990fc830fd292b"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c5b77947b9e85a54848343928b597b4f74fc364b70926b3c4441ff52620640c"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c68c3bc88dbda2a6805e9a142ce84782d3930f8fdd9655430d8576315ad97ce"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a79330f8571faf71bf93667d3ee054609816f10a259a109a0738dac983b23c3"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f5a64b64ddf4c99fe201ac2724daada8595ada0d102ab96d019c1555c2d6441d"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a733965f1a2b4090a5238d40d983dcd78f3ecea221c7af1497b845a9709c1721"},
|
||||
{file = "pydantic-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cc6a4cb8a118ffec2ca5fcb47afbacb4f16d0ab8b7350ddea5e8ef7bcc53a16"},
|
||||
{file = "pydantic-1.9.0-py3-none-any.whl", hash = "sha256:085ca1de245782e9b46cefcf99deecc67d418737a1fd3f6a4f511344b613a5b3"},
|
||||
{file = "pydantic-1.9.0.tar.gz", hash = "sha256:742645059757a56ecd886faf4ed2441b9c0cd406079c2b4bee51bcc3fbcd510a"},
|
||||
]
|
||||
pyflakes = [
|
||||
{file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"},
|
||||
{file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"},
|
||||
]
|
||||
pygments = [
|
||||
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
|
||||
{file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
|
||||
{file = "Pygments-2.12.0-py3-none-any.whl", hash = "sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"},
|
||||
{file = "Pygments-2.12.0.tar.gz", hash = "sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb"},
|
||||
]
|
||||
pylint = [
|
||||
{file = "pylint-2.13.3-py3-none-any.whl", hash = "sha256:c8837b6ec6440e3490ab8f066054b0645a516a29ca51ce442f16f7004f711a70"},
|
||||
{file = "pylint-2.13.3.tar.gz", hash = "sha256:12ed2520510c40db647e4ec7f747b07e0d669b33ab41479c2a07bb89b92877db"},
|
||||
{file = "pylint-2.13.7-py3-none-any.whl", hash = "sha256:13ddbbd8872c804574149e81197c28877eba75224ba6b76cd8652fc31df55c1c"},
|
||||
{file = "pylint-2.13.7.tar.gz", hash = "sha256:911d3a97c808f7554643bcc5416028cfdc42eae34ed129b150741888c688d5d5"},
|
||||
]
|
||||
pypandoc = [
|
||||
{file = "pypandoc-1.7.4.tar.gz", hash = "sha256:28de23f646d9e804403c6b61d7cd32a6d76ca356abc79eb7217bdbff6748f86e"},
|
||||
{file = "pypandoc-1.7.5.tar.gz", hash = "sha256:802c26aae17b64136c6d006949d8ce183a7d4d9fbd4f2d051e66f4fb9f45ca50"},
|
||||
]
|
||||
pyparsing = [
|
||||
{file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
|
||||
{file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"},
|
||||
{file = "pyparsing-3.0.8-py3-none-any.whl", hash = "sha256:ef7b523f6356f763771559412c0d7134753f037822dad1b16945b7b846f7ad06"},
|
||||
{file = "pyparsing-3.0.8.tar.gz", hash = "sha256:7bf433498c016c4314268d95df76c81b842a4cb2b276fa3312cfb1e1d85f6954"},
|
||||
]
|
||||
pyproj = [
|
||||
{file = "pyproj-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2c41c9b7b5e1a1b0acc2b7b2f5de65b226f7b96c870888e4f679ff96322b1ed0"},
|
||||
{file = "pyproj-3.3.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0e1fd560b509b722db6566fa9685252f25640e93464d09e13d5190ed7ab491ba"},
|
||||
{file = "pyproj-3.3.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277072176a17471c0b1d25d6cae75401d81e9b50ea625ba546f5b79acd757dfc"},
|
||||
{file = "pyproj-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eca8ecf2b6b3225d93c723e6a2f51143d9195ac407f69e979363cdde344b93bb"},
|
||||
{file = "pyproj-3.3.0-cp310-cp310-win32.whl", hash = "sha256:4d2fc49c73d9f34e932bf37926d56916ba1b6f2f693cd4d8cc1d0d9eacc0e537"},
|
||||
{file = "pyproj-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:ce1adec823738e2d7c6af019fc38f58b4204bacfc782e4430373578c672f3833"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e70a1ea6f198cace1a492397bdd0a46e640201120973293d6c48031e370d6a87"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:99f171da5f885efeec8d7fb2e2557175ffa8834eeb488842b1f52ac78a9a98e5"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3d28b84913cd849832a8f154c0e0c2ee4618057f7389ee68bfdb2145e7ed78cc"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab4baf781721640659db83a6b4da636fc403008f4978c668275754284c946778"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-win32.whl", hash = "sha256:4125e6704751d0e82d8d912d9851da097e8d38599d4c45f9944faaeb21771938"},
|
||||
{file = "pyproj-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b15e199c1da8fd132e11dfa68d8cf65d4812dedabc776b308df778ecd0d07658"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fcceb6736085bf19291b707bc67c8cebe05330bd02268e9b8eba6d28a1905fce"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf479bd481774ad217e9db5674868eee8f01dfe3868f61753328895ae7da61a"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:067a5c6099949edd66e9a10b139af4e2f65ebadb9f59583923a1d3feefac749a"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:235b52d8700ffb6be1f3638b1e25d83a9c13edcdb793236d8a98fd39227c5c27"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-win32.whl", hash = "sha256:44b5590c0b8dd002154916e170ef88f57abf91005b34bcb23faef97abb4d42c2"},
|
||||
{file = "pyproj-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b48dd9e5736957707fce1d9253fb0772bcf80480198c7790e21fed73fee61240"},
|
||||
{file = "pyproj-3.3.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5a105bfe37c78416d2641cd5d3368c99057d041f15f8d51ea3898953b21395c9"},
|
||||
{file = "pyproj-3.3.0.tar.gz", hash = "sha256:ce8bfbc212729e9a643f5f5d77f7a93394e032eda1e2d8799ae902d08add747e"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:473961faef7a9fd723c5d432f65220ea6ab3854e606bf84b4d409a75a4261c78"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fef9c1e339f25c57f6ae0558b5ab1bbdf7994529a30d8d7504fc6302ea51c03"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:140fa649fedd04f680a39f8ad339799a55cb1c49f6a84e1b32b97e49646647aa"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b59c08aea13ee428cf8a919212d55c036cc94784805ed77c8f31a4d1f541058c"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-win32.whl", hash = "sha256:1adc9ccd1bf04998493b6a2e87e60656c75ab790653b36cfe351e9ef214828ed"},
|
||||
{file = "pyproj-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:42eea10afc750fccd1c5c4ba56de29ab791ab4d83c1f7db72705566282ac5396"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:531ea36519fa7b581466d4b6ab32f66ae4dadd9499d726352f71ee5e19c3d1c5"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67025e37598a6bbed2c9c6c9e4c911f6dd39315d3e1148ead935a5c4d64309d5"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed1a3c0cd4182425f91b48d5db39f459bc2fe0d88017ead6425a1bc85faee33"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cc4771403db54494e1e55bca8e6d33cde322f8cf0ed39f1557ff109c66d2cd1"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-win32.whl", hash = "sha256:c99f7b5757a28040a2dd4a28c9805fdf13eef79a796f4a566ab5cb362d10630d"},
|
||||
{file = "pyproj-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:5dac03d4338a4c8bd0f69144c527474f517b4cbd7d2d8c532cd8937799723248"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:56b0f9ee2c5b2520b18db30a393a7b86130cf527ddbb8c96e7f3c837474a9d79"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f92d8f6514516124abb714dce912b20867831162cfff9fae2678ef07b6fcf0f"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ef1bfbe2dcc558c7a98e2f1836abdcd630390f3160724a6f4f5c818b2be0ad5"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca5f32b56210429b367ca4f9a57ffe67975c487af82e179a24370879a3daf68"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-win32.whl", hash = "sha256:aba199704c824fb84ab64927e7bc9ef71e603e483130ec0f7e09e97259b8f61f"},
|
||||
{file = "pyproj-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:120d45ed73144c65e9677dc73ba8a531c495d179dd9f9f0471ac5acc02d7ac4b"},
|
||||
{file = "pyproj-3.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:52efb681647dfac185cc655a709bc0caaf910031a0390f816f5fc8ce150cbedc"},
|
||||
{file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ab0d6e38fda7c13726afacaf62e9f9dd858089d67910471758afd9cb24e0ecd"},
|
||||
{file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45487942c19c5a8b09c91964ea3201f4e094518e34743cae373889a36e3d9260"},
|
||||
{file = "pyproj-3.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:797ad5655d484feac14b0fbb4a4efeaac0cf780a223046e2465494c767fd1c3b"},
|
||||
{file = "pyproj-3.3.1.tar.gz", hash = "sha256:b3d8e14d91cc95fb3dbc03a9d0588ac58326803eefa5bbb0978d109de3304fbe"},
|
||||
]
|
||||
pyrsistent = [
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
|
||||
|
@ -3040,8 +3121,8 @@ six = [
|
|||
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
|
||||
]
|
||||
soupsieve = [
|
||||
{file = "soupsieve-2.3.1-py3-none-any.whl", hash = "sha256:1a3cca2617c6b38c0343ed661b1fa5de5637f257d4fe22bd9f1338010a1efefb"},
|
||||
{file = "soupsieve-2.3.1.tar.gz", hash = "sha256:b8d49b1cd4f037c7082a9683dfa1801aa2597fb11c3a1155b7a5b94829b4f1f9"},
|
||||
{file = "soupsieve-2.3.2.post1-py3-none-any.whl", hash = "sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759"},
|
||||
{file = "soupsieve-2.3.2.post1.tar.gz", hash = "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"},
|
||||
]
|
||||
tenacity = [
|
||||
{file = "tenacity-8.0.1-py3-none-any.whl", hash = "sha256:f78f4ea81b0fabc06728c11dc2a8c01277bfc5181b321a4770471902e3eb844a"},
|
||||
|
@ -3051,14 +3132,14 @@ terminado = [
|
|||
{file = "terminado-0.13.3-py3-none-any.whl", hash = "sha256:874d4ea3183536c1782d13c7c91342ef0cf4e5ee1d53633029cbc972c8760bd8"},
|
||||
{file = "terminado-0.13.3.tar.gz", hash = "sha256:94d1cfab63525993f7d5c9b469a50a18d0cdf39435b59785715539dd41e36c0d"},
|
||||
]
|
||||
testpath = [
|
||||
{file = "testpath-0.6.0-py3-none-any.whl", hash = "sha256:8ada9f80a2ac6fb0391aa7cdb1a7d11cfa8429f693eda83f74dde570fe6fa639"},
|
||||
{file = "testpath-0.6.0.tar.gz", hash = "sha256:2f1b97e6442c02681ebe01bd84f531028a7caea1af3825000f52345c30285e0f"},
|
||||
]
|
||||
textwrap3 = [
|
||||
{file = "textwrap3-0.9.2-py2.py3-none-any.whl", hash = "sha256:bf5f4c40faf2a9ff00a9e0791fed5da7415481054cef45bb4a3cfb1f69044ae0"},
|
||||
{file = "textwrap3-0.9.2.zip", hash = "sha256:5008eeebdb236f6303dcd68f18b856d355f6197511d952ba74bc75e40e0c3414"},
|
||||
]
|
||||
tinycss2 = [
|
||||
{file = "tinycss2-1.1.1-py3-none-any.whl", hash = "sha256:fe794ceaadfe3cf3e686b22155d0da5780dd0e273471a51846d0a02bc204fec8"},
|
||||
{file = "tinycss2-1.1.1.tar.gz", hash = "sha256:b2e44dd8883c360c35dd0d1b5aad0b610e5156c2cb3b33434634e539ead9d8bf"},
|
||||
]
|
||||
toml = [
|
||||
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
|
||||
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
|
||||
|
@ -3111,8 +3192,8 @@ tornado = [
|
|||
{file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
|
||||
]
|
||||
tox = [
|
||||
{file = "tox-3.24.5-py2.py3-none-any.whl", hash = "sha256:be3362472a33094bce26727f5f771ca0facf6dafa217f65875314e9a6600c95c"},
|
||||
{file = "tox-3.24.5.tar.gz", hash = "sha256:67e0e32c90e278251fea45b696d0fef3879089ccbe979b0c556d35d5a70e2993"},
|
||||
{file = "tox-3.25.0-py2.py3-none-any.whl", hash = "sha256:0805727eb4d6b049de304977dfc9ce315a1938e6619c3ab9f38682bb04662a5a"},
|
||||
{file = "tox-3.25.0.tar.gz", hash = "sha256:37888f3092aa4e9f835fc8cc6dadbaaa0782651c41ef359e3a5743fcb0308160"},
|
||||
]
|
||||
tox-poetry = [
|
||||
{file = "tox-poetry-0.4.1.tar.gz", hash = "sha256:2395808e1ce487b5894c10f2202e14702bfa6d6909c0d1e525170d14809ac7ef"},
|
||||
|
@ -3127,16 +3208,16 @@ traitlets = [
|
|||
{file = "traitlets-5.1.1.tar.gz", hash = "sha256:059f456c5a7c1c82b98c2e8c799f39c9b8128f6d0d46941ee118daace9eb70c7"},
|
||||
]
|
||||
types-requests = [
|
||||
{file = "types-requests-2.27.15.tar.gz", hash = "sha256:2d371183c535208d2cc8fe7473d9b49c344c7077eb70302eb708638fb86086a8"},
|
||||
{file = "types_requests-2.27.15-py3-none-any.whl", hash = "sha256:77d09182a68e447e9e8b0ffc21abf54618b96f07689dffbb6a41cf0356542969"},
|
||||
{file = "types-requests-2.27.22.tar.gz", hash = "sha256:2e81a74d2db1e6d06baa4a9e1896720543739297a23daac0436a34e2fc732574"},
|
||||
{file = "types_requests-2.27.22-py3-none-any.whl", hash = "sha256:58730c31469fb959a21496d97d2e59c06ca6de2ccdfecb583cb924b83cb0811e"},
|
||||
]
|
||||
types-urllib3 = [
|
||||
{file = "types-urllib3-1.26.11.tar.gz", hash = "sha256:24d64e441168851eb05f1d022de18ae31558f5649c8f1117e384c2e85e31315b"},
|
||||
{file = "types_urllib3-1.26.11-py3-none-any.whl", hash = "sha256:bd0abc01e9fb963e4fddd561a56d21cc371b988d1245662195c90379077139cd"},
|
||||
{file = "types-urllib3-1.26.13.tar.gz", hash = "sha256:40f8fb5e8cd7d57e8aefdee3fdd5e930aa1a1bb4179cdadd55226cea588af790"},
|
||||
{file = "types_urllib3-1.26.13-py3-none-any.whl", hash = "sha256:ff7500641824f881b2c7bde4cc57e6c3abf03d1e005bae83aca752e77213a5da"},
|
||||
]
|
||||
typing-extensions = [
|
||||
{file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"},
|
||||
{file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"},
|
||||
{file = "typing_extensions-4.2.0-py3-none-any.whl", hash = "sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708"},
|
||||
{file = "typing_extensions-4.2.0.tar.gz", hash = "sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376"},
|
||||
]
|
||||
typing-inspect = [
|
||||
{file = "typing_inspect-0.7.1-py2-none-any.whl", hash = "sha256:b1f56c0783ef0f25fb064a01be6e5407e54cf4a4bf4f3ba3fe51e0bd6dcea9e5"},
|
||||
|
@ -3151,8 +3232,8 @@ us = [
|
|||
{file = "us-2.0.2.tar.gz", hash = "sha256:cb11ad0d43deff3a1c3690c74f0c731cff5b862c73339df2edd91133e1496fbc"},
|
||||
]
|
||||
virtualenv = [
|
||||
{file = "virtualenv-20.14.0-py2.py3-none-any.whl", hash = "sha256:1e8588f35e8b42c6ec6841a13c5e88239de1e6e4e4cedfd3916b306dc826ec66"},
|
||||
{file = "virtualenv-20.14.0.tar.gz", hash = "sha256:8e5b402037287126e81ccde9432b95a8be5b19d36584f64957060a3488c11ca8"},
|
||||
{file = "virtualenv-20.14.1-py2.py3-none-any.whl", hash = "sha256:e617f16e25b42eb4f6e74096b9c9e37713cf10bf30168fb4a739f3fa8f898a3a"},
|
||||
{file = "virtualenv-20.14.1.tar.gz", hash = "sha256:ef589a79795589aada0c1c5b319486797c03b67ac3984c48c669c0e4f50df3a5"},
|
||||
]
|
||||
wcwidth = [
|
||||
{file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"},
|
||||
|
@ -3237,6 +3318,6 @@ xlsxwriter = [
|
|||
{file = "XlsxWriter-2.0.0.tar.gz", hash = "sha256:80ce4aadc638dea452f6e28f70b6223b9b5b5740ff9c57ef6387af115e129bbb"},
|
||||
]
|
||||
zipp = [
|
||||
{file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"},
|
||||
{file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"},
|
||||
{file = "zipp-3.8.0-py3-none-any.whl", hash = "sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099"},
|
||||
{file = "zipp-3.8.0.tar.gz", hash = "sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad"},
|
||||
]
|
||||
|
|
|
@ -39,6 +39,7 @@ tqdm = "4.62.0"
|
|||
types-requests = "^2.25.0"
|
||||
us = "^2.0.2"
|
||||
xlsxwriter = "^2.0.0"
|
||||
pydantic = "^1.9.0"
|
||||
|
||||
[tool.poetry.dev-dependencies]
|
||||
black = {version = "^21.6b0", allow-prereleases = true}
|
||||
|
|
Loading…
Add table
Reference in a new issue