mirror of
https://github.com/DOI-DO/j40-cejst-2.git
synced 2025-02-24 18:44:20 -08:00
* update Python version on README; tuple typing fix * Alaska tribal points fix (#1821) * Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777) Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3. - [Release notes](https://github.com/lepture/mistune/releases) - [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst) - [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3) --- updated-dependencies: - dependency-name: mistune dependency-type: indirect ... Signed-off-by: dependabot[bot] <support@github.com> Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * poetry update * initial pass of score tests * add threshold tests * added ses threshold (not donut, not island) * testing suite -- stopping for the day * added test for lead proxy indicator * Refactor score tests to make them less verbose and more direct (#1865) * Cleanup tests slightly before refactor (#1846) * Refactor score calculations tests * Feedback from review * Refactor output tests like calculatoin tests (#1846) (#1870) * Reorganize files (#1846) * Switch from lru_cache to fixture scorpes (#1846) * Add tests for all factors (#1846) * Mark smoketests and run as part of be deply (#1846) * Update renamed var (#1846) * Switch from named tuple to dataclass (#1846) This is annoying, but pylint in python3.8 was crashing parsing the named tuple. We weren't using any namedtuple-specific features, so I made the type a dataclass just to get pylint to behave. 
* Add default timout to requests (#1846) * Fix type (#1846) * Fix merge mistake on poetry.lock (#1846) Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov> Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com> Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
71 lines
2.6 KiB
Python
71 lines
2.6 KiB
Python
import pandas as pd
|
|
import requests
|
|
|
|
from data_pipeline.etl.base import ExtractTransformLoad
|
|
from data_pipeline.utils import get_module_logger
|
|
from data_pipeline.config import settings
|
|
|
|
logger = get_module_logger(__name__)
|
|
|
|
|
|
class HudRecapETL(ExtractTransformLoad):
    """ETL for HUD Racially or Ethnically Concentrated Areas of Poverty (R/ECAP).

    Downloads the nationwide R/ECAP CSV from HUD's ArcGIS open-data portal,
    renames the tract id / priority-community columns, casts the priority
    flag to boolean, and writes a sorted per-tract CSV under
    ``DATA_PATH/dataset/hud_recap``.
    """

    def __init__(self):
        # pylint: disable=line-too-long
        # Source: HUD open-data portal (ArcGIS download endpoint).
        self.HUD_RECAP_CSV_URL = "https://opendata.arcgis.com/api/v3/datasets/56de4edea8264fe5a344da9811ef5d6e_0/downloads/data?format=csv&spatialRefId=4326"  # noqa: E501

        # Local download target for the raw CSV.
        self.HUD_RECAP_CSV = (
            self.get_tmp_path()
            / "Racially_or_Ethnically_Concentrated_Areas_of_Poverty__R_ECAPs_.csv"
        )
        # Output directory for the transformed dataset.
        self.CSV_PATH = self.DATA_PATH / "dataset" / "hud_recap"

        # Defining some variable names
        self.HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME = (
            "hud_recap_priority_community"
        )

        self.df: pd.DataFrame

    def extract(self) -> None:
        """Download the raw R/ECAP CSV into the temporary directory."""
        logger.info("Downloading HUD Recap Data")
        download = requests.get(
            self.HUD_RECAP_CSV_URL,
            # NOTE(review): verify=None falls through to requests' session
            # default (verification enabled) — confirm this was intended
            # rather than an attempt to disable verification.
            verify=None,
            timeout=settings.REQUESTS_DEFAULT_TIMOUT,
        )
        # Context manager guarantees the handle is closed even if the
        # write raises (the original opened/closed the file manually).
        with open(self.HUD_RECAP_CSV, "wb") as csv_file:
            csv_file.write(download.content)

    def transform(self) -> None:
        """Rename columns, cast the priority flag to bool, and sort by tract."""
        logger.info("Transforming HUD Recap Data")

        # Load comparison index (CalEnviroScreen 4)
        self.df = pd.read_csv(self.HUD_RECAP_CSV, dtype={"GEOID": "string"})

        self.df.rename(
            columns={
                "GEOID": self.GEOID_TRACT_FIELD_NAME,
                # Interestingly, there's no data dictionary for the RECAP data that I could find.
                # However, this site (http://www.schousing.com/library/Tax%20Credit/2020/QAP%20Instructions%20(2).pdf)
                # suggests:
                # "If RCAP_Current for the tract in which the site is located is 1, the tract is an R/ECAP. If RCAP_Current is 0, it is not."
                "RCAP_Current": self.HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME,
            },
            inplace=True,
        )

        # Convert to boolean
        self.df[self.HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME] = self.df[
            self.HUD_RECAP_PRIORITY_COMMUNITY_FIELD_NAME
        ].astype("bool")

        # (Removed a dead `value_counts()` call whose result was discarded —
        # it had no effect on the pipeline.)

        self.df.sort_values(by=self.GEOID_TRACT_FIELD_NAME, inplace=True)

    def load(self) -> None:
        """Write the transformed nationwide dataset to ``CSV_PATH/usa.csv``."""
        logger.info("Saving HUD Recap CSV")
        # write nationwide csv
        self.CSV_PATH.mkdir(parents=True, exist_ok=True)
        self.df.to_csv(self.CSV_PATH / "usa.csv", index=False)