2022-02-08 19:05:32 -05:00
|
|
|
import enum
|
|
|
|
import pathlib
|
2022-08-09 16:37:10 -04:00
|
|
|
import sys
|
2022-02-08 19:05:32 -05:00
|
|
|
import typing
|
2023-03-03 12:26:24 -06:00
|
|
|
import shutil
|
2021-08-24 15:40:54 -05:00
|
|
|
from typing import Optional
|
2023-03-03 12:26:24 -06:00
|
|
|
from abc import ABC, abstractmethod
|
2021-07-12 15:50:44 -04:00
|
|
|
|
2021-10-13 15:54:15 -04:00
|
|
|
import pandas as pd
|
2021-08-05 15:35:54 -04:00
|
|
|
from data_pipeline.config import settings
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
from data_pipeline.etl.score.etl_utils import (
|
|
|
|
compare_to_list_of_expected_state_fips_codes,
|
2021-10-13 15:54:15 -04:00
|
|
|
)
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* removing source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scopes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
from data_pipeline.etl.score.schemas.datasets import DatasetsConfig
|
|
|
|
from data_pipeline.utils import get_module_logger
|
|
|
|
from data_pipeline.utils import load_yaml_dict_from_file
|
|
|
|
from data_pipeline.utils import remove_all_from_dir
|
2023-03-03 12:26:24 -06:00
|
|
|
from data_pipeline.etl.datasource import DataSource
|
2021-10-13 15:54:15 -04:00
|
|
|
|
|
|
|
# Module-level logger, named after this module per the data_pipeline convention.
logger = get_module_logger(__name__)
|
2021-07-12 15:50:44 -04:00
|
|
|
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
class ValidGeoLevel(enum.Enum):
    """Geographic resolution of an ETL class's output data.

    Members are declared in order, so ``enum.auto()`` assigns
    ``CENSUS_TRACT = 1`` and ``CENSUS_BLOCK_GROUP = 2``.
    """

    CENSUS_TRACT = enum.auto()
    CENSUS_BLOCK_GROUP = enum.auto()
|
|
|
|
|
|
|
|
|
2023-03-03 12:26:24 -06:00
|
|
|
class ExtractTransformLoad(ABC):
|
2021-07-12 15:50:44 -04:00
|
|
|
"""
|
|
|
|
A class used to instantiate an ETL object to retrieve and process data from
|
|
|
|
datasets.
|
|
|
|
|
|
|
|
Attributes:
|
|
|
|
DATA_PATH (pathlib.Path): Local path where all data will be stored
|
|
|
|
TMP_PATH (pathlib.Path): Local path where temporary data will be stored
|
2022-08-09 16:37:10 -04:00
|
|
|
|
|
|
|
TODO: Fill missing attrs here
|
|
|
|
|
2021-07-12 15:50:44 -04:00
|
|
|
GEOID_FIELD_NAME (str): The common column name for a Census Block Group identifier
|
|
|
|
GEOID_TRACT_FIELD_NAME (str): The common column name for a Census Tract identifier
|
|
|
|
"""
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
APP_ROOT: pathlib.Path = settings.APP_ROOT
|
|
|
|
|
|
|
|
# Directories
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there were > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values of everything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were ending up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the comment:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all the stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
# Base directory for all data-pipeline artifacts; sourced from the
# application settings object (data_pipeline.config.settings).
DATA_PATH: pathlib.Path = settings.DATA_PATH
|
2022-02-08 19:05:32 -05:00
|
|
|
# Scratch directory for temporary files, located under DATA_PATH.
TMP_PATH: pathlib.Path = DATA_PATH / "tmp"
|
2023-03-03 12:26:24 -06:00
|
|
|
# Directory for source data files, located under DATA_PATH.
# NOTE(review): presumably where raw/downloaded datasets are cached — confirm against ETL usage.
SOURCES_PATH: pathlib.Path = DATA_PATH / "sources"
|
2022-03-04 15:02:09 -05:00
|
|
|
# Path to content configuration files under the application root.
# NOTE(review): APP_ROOT is defined elsewhere in this module (not visible in this chunk).
CONTENT_CONFIG: pathlib.Path = APP_ROOT / "content" / "config"
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* removing source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordering
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy burden (#1518)
* Add dataset config for energy burden (#1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge mess-up!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-data notebook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler approach compared to FUDS, add an
indicator for whether a tract has an abandoned mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from review (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scopes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deploy (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values of everything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were ending up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new smoketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the comment:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
DATASET_CONFIG_PATH: pathlib.Path = APP_ROOT / "etl" / "score" / "config"
|
|
|
|
DATASET_CONFIG: Optional[dict] = None
|
2022-02-08 19:05:32 -05:00
|
|
|
|
|
|
|
# Parameters
|
2021-07-12 15:50:44 -04:00
|
|
|
GEOID_FIELD_NAME: str = "GEOID10"
|
|
|
|
GEOID_TRACT_FIELD_NAME: str = "GEOID10_TRACT"
|
2021-12-03 15:46:10 -05:00
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
# Parameters that will be changed by children of the class
|
|
|
|
# NAME is used to create output path and populate logger info.
|
|
|
|
NAME: str = None
|
|
|
|
|
|
|
|
# LAST_UPDATED_YEAR is used to create output path.
|
|
|
|
LAST_UPDATED_YEAR: int = None
|
|
|
|
|
|
|
|
# SOURCE_URL is used to extract source data in extract().
|
|
|
|
SOURCE_URL: str = None
|
|
|
|
|
2022-08-09 16:37:10 -04:00
|
|
|
# INPUT_EXTRACTED_FILE_NAME is the name of the file after extract().
|
|
|
|
INPUT_EXTRACTED_FILE_NAME: str = None
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
# GEO_LEVEL is used to identify whether output data is at the unit of the tract or
|
|
|
|
# census block group.
|
|
|
|
# TODO: add tests that enforce seeing the expected geographic identifier field
|
|
|
|
# in the output file based on this geography level.
|
|
|
|
GEO_LEVEL: ValidGeoLevel = None
|
|
|
|
|
2022-03-21 18:55:15 -04:00
|
|
|
# COLUMNS_TO_KEEP is used to identify which columns to keep in the output df.
|
2022-02-08 19:05:32 -05:00
|
|
|
COLUMNS_TO_KEEP: typing.List[str] = None
|
|
|
|
|
2022-08-09 16:37:10 -04:00
|
|
|
# INPUT_GEOID_TRACT_FIELD_NAME is the field name that identifies the Census Tract ID
|
|
|
|
# on the input file
|
|
|
|
INPUT_GEOID_TRACT_FIELD_NAME: str = None
|
|
|
|
|
|
|
|
# NULL_REPRESENTATION is how nulls are represented on the input field
|
|
|
|
NULL_REPRESENTATION: str = None
|
|
|
|
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all the stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
# Whether this ETL contains data for the continental nation (DC & the US states
|
|
|
|
# except for Alaska and Hawaii)
|
|
|
|
# Validation default: source data is expected to cover the continental US
# (DC and the states other than Alaska and Hawaii). Subclasses override as needed.
CONTINENTAL_US_EXPECTED_IN_DATA: bool = True
|
|
|
|
|
|
|
|
# Whether this ETL contains data for Alaska and Hawaii
|
|
|
|
# Validation default: Alaska and Hawaii are expected in the data. Subclasses whose
# sources lack AK/HI set this to False (see the #1902 follow-up) instead of
# listing "02"/"15" in EXPECTED_MISSING_STATES.
ALASKA_AND_HAWAII_EXPECTED_IN_DATA: bool = True
|
|
|
|
|
|
|
|
# Whether this ETL contains data for Puerto Rico
|
|
|
|
# Validation default: Puerto Rico is expected in the data. Subclasses override
# when their source omits it.
PUERTO_RICO_EXPECTED_IN_DATA: bool = True
|
|
|
|
|
|
|
|
# Whether this ETL contains data for the island areas
|
|
|
|
# Validation default: island areas (presumably Guam, USVI, and the other
# territories — confirm against the FIPS handling elsewhere in the pipeline)
# are NOT expected in the data unless a subclass opts in.
ISLAND_AREAS_EXPECTED_IN_DATA: bool = False
|
|
|
|
|
|
|
|
# Whether this ETL contains known missing data for any additional
|
|
|
|
# states/territories
|
|
|
|
# Two-digit state/territory FIPS codes (e.g. "02", "15") that are known to be
# missing from this ETL's source data, beyond what the *_EXPECTED_IN_DATA flags
# already cover. Empty by default.
EXPECTED_MISSING_STATES: typing.List[str] = []
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
# Thirteen digits in a census block group ID.
|
|
|
|
# NOTE(review): standard Census block-group GEOIDs are 12 characters
# (state 2 + county 3 + tract 6 + block group 1) — confirm why 13 is
# expected here before relying on this in validation.
EXPECTED_CENSUS_BLOCK_GROUPS_CHARACTER_LENGTH: int = 13
|
2021-12-03 15:46:10 -05:00
|
|
|
# TODO: investigate. Census says there are only 217,740 CBGs in the US. This might
|
|
|
|
# be from CBGs at different time periods.
|
2021-11-16 10:05:09 -05:00
|
|
|
# Validation ceiling on CBG row counts. Deliberately above the Census's
# stated 217,740 CBGs to tolerate CBGs drawn from different time periods.
EXPECTED_MAX_CENSUS_BLOCK_GROUPS: int = 250000
|
2021-12-03 15:46:10 -05:00
|
|
|
|
2022-03-21 18:55:15 -04:00
|
|
|
# There should be eleven digits in a census tract ID.
|
2022-02-08 19:05:32 -05:00
|
|
|
# Census tract GEOIDs are 11 characters (state 2 + county 3 + tract 6).
EXPECTED_CENSUS_TRACTS_CHARACTER_LENGTH: int = 11
|
2022-03-17 23:19:23 -04:00
|
|
|
# TODO: investigate. Census says there are only 74,134 tracts in the United States,
|
2021-12-03 15:46:10 -05:00
|
|
|
# Puerto Rico, and island areas. This might be from tracts at different time
|
|
|
|
# periods. https://github.com/usds/justice40-tool/issues/964
|
|
|
|
# Validation ceiling on tract row counts: slightly above the Census's stated
# 74,134 tracts (US, PR, island areas) to tolerate tracts from different
# time periods. See https://github.com/usds/justice40-tool/issues/964.
EXPECTED_MAX_CENSUS_TRACTS: int = 74160
|
2021-07-12 15:50:44 -04:00
|
|
|
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-data notebook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandoned mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were ending up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the comment:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requested in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
# Should this dataset load its configuration from
|
|
|
|
# the YAML files?
|
|
|
|
LOAD_YAML_CONFIG: bool = False
|
|
|
|
|
2022-08-09 16:37:10 -04:00
|
|
|
# We use output_df as the final dataframe to use to write to the CSV
|
|
|
|
# It is used on the "load" base class method
|
2022-02-08 19:05:32 -05:00
|
|
|
output_df: pd.DataFrame = None
|
|
|
|
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
def __init_subclass__(cls) -> None:
|
|
|
|
if cls.LOAD_YAML_CONFIG:
|
|
|
|
cls.DATASET_CONFIG = cls.yaml_config_load()
|
|
|
|
|
2022-08-09 16:37:10 -04:00
|
|
|
@classmethod
|
|
|
|
def yaml_config_load(cls) -> dict:
|
|
|
|
"""Generate config dictionary and set instance variables from YAML dataset."""
|
|
|
|
# check if the class instance has score YAML definitions
|
|
|
|
datasets_config = load_yaml_dict_from_file(
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
cls.DATASET_CONFIG_PATH / "datasets.yml",
|
2022-08-09 16:37:10 -04:00
|
|
|
DatasetsConfig,
|
|
|
|
)
|
|
|
|
|
|
|
|
# get the config for this dataset
|
|
|
|
try:
|
|
|
|
dataset_config = next(
|
|
|
|
item
|
|
|
|
for item in datasets_config.get("datasets")
|
|
|
|
if item["module_name"] == cls.NAME
|
|
|
|
)
|
|
|
|
except StopIteration:
|
|
|
|
# Note: it'd be nice to log the name of the dataframe, but that's not accessible in this scope.
|
|
|
|
logger.error(
|
|
|
|
f"Exception encountered while extracting dataset config for dataset {cls.NAME}"
|
|
|
|
)
|
|
|
|
sys.exit()
|
|
|
|
|
|
|
|
# set some of the basic fields
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* removing source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordering
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy burden (#1518)
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-data notebook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were ending up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is there it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all teh stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
if "input_geoid_tract_field_name" in dataset_config:
|
|
|
|
cls.INPUT_GEOID_TRACT_FIELD_NAME = dataset_config[
|
|
|
|
"input_geoid_tract_field_name"
|
|
|
|
]
|
2022-08-09 16:37:10 -04:00
|
|
|
|
|
|
|
# get the columns to write on the CSV
|
|
|
|
# and set the constants
|
|
|
|
cls.COLUMNS_TO_KEEP = [
|
|
|
|
cls.GEOID_TRACT_FIELD_NAME, # always index with geoid tract id
|
|
|
|
]
|
|
|
|
for field in dataset_config["load_fields"]:
|
|
|
|
cls.COLUMNS_TO_KEEP.append(field["long_name"])
|
|
|
|
setattr(cls, field["df_field_name"], field["long_name"])
|
|
|
|
return dataset_config
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
# This is a classmethod so it can be used by `get_data_frame` without
|
|
|
|
# needing to create an instance of the class. This is a use case in `etl_score`.
|
|
|
|
@classmethod
|
|
|
|
def _get_output_file_path(cls) -> pathlib.Path:
|
|
|
|
"""Generate the output file path."""
|
|
|
|
if cls.NAME is None:
|
|
|
|
raise NotImplementedError(
|
|
|
|
f"Child ETL class needs to specify `cls.NAME` (currently "
|
2022-08-09 16:37:10 -04:00
|
|
|
f"{cls.NAME})."
|
2022-02-08 19:05:32 -05:00
|
|
|
)
|
|
|
|
|
2022-08-09 16:37:10 -04:00
|
|
|
output_file_path = cls.DATA_PATH / "dataset" / f"{cls.NAME}" / "usa.csv"
|
2022-02-08 19:05:32 -05:00
|
|
|
return output_file_path
|
2021-07-12 15:50:44 -04:00
|
|
|
|
2023-03-03 12:26:24 -06:00
|
|
|
def get_sources_path(self) -> pathlib.Path:
|
|
|
|
"""Returns the sources path associated with this ETL class. The sources path
|
|
|
|
is the home for cached data sources used by this ETL."""
|
|
|
|
|
|
|
|
sources_path = self.SOURCES_PATH / str(self.__class__.__name__)
|
2022-02-11 14:04:53 -05:00
|
|
|
|
2022-02-16 16:45:59 -05:00
|
|
|
# Create directory if it doesn't exist
|
2023-03-03 12:26:24 -06:00
|
|
|
sources_path.mkdir(parents=True, exist_ok=True)
|
2022-02-11 14:04:53 -05:00
|
|
|
|
2023-03-03 12:26:24 -06:00
|
|
|
return sources_path
|
2022-02-11 14:04:53 -05:00
|
|
|
|
2023-03-03 12:26:24 -06:00
|
|
|
@abstractmethod
|
|
|
|
def get_data_sources(self) -> [DataSource]:
|
|
|
|
pass
|
|
|
|
|
|
|
|
def _fetch(self) -> None:
|
|
|
|
"""Fetch all data sources for this ETL. When data sources are fetched, they
|
|
|
|
are stored in a cache directory for consistency between runs."""
|
|
|
|
for ds in self.get_data_sources():
|
|
|
|
ds.fetch()
|
|
|
|
|
|
|
|
def clear_data_source_cache(self) -> None:
|
|
|
|
"""Clears the cache for this ETLs data source(s)"""
|
|
|
|
shutil.rmtree(self.get_sources_path())
|
|
|
|
|
|
|
|
def extract(self, use_cached_data_sources: bool = False) -> None:
|
|
|
|
"""Extract (download) data from a remote source, and validate
|
|
|
|
that data. By default, this method fetches data from the set of
|
|
|
|
data sources returned by get_data_sources.
|
|
|
|
|
|
|
|
If use_cached_data_sources is true, this method attempts to use cached data
|
|
|
|
rather than re-downloading from the original source. The cache algorithm is very
|
|
|
|
simple: it just looks to see if the directory has any contents. If so, it uses
|
|
|
|
that content. If not, it downloads all data sources.
|
|
|
|
|
|
|
|
Subclasses should call super() before performing any work if they wish to take
|
|
|
|
advantage of the automatic downloading and caching ability of this superclass.
|
|
|
|
"""
|
2021-07-12 15:50:44 -04:00
|
|
|
|
2023-03-03 12:26:24 -06:00
|
|
|
if use_cached_data_sources and any(self.get_sources_path().iterdir()):
|
|
|
|
logger.info(
|
|
|
|
f"Using cached data sources for {self.__class__.__name__}"
|
|
|
|
)
|
|
|
|
else:
|
|
|
|
self.clear_data_source_cache()
|
|
|
|
self._fetch()
|
|
|
|
|
|
|
|
# the rest of the work should be performed here
|
|
|
|
|
|
|
|
@abstractmethod
|
2021-07-12 15:50:44 -04:00
|
|
|
def transform(self) -> None:
|
|
|
|
"""Transform the data extracted into a format that can be consumed by the
|
|
|
|
score generator"""
|
2023-03-03 12:26:24 -06:00
|
|
|
pass
|
2021-07-12 15:50:44 -04:00
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
def validate(self) -> None:
    """Validates the output.

    Runs after the `transform` step and before `load`.

    Checks, in order: that the ETL declares the new-style class
    parameters, that every column in COLUMNS_TO_KEEP exists in the
    output, and — for the configured geo level — that the geo ID
    column is kept, the row count is within bounds, every geo ID has
    the single expected character length, and geo IDs are unique.

    Raises:
        NotImplementedError: if COLUMNS_TO_KEEP or output_df is unset.
        ValueError: if any of the column/row/geo-ID checks fail.
    """
    # TODO: remove this once all ETL classes are converted to using the new
    # base class parameters and patterns.
    if self.GEO_LEVEL is None:
        # Legacy ETLs predate these class-level parameters; skip rather
        # than fail so they keep working until they are migrated.
        logger.warning(
            f"Skipping validation step for {self.__class__.__name__} because it does not "
            "seem to be converted to new ETL class patterns."
        )
        return

    if self.COLUMNS_TO_KEEP is None:
        raise NotImplementedError(
            "`self.COLUMNS_TO_KEEP` must be specified."
        )

    if self.output_df is None:
        raise NotImplementedError(
            "The `transform` step must set `self.output_df`."
        )

    # Every declared output column must actually be present in the frame.
    for column_to_keep in self.COLUMNS_TO_KEEP:
        if column_to_keep not in self.output_df.columns:
            raise ValueError(
                f"Missing column: `{column_to_keep}` is missing from "
                f"output"
            )

    # Each tuple pairs a geo level with its ID field name, the expected
    # character length of that ID, and the maximum allowed row count.
    # Only the tuple matching self.GEO_LEVEL is checked below.
    for (
        geo_level,
        geo_field,
        expected_geo_field_characters,
        expected_rows,
    ) in [
        (
            ValidGeoLevel.CENSUS_TRACT,
            self.GEOID_TRACT_FIELD_NAME,
            self.EXPECTED_CENSUS_TRACTS_CHARACTER_LENGTH,
            self.EXPECTED_MAX_CENSUS_TRACTS,
        ),
        (
            ValidGeoLevel.CENSUS_BLOCK_GROUP,
            self.GEOID_FIELD_NAME,
            self.EXPECTED_CENSUS_BLOCK_GROUPS_CHARACTER_LENGTH,
            self.EXPECTED_MAX_CENSUS_BLOCK_GROUPS,
        ),
    ]:
        # Identity comparison is appropriate for enum members.
        if self.GEO_LEVEL is geo_level:
            if geo_field not in self.COLUMNS_TO_KEEP:
                raise ValueError(
                    f"Must have `{geo_field}` in columns if "
                    f"specifying geo level as `{geo_level} "
                )
            if self.output_df.shape[0] > expected_rows:
                raise ValueError(
                    f"Too many rows: `{self.output_df.shape[0]}` rows in "
                    f"output exceeds expectation of `{expected_rows}` "
                    f"rows."
                )

            # All geo IDs must share a single character length...
            if self.output_df[geo_field].str.len().nunique() > 1:
                raise ValueError(
                    f"Multiple character lengths for geo field "
                    f"present: {self.output_df[geo_field].str.len().unique()}."
                )

            # ...and since the length is uniform, checking the first
            # value suffices to verify it is the expected length.
            elif (
                len(self.output_df[geo_field].array[0])
                != expected_geo_field_characters
            ):
                raise ValueError(
                    "Wrong character length: the census geography data "
                    "has the wrong length."
                )

            # Duplicate count = total rows minus distinct geo IDs.
            duplicate_geo_field_values = (
                self.output_df[geo_field].shape[0]
                - self.output_df[geo_field].nunique()
            )
            if duplicate_geo_field_values > 0:
                raise ValueError(
                    f"Duplicate values: There are {duplicate_geo_field_values} "
                    f"duplicate values in "
                    f"`{geo_field}`."
                )
|
|
|
|
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* removing source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordering
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy burden (#1518)
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docsctrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scopes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values of everything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requestsed in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants so the code is still there, but it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all the stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* DOn't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
# Check whether data contains expected states
|
|
|
|
states_in_output_df = (
|
|
|
|
self.output_df[self.GEOID_TRACT_FIELD_NAME]
|
|
|
|
.str[0:2]
|
|
|
|
.unique()
|
|
|
|
.tolist()
|
|
|
|
)
|
|
|
|
|
|
|
|
compare_to_list_of_expected_state_fips_codes(
|
|
|
|
actual_state_fips_codes=states_in_output_df,
|
|
|
|
continental_us_expected=self.CONTINENTAL_US_EXPECTED_IN_DATA,
|
|
|
|
alaska_and_hawaii_expected=self.ALASKA_AND_HAWAII_EXPECTED_IN_DATA,
|
|
|
|
puerto_rico_expected=self.PUERTO_RICO_EXPECTED_IN_DATA,
|
|
|
|
island_areas_expected=self.ISLAND_AREAS_EXPECTED_IN_DATA,
|
|
|
|
additional_fips_codes_not_expected=self.EXPECTED_MISSING_STATES,
|
|
|
|
dataset_name=self.NAME,
|
|
|
|
)
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
def load(self, float_format=None) -> None:
    """Saves the transformed data.

    Data is written in the specified local data folder or remote AWS S3 bucket.

    Uses the directory and the file name from `self._get_output_file_path`.

    Args:
        float_format: optional format string passed straight through to
            `DataFrame.to_csv` (e.g. "%.10f"); None keeps pandas' default.
    """
    logger.debug(f"Saving `{self.NAME}` CSV")

    # Make sure the destination directory exists before writing.
    destination = self._get_output_file_path()
    destination.parent.mkdir(parents=True, exist_ok=True)

    # Write the nationwide CSV, restricted to the columns this ETL exposes.
    self.output_df[self.COLUMNS_TO_KEEP].to_csv(
        destination, index=False, float_format=float_format
    )

    logger.debug(f"File written to `{destination}`.")
|
2022-02-08 19:05:32 -05:00
|
|
|
|
|
|
|
# This is a classmethod so it can be used without needing to create an instance of
|
|
|
|
# the class. This is a use case in `etl_score`.
|
|
|
|
@classmethod
|
|
|
|
def get_data_frame(cls) -> pd.DataFrame:
|
|
|
|
"""Return the output data frame for this class.
|
|
|
|
|
|
|
|
Must be run after a full ETL process has been run for this class.
|
|
|
|
|
|
|
|
If the ETL has been not run for this class, this will error.
|
2021-10-13 15:54:15 -04:00
|
|
|
"""
|
2022-02-08 19:05:32 -05:00
|
|
|
# Read in output file
|
|
|
|
output_file_path = cls._get_output_file_path()
|
|
|
|
if not output_file_path.exists():
|
|
|
|
raise ValueError(
|
|
|
|
f"Make sure to run ETL process first for `{cls}`. "
|
|
|
|
f"No file found at `{output_file_path}`."
|
|
|
|
)
|
|
|
|
|
2023-02-08 13:08:55 -06:00
|
|
|
logger.debug(
|
Backend release branch to main (#1822)
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* Create deploy_be_staging.yml (#1575)
* Imputing income using geographic neighbors (#1559)
Imputes income field with a light refactor. Needs more refactor and more tests (I spotchecked). Next ticket will check and address but a lot of "narwhal" architecture is here.
* Adding HOLC indicator (#1579)
Added HOLC indicator (Historic Redlining Score) from NCRC work; included 3.25 cutoff and low income as part of the housing burden category.
* Update backend for Puerto Rico (#1686)
* Update PR threshold count to 10
We now show 10 indicators for PR. See the discussion on the github issue for more info: https://github.com/usds/justice40-tool/issues/1621
* Do not use linguistic iso for Puerto Rico
Closes 1350.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* updating
* Do not drop Guam and USVI from ETL (#1681)
* Remove code that drops Guam and USVI from ETL
* Add back code for dropping rows by FIPS code
We may want this functionality, so let's keep it and just make the constant currently be an empty array.
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
* Emma nechamkin/holc patch (#1742)
Removing HOLC calculation from score narwhal.
* updating ejscreen data, try two (#1747)
* Rescaling linguistic isolation (#1750)
Rescales linguistic isolation to drop puerto rico
* adds UST indicator (#1786)
adds leaky underground storage tanks
* Changing LHE in tiles to a boolean (#1767)
also includes merging / clean up of the release
* added indoor plumbing to chas
* added indoor plumbing to score housing burden
* added indoor plumbing to score housing burden
* first run through
* Refactor DOE Energy Burden and COI to use YAML (#1796)
* added tribalId for Supplemental dataset (#1804)
* Setting zoom levels for tribal map (#1810)
* NRI dataset and initial score YAML configuration (#1534)
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* update be staging gha
* checkpoint
* update be staging gha
* NRI dataset and initial score YAML configuration
* checkpoint
* adding data checks for release branch
* passing tests
* adding INPUT_EXTRACTED_FILE_NAME to base class
* lint
* columns to keep and tests
* checkpoint
* PR Review
* renoving source url
* tests
* stop execution of ETL if there's a YAML schema issue
* update be staging gha
* adding source url as class var again
* clean up
* force cache bust
* gha cache bust
* dynamically set score vars from YAML
* docstrings
* removing last updated year - optional reverse percentile
* passing tests
* sort order
* column ordening
* PR review
* class level vars
* Updating DatasetsConfig
* fix pylint errors
* moving metadata hint back to code
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Correct copy typo (#1809)
* Add basic test suite for COI (#1518)
* Update COI to use new yaml (#1518)
* Add tests for DOE energy budren (1518
* Add dataset config for energy budren (1518)
* Refactor ETL to use datasets.yml (#1518)
* Add fake GEOIDs to COI tests (#1518)
* Refactor _setup_etl_instance_and_run_extract to base (#1518)
For the three classes we've done so far, a generic
_setup_etl_instance_and_run_extract will work fine, for the moment we
can reuse the same setup method until we decide future classes need more
flexibility --- but they can also always subclass so...
* Add output-path tests (#1518)
* Update YAML to match constant (#1518)
* Don't blindly set float format (#1518)
* Add defaults for extract (#1518)
* Run YAML load on all subclasses (#1518)
* Update description fields (#1518)
* Update YAML per final format (#1518)
* Update fixture tract IDs (#1518)
* Update base class refactor (#1518)
Now that NRI is final I needed to make a small number of updates to my
refactored code.
* Remove old comment (#1518)
* Fix type signature and return (#1518)
* Update per code review (#1518)
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Vim <86254807+vim-usds@users.noreply.github.com>
* Update etl_score_geo.py
Yikes! Fixing merge messup!
* updated to fix linting errors (#1818)
Cleans and updates base branch
* Adding back MapComparison video
* Add FUDS ETL (#1817)
* Add spatial join method (#1871)
Since we'll need to figure out the tracts for a large number of points
in future tickets, add a utility to handle grabbing the tract geometries
and adding tract data to a point dataset.
* Add FUDS, also jupyter lab (#1871)
* Add YAML configs for FUDS (#1871)
* Allow input geoid to be optional (#1871)
* Add FUDS ETL, tests, test-datae noteobook (#1871)
This adds the ETL class for Formerly Used Defense Sites (FUDS). This is
different from most other ETLs since these FUDS are not provided by
tract, but instead by geographic point, so we need to assign FUDS to
tracts and then do calculations from there.
* Floats -> Ints, as I intended (#1871)
* Floats -> Ints, as I intended (#1871)
* Formatting fixes (#1871)
* Add test false positive GEOIDs (#1871)
* Add gdal binaries (#1871)
* Refactor pandas code to be more idiomatic (#1871)
Per Emma, the more pandas-y way of doing my counts is using np.where to
add the values i need, then groupby and size. It is definitely more
compact, and also I think more correct!
* Update configs per Emma suggestions (#1871)
* Type fixed! (#1871)
* Remove spurious import from vscode (#1871)
* Snapshot update after changing col name (#1871)
* Move up GDAL (#1871)
* Adjust geojson strategy (#1871)
* Try running census separately first (#1871)
* Fix import order (#1871)
* Cleanup cache strategy (#1871)
* Download census data from S3 instead of re-calculating (#1871)
* Clarify pandas code per Emma (#1871)
* Disable markdown check for link
* Adding DOT composite to travel score (#1820)
This adds the DOT dataset to the ETL and to the score. Note that currently we take a percentile of an average of percentiles.
* Adding first street foundation data (#1823)
Adding FSF flood and wildfire risk datasets to the score.
* first run -- adding NCLD data to the ETL, but not yet to the score
* Add abandoned mine lands data (#1824)
* Add notebook to generate test data (#1780)
* Add Abandoned Mine Land data (#1780)
Using a similar structure but simpler apporach compared to FUDs, add an
indicator for whether a tract has an abandonded mine.
* Adding some detail to dataset readmes
Just a thought!
* Apply feedback from revieiw (#1780)
* Fixup bad string that broke test (#1780)
* Update a string that I should have renamed (#1780)
* Reduce number of threads to reduce memory pressure (#1780)
* Try not running geo data (#1780)
* Run the high-memory sets separately (#1780)
* Actually deduplicate (#1780)
* Add flag for memory intensive ETLs (#1780)
* Document new flag for datasets (#1780)
* Add flag for new datasets fro rebase (#1780)
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
* Adding NLCD data (#1826)
Adding NLCD's natural space indicator end to end to the score.
* Add donut hole calculation to score (#1828)
Adds adjacency index to the pipeline. Requires thorough QA
* Adding eamlis and fuds data to legacy pollution in score (#1832)
Update to add EAMLIS and FUDS data to score
* Update to use new FSF files (#1838)
backend is partially done!
* Quick fix to kitchen or plumbing indicator
Yikes! I think I messed something up and dropped the pctile field suffix from when the KP score gets calculated. Fixing right quick.
* Fast flag update (#1844)
Added additional flags for the front end based on our conversation in stand up this morning.
* Tiles fix (#1845)
Fixes score-geo and adds flags
* Update etl_score_geo.py
* Issue 1827: Add demographics to tiles and download files (#1833)
* Adding demographics for use in sidebar and download files
* Updates backend constants to N (#1854)
* updated to show T/F/null vs T/F for AML and FUDS (#1866)
* fix markdown
* just testing that the boolean is preserved on gha
* checking drop tracts works
* OOPS!
Old changes persisted
* adding a check to the agvalue calculation for nri
* updated with error messages
* updated error message
* tuple type
* Score tests (#1847)
* update Python version on README; tuple typing fix
* Alaska tribal points fix (#1821)
* Bump mistune from 0.8.4 to 2.0.3 in /data/data-pipeline (#1777)
Bumps [mistune](https://github.com/lepture/mistune) from 0.8.4 to 2.0.3.
- [Release notes](https://github.com/lepture/mistune/releases)
- [Changelog](https://github.com/lepture/mistune/blob/master/docs/changes.rst)
- [Commits](https://github.com/lepture/mistune/compare/v0.8.4...v2.0.3)
---
updated-dependencies:
- dependency-name: mistune
dependency-type: indirect
...
Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* poetry update
* initial pass of score tests
* add threshold tests
* added ses threshold (not donut, not island)
* testing suite -- stopping for the day
* added test for lead proxy indicator
* Refactor score tests to make them less verbose and more direct (#1865)
* Cleanup tests slightly before refactor (#1846)
* Refactor score calculations tests
* Feedback from review
* Refactor output tests like calculatoin tests (#1846) (#1870)
* Reorganize files (#1846)
* Switch from lru_cache to fixture scorpes (#1846)
* Add tests for all factors (#1846)
* Mark smoketests and run as part of be deply (#1846)
* Update renamed var (#1846)
* Switch from named tuple to dataclass (#1846)
This is annoying, but pylint in python3.8 was crashing parsing the named
tuple. We weren't using any namedtuple-specific features, so I made the
type a dataclass just to get pylint to behave.
* Add default timout to requests (#1846)
* Fix type (#1846)
* Fix merge mistake on poetry.lock (#1846)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* just testing that the boolean is preserved on gha (#1867)
* updated with hopefully a fix; coercing aml, fuds, hrs to booleans for the raw value to preserve null character.
* Adding tests to ensure proper calculations (#1871)
* just testing that the boolean is preserved on gha
* checking drop tracts works
* adding a check to the agvalue calculation for nri
* updated with error messages
* tribal tiles fix (#1874)
* Alaska tribal points fix (#1821)
* tribal tiles fix
* disabling child opportunity
* lint
* removing COI
* removing commented out code
* Pipeline tile tests (#1864)
* temp update
* updating with fips check
* adding check on pfs
* updating with pfs test
* Update test_tiles_smoketests.py
* Fix lint errors (#1848)
* Add column names test (#1848)
* Mark tests as smoketests (#1848)
* Move to other score-related tests (#1848)
* Recast Total threshold criteria exceeded to int (#1848)
In writing tests to verify the output of the tiles csv matches the final
score CSV, I noticed TC/Total threshold criteria exceeded was getting
cast from an int64 to a float64 in the process of PostScoreETL. I
tracked it down to the line where we merge the score dataframe with
constants.DATA_CENSUS_CSV_FILE_PATH --- there where > 100 tracts in the
national census CSV that don't exist in the score, so those ended up
with a Total threshhold count of np.nan, which is a float, and thereby
cast those columns to float. For the moment I just cast it back.
* No need for low memeory (#1848)
* Add additional tests of tiles.csv (#1848)
* Drop pre-2010 rows before computing score (#1848)
Note this is probably NOT the optimal place for this change; it might
make more sense for each source to filter its own tracts down to the
acceptable tract list. However, that would be a pretty invasive change,
where this is central and plenty of other things are happening in score
transform that could be moved to sources, so for today, here's where the
change will live.
* Fix typo (#1848)
* Switch from filter to inner join (#1848)
* Remove no-op lines from tiles (#1848)
* Apply feedback from review, linter (#1848)
* Check the values oeverything in the frame (#1848)
* Refactor checker class (#1848)
* Add test for state names (#1848)
* cleanup from reviewing my own code (#1848)
* Fix lint error (#1858)
* Apply Emma's feedback from review (#1848)
* Remove refs to national_df (#1848)
* Account for new, fake nullable bools in tiles (#1848)
To handle a geojson limitation, Emma converted some nullable boolean
colunms to float64 in the tiles export with the values {0.0, 1.0, nan},
giving us the same expressiveness. Sadly, this broke my assumption that
all columns between the score and tiles csvs would have the same dtypes,
so I need to account for these new, fake bools in my test.
* Use equals instead of my worse version (#1848)
* Missed a spot where we called _create_score_data (#1848)
* Update per safety (#1848)
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Add tests to make sure each source makes it to the score correctly (#1878)
* Remove unused persistent poverty from score (#1835)
* Test a few datasets for overlap in the final score (#1835)
* Add remaining data sources (#1853)
* Apply code-review feedback (#1835)
* Rearrange a little for readabililty (#1835)
* Add tract test (#1835)
* Add test for score values (#1835)
* Check for unmatched source tracts (#1835)
* Cleanup numeric code to plaintext (#1835)
* Make import more obvious (#1835)
* Updating traffic barriers to include low pop threshold (#1889)
Changing the traffic barriers to only be included for places with recorded population
* Remove no land tracts from map (#1894)
remove from map
* Issue 1831: missing life expectancy data from Maine and Wisconsin (#1887)
* Fixing missing states and adding tests for states to all classes
* Removing low pop tracts from FEMA population loss (#1898)
dropping 0 population from FEMA
* 1831 Follow up (#1902)
This code causes no functional change to the code. It does two things:
1. Uses difference instead of - to improve code style for working with sets.
2. Removes the line EXPECTED_MISSING_STATES = ["02", "15"], which is now redundant because of the line I added (in a previous pull request) of ALASKA_AND_HAWAII_EXPECTED_IN_DATA = False.
* Add tests for all non-census sources (#1899)
* Refactor CDC life-expectancy (1554)
* Update to new tract list (#1554)
* Adjust for tests (#1848)
* Add tests for cdc_places (#1848)
* Add EJScreen tests (#1848)
* Add tests for HUD housing (#1848)
* Add tests for GeoCorr (#1848)
* Add persistent poverty tests (#1848)
* Update for sources without zips, for new validation (#1848)
* Update tests for new multi-CSV but (#1848)
Lucas updated the CDC life expectancy data to handle a bug where two
states are missing from the US Overall download. Since virtually none of
our other ETL classes download multiple CSVs directly like this, it
required a pretty invasive new mocking strategy.
* Add basic tests for nature deprived (#1848)
* Add wildfire tests (#1848)
* Add flood risk tests (#1848)
* Add DOT travel tests (#1848)
* Add historic redlining tests (#1848)
* Add tests for ME and WI (#1848)
* Update now that validation exists (#1848)
* Adjust for validation (#1848)
* Add health insurance back to cdc places (#1848)
Ooops
* Update tests with new field (#1848)
* Test for blank tract removal (#1848)
* Add tracts for clipping behavior
* Test clipping and zfill behavior (#1848)
* Fix bad test assumption (#1848)
* Simplify class, add test for tract padding (#1848)
* Fix percentage inversion, update tests (#1848)
Looking through the transformations, I noticed that we were subtracting
a percentage that is usually between 0-100 from 1 instead of 100, and so
were endind up with some surprising results. Confirmed with lucasmbrown-usds
* Add note about first street data (#1848)
* Issue 1900: Tribal overlap with Census tracts (#1903)
* working notebook
* updating notebook
* wip
* fixing broken tests
* adding tribal overlap files
* WIP
* WIP
* WIP, calculated count and names
* working
* partial cleanup
* partial cleanup
* updating field names
* fixing bug
* removing pyogrio
* removing unused imports
* updating test fixtures to be more realistic
* cleaning up notebook
* fixing black
* fixing flake8 errors
* adding tox instructions
* updating etl_score
* suppressing warning
* Use projected CRSes, ignore geom types (#1900)
I looked into this a bit, and in general the geometry type mismatch
changes very little about the calculation; we have a mix of
multipolygons and polygons. The fastest thing to do is just not keep
geom type; I did some runs with it set to both True and False, and
they're the same within 9 digits of precision. Logically we just want to
overlaps, regardless of how the actual geometries are encoded between
the frames, so we can in this case ignore the geom types and feel OKAY.
I also moved to projected CRSes, since we are actually trying to do area
calculations and so like, we should. Again, the change is small in
magnitude but logically more sound.
* Readd CDC dataset config (#1900)
* adding comments to fips code
* delete unnecessary loggers
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Improve score test documentation based on Lucas's feedback (#1835) (#1914)
* Better document base on Lucas's feedback (#1835)
* Fix typo (#1835)
* Add test to verify GEOJSON matches tiles (#1835)
* Remove NOOP line (#1835)
* Move GEOJSON generation up for new smoketest (#1835)
* Fixup code format (#1835)
* Update readme for new somketest (#1835)
* Cleanup source tests (#1912)
* Move test to base for broader coverage (#1848)
* Remove duplicate line (#1848)
* FUDS needed an extra mock (#1848)
* Add tribal count notebook (#1917) (#1919)
* Add tribal count notebook (#1917)
* test without caching
* added comment
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
* Add tribal overlap to downloads (#1907)
* Add tribal data to downloads (#1904)
* Update test pickle with current cols (#1904)
* Remove text of tribe names from GeoJSON (#1904)
* Update test data (#1904)
* Add tribal overlap to smoketests (#1904)
* Issue 1910: Do not impute income for 0 population tracts (#1918)
* should be working, has unnecessary loggers
* removing loggers and cleaning up
* updating ejscreen tests
* adding tests and responding to PR feedback
* fixing broken smoke test
* delete smoketest docs
* updating click
* updating click
* Bump just jupyterlab (#1930)
* Fixing link checker (#1929)
* Update deps safety says are vulnerable (#1937) (#1938)
Co-authored-by: matt bowen <matt@mattbowen.net>
* Add demos for island areas (#1932)
* Backfill population in island areas (#1882)
* Update smoketest to account for backfills (#1882)
As I wrote in the commend:
We backfill island areas with data from the 2010 census, so if THOSE tracts
have data beyond the data source, that's to be expected and is fine to pass.
If some other state or territory does though, this should fail
This ends up being a nice way of documenting that behavior i guess!
* Fixup lint issues (#1882)
* Add in race demos to 2010 census pull (#1851)
* Add backfill data to score (#1851)
* Change column name (#1851)
* Fill demos after the score (#1851)
* Add income back, adjust test (#1882)
* Apply code-review feedback (#1851)
* Add test for island area backfill (#1851)
* Fix bad rename (#1851)
* Reorder download fields, add plumbing back (#1942)
* Add back lack of plumbing fields (#1920)
* Reorder fields for excel (#1921)
* Reorder excel fields (#1921)
* Fix formating, lint errors, pickes (#1921)
* Add missing plumbing col, fix order again (#1921)
* Update that pickle (#1921)
* refactoring tribal (#1960)
* updated with scoring comparison
* updated for narhwal -- leaving commented code in for now
* pydantic upgrade
* produce a string for the front end to ingest (#1963)
* wip
* i believe this works -- let's see the pipeline
* updated fixtures
* Adding ADJLI_ET (#1976)
* updated tile data
* ensuring adjli_et in
* Add back income percentile (#1977)
* Add missing field to download (#1964)
* Remove pydantic since it's unused (#1964)
* Add percentile to CSV (#1964)
* Update downloadable pickle (#1964)
* Issue 105: Configure and run `black` and other pre-commit hooks (clean branch) (#1962)
* Configure and run `black` and other pre-commit hooks
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
* Removing fixed python version for black (#1985)
* Fixup TA_COUNT and TA_PERC (#1991)
* Change TA_PERC, change TA_COUNT (#1988, #1989)
- Make TA_PERC_STR back into a nullable float following the rules
requested in #1989
- Move TA_COUNT to be TA_COUNT_AK, also add a null TA_COUNT_C for CONUS
that we can fill in later.
* Fix typo comment (#1988)
* Issue 1992: Do not impute income for null population tracts (#1993)
* Hotfix for DOT data source DNS issue (#1999)
* Make tribal overlap set score N (#2004)
* Add "Is a Tribal DAC" field (#1998)
* Add tribal DACs to score N final (#1998)
* Add new fields to downloads (#1998)
* Make a int a float (#1998)
* Update field names, apply feedback (#1998)
* Add assertions around codebook (#2014)
* Add assertion around codebook (#1505)
* Assert csv and excel have same cols (#1505)
* Remove suffixes from tribal lands (#1974) (#2008)
* Data source location (#2015)
* data source location
* toml
* cdc_places
* cdc_svi_index
* url updates
* child oppy and dot travel
* up to hud_recap
* completed ticket
* cache bust
* hud_recap
* us_army_fuds
* Remove vars the frontend doesn't use (#2020) (#2022)
I did a pretty rough and simple analysis of the variables we put in the
tiles and grepped the frontend code to see if (1) they're ever accessed
and (2) if they're used, even if they're read once. I removed everything
I noticed was not accessed.
* Disable file size limits on tiles (#2031)
* Disable file size limits on tiles
* Remove print debugs
I know.
* Update file name pattern (#2037) (#2038)
* Update file name pattern (#2037)
* Remove ETL from generation (2037)
I looked more carefully, and this ETL step isn't used in the score, so
there's no need to run it every time. Per previous steps, I removed it
from constants, so the code is still there but it won't run by default.
* Round ALL the float fields for the tiles (#2040)
* Round ALL the float fields for the tiles (#2033)
* Floor in a simpler way (#2033)
Emma pointed out that all the stuff we're doing in floor_series is
probably unnecessary for this case, so just use the built-in floor.
* Update pickle I missed (#2033)
* Clean commit of just aggregate burden notebook (#1819)
added a burden notebook
* Update the dockerfile (#2045)
* Update so the image builds (#2026)
* Fix bad dict (2026)
* Rename census tract field in downloads (#2068)
* Change tract ID field name (2060)
* Update lockfile (#2061)
* Bump safety, jupyter, wheel (#2061)
* Don't depend directly on wheel (2061)
* Bring narwhal reqs in line with main
* Update tribal area counts (#2071)
* Rename tribal area field (2062)
* Add missing file (#2062)
* Add checks to create version (#2047) (#2052)
* Fix failing safety (#2114)
* Ignore vuln that doesn't affect us 2113
https://nvd.nist.gov/vuln/detail/CVE-2022-42969 landed recently and
there's no fix in py (which is maintenance mode). From my analysis, that
CVE cannot hurt us (famous last words), so we'll ignore the vuln for
now.
* 2113 Update our gdal ppa
* that didn't work (2113)
* Don't add the PPA, the package exists (#2113)
* Fix type (#2113)
* Force an update of wheel 2113
* Also remove PPA line from create-score-versions
* Drop 3.8 because of wheel 2113
* Put back 3.8, use newer actions
* Try another way of upgrading wheel 2113
* Upgrade wheel in tox too 2113
* Typo fix 2113
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: Emma Nechamkin <97977170+emma-nechamkin@users.noreply.github.com>
Co-authored-by: Shelby Switzer <shelby.c.switzer@omb.eop.gov>
Co-authored-by: Shelby Switzer <shelbyswitzer@gmail.com>
Co-authored-by: Emma Nechamkin <Emma.J.Nechamkin@omb.eop.gov>
Co-authored-by: Matt Bowen <83967628+mattbowen-usds@users.noreply.github.com>
Co-authored-by: Jorge Escobar <83969469+esfoobar-usds@users.noreply.github.com>
Co-authored-by: lucasmbrown-usds <lucas.m.brown@omb.eop.gov>
Co-authored-by: Jorge Escobar <jorge.e.escobar@omb.eop.gov>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: matt bowen <matthew.r.bowen@omb.eop.gov>
Co-authored-by: matt bowen <matt@mattbowen.net>
2022-12-01 18:50:54 -08:00
|
|
|
f"Reading in CSV `{output_file_path}` for ETL of class `{cls}`."
|
|
|
|
)
|
2022-02-08 19:05:32 -05:00
|
|
|
output_df = pd.read_csv(
|
|
|
|
output_file_path,
|
2021-10-13 15:54:15 -04:00
|
|
|
dtype={
|
2022-02-08 19:05:32 -05:00
|
|
|
# Not all outputs will have both a Census Block Group ID and a
|
|
|
|
# Tract ID, but these will be ignored if they're not present.
|
|
|
|
cls.GEOID_FIELD_NAME: "string",
|
|
|
|
cls.GEOID_TRACT_FIELD_NAME: "string",
|
2021-10-13 15:54:15 -04:00
|
|
|
},
|
|
|
|
)
|
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
return output_df
|
2021-10-13 15:54:15 -04:00
|
|
|
|
2022-02-08 19:05:32 -05:00
|
|
|
def cleanup(self) -> None:
|
|
|
|
"""Clears out any files stored in the TMP folder"""
|
2022-02-11 14:04:53 -05:00
|
|
|
remove_all_from_dir(self.get_tmp_path())
|
2023-03-03 12:26:24 -06:00
|
|
|
|
|
|
|
def get_tmp_path(self) -> pathlib.Path:
|
|
|
|
"""Returns the temporary path associated with this ETL class."""
|
|
|
|
# Note: the temporary path will be defined on `init`, because it uses the class
|
|
|
|
# of the instance which is often a child class.
|
|
|
|
tmp_path = self.DATA_PATH / "tmp" / str(self.__class__.__name__)
|
|
|
|
|
|
|
|
# Create directory if it doesn't exist
|
|
|
|
tmp_path.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
|
|
|
return tmp_path
|