From a9b0b3a5fc9b046288d757591c14874a6757c085 Mon Sep 17 00:00:00 2001 From: Justin Terry Date: Tue, 19 Sep 2023 13:06:25 -0700 Subject: [PATCH 1/4] Revert "Revert" This reverts commit ba627d59c4c53b7125180db761505395a486022f. --- lambdas/raster_analysis/src/lambda_function.py | 2 +- raster_analysis/geometry.py | 3 --- raster_analysis/tiling.py | 12 +++++++----- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/lambdas/raster_analysis/src/lambda_function.py b/lambdas/raster_analysis/src/lambda_function.py index 7f3bd5d..87edd9a 100644 --- a/lambdas/raster_analysis/src/lambda_function.py +++ b/lambdas/raster_analysis/src/lambda_function.py @@ -32,7 +32,7 @@ def handler(event, context): if not geom_tile.geom: LOGGER.info(f"Geometry for tile {context.aws_request_id} is empty.") - results_store.save_result({}, context.aws_request_id) + results_store.save_result(DataFrame(), context.aws_request_id) return {} data_environment = DataEnvironment(layers=event["environment"]) diff --git a/raster_analysis/geometry.py b/raster_analysis/geometry.py index 033dfc9..d60f0fa 100644 --- a/raster_analysis/geometry.py +++ b/raster_analysis/geometry.py @@ -37,9 +37,6 @@ def __init__( f"Could not create valid tile from geom {full_geom.wkt} and tile {tile.wkt}" ) - if geom_tile.is_empty: - self.geom = {} - self.geom = geom_tile diff --git a/raster_analysis/tiling.py b/raster_analysis/tiling.py index 2ce704f..679bb9e 100644 --- a/raster_analysis/tiling.py +++ b/raster_analysis/tiling.py @@ -124,16 +124,18 @@ def _postprocess_results(self, results): return results def _execute_tiles(self) -> DataFrame: - tiles = self._get_tiles(self.grid.tile_degrees) payload: Dict[str, Any] = { "query": self.raw_query, "environment": self.data_environment.dict(), } - if sys.getsizeof(json.dumps(self.raw_geom)) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES: - payload["encoded_geometry"] = encode_geometry(self.geom) - else: - payload["geometry"] = self.raw_geom + payload["geometry"] = self.raw_geom + if sys.getsizeof(json.dumps(payload)) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES: + # if payload would be too big, compress geometry + geom = shape(payload.pop("geometry")) + payload["encoded_geometry"] = encode_geometry(geom) + + tiles = self._get_tiles(self.grid.tile_degrees) results_store = AnalysisResultsStore() tile_keys = [ From e1f8f25704715bdedbc2a7f61a64d5b2cea1c0ef Mon Sep 17 00:00:00 2001 From: Solomon Negusse Date: Thu, 22 Aug 2024 17:43:29 +0300 Subject: [PATCH 2/4] remove xray and add code guru profiling --- lambdas/fanout/src/lambda_function.py | 6 ++---- lambdas/raster_analysis/src/lambda_function.py | 6 ++---- lambdas/tiled_analysis/src/lambda_function.py | 6 ++---- setup.py | 2 +- 4 files changed, 7 insertions(+), 13 deletions(-) diff --git a/lambdas/fanout/src/lambda_function.py b/lambdas/fanout/src/lambda_function.py index 716a11f..6f1d81a 100644 --- a/lambdas/fanout/src/lambda_function.py +++ b/lambdas/fanout/src/lambda_function.py @@ -1,14 +1,12 @@ from copy import deepcopy -from aws_xray_sdk.core import patch, xray_recorder +from codeguru_profiler_agent import with_lambda_profiler from raster_analysis.boto import invoke_lambda, lambda_client from raster_analysis.globals import LOGGER, RASTER_ANALYSIS_LAMBDA_NAME -patch(["boto3"]) - -@xray_recorder.capture("Fanout Raster Analysis") +@with_lambda_profiler(profiling_group_name="raster_analysis_fanout_profiler") def handler(event, context): tiles = event.get("tiles", []) payload_base = event["payload"] diff --git a/lambdas/raster_analysis/src/lambda_function.py 
b/lambdas/raster_analysis/src/lambda_function.py index 87edd9a..b4cb6bb 100644 --- a/lambdas/raster_analysis/src/lambda_function.py +++ b/lambdas/raster_analysis/src/lambda_function.py @@ -1,4 +1,4 @@ -from aws_xray_sdk.core import patch, xray_recorder +from codeguru_profiler_agent import with_lambda_profiler from pandas import DataFrame from raster_analysis.data_cube import DataCube @@ -9,10 +9,8 @@ from raster_analysis.query_executor import QueryExecutor from raster_analysis.results_store import AnalysisResultsStore, ResultStatus -patch(["boto3"]) - -@xray_recorder.capture("Raster Analysis") +@with_lambda_profiler(profiling_group_name="raster_analysis_default_profiler") def handler(event, context): try: LOGGER.info(f"Running analysis with parameters: {event}") diff --git a/lambdas/tiled_analysis/src/lambda_function.py b/lambdas/tiled_analysis/src/lambda_function.py index 146ee8b..b4544e7 100644 --- a/lambdas/tiled_analysis/src/lambda_function.py +++ b/lambdas/tiled_analysis/src/lambda_function.py @@ -1,14 +1,12 @@ -from aws_xray_sdk.core import patch, xray_recorder +from codeguru_profiler_agent import with_lambda_profiler from raster_analysis.data_environment import DataEnvironment from raster_analysis.exceptions import QueryParseException from raster_analysis.globals import LOGGER from raster_analysis.tiling import AnalysisTiler -patch(["boto3"]) - -@xray_recorder.capture("Tiled Analysis") +@with_lambda_profiler(profiling_group_name="raster_analysis_tiled_profiler") def handler(event, context): try: LOGGER.info(f"Running analysis with parameters: {event}") diff --git a/setup.py b/setup.py index e816a15..5099a45 100644 --- a/setup.py +++ b/setup.py @@ -8,11 +8,11 @@ author="Justin Terry/Thomas Maschler", license="MIT", install_requires=[ - "aws-xray-sdk==2.12.0", "requests==2.31.0", "geobuf==1.1.1", "protobuf==3.20.3", "pydantic==1.10.12", "mo_sql_parsing==9.436.23241", + "codeguru-profiler-agent", ], ) From 72f44a28c41e1d943430bf5eeae8328d854ebd3d Mon Sep 17 00:00:00 2001 From: Solomon Negusse Date: Sat, 24 Aug 2024 13:52:48 +0300 Subject: [PATCH 3/4] add back xray for list processing --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 5099a45..c9ca7fd 100644 --- a/setup.py +++ b/setup.py @@ -8,6 +8,7 @@ author="Justin Terry/Thomas Maschler", license="MIT", install_requires=[ + "aws-xray-sdk==2.12.0", "requests==2.31.0", "geobuf==1.1.1", "protobuf==3.20.3", From 69d4db052fb3f73fbd83a61bfd6314a2599e2341 Mon Sep 17 00:00:00 2001 From: jterry64 Date: Thu, 26 Sep 2024 16:59:40 -0700 Subject: [PATCH 4/4] Update README.md --- README.md | 381 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 242 insertions(+), 139 deletions(-) diff --git a/README.md b/README.md index 7e04a02..8668da2 100644 --- a/README.md +++ b/README.md @@ -2,26 +2,20 @@ ### Functionality -Run zonal statistics on tree cover loss, GLAD alerts, or arbitrary contextual layers defined in our data lake. +Run raster zonal analysis on data in [a gfw-data-api](https://github.com/gfw-api/gfw-data-api). Use the lambda to run on one geometry, or the step function to run on a list of geometries. Supported analyses: -See [a gfw-data-api](https://github.com/gfw-api/gfw-data-api) for how to access through analysis API. +- Zonal statistics across multiple layers at once, including masks and grouping +- Pulling point data in a zone, including latitude and longitude +See **Raster SQL** for how to query datasets. 
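For a quick start, the sketch below shows one way to invoke the analysis Lambda for a single geometry with boto3. The function name is a placeholder for whatever name your deployment uses, and the payload simply carries the parameters documented in the next section; the `data_environment` shape is an assumption (it is normally generated for you by the GFW data API).

```python
import json

import boto3

# Placeholder: substitute the function name from your deployment.
FUNCTION_NAME = "tiled-raster-analysis"

payload = {
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[9, 4.1], [9.1, 4.1], [9.1, 4.2], [9, 4.2], [9, 4.1]]],
    },
    "sql": "SELECT SUM(area__ha) FROM data GROUP BY umd_tree_cover_loss__year",
    "data_environment": {"layers": []},  # normally built by the GFW data API
}

response = boto3.client("lambda").invoke(
    FunctionName=FUNCTION_NAME,
    InvocationType="RequestResponse",
    Payload=json.dumps(payload),
)
print(json.loads(response["Payload"].read()))
```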
### Query Parameters -All layers should be referred to by their standard data lake column name: __ or is__ for boolean layers. - -See the data API for a full list of registered layers. - - -|Parameter|Type|Description|Example| -|---------|----|-----------|-------| -|geostore_id (required)| String | A valid geostore ID containing the GeoJSON for the geometry of interest (see further specification in `Limitations and assumtions` | cb64960710fd11925df3fda6a2005ca9 | -|group_by| [String] | Rasters with categorical pixel values to group by.| umd_tree_cover_loss__year, umd_glad_alerts, tsc_tree_cover_loss_drivers__type | -|filters| [String] | Rasters to apply as a mask. Pixels with NoData values will be filtered out of the final result. For umd_tree_cover_density_2000/2010, you can put a threshold number as the data type, and it will apply a filter for that threshold| is__umd_regional_primary_forest_2001, umd_tree_cover_density_2000__30| -|sum| [String] | Pixel values will be summed based on intersection with group_by layers. If there are no group_by layers, all pixel values will be summed to a single number. Pixel value must be numerical. This field can also include area__ha or alert__count, which will give the pixel count or area.| area__ha, whrc_aboveground_co2_emissions__Mg | -|start| Date | Filters date group_by columns to this start date. Must be a year or a YYYY-MM-DD formatted date. | 2015, 2015-02-04 | -|end| Date | Same format as 'start'. Must come after start. | 2016 , 2016-02-10 | +|Parameter|Type|Description| +|---------|----|-----------| +|geometry (required)| GeoJSON | A valid GeoJSON geometry to run analysis on. Must be a Polygon or MultiPolygon. | +|sql (required)| String | A **Raster SQL** query string for the analysis you want to run. See below for more details.| SELECT SUM(area__ha), umd_tree_cover_loss__year FROM data WHERE umd_tree_cover_density__percent > 30 GROUP BY umd_tree_cover_loss__year | +|data_environment (required) | Dict | A config telling the raster analysis how to match layer names in the query with the actual source. This is typically created by [a gfw-data-api](https://github.com/gfw-api/gfw-data-api) automatically using layers in the API.| #### Examples Request: @@ -31,12 +25,8 @@ Request: "type": "Polygon", "coordinates": [[[9, 4.1], [9.1, 4.1], [9.1, 4.2], [9, 4.2], [9, 4.1]]], }, - "group_by": ["umd_tree_cover_loss__year"], - "filters": [ - "is__umd_regional_primary_forest_2001", - "umd_tree_cover_density_2000__30", - ], - "sum": ["area__ha", "whrc_aboveground_co2_emissions__Mg"], + "sql": "SELECT umd_tree_cover_loss__year, SUM(area__ha), SUM(whrc_aboveground_co2_emissions__Mg) FROM data + WHERE umd_tree_cover_density_2000__percent > 30 GROUP BY umd_tree_cover_loss__year" } ``` @@ -139,13 +129,237 @@ Response: } ``` +### Raster SQL + +WRI has a strong need for complex analytics beyond just simple zonal statistics. Often, we need to apply multiple masks based on pixel value, group by pixel values, and aggregate multiple values at once. Traditional GIS involves pregenerating and applying masks on each layer, then running a zonal statistics analysis. Because these analyses have a strong overlap with SQL, we invented a subset SQL called Raster SQL, that allows for expressive queries that apply the necessary GIS operations on-the-fly. 
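Before walking through each operation, here is a rough, illustrative numpy sketch (not the actual implementation) of what a query with a filter, a GROUP BY layer, and a SUM aggregate boils down to once the raster windows for a tile have been read. All names and values below are made up for the example; the sections that follow explain how each SQL clause maps to these raster operations.

```python
import numpy as np

# Illustrative stand-ins for raster windows already read for one tile
# (all arrays share the same shape).
tcl_year = np.array([[0, 2003, 2019], [2019, 0, 2003]])   # GROUP BY layer (0 = NoData)
tcd_2000 = np.array([[80, 45, 10], [90, 20, 75]])         # WHERE layer (percent)
pixel_area_ha = np.full(tcl_year.shape, 0.077)            # per-pixel area, varies by latitude
geom_mask = np.array([[1, 1, 1], [1, 1, 0]], dtype=bool)  # rasterized geometry

# WHERE umd_tree_cover_density_2000__percent > 30 -> boolean mask, combined with
# the rasterized geometry and the GROUP BY layer's own NoData mask.
mask = geom_mask & (tcd_2000 > 30) & (tcl_year != 0)

# GROUP BY umd_tree_cover_loss__year, SUM(area__ha) -> weighted histogram over
# the unique pixel values that survive the mask.
years, inverse = np.unique(tcl_year[mask], return_inverse=True)
area_by_year = np.bincount(inverse, weights=pixel_area_ha[mask])

for year, area in zip(years, area_by_year):
    print(int(year), round(float(area), 3))
```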
Our SQL subset supports the following:
- `SELECT` statements, which can either use aggregate functions or pull out pixel values as "rows"
- `WHERE` statements, supporting `AND`/`OR` operators, basic comparison operators and nested expressions (e.g. `x > 2 AND (y = 3 OR z < 4)`)
- `GROUP BY` statements, which can include multiple GROUP BY layers
- The aggregates `COUNT` (just counts pixels), `SUM` (sums values of pixels), and `AVG` (averages values of pixels)

This translates to GIS operations in the following ways:

**Basics**

`SELECT COUNT(*) FROM data`

While the query you write looks like this, it will actually be rewritten under the hood, based on which datasets in the API you query, to look something like this:

`SELECT COUNT(*) FROM umd_tree_cover_loss__year`

Each field name references either an actual raster layer or a special reserved word. In this basic analysis, the `umd_tree_cover_loss__year` layer will be applied as a basic mask, and then zonal statistics will be collected on the mask in the `geometry`. To perform zonal statistics, the geometry is rasterized and applied as an additional mask. The `COUNT` aggregate will just count all the non-masked pixels. This will return something like:

```JSON
{
    "status":"success",
    "data": [
        {
            "count": 242
        }
    ]
}
```

The `count` is the number of non-NoData pixels in the `umd_tree_cover_loss__year` raster that intersect the `geometry`.

**Area**

Usually we care about the actual hectare area of loss, not just the count of pixels. All of our analysis is in WGS84, a geographic coordinate system, so pixels are measured in degrees rather than meters. The actual meter length of 1 degree varies with latitude because of the projection. To calculate real hectares, we introduced a special reserved field `area__ha`:

`SELECT SUM(area__ha) FROM umd_tree_cover_loss__year`

Now, rather than just counting the number of pixels, we're getting the sum of the hectare area of the pixels. When the query executor sees `area__ha`, it will calculate the hectare area of the pixels based on their latitude, and return the sum of the areas of non-masked pixels. So the results would now look like:

```JSON
{
    "status":"success",
    "data": [
        {
            "area__ha": 175.245
        }
    ]
}
```

This is the actual hectare area of loss in the `geometry`.

**Aggregation**

We can also aggregate the values of raster layers, like carbon emissions or biomass. All we need to do is apply the `SUM` function to the layer:

`SELECT SUM(whrc_aboveground_co2_emissions__Mg) FROM umd_tree_cover_loss__year`

This gets the amount of carbon emissions due to tree cover loss. The result might look like:

```JSON
{
    "status":"success",
    "data": [
        {
            "whrc_aboveground_co2_emissions__Mg": 245.325
        }
    ]
}
```

**Masking**

Often, we want to apply different filters (masks) to our data to get more specific information. For example, people often care about loss specifically in primary forests, since these forests are especially valuable. Using `WHERE` statements, we can add additional masks to our calculation:

`SELECT SUM(area__ha) FROM umd_tree_cover_loss__year WHERE is__umd_regional_primary_forest_2001 = 'true'`

`is__umd_regional_primary_forest_2001` is a boolean raster layer showing the extent of primary forest in 2001. The statement `is__umd_regional_primary_forest_2001 = 'true'` will load the raster into an array and apply a filter that keeps only `true` values. For this simple boolean raster, this just means any non-NoData pixels.
We then apply this as a mask to `umd_tree_cover_loss__year` before running the aggregation and zonal statics. Our result might now look like: + +```JSON +{ + "status":"success", + "data": [ + { + "area__ha": 75.467 + } + ] +} +``` + +We can also apply multiple masks using `AND`/`OR` conditions. For example, different countries usually have different definitions of how dense a cluster of trees needs to be to be called a "forest" legally. We measure this density by the percent of "canopy cover" in a pixel. We can apply this canopy cover layer as an additional mask with a query like: + +`SELECT SUM(area__ha) FROM umd_tree_cover_loss__year WHERE is__umd_regional_primary_forest_2001 = 'true' AND umd_tree_cover_density_2000__percent > 30` -### Endpoints -```http request -https://staging-data-api.globalforestwatch.org/analysis -https://data-api.globalforestwatch.org/analysis +Now, we'll calculate loss area only in primary forests with a canopy cover percent greater than 30. Internally, the umd_tree_cover_density_2000__percent will be loaded, and the comparison operation will be applied to generate a boolean array that's true where the pixel value is greater than 30. We then combine with the overall mask using a bitwise & operation. If we used an `OR` statement in the query, we would use a bitwise | operation instead. Now our result might look like: + +```JSON +{ + "status":"success", + "data": [ + { + "area__ha": 25.945 + } + ] +} +``` + +**Grouping** + +The final SQL statement we support is `GROUP BY`. This will group the results by unique pixel values in a raster, like a histogram. For example, the loss raster pixel values are actually the year loss occurred, from 2001 to the current year. We often want to know the amount of loss per year, to track how it changed over time. Now we can finally examine the query in our initial example: + +`SELECT umd_tree_cover_loss__year, SUM(area__ha), SUM(whrc_aboveground_co2_emissions__Mg) FROM data WHERE umd_tree_cover_density_2000__percent > 30 GROUP BY umd_tree_cover_loss__year` + +This will first apply the masks, then applies a weighted histogram function where each unique value in `umd_tree_cover_loss__year` becomes a bucket, and the weights are the values of the pixels we want to aggregate (in this case, `area__ha` and `whrc_aboveground_co2_emissions__Mg`). 
We then end up with aggregation results by unique value, and the results look like: + +Response: +```JSON +{ + "status":"success", + "data":[ + { + "umd_tree_cover_loss__year":2001, + "area__ha":9.894410216509604, + "whrc_aboveground_co2_emissions__Mg":3560.875476837158 + }, + { + "umd_tree_cover_loss__year":2002, + "area__ha":40.0378459923877, + "whrc_aboveground_co2_emissions__Mg":14713.026161193848 + }, + { + "umd_tree_cover_loss__year":2003, + "area__ha":6.442871768889975, + "whrc_aboveground_co2_emissions__Mg":2568.1107501983643 + }, + { + "umd_tree_cover_loss__year":2005, + "area__ha":3.2214358844449875, + "whrc_aboveground_co2_emissions__Mg":1274.5636539459229 + }, + { + "umd_tree_cover_loss__year":2006, + "area__ha":22.01314521037408, + "whrc_aboveground_co2_emissions__Mg":8167.388116836548 + }, + { + "umd_tree_cover_loss__year":2007, + "area__ha":0.23010256317464195, + "whrc_aboveground_co2_emissions__Mg":136.68091201782227 + }, + { + "umd_tree_cover_loss__year":2008, + "area__ha":3.7583418651858187, + "whrc_aboveground_co2_emissions__Mg":1579.5646076202393 + }, + { + "umd_tree_cover_loss__year":2009, + "area__ha":0.7670085439154732, + "whrc_aboveground_co2_emissions__Mg":226.95782279968262 + }, + { + "umd_tree_cover_loss__year":2010, + "area__ha":108.37830725525636, + "whrc_aboveground_co2_emissions__Mg":41855.43841171265 + }, + { + "umd_tree_cover_loss__year":2011, + "area__ha":12.88574353777995, + "whrc_aboveground_co2_emissions__Mg":4887.8897132873535 + }, + { + "umd_tree_cover_loss__year":2012, + "area__ha":0.07670085439154732, + "whrc_aboveground_co2_emissions__Mg":23.061389923095703 + }, + { + "umd_tree_cover_loss__year":2013, + "area__ha":1.6107179422224938, + "whrc_aboveground_co2_emissions__Mg":601.4241733551025 + }, + { + "umd_tree_cover_loss__year":2014, + "area__ha":54.30420490921551, + "whrc_aboveground_co2_emissions__Mg":22433.24832725525 + }, + { + "umd_tree_cover_loss__year":2015, + "area__ha":0.3068034175661893, + "whrc_aboveground_co2_emissions__Mg":119.5254955291748 + }, + { + "umd_tree_cover_loss__year":2016, + "area__ha":5.752564079366049, + "whrc_aboveground_co2_emissions__Mg":2075.9469604492188 + }, + { + "umd_tree_cover_loss__year":2017, + "area__ha":24.774375968469784, + "whrc_aboveground_co2_emissions__Mg":9848.338472366333 + }, + { + "umd_tree_cover_loss__year":2018, + "area__ha":29.75993150392036, + "whrc_aboveground_co2_emissions__Mg":11987.563570022583 + }, + { + "umd_tree_cover_loss__year":2019, + "area__ha":27.382205017782393, + "whrc_aboveground_co2_emissions__Mg":10558.882364273071 + } + ] +} ``` +### Architecture + +To support fast on-the-fly analytics for both small and large areas, we use a serverless AWS Lambda-based architecture. Lambdas allow us to scale up massive parallel processing very quickly, since requests usually come sporadically and may require very variable workloads. + +image + +We have three different lambas. + +**tiled-raster-analysis**: This is the entrypoint function. This function checks the size of the geometry, and splits it up into many chunks if the geometry is large. The chunk size depends on the resolution: 1.25x1.25 degrees for 30m resolution data, and 0.5x0.5 degrees for 10m resolution data. It then invokes a lambda function for each chunk, and waits for the results to be written to a DynamoDB table. Each lambda invocation has 256 KB payload limit, so it may compress or simplify the geometry chunks if the geometry is too complicated. 
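The size check itself comes straight from `raster_analysis/tiling.py` (see the first patch above). A self-contained sketch of that logic, with the byte limit treated as an assumption and the repo's `encode_geometry` helper passed in as an input, looks like this:

```python
import json
import sys
from typing import Any, Callable, Dict, List

from shapely.geometry import shape

# Assumption: the async Lambda payload limit (~256 KB) expressed in bytes.
LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES = 256 * 1024


def build_tile_payload(
    raw_geom: Dict[str, Any],
    query: str,
    environment: List[Dict[str, Any]],
    encode_geometry: Callable,
) -> Dict[str, Any]:
    """Sketch of the payload-size check in raster_analysis/tiling.py.

    `encode_geometry` stands in for the repo's own compression helper.
    """
    payload: Dict[str, Any] = {
        "query": query,
        "environment": environment,
        "geometry": raw_geom,
    }
    if sys.getsizeof(json.dumps(payload)) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:
        # If the payload would be too big, drop the raw GeoJSON and send a
        # compact encoded geometry instead.
        geom = shape(payload.pop("geometry"))
        payload["encoded_geometry"] = encode_geometry(geom)
    return payload
```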
Once all the results are in, it will aggregate the results and return them to the client. + +**fanout**: This lambda is optional. One of the bottlenecks for this architecture is the lambda invocations themselves. If a geometry requires 100+ processing lambdas, it can take some time to invoke it all from one lambda. If more than 10 lambdas need to be invoked, the `tiled-raster-analysis` function will actually split the lambdas into groups of 10, and send each to a `fanout` lambda. All the fanout lambda does is invoke those 10 lambdas. This massively speeds up the invocation time for huge amounts of lambdas. + +**raster-analysis**: This is the core analysis function. This processes each geometry chunk, reads all necessary rasters, runs the query execution, and writes the results out to DynamoDB. Each of these currently runs with 3 GB of RAM. ### Assumptions and limitations @@ -154,6 +368,8 @@ They are saved as Cloud Optimized TIFFs with 400 x 400 pixels blocks. Because we can scale up parallel processing with lambda, size of the geometry shouldn't be an issue unless getting to massive scales (> 1 billion ha). But each lambda has in-memory cap of 3 GB, so currently only so many rasters can be loaded into memory at once. The limit depends on the size of the raster values (e.g. binary is way less memory than float), but generally max 4 or 5 raster layers is a good rule of thumb. +To optimize speed for area calculations, we assume each tile has roughly similar pixel area, and only use the pixel area of the centroid pixel for calculations. This may introduce some inaccuracies of the tile is sparsely covered. + ## Deployment Use terraform: @@ -164,119 +380,6 @@ Use terraform: ``` ``` -Runtime: Python 3.7 +Runtime: Python 3.10 Handler: lambda_function.lambda_handler ``` - -## Future Development - -### Data Lake - -The GFW data lake is now in production, so this service will soon point to that instead of just test layers. Once it does, all data lake layers should be available for analysis. 
This currently includes: - -* aqueduct_baseline_water_stress -* aqueduct_erosion_risk -* birdlife_alliance_for_zero_extinction_site -* birdlife_endemic_bird_areas -* birdlife_key_biodiversity_area -* bra_biomes -* esa_land_cover_2015 -* gfw_aboveground_carbon_stock_2000 -* gfw_aboveground_carbon_stock_in_emissions_year -* gfw_aboveground_carbon_stock_in_emissions_year__biomass_swap -* gfw_aboveground_carbon_stock_in_emissions_year__legal_amazon_loss -* gfw_aboveground_carbon_stock_in_emissions_year__no_primary_gain -* gfw_aboveground_carbon_stock_in_emissions_year__us_removals -* gfw_belowground_carbon_stock_2000 -* gfw_belowground_carbon_stock_in_emissions_year -* gfw_deadwood_carbon_stock_2000 -* gfw_deadwood_carbon_stock_in_emissions_year -* gfw_forest_age_category -* gfw_gross_annual_removals_biomass -* gfw_gross_cumul_removals_co2 -* gfw_gross_cumul_removals_co2__biomass_swap -* gfw_gross_cumul_removals_co2__legal_amazon_loss -* gfw_gross_cumul_removals_co2__maxgain -* gfw_gross_cumul_removals_co2__no_primary_gain -* gfw_gross_cumul_removals_co2__us_removals -* gfw_gross_emissions_co2e_co2_only -* gfw_gross_emissions_co2e_co2_only__biomass_swap -* gfw_gross_emissions_co2e_co2_only__convert_to_grassland -* gfw_gross_emissions_co2e_co2_only__legal_amazon_loss -* gfw_gross_emissions_co2e_co2_only__no_primary_gain -* gfw_gross_emissions_co2e_co2_only__no_shifting_ag -* gfw_gross_emissions_co2e_co2_only__soil_only -* gfw_gross_emissions_co2e_co2_only__us_removals -* gfw_gross_emissions_co2e_non_co2 -* gfw_gross_emissions_co2e_non_co2__biomass_swap -* gfw_gross_emissions_co2e_non_co2__convert_to_grassland -* gfw_gross_emissions_co2e_non_co2__legal_amazon_loss -* gfw_gross_emissions_co2e_non_co2__no_primary_gain -* gfw_gross_emissions_co2e_non_co2__no_shifting_ag -* gfw_gross_emissions_co2e_non_co2__soil_only -* gfw_gross_emissions_co2e_non_co2__us_removals -* gfw_intact_or_primary_forest_2000 -* gfw_land_rights -* gfw_litter_carbon_stock_2000 -* gfw_litter_carbon_stock_in_emissions_year -* gfw_managed_forests -* gfw_mining -* gfw_net_flux_co2e -* gfw_net_flux_co2e__biomass_swap -* gfw_net_flux_co2e__convert_to_grassland -* gfw_net_flux_co2e__legal_amazon_loss -* gfw_net_flux_co2e__maxgain -* gfw_net_flux_co2e__no_primary_gain -* gfw_net_flux_co2e__no_shifting_ag -* gfw_net_flux_co2e__us_removals -* gfw_oil_gas -* gfw_oil_palm -* gfw_peatlands -* gfw_peatlands__flux -* gfw_pixel_area -* gfw_plantations -* gfw_resource_rights -* gfw_soil_carbon_stock_2000 -* gfw_soil_carbon_stock_in_emissions_year -* gfw_soil_carbon_stock_in_emissions_year__biomass_swap -* gfw_soil_carbon_stock_in_emissions_year__legal_amazon_loss -* gfw_soil_carbon_stock_in_emissions_year__no_primary_gain -* gfw_soil_carbon_stock_in_emissions_year__us_removals -* gfw_tiger_landscapes -* gfw_total_carbon_stock_2000 -* gfw_total_carbon_stock_in_emissions_year -* gfw_wood_fiber -* gmw_mangroves_1996 -* gmw_mangroves_2016 -* idn_forest_area -* idn_forest_moratorium -* idn_land_cover_2017 -* idn_primary_forest_2000 -* ifl_intact_forest_landscapes -* jpl_mangrove_aboveground_biomass_stock_2000 -* jpl_tropics_abovegroundbiomass_2000 -* landmark_land_rights -* mapbox_river_basins -* mex_forest_zoning -* mex_payment_ecosystem_services -* mex_protected_areas -* per_forest_concessions -* per_permanent_production_forests -* per_protected_areas -* rspo_oil_palm -* tnc_urban_water_intake -* tsc_tree_cover_loss_drivers -* umd_regional_primary_forest_2001 -* umd_tree_cover_density_2000 -* umd_tree_cover_density_2010 -* umd_tree_cover_gain 
* umd_tree_cover_loss
* usfs_fia_regions
* wdpa_protected_areas
* whrc_aboveground_biomass_stock_2000
* wwf_eco_regions

### VIIRS/MODIS Alerts

These alerts are currently unsupported because we don't rasterize these layers. Instead, we store all enriched points in a document dataset. You can do on-the-fly analysis for these via SQL. (TBD: do we want to just forward that through here so there's only one endpoint?)