diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..24e1443f --- /dev/null +++ b/404.html @@ -0,0 +1,1464 @@ + + + +
+ + + + + + + + + + + + + + + + +rio-tiler
provides multiple abstract base
+classes from which it derives its
+main readers: Reader
and
+STACReader
. You can also use these classes to build
+custom readers.
Main rio_tiler.io
Abstract Base Class.
tms: The TileMatrixSet defines which default projection and map grid the reader uses. Defaults to WebMercatorQuad.
+bounds: Dataset's bounding box. Not in the __init__
method.
__init__
method.__init__
method.Important
+BaseClass Arguments outside the __init__
method and without default value HAVE TO be set in the __attrs_post_init__
step.
self.bounds
and self.crs
Abstract methods are methods that HAVE TO be implemented in the child class.
+rio_tiler.models.Info
)Dict[str, rio_tiler.models.BandStatistics]
)rio_tiler.models.ImageData
)rio_tiler.models.ImageData
)rio_tiler.models.ImageData
)List
)rio_tiler.models.ImageData
)Example: Reader
The goal of the MultiBaseReader
is to enable joining results from multiple files (e.g STAC).
The MultiBaseReader
has the same attributes/properties/methods as the BaseReader
.
Example: STACReader
import os
+import pathlib
+from typing import Dict, Type
+
+import attr
+from morecantile import TileMatrixSet
+from rio_tiler.io.base import MultiBaseReader
+from rio_tiler.io import Reader, BaseReader
+from rio_tiler.constants import WEB_MERCATOR_TMS
+from rio_tiler.models import Info
+
+@attr.s
+class AssetFileReader(MultiBaseReader):
+
+ input: str = attr.ib()
+ prefix: str = attr.ib() # we add a custom attribute
+
+ # because we add another attribute (prefix) we need to
+ # re-specify the other attribute for the class
+ reader: Type[BaseReader] = attr.ib(default=Reader)
+ reader_options: Dict = attr.ib(factory=dict)
+ tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
+
+ # we place min/max zoom in __init__
+ minzoom: int = attr.ib(default=None)
+ maxzoom: int = attr.ib(default=None)
+
+ def __attrs_post_init__(self):
+ """Parse Sceneid and get grid bounds."""
+ self.assets = sorted(
+ [p.stem.split("_")[1] for p in pathlib.Path(self.input).glob(f"*{self.prefix}*.tif")]
+ )
+ with self.reader(self._get_asset_url(self.assets[0])) as cog:
+ self.bounds = cog.bounds
+ self.crs = cog.crs
+
+ if self.minzoom is None:
+ self.minzoom = cog.minzoom
+
+ if self.maxzoom is None:
+ self.maxzoom = cog.maxzoom
+
+ def _get_asset_url(self, band: str) -> str:
+ """Validate band's name and return band's url."""
+ return os.path.join(self.input, f"{self.prefix}{band}.tif")
+
+# we have a directory with "scene_b1.tif", "scene_b2.tif"
+with AssetFileReader(input="my_dir/", prefix="scene_") as cr:
+ print(cr.assets)
+ >>> ['band1', 'band2']
+
+ info = cr.info(assets=("band1", "band2"))
+ # MultiBaseReader returns a Dict
+ assert isinstance(info, dict)
+ print(list(info))
+ >>> ['band1', 'band2']
+
+ assert isinstance(info["band1"], Info)
+ print(info["band1"].model_dump_json(exclude_none=True))
+ >>> {
+ 'bounds': [-11.979244865430259, 24.296321392464325, -10.874546803397614, 25.304623891542263],
+ 'minzoom': 7,
+ 'maxzoom': 9,
+ 'band_metadata': [('b1', {})],
+ 'band_descriptions': [('b1', '')],
+ 'dtype': 'uint16',
+ 'nodata_type': 'Nodata',
+ 'colorinterp': ['gray']
+ }
+ img = cr.tile(238, 218, 9, assets=("band1", "band2"))
+
+ print(img.assets)
+ >>> ['my_dir/scene_band1.tif', 'my_dir/scene_band2.tif']
+
+    # Each asset has 1 band, so when combining each img we get a (2, 256, 256) array.
+ print(img.data.shape)
+ >>> (2, 256, 256)
+
Almost as the previous MultiBaseReader
, the MultiBandReader
children will merge results extracted from different files but taking each file as individual bands.
The MultiBandReader
has the same attributes/properties/methods as the BaseReader
.
Example
+import os
+import pathlib
+from typing import Dict, Type
+
+import attr
+from morecantile import TileMatrixSet
+from rio_tiler.io.base import MultiBandReader
+from rio_tiler.io import COGReader, BaseReader
+from rio_tiler.constants import WEB_MERCATOR_TMS
+
+@attr.s
+class BandFileReader(MultiBandReader):
+
+ input: str = attr.ib()
+ prefix: str = attr.ib() # we add a custom attribute
+
+ # because we add another attribute (prefix) we need to
+ # re-specify the other attribute for the class
+ reader: Type[BaseReader] = attr.ib(default=COGReader)
+ reader_options: Dict = attr.ib(factory=dict)
+ tms: TileMatrixSet = attr.ib(default=WEB_MERCATOR_TMS)
+
+ # we place min/max zoom in __init__
+ minzoom: int = attr.ib(default=None)
+ maxzoom: int = attr.ib(default=None)
+
+ def __attrs_post_init__(self):
+ """Parse Sceneid and get grid bounds."""
+ self.bands = sorted(
+ [p.stem.split("_")[1] for p in pathlib.Path(self.input).glob(f"*{self.prefix}*.tif")]
+ )
+ with self.reader(self._get_band_url(self.bands[0])) as cog:
+ self.bounds = cog.bounds
+ self.crs = cog.crs
+
+ if self.minzoom is None:
+ self.minzoom = cog.minzoom
+
+ if self.maxzoom is None:
+ self.maxzoom = cog.maxzoom
+
+ def _get_band_url(self, band: str) -> str:
+ """Validate band's name and return band's url."""
+ return os.path.join(self.input, f"{self.prefix}{band}.tif")
+
+
+# we have a directory with "scene_b1.tif", "scene_b2.tif"
+with BandFileReader(input="my_dir/", prefix="scene_") as cr:
+ print(cr.bands)
+ >>> ['band1', 'band2']
+
+ print(cr.info(bands=("band1", "band2")).model_dump_json(exclude_none=True))
+ >>> {
+ 'bounds': [-11.979244865430259, 24.296321392464325, -10.874546803397614, 25.304623891542263],
+ 'minzoom': 7,
+ 'maxzoom': 9,
+ 'band_metadata': [('band1', {}), ('band2', {})],
+ 'band_descriptions': [('band1', ''), ('band2', '')],
+ 'dtype': 'uint16',
+ 'nodata_type': 'Nodata',
+ 'colorinterp': ['gray', 'gray']
+ }
+
+ img = cr.tile(238, 218, 9, bands=("band1", "band2"))
+
+ print(img.assets)
+ >>> ['my_dir/scene_band1.tif', 'my_dir/scene_band2.tif']
+
+ print(img.data.shape)
+ >>> (2, 256, 256)
+
Note: rio-tiler-pds
readers are built using the MultiBandReader
base class.
The example was created as a response to developmentseed/titiler#235. In short, the user needed a way to keep metadata information from an asset within a STAC item.
+Sadly when we are using the STAC Reader we only keep the metadata about the item but not the assets metadata (because we built the STAC Reader with the idea that user might first want to merge assets together).
+But rio-tiler has been designed to be easily customizable.
+import attr
+from rasterio.io import DatasetReader
+from rio_tiler.io.stac import fetch, _to_pystac_item
+from rio_tiler.io import Reader
+import pystac
+
+@attr.s
+class CustomSTACReader(Reader):
+ """Custom Reader support."""
+
+ # This will keep the STAC item info within the instance
+ item: pystac.Item = attr.ib(default=None, init=False)
+
+ def __attrs_post_init__(self):
+ """Define _kwargs, open dataset and get info."""
+ # get STAC item URL and asset name
+ asset = self.input.split(":")[-1]
+ stac_url = self.input.replace(f":{asset}", "")
+
+ # Fetch the STAC item
+ self.item = pystac.Item.from_dict(fetch(stac_url), stac_url)
+
+ # Get asset url from the STAC Item
+ self.input = self.item.assets[asset].get_absolute_href()
+ super().__attrs_post_init__()
+
+with CustomSTACReader("https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json:pan") as cog:
+ print(type(cog.dataset))
+ print(cog.input)
+ print(cog.nodata)
+ print(cog.bounds)
+
+>>> rasterio.io.DatasetReader
+>>> "https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/s5_11055_6057_20070622_p10_1_lcc00_cog.tif"
+>>> 0
+>>> (-869900.0, 1370200.0, -786360.0, 1453180.0)
+
In this CustomSTACReader
, we are using a custom path schema
in form of {item-url}:{asset-name}
. When creating an instance of CustomSTACReader
, we will do the following:
input
using the asset full url.Reader
initialization (using super().__attrs_post_init__()
from typing import Any, Dict, List
+
+import attr
+import rasterio
+from rasterio.io import DatasetReader
+from morecantile import Tile, TileMatrixSet
+
+from rio_tiler import reader
+from rio_tiler.constants import BBox, WEB_MERCATOR_TMS
+from rio_tiler.errors import TileOutsideBounds
+from rio_tiler.io import BaseReader
+from rio_tiler.models import BandStatistics, Info, ImageData
+
+@attr.s
+class SimpleReader(BaseReader):
+
+ input: DatasetReader = attr.ib()
+
+ # We force tms to be outside the class __init__
+ tms: TileMatrixSet = attr.ib(init=False, default=WEB_MERCATOR_TMS)
+
+ # We overwrite the abstract base class attribute definition and set default
+ minzoom: int = attr.ib(init=False, default=WEB_MERCATOR_TMS.minzoom)
+ maxzoom: int = attr.ib(init=False, default=WEB_MERCATOR_TMS.maxzoom)
+
+ def __attrs_post_init__(self):
+ # Set bounds and crs variable
+ self.bounds = self.input.bounds
+ self.crs = self.input.crs
+
+    # implement all mandatory methods
+    def info(self) -> Info:
+        raise NotImplementedError
+
+    def statistics(self, **kwargs: Any) -> Dict[str, BandStatistics]:
+        raise NotImplementedError
+
+    def part(self, bbox: BBox, **kwargs: Any) -> ImageData:
+        raise NotImplementedError
+
+    def preview(self, **kwargs: Any) -> ImageData:
+        raise NotImplementedError
+
+    def point(self, lon: float, lat: float, **kwargs: Any) -> List:
+        raise NotImplementedError
+
+    def feature(self, shape: Dict, **kwargs: Any) -> ImageData:
+        raise NotImplementedError
+
+ def tile(self, tile_x: int, tile_y: int, tile_z: int, **kwargs: Any) -> ImageData:
+ if not self.tile_exists(tile_x, tile_y, tile_z):
+ raise TileOutsideBounds(
+ f"Tile {tile_z}/{tile_x}/{tile_y} is outside bounds"
+ )
+
+ tile_bounds = self.tms.xy_bounds(Tile(x=tile_x, y=tile_y, z=tile_z))
+
+        data, mask = reader.part(
+            self.input,
+            tile_bounds,
+            width=256,
+            height=256,
+            bounds_crs=self.tms.rasterio_crs,
+            dst_crs=self.tms.rasterio_crs,
+            **kwargs,
+        )
+        return ImageData(
+            data, mask, bounds=tile_bounds, crs=self.tms.rasterio_crs
+        )
+
+with rasterio.open("file.tif") as src:
+ with SimpleReader(src) as cog:
+ img = cog.tile(1, 1, 1)
+
rio-tiler
aims to be a lightweight plugin for rasterio
to read slippy map
+tiles from raster sources.
Given that rio-tiler
allows for simple, efficient reading of tiles, you can
+then leverage rio-tiler
to create a dynamic tile server to display raster
+tiles on a web map.
There are a couple of tile servers built on top of rio-tiler:
+ +To build a simple dynamic tiling application, we can use
+FastAPI. Note that titiler
uses
+FastAPI
internally, so you might consider using titiler
instead of making
+your own API.
rio-tiler ~= 4.0
fastapi
uvicorn
Install with
+pip install fastapi uvicorn rio-tiler
+
app.py
¶"""rio-tiler tile server."""
+
+import os
+
+from fastapi import FastAPI, Query
+from starlette.requests import Request
+from starlette.responses import Response
+
+from rio_tiler.profiles import img_profiles
+from rio_tiler.io import Reader
+
+
+app = FastAPI(
+ title="rio-tiler",
+ description="A lightweight Cloud Optimized GeoTIFF tile server",
+)
+
+
+@app.get(
+ r"/{z}/{x}/{y}.png",
+ responses={
+ 200: {
+ "content": {"image/png": {}}, "description": "Return an image.",
+ }
+ },
+ response_class=Response,
+ description="Read COG and return a tile",
+)
+def tile(
+ z: int,
+ x: int,
+ y: int,
+ url: str = Query(..., description="Cloud Optimized GeoTIFF URL."),
+):
+ """Handle tile requests."""
+ with Reader(url) as cog:
+ img = cog.tile(x, y, z)
+ content = img.render(img_format="PNG", **img_profiles.get("png"))
+ return Response(content, media_type="image/png")
+
+
+@app.get("/tilejson.json", responses={200: {"description": "Return a tilejson"}})
+def tilejson(
+ request: Request,
+ url: str = Query(..., description="Cloud Optimized GeoTIFF URL."),
+):
+ """Return TileJSON document for a COG."""
+ tile_url = str(request.url_for("tile", z="{z}", x="{x}", y="{y}"))
+ tile_url = f"{tile_url}?url={url}"
+
+ with Reader(url) as cog:
+ return {
+ "bounds": cog.geographic_bounds,
+ "minzoom": cog.minzoom,
+ "maxzoom": cog.maxzoom,
+ "name": os.path.basename(url),
+ "tiles": [tile_url],
+ }
+
Use uvicorn
to launch the application. Note that app:app
tells uvicorn
to
+call the app
function within app.py
, so you must be in the same directory as
+app.py
.
uvicorn app:app --reload
+
Starting with rio-tiler
v2, a .feature()
method exists on rio-tiler
's readers (e.g Reader
) which enables data reading for GeoJSON defined (polygon or multipolygon) shapes.
from rio_tiler.io import Reader
+from rio_tiler.models import ImageData
+
+with Reader("my-tif.tif") as cog:
+ # Read data for a given geojson polygon
+ img: ImageData = cog.feature(geojson_feature, max_size=1024) # we limit the max_size to 1024
+
Under the hood, the .feature
method uses rasterio's rasterize
+function and the .part()
method. The below process is roughly what .feature
does for you.
from rasterio.features import rasterize, bounds as featureBounds
+
+from rio_tiler.io import Reader
+
+# Use Reader to open and read the dataset
+with Reader("my_tif.tif") as cog:
+
+ # Get BBOX of the polygon
+ bbox = featureBounds(feat)
+
+ # Read part of the data overlapping with the geometry bbox
+ # assuming that the geometry coordinates are in web mercator
+    img = cog.part(bbox, bounds_crs="EPSG:3857", max_size=1024)
+
+ # Rasterize geometry using the same geotransform parameters
+ cutline = rasterize(
+ [feat],
+ out_shape=(img.height, img.width),
+ transform=img.transform,
+ ...
+ )
+
+ # Apply geometry mask to imagery
+ img.array.mask = numpy.where(~cutline, img.array.mask, True)
+
Another interesting way to cut features is to use the GDALWarpVRT's cutline
+option with the .part(), .preview(), or .tile() methods:
from rio_tiler.utils import create_cutline
+
+bbox = featureBounds(feat)
+
+# Use Reader to open and read the dataset
+with Reader("my_tif.tif") as cog:
+    # Create WKT Cutline
+ cutline = create_cutline(cog.dataset, feat, geometry_crs="epsg:4326")
+
+ # Get a part of the geotiff but use the cutline to mask the data
+ bbox = featureBounds(feat)
+ img = cog.part(bbox, vrt_options={'cutline': cutline})
+
+ # Get a preview of the whole geotiff but use the cutline to mask the data
+ img = cog.preview(vrt_options={'cutline': cutline})
+
+ # Read a mercator tile and use the cutline to mask the data
+ img = cog.tile(1, 1, 1, vrt_options={'cutline': cutline})
+
Starting with rio-tiler 2.0, we replaced mercantile
with morecantile
, enabling support for other TileMatrixSets than the default WebMercator grid.
import morecantile
+from rio_tiler.io import Reader
+from rasterio.crs import CRS
+from pyproj import CRS as projCRS
+
+# By default we use WebMercator TMS
+with Reader("my.tif") as cog:
+ img = cog.tile(1, 1, 1)
+ assert img.crs == CRS.from_epsg(3857) # default image output is the TMS crs (WebMercator)
+
+# Print default grids
+for name in morecantile.tms.list():
+ print(name, "-", morecantile.tms.get(name).rasterio_crs)
+
+>>> CanadianNAD83_LCC - EPSG:3978
+ EuropeanETRS89_LAEAQuad - EPSG:3035
+ LINZAntarticaMapTilegrid - EPSG:5482
+ NZTM2000Quad - EPSG:2193
+ UPSAntarcticWGS84Quad - EPSG:5042
+ UPSArcticWGS84Quad - EPSG:5041
+ UTM31WGS84Quad - EPSG:32631
+ WGS1984Quad - EPSG:4326
+ WebMercatorQuad - EPSG:3857
+ WorldCRS84Quad - OGC:CRS84
+ WorldMercatorWGS84Quad - EPSG:3395
+
+
+# Use EPSG:4326 (WGS84) grid
+wgs84_grid = morecantile.tms.get("WorldCRS84Quad")
+with Reader("my.tif", tms=wgs84_grid) as cog:
+ img = cog.tile(1, 1, 1)
+ assert img.crs == CRS.from_epsg(4326)
+
+# Create Custom grid
+extent = [-948.75, -543592.47, 5817.41, -3333128.95]  # From https://epsg.io/3031
+epsg3031TMS = morecantile.TileMatrixSet.custom(
+ extent, projCRS.from_epsg(3031), identifier="MyCustomTmsEPSG3031"
+)
+with Reader("my.tif", tms=epsg3031TMS) as cog:
+ img = cog.tile(1, 1, 1)
+ assert img.crs == CRS.from_epsg(3031)
+
rio-tiler
's Readers provide simple .statistics
method to retrieve dataset statistics (min, max, histogram...). We can easily extend this to create a .zonal_statistics
method which will accept input features to get statistics from.
import attr
+from typing import Any, Union, Optional, List, Dict
+
+from rio_tiler import io
+from rio_tiler.utils import get_array_statistics
+from rio_tiler.models import BandStatistics
+
+from geojson_pydantic.features import Feature, FeatureCollection
+from geojson_pydantic.geometries import Polygon
+
+class Reader(io.Reader):
+ """Custom Reader with zonal_statistics method."""
+
+ def zonal_statistics(
+ self,
+ geojson: Union[FeatureCollection, Feature],
+ categorical: bool = False,
+ categories: Optional[List[float]] = None,
+ percentiles: List[int] = [2, 98],
+ hist_options: Optional[Dict] = None,
+ max_size: int = None,
+ **kwargs: Any,
+ ) -> FeatureCollection:
+ """Return statistics from GeoJSON features.
+
+ Args:
+ geojson (Feature or FeatureCollection): a GeoJSON Feature or FeatureCollection.
+ categorical (bool): treat input data as categorical data. Defaults to False.
+ categories (list of numbers, optional): list of categories to return value for.
+ percentiles (list of numbers, optional): list of percentile values to calculate. Defaults to `[2, 98]`.
+ hist_options (dict, optional): Options to forward to numpy.histogram function.
+ max_size (int, optional): Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to None.
+ kwargs (optional): Options to forward to `self.preview`.
+
+ Returns:
+ FeatureCollection
+
+ """
+ kwargs = {**self._kwargs, **kwargs}
+
+ hist_options = hist_options or {}
+
+ # We transform the input Feature to a FeatureCollection
+ if not isinstance(geojson, FeatureCollection):
+ geojson = FeatureCollection(features=[geojson])
+
+ for feature in geojson:
+ # Get data overlapping with the feature (using Reader.feature method)
+ data = self.feature(
+ feature.model_dump(exclude_none=True),
+ max_size=max_size,
+ **kwargs,
+ )
+
+ # Get band statistics for the data
+ stats = get_array_statistics(
+ data.as_masked(),
+ categorical=categorical,
+ categories=categories,
+ percentiles=percentiles,
+ **hist_options,
+ )
+
+ # Update input feature properties and add the statistics
+ feature.properties = feature.properties or {}
+ feature.properties.update(
+ {
+ "statistics": {
+ f"{data.band_names[ix]}": BandStatistics(
+ **stats[ix]
+ )
+ for ix in range(len(stats))
+ }
+ }
+ )
+
+ return geojson
+
rio-tiler colormap functions and classes.
+DEFAULT_CMAPS_FILES
+
EMPTY_COLORMAP
+
USER_CMAPS_DIR
+
cmap
+
def apply_cmap(
+ data: numpy.ndarray,
+ colormap: Union[Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]]
+) -> Tuple[numpy.ndarray, numpy.ndarray]
+
Apply colormap on data.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +numpy.ndarray | +1D image array to translate to RGB. | +None | +
colormap | +dict or sequence | +GDAL RGBA Color Table dictionary or sequence (for intervals). | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +Data (numpy.ndarray) and Mask (numpy.ndarray) values. | +
Raises:
+Type | +Description | +
---|---|
InvalidFormat | +If data is not a 1 band dataset (1, col, row). | +
def apply_discrete_cmap(
+ data: numpy.ndarray,
+ colormap: Dict[int, Tuple[int, int, int, int]]
+) -> Tuple[numpy.ndarray, numpy.ndarray]
+
Apply discrete colormap.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +numpy.ndarray | +1D image array to translate to RGB. | +None | +
color_map | +dict | +Discrete ColorMap dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +Data (numpy.ndarray) and Alpha band (numpy.ndarray). | +
def apply_intervals_cmap(
+ data: numpy.ndarray,
+ colormap: Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]
+) -> Tuple[numpy.ndarray, numpy.ndarray]
+
Apply intervals colormap.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +numpy.ndarray | +1D image array to translate to RGB. | +None | +
color_map | +Sequence | +Sequence of intervals and color in form of [([min, max], [r, g, b, a]), ...]. | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +Data (numpy.ndarray) and Alpha band (numpy.ndarray). | +
def make_lut(
+ colormap: Dict[int, Tuple[int, int, int, int]]
+) -> numpy.ndarray
+
Create a lookup table numpy.ndarray from a GDAL RGBA Color Table dictionary.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
colormap | +dict | +GDAL RGBA Color Table dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
numpy.ndarray | +colormap lookup table. | +
def parse_color(
+ rgba: Union[Sequence[int], str]
+) -> Tuple[int, int, int, int]
+
Parse RGB/RGBA color and return valid rio-tiler compatible RGBA colormap entry.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
rgba | +str or list of int | +HEX encoded or list RGB or RGBA colors. | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +RGBA values. | +
class ColorMaps(
+ data: Dict[str, Union[str, Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]]] = NOTHING
+)
+
Default Colormaps holder.
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +dict | +colormaps. Defaults to rio_tiler.colormap.DEFAULTS_CMAPS . |
+rio_tiler.colormap.DEFAULTS_CMAPS |
+
def get(
+ self,
+ name: str
+) -> Union[Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]]
+
Fetch a colormap.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
name | +str | +colormap name. | +None | +
def list(
+ self
+) -> List[str]
+
List registered Colormaps.
+Returns + list: list of colormap names.
+def register(
+ self,
+ custom_cmap: Dict[str, Union[str, Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]]],
+ overwrite: bool = False
+) -> 'ColorMaps'
+
Register a custom colormap.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
custom_cmap | +dict | +custom colormap(s) to register. | +None | +
overwrite | +bool | +Overwrite existing colormap with same key. Defaults to False. | +False | +
Errors and warnings.
+class AlphaBandWarning(
+ /,
+ *args,
+ **kwargs
+)
+
Automatically removed Alpha band from output array.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class AssetAsBandError(
+ /,
+ *args,
+ **kwargs
+)
+
Can't use asset_as_band with multiple bands.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class ColorMapAlreadyRegistered(
+ /,
+ *args,
+ **kwargs
+)
+
ColorMap is already registered.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class EmptyMosaicError(
+ /,
+ *args,
+ **kwargs
+)
+
Mosaic method returned empty array.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class ExpressionMixingWarning(
+ /,
+ *args,
+ **kwargs
+)
+
Expression and assets/indexes mixing.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidAssetName(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid Asset name.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidBandName(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid band name.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidBufferSize(
+ /,
+ *args,
+ **kwargs
+)
+
buffer
must be a multiple of 0.5
(e.g: 0.5, 1, 1.5, ...).
args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidColorFormat(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid color format.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidColorMapName(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid colormap name.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidDatatypeWarning(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid Output Datatype.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidExpression(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid Expression.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidFormat(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid image format.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidMosaicMethod(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid Pixel Selection method for mosaic.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class InvalidPointDataError(
+ /,
+ *args,
+ **kwargs
+)
+
Invalid PointData.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class MissingAssets(
+ /,
+ *args,
+ **kwargs
+)
+
Missing Assets.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class MissingBands(
+ /,
+ *args,
+ **kwargs
+)
+
Missing bands.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class NoOverviewWarning(
+ /,
+ *args,
+ **kwargs
+)
+
Dataset has no overviews.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class PointOutsideBounds(
+ /,
+ *args,
+ **kwargs
+)
+
Point is outside image bounds.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class RioTilerError(
+ /,
+ *args,
+ **kwargs
+)
+
Base exception class.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+class TileOutsideBounds(
+ /,
+ *args,
+ **kwargs
+)
+
Z-X-Y Tile is outside image bounds.
+args
+
def with_traceback(
+ ...
+)
+
Exception.with_traceback(tb) --
+set self.traceback to tb and return self.
+ + + + + + + +rio-tiler.expression: Parse and Apply expression.
+def apply_expression(
+ blocks: Sequence[str],
+ bands: Sequence[str],
+ data: numpy.ndarray
+) -> numpy.ma.core.MaskedArray
+
Apply rio-tiler expression.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
blocks | +sequence | +expression for a specific layer. | +None | +
bands | +sequence | +bands names. | +None | +
data | +numpy.array | +array of bands. | +None | +
Returns:
+Type | +Description | +
---|---|
numpy.array | +output data. | +
def get_expression_blocks(
+ expression: str
+) -> List[str]
+
Split expression in blocks.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
expression | +str | +band math/combination expression. | +None | +
Returns:
+Type | +Description | +
---|---|
list | +expression blocks (str). | +
def parse_expression(
+ expression: str,
+ cast: bool = True
+) -> Tuple
+
Parse rio-tiler band math expression.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
expression | +str | +band math/combination expression. | +None | +
cast | +bool | +cast band names to integers (convert to index values). Defaults to True. | +True | +
Returns:
+Type | +Description | +
---|---|
tuple | +band names/indexes. | +
rio_tiler.io.base: ABC class for rio-tiler readers.
+WGS84_CRS
+
class BaseReader(
+ input: Any,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857>
+)
+
Rio-tiler.io BaseReader.
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +any | +Reader's input. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
geographic_bounds
+
Return dataset bounds in geographic_crs.
+def feature(
+ self,
+ shape: Dict
+) -> rio_tiler.models.ImageData
+
Read a Dataset for a GeoJSON feature.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def info(
+ self
+) -> rio_tiler.models.Info
+
Return Dataset's info.
+Returns:
+Type | +Description | +
---|---|
rio_tile.models.Info | +Dataset info. | +
def part(
+ self,
+ bbox: Tuple[float, float, float, float]
+) -> rio_tiler.models.ImageData
+
Read a Part of a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float
+) -> rio_tiler.models.PointData
+
Read a value from a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.PointData | +PointData instance with data, mask and spatial info. | +
def preview(
+ self
+) -> rio_tiler.models.ImageData
+
Read a preview of a Dataset.
+Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return bands statistics from a dataset.
+Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> rio_tiler.models.ImageData
+
Read a Map tile from the Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
class MultiBandReader(
+ input: Any,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>,
+ minzoom: int = None,
+ maxzoom: int = None,
+ reader_options: Dict = NOTHING
+)
+
Multi Band Reader.
This Abstract Base Class Reader is suited for datasets that store spectral bands as separate files (e.g. Sentinel 2).
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +any | +input data. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
minzoom | +int | +Set dataset's minzoom. | +None | +
maxzoom | +int | +Set dataset's maxzoom. | +None | +
reader_options | +dict, option | +options to forward to the reader. Defaults to {} . |
+{} |
+
geographic_bounds
+
Return dataset bounds in geographic_crs.
+def feature(
+ self,
+ shape: Dict,
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts defined by geojson feature from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
bands | +sequence of str or str | +bands to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the self.reader.feature method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def info(
+ self,
+ bands: Union[Sequence[str], str] = None,
+ *args,
+ **kwargs: Any
+) -> rio_tiler.models.Info
+
Return metadata from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bands | +sequence of str or str | +band names to fetch info from. Required keyword argument. | +None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple bands info in form of {"band1": rio_tile.models.Info}. | +
def parse_expression(
+ self,
+ expression: str
+) -> Tuple
+
Parse rio-tiler band math expression.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs. | +None | +
bands | +sequence of str or str | +bands to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the 'self.reader.part' method. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.PointData
+
Read pixel values from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
bands | +sequence of str or str | +bands to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the self.reader.point method. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +PointData | +
def preview(
+ self,
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge previews from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bands | +sequence of str or str | +bands to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the self.reader.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def statistics(
+ self,
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return array statistics for multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bands | +sequence of str or str | +bands to fetch info from. Required keyword argument. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to the self.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple assets statistics in form of {"{band}/{expression}": rio_tiler.models.BandStatistics, ...}. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ bands: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge Web Map tiles from multiple bands.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
bands | +sequence of str or str | +bands to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the band list (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the self.reader.tile method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
class MultiBaseReader(
+ input: Any,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>,
+ minzoom: int = None,
+ maxzoom: int = None,
+ reader_options: Dict = NOTHING
+)
+
MultiBaseReader Reader.
This Abstract Base Class Reader is suited for datasets that are composed of multiple assets (e.g. STAC).
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +any | +input data. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
minzoom | +int | +Set dataset's minzoom. | +None | +
maxzoom | +int | +Set dataset's maxzoom. | +None | +
reader_options | +dict, option | +options to forward to the reader. Defaults to {} . |
+{} |
+
geographic_bounds
+
Return dataset bounds in geographic_crs.
+def feature(
+ self,
+ shape: Dict,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts defined by geojson feature from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.feature method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def info(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.Info]
+
Return metadata from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. Required keyword argument. | +None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple assets info in form of {"asset1": rio_tile.models.Info}. | +
def merged_statistics(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return array statistics for multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to the self.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def parse_expression(
+ self,
+ expression: str,
+ asset_as_band: bool = False
+) -> Tuple
+
Parse rio-tiler band math expression.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.PointData
+
Read pixel value from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.point method. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +PointData | +
def preview(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge previews from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def statistics(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_expression: Union[Dict[str, str], NoneType] = None,
+ **kwargs: Any
+) -> Dict[str, Dict[str, rio_tiler.models.BandStatistics]]
+
Return array statistics for multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
asset_expression | +dict | +rio-tiler expression for each asset (e.g. {"asset1": "b1/b2+b3", "asset2": ...}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.statistics method. |
+None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple assets statistics in form of {"asset1": {"1": rio_tiler.models.BandStatistics, ...}}. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge Web Map tiles from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.tile method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
class SpatialMixin(
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>
+)
+
Spatial Info Mixin.
+Name | +Type | +Description | +Default | +
---|---|---|---|
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
geographic_bounds
+
Return dataset bounds in geographic_crs.
+def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
rio_tiler.io.cogeo: raster processing.
+None
+class COGReader(
+ input: str,
+ dataset: Union[rasterio.io.DatasetReader, rasterio.io.DatasetWriter, rasterio.io.MemoryFile, rasterio.vrt.WarpedVRT] = None,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' identifier='WebMercatorQuad'>,
+ minzoom: int = None,
+ maxzoom: int = None,
+ geographic_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ colormap: Dict = None,
+ nodata: Union[float, int, str, NoneType] = None,
+ unscale: Union[bool, NoneType] = None,
+ resampling_method: Union[rasterio.enums.Resampling, NoneType] = None,
+ vrt_options: Union[Dict, NoneType] = None,
+ post_process: Union[Callable[[numpy.ndarray, numpy.ndarray], Tuple[numpy.ndarray, numpy.ndarray]], NoneType] = None
+)
+
Name | +Type | +Description | +Default | +
---|---|---|---|
input | +str | +Cloud Optimized GeoTIFF path. | +None | +
dataset | +rasterio.io.DatasetReader or rasterio.io.DatasetWriter or rasterio.vrt.WarpedVRT | +Rasterio dataset. | +None | +
bounds | +tuple | +Dataset bounds (left, bottom, right, top). | +None | +
crs | +rasterio.crs.CRS | +Dataset CRS. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
minzoom | +int | +Set minzoom for the tiles. | +None | +
maxzoom | +int | +Set maxzoom for the tiles. | +None | +
geographic_crs | +rasterio.crs.CRS | +CRS to use as geographic coordinate system. Defaults to WGS84. | +WGS84 | +
colormap | +dict | +Overwrite internal colormap. | +None | +
nodata | +int or float or str | +Global options, overwrite internal nodata value. | +None | +
unscale | +bool | +Global options, apply internal scale and offset on all read operations. | +None | +
resampling_method | +rasterio.enums.Resampling | +Global options, resampling method to use for read operations. | +None | +
vrt_options | +dict | +Global options, WarpedVRT options to use for read operations. | +None | +
post_process | +callable | +Global options, Function to apply after all read operations. | +None | +
geographic_bounds
+
return bounds in WGS84.
+def close(
+ self
+)
+
Close rasterio dataset.
+def feature(
+ self,
+ shape: Dict,
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ shape_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a COG defined by a geojson feature.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
shape_crs | +rasterio.crs.CRS | +Input geojson coordinate reference system. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the COGReader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def get_zooms(
+ self,
+ tilesize: int = 256
+) -> Tuple[int, int]
+
Calculate raster min/max zoom level for input TMS.
+def info(
+ self
+) -> rio_tiler.models.Info
+
Return COG info.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ bounds_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[int, Sequence, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs ("dst_crs"). | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
bounds_crs | +rasterio.crs.CRS | +Bounds Coordinate Reference System. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.part function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ coord_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> List
+
Read a pixel value from a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
coord_crs | +rasterio.crs.CRS | +Coordinate Reference System of the input coords. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.point function. |
+None | +
Returns:
+Type | +Description | +
---|---|
list | +Pixel value per band indexes. | +
def preview(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: int = 1024,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Return a preview of a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.preview function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def read(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read the COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.read function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: List[int] = [2, 98],
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return bands statistics from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to self.preview . |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ tilesize: int = 256,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ tile_buffer: Union[float, int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read a Web Map tile from a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
tilesize | +int | +Output image size. Defaults to 256 . |
+256 |
+
indexes | +int or sequence of int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
tile_buffer | +int or float | +Buffer on each side of the given tile. It must be a multiple of 0.5 . Output tilesize will be expanded to tilesize + 2 * tile_buffer (e.g 0.5 = 257x257, 1.0 = 258x258). |
+None | +
kwargs | +optional | +Options to forward to the COGReader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
class GCPCOGReader(
+ input: str,
+ src_dataset: Union[rasterio.io.DatasetReader, rasterio.io.DatasetWriter, rasterio.io.MemoryFile, rasterio.vrt.WarpedVRT] = None,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' identifier='WebMercatorQuad'>,
+ minzoom: int = None,
+ maxzoom: int = None,
+ geographic_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ colormap: Dict = None,
+ nodata: Union[float, int, str, NoneType] = None,
+ unscale: Union[bool, NoneType] = None,
+ resampling_method: Union[rasterio.enums.Resampling, NoneType] = None,
+ vrt_options: Union[Dict, NoneType] = None,
+ post_process: Union[Callable[[numpy.ndarray, numpy.ndarray], Tuple[numpy.ndarray, numpy.ndarray]], NoneType] = None
+)
+
Name | +Type | +Description | +Default | +
---|---|---|---|
input | +str | +Cloud Optimized GeoTIFF path. | +None | +
src_dataset | +rasterio.io.DatasetReader or rasterio.io.DatasetWriter or rasterio.vrt.WarpedVRT | +Rasterio dataset. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
minzoom | +int | +Overwrite Min Zoom level. | +None | +
maxzoom | +int | +Overwrite Max Zoom level. | +None | +
colormap | +dict | +Overwrite internal colormap. | +None | +
nodata | +int or float or str | +Global options, overwrite internal nodata value. | +None | +
unscale | +bool | +Global options, apply internal scale and offset on all read operations. | +None | +
resampling_method | +rasterio.enums.Resampling | +Global options, resampling method to use for read operations. | +None | +
vrt_options | +dict | +Global options, WarpedVRT options to use for read operations. | +None | +
post_process | +callable | +Global options, Function to apply after all read operations. | +None | +
dataset | +rasterio.vrtWarpedVRT | +Warped VRT constructed with dataset GCPS info. READ ONLY attribute. | +None | +
geographic_bounds
+
return bounds in WGS84.
+def close(
+ self
+)
+
Close rasterio dataset.
+def feature(
+ self,
+ shape: Dict,
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ shape_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a COG defined by a geojson feature.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
shape_crs | +rasterio.crs.CRS | +Input geojson coordinate reference system. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the COGReader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def get_zooms(
+ self,
+ tilesize: int = 256
+) -> Tuple[int, int]
+
Calculate raster min/max zoom level for input TMS.
+def info(
+ self
+) -> rio_tiler.models.Info
+
Return COG info.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ bounds_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[int, Sequence, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs ("dst_crs"). | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
bounds_crs | +rasterio.crs.CRS | +Bounds Coordinate Reference System. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.part function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ coord_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> List
+
Read a pixel value from a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
coord_crs | +rasterio.crs.CRS | +Coordinate Reference System of the input coords. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.point function. |
+None | +
Returns:
+Type | +Description | +
---|---|
list | +Pixel value per band indexes. | +
def preview(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: int = 1024,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Return a preview of a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.preview function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def read(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read the COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.read function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: List[int] = [2, 98],
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return bands statistics from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to self.preview . |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ tilesize: int = 256,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ tile_buffer: Union[float, int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read a Web Map tile from a COG.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
tilesize | +int | +Output image size. Defaults to 256 . |
+256 |
+
indexes | +int or sequence of int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
tile_buffer | +int or float | +Buffer on each side of the given tile. It must be a multiple of 0.5 . Output tilesize will be expanded to tilesize + 2 * tile_buffer (e.g 0.5 = 257x257, 1.0 = 258x258). |
+None | +
kwargs | +optional | +Options to forward to the COGReader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
rio_tiler.io.rasterio: rio-tiler reader built on top of Rasterio
+WGS84_CRS
+
class ImageReader(
+ input: str,
+ dataset: Union[rasterio.io.DatasetReader, rasterio.io.DatasetWriter, rasterio.io.MemoryFile, rasterio.vrt.WarpedVRT] = None,
+ colormap: Dict = None,
+ options: rio_tiler.reader.Options = NOTHING
+)
+
Non Geo Image Reader
+geographic_bounds
+
Return dataset bounds in geographic_crs.
+maxzoom
+
Return dataset maxzoom.
+minzoom
+
Return dataset minzoom.
+def close(
+ self
+)
+
Close rasterio dataset.
+def feature(
+ self,
+ shape: Dict,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ force_binary_mask: bool = True,
+ resampling_method: Literal['nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos', 'average', 'mode', 'gauss', 'rms'] = 'nearest',
+ unscale: bool = False,
+ post_process: Union[Callable[[numpy.ma.core.MaskedArray], numpy.ma.core.MaskedArray], NoneType] = None
+) -> rio_tiler.models.ImageData
+
Read part of an Image defined by a geojson feature.
+def get_maxzoom(
+ self
+) -> int
+
Define dataset maximum zoom level.
+def get_minzoom(
+ self
+) -> int
+
Define dataset minimum zoom level.
+def info(
+ self
+) -> rio_tiler.models.Info
+
Return Dataset info.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ indexes: Union[int, Sequence, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ force_binary_mask: bool = True,
+ resampling_method: Literal['nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos', 'average', 'mode', 'gauss', 'rms'] = 'nearest',
+ unscale: bool = False,
+ post_process: Union[Callable[[numpy.ma.core.MaskedArray], numpy.ma.core.MaskedArray], NoneType] = None
+) -> rio_tiler.models.ImageData
+
Read part of an Image.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top). | +None | +
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
force_binary_mask | +bool | +Cast returned mask to binary values (0 or 255). Defaults to True . |
+True |
+
resampling_method | +RIOResampling | +RasterIO resampling algorithm. Defaults to nearest . |
+nearest |
+
unscale | +bool | +Apply 'scales' and 'offsets' on output data value. Defaults to False . |
+False |
+
post_process | +callable | +Function to apply on output data and mask values. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ x: float,
+ y: float,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ unscale: bool = False,
+ post_process: Union[Callable[[numpy.ma.core.MaskedArray], numpy.ma.core.MaskedArray], NoneType] = None
+) -> rio_tiler.models.PointData
+
Read a pixel value from an Image.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
x | +float | +X coordinate. | +None | +
y | +float | +Y coordinate. | +None | +
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
unscale | +bool | +Apply 'scales' and 'offsets' on output data value. Defaults to False . |
+False |
+
post_process | +callable | +Function to apply on output data and mask values. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.PointData | +PointData instance. | +
def preview(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: int = 1024,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Return a preview of a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the self.read method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def read(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read the Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.read function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return bands statistics from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to self.read . |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ tilesize: int = 256,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ force_binary_mask: bool = True,
+ resampling_method: Literal['nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos', 'average', 'mode', 'gauss', 'rms'] = 'nearest',
+ unscale: bool = False,
+ post_process: Union[Callable[[numpy.ma.core.MaskedArray], numpy.ma.core.MaskedArray], NoneType] = None
+) -> rio_tiler.models.ImageData
+
Read a Web Map tile from an Image.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
tilesize | +int | +Output image size. Defaults to 256 . |
+256 |
+
indexes | +int or sequence of int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
force_binary_mask | +bool | +Cast returned mask to binary values (0 or 255). Defaults to True . |
+True |
+
resampling_method | +RIOResampling | +RasterIO resampling algorithm. Defaults to nearest . |
+nearest |
+
unscale | +bool | +Apply 'scales' and 'offsets' on output data value. Defaults to False . |
+False |
+
post_process | +callable | +Function to apply on output data and mask values. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
class LocalTileMatrixSet(
+ width: int,
+ height: int,
+ tile_size: int = 256
+)
+
Fake TMS for non-geo image.
+def xy_bounds(
+ self,
+ *tile: morecantile.commons.Tile
+) -> morecantile.commons.BoundingBox
+
Return the bounding box of the (x, y, z) tile
+class Reader(
+ input: str,
+ dataset: Union[rasterio.io.DatasetReader, rasterio.io.DatasetWriter, rasterio.io.MemoryFile, rasterio.vrt.WarpedVRT] = None,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>,
+ geographic_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ colormap: Dict = None,
+ options: rio_tiler.reader.Options = NOTHING
+)
+
Rasterio Reader.
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +str | +dataset path. | +None | +
dataset | +rasterio.io.DatasetReader or rasterio.io.DatasetWriter or rasterio.vrt.WarpedVRT | +Rasterio dataset. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
geographic_crs | +rasterio.crs.CRS | +CRS to use as geographic coordinate system. Defaults to WGS84. | +WGS84 | +
colormap | +dict | +Overwrite internal colormap. | +None | +
options | +dict | +Options to forward to low-level reader methods. | +None | +
geographic_bounds
+
Return dataset bounds in geographic_crs.
+maxzoom
+
Return dataset maxzoom.
+minzoom
+
Return dataset minzoom.
+def close(
+ self
+)
+
Close rasterio dataset.
+def feature(
+ self,
+ shape: Dict,
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ shape_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ buffer: Union[float, int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a Dataset defined by a geojson feature.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
shape_crs | +rasterio.crs.CRS | +Input geojson coordinate reference system. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
buffer | +int or float | +Buffer on each side of the given aoi. It must be a multiple of 0.5 . Output image size will be expanded to output imagesize + 2 * buffer (e.g 0.5 = 257x257, 1.0 = 258x258). |
+None | +
kwargs | +optional | +Options to forward to the Reader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def get_maxzoom(
+ self
+) -> int
+
Define dataset maximum zoom level.
+def get_minzoom(
+ self
+) -> int
+
Define dataset minimum zoom level.
+def info(
+ self
+) -> rio_tiler.models.Info
+
Return Dataset info.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ dst_crs: Union[rasterio.crs.CRS, NoneType] = None,
+ bounds_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[int, Sequence, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: Union[int, NoneType] = None,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ buffer: Union[float, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read part of a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs ("dst_crs"). | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
bounds_crs | +rasterio.crs.CRS | +Bounds Coordinate Reference System. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. | +None | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
buffer | +float | +Buffer on each side of the given aoi. It must be a multiple of 0.5 . Output image size will be expanded to output imagesize + 2 * buffer (e.g 0.5 = 257x257, 1.0 = 258x258). |
+None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.part function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ coord_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.PointData
+
Read a pixel value from a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
coord_crs | +rasterio.crs.CRS | +Coordinate Reference System of the input coords. Defaults to epsg:4326 . |
+epsg:4326 |
+
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.point function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.PointData | +PointData instance. | +
def preview(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ max_size: int = 1024,
+ height: Union[int, NoneType] = None,
+ width: Union[int, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Return a preview of a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
kwargs | +optional | +Options to forward to the self.read method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def read(
+ self,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read the Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indexes | +sequence of int or int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
kwargs | +optional | +Options to forward to the rio_tiler.reader.read function. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return bands statistics from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to self.read . |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ tilesize: int = 256,
+ indexes: Union[Sequence[int], int, NoneType] = None,
+ expression: Union[str, NoneType] = None,
+ buffer: Union[float, NoneType] = None,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read a Web Map tile from a Dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
tilesize | +int | +Output image size. Defaults to 256 . |
+256 |
+
indexes | +int or sequence of int | +Band indexes. | +None | +
expression | +str | +rio-tiler expression (e.g. b1/b2+b3). | +None | +
buffer | +float | +Buffer on each side of the given tile. It must be a multiple of 0.5 . Output tilesize will be expanded to tilesize + 2 * buffer (e.g 0.5 = 257x257, 1.0 = 258x258). |
+None | +
kwargs | +optional | +Options to forward to the Reader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
rio_tiler.io.stac: STAC reader.
+DEFAULT_VALID_TYPE
+
WGS84_CRS
+
boto3_session
+
def aws_get_object(
+ bucket: str,
+ key: str,
+ request_pays: bool = False,
+ client: 'boto3_session.client' = None
+) -> bytes
+
AWS s3 get object content.
+def fetch(
+ filepath: str,
+ **kwargs: Any
+) -> Dict
+
Fetch STAC items.
+A LRU cache is set on top of this function.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
filepath | +str | +STAC item URL. | +None | +
kwargs | +any | +additional options to pass to client. | +None | +
Returns:
+Type | +Description | +
---|---|
dict | +STAC Item content. | +
class STACReader(
+ input: str,
+ item: Union[NoneType, Dict, pystac.item.Item] = None,
+ tms: morecantile.models.TileMatrixSet = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>,
+ minzoom: int = NOTHING,
+ maxzoom: int = NOTHING,
+ geographic_crs: rasterio.crs.CRS = CRS.from_epsg(4326),
+ include_assets: Union[Set[str], NoneType] = None,
+ exclude_assets: Union[Set[str], NoneType] = None,
+ include_asset_types: Set[str] = {'image/tiff; profile=cloud-optimized; application=geotiff', 'image/jp2', 'image/x.geotiff', 'application/x-hdf', 'image/tiff; application=geotiff; profile=cloud-optimized', 'image/tiff; application=geotiff', 'image/tiff', 'image/vnd.stac.geotiff; cloud-optimized=true', 'application/x-hdf5'},
+ exclude_asset_types: Union[Set[str], NoneType] = None,
+ reader: Type[rio_tiler.io.base.BaseReader] = <class 'rio_tiler.io.rasterio.Reader'>,
+ reader_options: Dict = NOTHING,
+ fetch_options: Dict = NOTHING,
+ ctx: Any = <class 'rasterio.env.Env'>
+)
+
STAC Reader.
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +str | +STAC Item path, URL or S3 URL. | +None | +
item | +dict or pystac.Item, STAC | +Stac Item. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
minzoom | +int | +Set minzoom for the tiles. | +None | +
maxzoom | +int | +Set maxzoom for the tiles. | +None | +
geographic_crs | +rasterio.crs.CRS | +CRS to use as geographic coordinate system. Defaults to WGS84. | +WGS84 | +
include_assets | +set of string | +Only Include specific assets. | +None | +
exclude_assets | +set of string | +Exclude specific assets. | +None | +
include_asset_types | +set of string | +Only include some assets based on their type. | +None | +
exclude_asset_types | +set of string | +Exclude some assets based on their type. | +None | +
reader | +rio_tiler.io.BaseReader | +rio-tiler Reader. Defaults to rio_tiler.io.Reader . |
+rio_tiler.io.Reader |
+
reader_options | +dict | +Additional option to forward to the Reader. Defaults to {} . |
+{} |
+
fetch_options | +dict | +Options to pass to rio_tiler.io.stac.fetch function fetching the STAC Items. Defaults to {} . |
+{} |
+
geographic_bounds
+
Return dataset bounds in geographic_crs.
+def feature(
+ self,
+ shape: Dict,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts defined by geojson feature from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.feature method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def info(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.Info]
+
Return metadata from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. Required keyword argument. | +None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple assets info in form of {"asset1": rio_tile.models.Info}. | +
def merged_statistics(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None,
+ max_size: int = 1024,
+ **kwargs: Any
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return array statistics for multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
categorical | +bool | +treat input data as categorical data. Defaults to False. | +False | +
categories | +list of numbers | +list of categories to return value for. | +None | +
percentiles | +list of numbers | +list of percentile values to calculate. Defaults to [2, 98] . |
+[2, 98] |
+
hist_options | +dict | +Options to forward to numpy.histogram function. | +None | +
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
kwargs | +optional | +Options to forward to the self.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
Dict[str, rio_tiler.models.BandStatistics] | +bands statistics. | +
def parse_expression(
+ self,
+ expression: str,
+ asset_as_band: bool = False
+) -> Tuple
+
Parse rio-tiler band math expression.
+def part(
+ self,
+ bbox: Tuple[float, float, float, float],
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge parts from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.part method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def point(
+ self,
+ lon: float,
+ lat: float,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.PointData
+
Read pixel value from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.point method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.PointData | +PointData instance. | +
def preview(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge previews from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.preview method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def statistics(
+ self,
+ assets: Union[Sequence[str], str] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_expression: Union[Dict[str, str], NoneType] = None,
+ **kwargs: Any
+) -> Dict[str, Dict[str, rio_tiler.models.BandStatistics]]
+
Return array statistics for multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
assets | +sequence of str or str | +assets to fetch info from. | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
asset_expression | +dict | +rio-tiler expression for each asset (e.g. {"asset1": "b1/b2+b3", "asset2": ...}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.statistics method. |
+None | +
Returns:
+Type | +Description | +
---|---|
dict | +Multiple assets statistics in form of {"asset1": {"1": rio_tiler.models.BandStatistics, ...}}. | +
def tile(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int,
+ assets: Union[Sequence[str], str] = None,
+ expression: Union[str, NoneType] = None,
+ asset_indexes: Union[Dict[str, Union[Sequence[int], int]], NoneType] = None,
+ asset_as_band: bool = False,
+ **kwargs: Any
+) -> rio_tiler.models.ImageData
+
Read and merge Web Map tiles from multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
assets | +sequence of str or str | +assets to fetch info from. | +None | +
expression | +str | +rio-tiler expression for the asset list (e.g. asset1/asset2+asset3). | +None | +
asset_indexes | +dict | +Band indexes for each asset (e.g {"asset1": 1, "asset2": (1, 2,)}). | +None | +
kwargs | +optional | +Options to forward to the self.reader.tile method. |
+None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
rio_tiler.io.xarray: Xarray Reader.
+WGS84_CRS
+
rioxarray
+
xarray
+
class XarrayReader(
+ input: 'xarray.DataArray',
+ tms: 'TileMatrixSet' = <TileMatrixSet title='Google Maps Compatible for the World' id='WebMercatorQuad' crs='http://www.opengis.net/def/crs/EPSG/0/3857'>,
+ geographic_crs: 'CRS' = CRS.from_epsg(4326)
+)
+
Xarray Reader.
+Name | +Type | +Description | +Default | +
---|---|---|---|
input | +xarray.DataArray | +Xarray DataArray dataset. | +None | +
tms | +morecantile.TileMatrixSet | +TileMatrixSet grid definition. Defaults to WebMercatorQuad . |
+WebMercatorQuad |
+
geographic_crs | +rasterio.crs.CRS | +CRS to use as geographic coordinate system. Defaults to WGS84. | +WGS84 | +
band_names
+
Return list of band names
in DataArray.
geographic_bounds
+
Return dataset bounds in geographic_crs.
+maxzoom
+
Return dataset maxzoom.
+minzoom
+
Return dataset minzoom.
+def feature(
+ self,
+ shape: 'Dict',
+ dst_crs: 'Optional[CRS]' = None,
+ shape_crs: 'CRS' = CRS.from_epsg(4326),
+ resampling_method: 'WarpResampling' = 'nearest',
+ nodata: 'Optional[NoData]' = None
+) -> 'ImageData'
+
Read part of a dataset defined by a geojson feature.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
shape | +dict | +Valid GeoJSON feature. | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
shape_crs | +rasterio.crs.CRS | +Input geojson coordinate reference system. Defaults to epsg:4326 . |
+epsg:4326 |
+
resampling_method | +WarpResampling | +WarpKernel resampling algorithm. Defaults to nearest . |
+nearest |
+
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def get_maxzoom(
+ self
+) -> 'int'
+
Define dataset maximum zoom level.
+def get_minzoom(
+ self
+) -> 'int'
+
Define dataset minimum zoom level.
+def info(
+ self
+) -> 'Info'
+
Return xarray.DataArray info.
+def part(
+ self,
+ bbox: 'BBox',
+ dst_crs: 'Optional[CRS]' = None,
+ bounds_crs: 'CRS' = CRS.from_epsg(4326),
+ resampling_method: 'WarpResampling' = 'nearest',
+ auto_expand: 'bool' = True,
+ nodata: 'Optional[NoData]' = None
+) -> 'ImageData'
+
Read part of a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
bbox | +tuple | +Output bounds (left, bottom, right, top) in target crs ("dst_crs"). | +None | +
dst_crs | +rasterio.crs.CRS | +Overwrite target coordinate reference system. | +None | +
bounds_crs | +rasterio.crs.CRS | +Bounds Coordinate Reference System. Defaults to epsg:4326 . |
+epsg:4326 |
+
resampling_method | +WarpResampling | +WarpKernel resampling algorithm. Defaults to nearest . |
+nearest |
+
auto_expand | +boolean | +When True, rioxarray's clip_box will expand clip search if only 1D raster found with clip. When False, will throw OneDimensionalRaster error if only 1 x or y data point is found. Defaults to True. |
+True | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def point(
+ self,
+ lon: 'float',
+ lat: 'float',
+ coord_crs: 'CRS' = CRS.from_epsg(4326),
+ nodata: 'Optional[NoData]' = None
+) -> 'PointData'
+
Read a pixel value from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
lon | +float | +Longitude. | +None | +
lat | +float | +Latitude. | +None | +
coord_crs | +rasterio.crs.CRS | +Coordinate Reference System of the input coords. Defaults to epsg:4326 . |
+epsg:4326 |
+
Returns:
+Type | +Description | +
---|---|
None | +PointData | +
def preview(
+ self,
+ max_size: 'int' = 1024,
+ height: 'Optional[int]' = None,
+ width: 'Optional[int]' = None
+) -> 'ImageData'
+
Return a preview of a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
max_size | +int | +Limit the size of the longest dimension of the dataset read, respecting bounds X/Y aspect ratio. Defaults to 1024. | +1024 | +
height | +int | +Output height of the array. | +None | +
width | +int | +Output width of the array. | +None | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and input spatial info. | +
def statistics(
+ self,
+ categorical: 'bool' = False,
+ categories: 'Optional[List[float]]' = None,
+ percentiles: 'Optional[List[int]]' = None,
+ hist_options: 'Optional[Dict]' = None,
+ max_size: 'int' = 1024,
+ **kwargs: 'Any'
+) -> 'Dict[str, BandStatistics]'
+
Return bands statistics from a dataset.
+def tile(
+ self,
+ tile_x: 'int',
+ tile_y: 'int',
+ tile_z: 'int',
+ tilesize: 'int' = 256,
+ resampling_method: 'WarpResampling' = 'nearest',
+ auto_expand: 'bool' = True,
+ nodata: 'Optional[NoData]' = None
+) -> 'ImageData'
+
Read a Web Map tile from a dataset.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
tilesize | +int | +Output image size. Defaults to 256 . |
+256 |
+
resampling_method | +WarpResampling | +WarpKernel resampling algorithm. Defaults to nearest . |
+nearest |
+
auto_expand | +boolean | +When True, rioxarray's clip_box will expand clip search if only 1D raster found with clip. When False, will throw OneDimensionalRaster error if only 1 x or y data point is found. Defaults to True. |
+True | +
Returns:
+Type | +Description | +
---|---|
rio_tiler.models.ImageData | +ImageData instance with data, mask and tile spatial info. | +
def tile_exists(
+ self,
+ tile_x: int,
+ tile_y: int,
+ tile_z: int
+) -> bool
+
Check if a tile intersects the dataset bounds.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
tile_x | +int | +Tile's horizontal index. | +None | +
tile_y | +int | +Tile's vertical index. | +None | +
tile_z | +int | +Tile's zoom level index. | +None | +
Returns:
+Type | +Description | +
---|---|
bool | +True if the tile intersects the dataset bounds. | +
rio-tiler models.
+dtype_ranges
+
def masked_and_3d(
+ array: numpy.ndarray
+) -> numpy.ma.core.MaskedArray
+
Makes sure we have a 3D array and mask
+def rescale_image(
+ array: numpy.ma.core.MaskedArray,
+ in_range: Sequence[Tuple[Union[float, int], Union[float, int]]],
+ out_range: Sequence[Tuple[Union[float, int], Union[float, int]]] = ((0, 255),),
+ out_dtype: Union[str, numpy.number] = 'uint8'
+) -> numpy.ma.core.MaskedArray
+
Rescale image data in-place.
+def to_coordsbbox(
+ bbox
+) -> Union[rasterio.coords.BoundingBox, NoneType]
+
Convert bbox to CoordsBbox nameTuple.
+def to_masked(
+ array: numpy.ndarray
+) -> numpy.ma.core.MaskedArray
+
Makes sure we have a MaskedArray.
+class BandStatistics(
+ __pydantic_self__,
+ **data: 'Any'
+)
+
Band statistics
+model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
def from_orm(
+ obj: 'Any'
+) -> 'Model'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
+Behaves as if Config.extra = 'allow'
was set since it adds all passed values
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +The set of field names accepted for the Model instance. | +None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to raise an exception on invalid fields. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If json_data is not a JSON string. |
+
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Model'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'typing.Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Model'
+
model_computed_fields
+
Get the computed fields of this model instance.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been set on this model instance.
+def copy(
+ self: 'Model',
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'typing.Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. |
+None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. |
+None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. |
+None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'typing.Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'typing.Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self: 'Model',
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal[('json', 'python')] | str" = 'python',
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. |
+None | +
include | +None | +A list of fields to include in the output. | +None | +
exclude | +None | +A list of fields to exclude from the output. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that are unset or None from the output. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value from the output. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None from the output. |
+None | +
round_trip | +None | +Whether to enable serialization and deserialization round-trip support. | +None | +
warnings | +None | +Whether to log warnings when invalid fields are encountered. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. Can take either a string or set of strings. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. Can take either a string or set of strings. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that have the default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +Whether to use serialization/deserialization between JSON and class instance. | +None | +
warnings | +None | +Whether to show any warnings that occurred during serialization. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class Bounds(
+ __pydantic_self__,
+ **data: 'Any'
+)
+
Dataset Bounding box
+model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
def from_orm(
+ obj: 'Any'
+) -> 'Model'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
+Behaves as if Config.extra = 'allow'
was set since it adds all passed values
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +The set of field names accepted for the Model instance. | +None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to raise an exception on invalid fields. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If json_data is not a JSON string. |
+
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Model'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'typing.Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Model'
+
model_computed_fields
+
Get the computed fields of this model instance.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been set on this model instance.
+def copy(
+ self: 'Model',
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'typing.Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. |
+None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. |
+None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. |
+None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'typing.Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'typing.Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self: 'Model',
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal[('json', 'python')] | str" = 'python',
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. |
+None | +
include | +None | +A list of fields to include in the output. | +None | +
exclude | +None | +A list of fields to exclude from the output. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that are unset or None from the output. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value from the output. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None from the output. |
+None | +
round_trip | +None | +Whether to enable serialization and deserialization round-trip support. | +None | +
warnings | +None | +Whether to log warnings when invalid fields are encountered. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. Can take either a string or set of strings. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. Can take either a string or set of strings. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that have the default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +Whether to use serialization/deserialization between JSON and class instance. | +None | +
warnings | +None | +Whether to show any warnings that occurred during serialization. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class ImageData(
+ array: numpy.ndarray,
+ cutline_mask: Union[numpy.ndarray, NoneType] = None,
+ *,
+ assets: Union[List, NoneType] = None,
+ bounds=None,
+ crs: Union[rasterio.crs.CRS, NoneType] = None,
+ metadata: Union[Dict, NoneType] = NOTHING,
+ band_names: List[str] = NOTHING,
+ dataset_statistics: Union[Sequence[Tuple[float, float]], NoneType] = None
+)
+
Image Data class.
+Name | +Type | +Description | +Default | +
---|---|---|---|
array | +numpy.ma.MaskedArray | +image values. | +None | +
assets | +list | +list of assets used to construct the data values. | +None | +
bounds | +BoundingBox | +bounding box of the data. | +None | +
crs | +rasterio.crs.CRS | +Coordinates Reference System of the bounds. | +None | +
metadata | +dict | +Additional metadata. Defaults to {} . |
+{} |
+
band_names | +list | +name of each band. Defaults to ["1", "2", "3"] for 3 bands image. |
+["1", "2", "3"] for 3 bands image |
+
dataset_statistics | +list | +dataset statistics [(min, max), (min, max)] |
+None | +
def create_from_list(
+ data: Sequence[ForwardRef('ImageData')]
+) -> 'ImageData'
+
Create ImageData from a sequence of ImageData objects.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +sequence | +sequence of ImageData. | +None | +
def from_array(
+ arr: numpy.ndarray
+) -> 'ImageData'
+
Create ImageData from a numpy array.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
arr | +numpy.ndarray | +Numpy array or Numpy masked array. | +None | +
def from_bytes(
+ data: bytes
+) -> 'ImageData'
+
Create ImageData from bytes.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +bytes | +raster dataset as bytes. | +None | +
count
+
Number of bands.
+data
+
Return data part of the masked array.
+height
+
Height of the data array.
+mask
+
Return Mask in form of rasterio dataset mask.
+transform
+
Returns the affine transform.
+width
+
Width of the data array.
+def apply_color_formula(
+ self,
+ color_formula: Union[str, NoneType]
+)
+
Apply color-operations formula in place.
+def apply_colormap(
+ self,
+ colormap: Union[Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]]]
+) -> 'ImageData'
+
Apply colormap to the image data.
+def apply_expression(
+ self,
+ expression: str
+) -> 'ImageData'
+
Apply expression to the image data.
+def as_masked(
+ self
+) -> numpy.ma.core.MaskedArray
+
return a numpy masked array.
+def clip(
+ self,
+ bbox: Tuple[float, float, float, float]
+) -> 'ImageData'
+
Clip data and mask to a bbox.
+def data_as_image(
+ self
+) -> numpy.ndarray
+
Return the data array reshaped into an image processing/visualization software friendly order.
+(bands, rows, columns) -> (rows, columns, bands).
+def post_process(
+ self,
+ in_range: Union[Sequence[Tuple[Union[float, int], Union[float, int]]], NoneType] = None,
+ out_dtype: Union[str, numpy.number] = 'uint8',
+ color_formula: Union[str, NoneType] = None,
+ **kwargs: Any
+) -> 'ImageData'
+
Post-process image data.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
in_range | +tuple | +input min/max bounds value to rescale from. | +None | +
out_dtype | +str | +output datatype after rescaling. Defaults to uint8 . |
+uint8 |
+
color_formula | +str | +color-ops formula (see: vincentsarago/color-ops). | +None | +
kwargs | +optional | +keyword arguments to forward to rio_tiler.utils.linear_rescale . |
+None | +
Returns:
+Type | +Description | +
---|---|
ImageData | +new ImageData object with the updated data. | +
def render(
+ self,
+ add_mask: bool = True,
+ img_format: str = 'PNG',
+ colormap: Union[Dict[int, Tuple[int, int, int, int]], Sequence[Tuple[Tuple[Union[float, int], Union[float, int]], Tuple[int, int, int, int]]], NoneType] = None,
+ **kwargs
+) -> bytes
+
Render data to image blob.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
add_mask | +bool | +add mask to output image. Defaults to True . |
+True |
+
img_format | +str | +output image format. Defaults to PNG . |
+PNG |
+
colormap | +dict or sequence | +RGBA Color Table dictionary or sequence. | +None | +
kwargs | +optional | +keyword arguments to forward to rio_tiler.utils.render . |
+None | +
Returns:
+Type | +Description | +
---|---|
bytes | +image. | +
def rescale(
+ self,
+ in_range: Sequence[Tuple[Union[float, int], Union[float, int]]],
+ out_range: Sequence[Tuple[Union[float, int], Union[float, int]]] = ((0, 255),),
+ out_dtype: Union[str, numpy.number] = 'uint8'
+)
+
Rescale data in place.
+def resize(
+ self,
+ height: int,
+ width: int,
+ resampling_method: Literal['nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos', 'average', 'mode', 'gauss', 'rms'] = 'nearest'
+) -> 'ImageData'
+
Resize data and mask.
+def statistics(
+ self,
+ categorical: bool = False,
+ categories: Union[List[float], NoneType] = None,
+ percentiles: Union[List[int], NoneType] = None,
+ hist_options: Union[Dict, NoneType] = None
+) -> Dict[str, rio_tiler.models.BandStatistics]
+
Return statistics from ImageData.
+class Info(
+ __pydantic_self__,
+ **data: 'Any'
+)
+
Dataset Info.
+model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
def from_orm(
+ obj: 'Any'
+) -> 'Model'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
+Behaves as if Config.extra = 'allow'
was set since it adds all passed values
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +The set of field names accepted for the Model instance. | +None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to raise an exception on invalid fields. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If json_data is not a JSON string. |
+
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Model'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'typing.Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Model'
+
model_computed_fields
+
Get the computed fields of this model instance.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been set on this model instance.
+def copy(
+ self: 'Model',
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'typing.Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. |
+None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. |
+None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. |
+None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'typing.Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'typing.Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self: 'Model',
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal[('json', 'python')] | str" = 'python',
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. |
+None | +
include | +None | +A list of fields to include in the output. | +None | +
exclude | +None | +A list of fields to exclude from the output. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that are unset or None from the output. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value from the output. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None from the output. |
+None | +
round_trip | +None | +Whether to enable serialization and deserialization round-trip support. | +None | +
warnings | +None | +Whether to log warnings when invalid fields are encountered. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. Can take either a string or set of strings. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. Can take either a string or set of strings. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that have the default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +Whether to use serialization/deserialization between JSON and class instance. | +None | +
warnings | +None | +Whether to show any warnings that occurred during serialization. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class PointData(
+ array: numpy.ndarray,
+ *,
+ band_names: List[str] = NOTHING,
+ coordinates: Union[Tuple[float, float], NoneType] = None,
+ crs: Union[rasterio.crs.CRS, NoneType] = None,
+ assets: Union[List, NoneType] = None,
+ metadata: Union[Dict, NoneType] = NOTHING
+)
+
Point Data class.
+Name | +Type | +Description | +Default | +
---|---|---|---|
array | +numpy.ma.MaskedArray | +pixel values. | +None | +
band_names | +list | +name of each band. Defaults to ["1", "2", "3"] for 3 bands image. |
+["1", "2", "3"] for 3 bands image |
+
coordinates | +tuple | +Point's coordinates. | +None | +
crs | +rasterio.crs.CRS | +Coordinates Reference System of the bounds. | +None | +
assets | +list | +list of assets used to construct the data values. | +None | +
metadata | +dict | +Additional metadata. Defaults to {} . |
+{} |
+
def create_from_list(
+ data: Sequence[ForwardRef('PointData')]
+)
+
Create PointData from a sequence of PointData objects.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
data | +sequence | +sequence of PointData. | +None | +
count
+
Number of bands.
+data
+
Return data part of the masked array.
+mask
+
Return Mask in form of rasterio dataset mask.
+def apply_expression(
+ self,
+ expression: str
+) -> 'PointData'
+
Apply expression to the image data.
+def as_masked(
+ self
+) -> numpy.ma.core.MaskedArray
+
return a numpy masked array.
+class RioTilerBaseModel(
+ __pydantic_self__,
+ **data: 'Any'
+)
+
Provides dictionary access for pydantic models, for backwards compatibility.
+model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
def from_orm(
+ obj: 'Any'
+) -> 'Model'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
+Behaves as if Config.extra = 'allow'
was set since it adds all passed values
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +The set of field names accepted for the Model instance. | +None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to raise an exception on invalid fields. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If json_data is not a JSON string. |
+
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Model'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'typing.Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Model'
+
model_computed_fields
+
Get the computed fields of this model instance.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been set on this model instance.
+def copy(
+ self: 'Model',
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'typing.Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. |
+None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. |
+None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. |
+None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'typing.Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'typing.Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self: 'Model',
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal[('json', 'python')] | str" = 'python',
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. |
+None | +
include | +None | +A list of fields to include in the output. | +None | +
exclude | +None | +A list of fields to exclude from the output. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that are unset or None from the output. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value from the output. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None from the output. |
+None | +
round_trip | +None | +Whether to enable serialization and deserialization round-trip support. | +None | +
warnings | +None | +Whether to log warnings when invalid fields are encountered. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. Can take either a string or set of strings. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. Can take either a string or set of strings. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that have the default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +Whether to use serialization/deserialization between JSON and class instance. | +None | +
warnings | +None | +Whether to show any warnings that occurred during serialization. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+class SpatialInfo(
+ __pydantic_self__,
+ **data: 'Any'
+)
+
Dataset SpatialInfo
+model_config
+
model_fields
+
def construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
def from_orm(
+ obj: 'Any'
+) -> 'Model'
+
def model_construct(
+ _fields_set: 'set[str] | None' = None,
+ **values: 'Any'
+) -> 'Model'
+
Creates a new instance of the Model
class with validated data.
Creates a new model setting __dict__
and __pydantic_fields_set__
from trusted or pre-validated data.
+Default values are respected, but no other validation is performed.
+Behaves as if Config.extra = 'allow'
was set since it adds all passed values
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_fields_set | +None | +The set of field names accepted for the Model instance. | +None | +
values | +None | +Trusted or pre-validated data dictionary. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A new instance of the Model class with validated data. |
+
def model_json_schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ schema_generator: 'type[GenerateJsonSchema]' = <class 'pydantic.json_schema.GenerateJsonSchema'>,
+ mode: 'JsonSchemaMode' = 'validation'
+) -> 'dict[str, Any]'
+
Generates a JSON schema for a model class.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
by_alias | +None | +Whether to use attribute aliases or not. | +None | +
ref_template | +None | +The reference template. | +None | +
schema_generator | +None | +To override the logic used to generate the JSON schema, as a subclass ofGenerateJsonSchema with your desired modifications |
+None | +
mode | +None | +The mode in which to generate the schema. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The JSON schema for the given model class. | +
def model_parametrized_name(
+ params: 'tuple[type[Any], ...]'
+) -> 'str'
+
Compute the class name for parametrizations of generic classes.
+This method can be overridden to achieve a custom naming scheme for generic BaseModels.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
params | +None | +Tuple of types of the class. Given a generic classModel with 2 type variables and a concrete model Model[str, int] ,the value (str, int) would be passed to params . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +String representing the new class where params are passed to cls as type variables. |
+
Raises:
+Type | +Description | +
---|---|
TypeError | +Raised when trying to generate concrete names for non-generic models. | +
def model_rebuild(
+ *,
+ force: 'bool' = False,
+ raise_errors: 'bool' = True,
+ _parent_namespace_depth: 'int' = 2,
+ _types_namespace: 'dict[str, Any] | None' = None
+) -> 'bool | None'
+
Try to rebuild the pydantic-core schema for the model.
+This may be necessary when one of the annotations is a ForwardRef which could not be resolved during +the initial attempt to build the schema, and automatic rebuilding fails.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
force | +None | +Whether to force the rebuilding of the model schema, defaults to False . |
+None | +
raise_errors | +None | +Whether to raise errors, defaults to True . |
+None | +
_parent_namespace_depth | +None | +The depth level of the parent namespace, defaults to 2. | +None | +
_types_namespace | +None | +The types namespace, defaults to None . |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +Returns None if the schema is already "complete" and rebuilding was not required.If rebuilding was required, returns True if rebuilding was successful, otherwise False . |
+
def model_validate(
+ obj: 'Any',
+ *,
+ strict: 'bool | None' = None,
+ from_attributes: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate a pydantic model instance.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
obj | +None | +The object to validate. | +None | +
strict | +None | +Whether to raise an exception on invalid fields. | +None | +
from_attributes | +None | +Whether to extract data from object attributes. | +None | +
context | +None | +Additional context to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated model instance. | +
Raises:
+Type | +Description | +
---|---|
ValidationError | +If the object could not be validated. | +
def model_validate_json(
+ json_data: 'str | bytes | bytearray',
+ *,
+ strict: 'bool | None' = None,
+ context: 'dict[str, Any] | None' = None
+) -> 'Model'
+
Validate the given JSON data against the Pydantic model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
json_data | +None | +The JSON data to validate. | +None | +
strict | +None | +Whether to enforce types strictly. | +None | +
context | +None | +Extra variables to pass to the validator. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +The validated Pydantic model. | +
Raises:
+Type | +Description | +
---|---|
ValueError | +If json_data is not a JSON string. |
+
def parse_file(
+ path: 'str | Path',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def parse_obj(
+ obj: 'Any'
+) -> 'Model'
+
def parse_raw(
+ b: 'str | bytes',
+ *,
+ content_type: 'str | None' = None,
+ encoding: 'str' = 'utf8',
+ proto: '_deprecated_parse.Protocol | None' = None,
+ allow_pickle: 'bool' = False
+) -> 'Model'
+
def schema(
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}'
+) -> 'typing.Dict[str, Any]'
+
def schema_json(
+ *,
+ by_alias: 'bool' = True,
+ ref_template: 'str' = '#/$defs/{model}',
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def update_forward_refs(
+ **localns: 'Any'
+) -> 'None'
+
def validate(
+ value: 'Any'
+) -> 'Model'
+
model_computed_fields
+
Get the computed fields of this model instance.
+model_extra
+
Get extra fields set during validation.
+model_fields_set
+
Returns the set of fields that have been set on this model instance.
+def copy(
+ self: 'Model',
+ *,
+ include: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ exclude: 'AbstractSetIntStr | MappingIntStrAny | None' = None,
+ update: 'typing.Dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Returns a copy of the model.
+Deprecated
+This method is now deprecated; use model_copy
instead.
If you need include
or exclude
, use:
data = self.model_dump(include=include, exclude=exclude, round_trip=True)
+data = {**data, **(update or {})}
+copied = self.model_validate(data)
+
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
include | +None | +Optional set or mapping specifying which fields to include in the copied model. |
+None | +
exclude | +None | +Optional set or mapping specifying which fields to exclude in the copied model. |
+None | +
update | +None | +Optional dictionary of field-value pairs to override field values in the copied model. |
+None | +
deep | +None | +If True, the values of fields that are Pydantic models will be deep copied. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A copy of the model with included, excluded and updated fields as specified. | +
def dict(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False
+) -> 'typing.Dict[str, Any]'
+
def json(
+ self,
+ *,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ encoder: 'typing.Callable[[Any], Any] | None' = PydanticUndefined,
+ models_as_dict: 'bool' = PydanticUndefined,
+ **dumps_kwargs: 'Any'
+) -> 'str'
+
def model_copy(
+ self: 'Model',
+ *,
+ update: 'dict[str, Any] | None' = None,
+ deep: 'bool' = False
+) -> 'Model'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#model_copy
+Returns a copy of the model.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
update | +None | +Values to change/add in the new model. Note: the data is not validated before creating the new model. You should trust this data. |
+None | +
deep | +None | +Set to True to make a deep copy of the model. |
+None | +
Returns:
+Type | +Description | +
---|---|
None | +New model instance. | +
def model_dump(
+ self,
+ *,
+ mode: "Literal[('json', 'python')] | str" = 'python',
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'dict[str, Any]'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump
+Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mode | +None | +The mode in which to_python should run.If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. |
+None | +
include | +None | +A list of fields to include in the output. | +None | +
exclude | +None | +A list of fields to exclude from the output. | +None | +
by_alias | +None | +Whether to use the field's alias in the dictionary key if defined. | +None | +
exclude_unset | +None | +Whether to exclude fields that are unset or None from the output. | +None | +
exclude_defaults | +None | +Whether to exclude fields that are set to their default value from the output. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None from the output. |
+None | +
round_trip | +None | +Whether to enable serialization and deserialization round-trip support. | +None | +
warnings | +None | +Whether to log warnings when invalid fields are encountered. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A dictionary representation of the model. | +
def model_dump_json(
+ self,
+ *,
+ indent: 'int | None' = None,
+ include: 'IncEx' = None,
+ exclude: 'IncEx' = None,
+ by_alias: 'bool' = False,
+ exclude_unset: 'bool' = False,
+ exclude_defaults: 'bool' = False,
+ exclude_none: 'bool' = False,
+ round_trip: 'bool' = False,
+ warnings: 'bool' = True
+) -> 'str'
+
Usage docs: docs.pydantic.dev/2.2/usage/serialization/#modelmodel_dump_json
+Generates a JSON representation of the model using Pydantic's to_json
method.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
indent | +None | +Indentation to use in the JSON output. If None is passed, the output will be compact. | +None | +
include | +None | +Field(s) to include in the JSON output. Can take either a string or set of strings. | +None | +
exclude | +None | +Field(s) to exclude from the JSON output. Can take either a string or set of strings. | +None | +
by_alias | +None | +Whether to serialize using field aliases. | +None | +
exclude_unset | +None | +Whether to exclude fields that have not been explicitly set. | +None | +
exclude_defaults | +None | +Whether to exclude fields that have the default value. | +None | +
exclude_none | +None | +Whether to exclude fields that have a value of None . |
+None | +
round_trip | +None | +Whether to use serialization/deserialization between JSON and class instance. | +None | +
warnings | +None | +Whether to show any warnings that occurred during serialization. | +None | +
Returns:
+Type | +Description | +
---|---|
None | +A JSON string representation of the model. | +
def model_post_init(
+ self,
+ _BaseModel__context: 'Any'
+) -> 'None'
+
Override this method to perform additional initialization after __init__
and model_construct
.
This is useful if you want to do some validation that requires the entire model to be initialized.
+ + + + + + + +rio-tiler.mosaic.methods abc class.
+class MosaicMethodBase(
+
+)
+
Abstract base class for rio-tiler-mosaic methods objects.
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: numpy.ma.core.MaskedArray
+)
+
Fill mosaic array.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
array |	+numpy.ma.MaskedArray |	+data |	+None |	+
rio_tiler.mosaic.methods.defaults: default mosaic filling methods.
+class FirstMethod(
+
+)
+
Feed the mosaic array with the first pixel available.
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add data to the mosaic array.
+class HighestMethod(
+
+)
+
Feed the mosaic array with the highest pixel values.
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add data to the mosaic array.
+class LastBandHighMethod(
+
+)
+
Feed the mosaic array using the last band as decision factor (highest value).
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add data to the mosaic array.
+class LastBandLowMethod(
+
+)
+
Feed the mosaic array using the last band as decision factor (lowest value).
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add data to the mosaic array.
+class LowestMethod(
+
+)
+
Feed the mosaic array with the lowest pixel values.
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return data.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add data to the mosaic array.
+class MeanMethod(
+ enforce_data_type: bool = True
+)
+
Stack the arrays and return the Mean pixel value.
+cutline_mask
+
enforce_data_type
+
exit_when_filled
+
mosaic
+
data
+
Return Mean of the data stack.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: numpy.ma.core.MaskedArray
+)
+
Add array to the stack.
+class MedianMethod(
+ enforce_data_type: bool = True
+)
+
Stack the arrays and return the Median pixel value.
+cutline_mask
+
enforce_data_type
+
exit_when_filled
+
mosaic
+
data
+
Return Median of the data stack.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add array to the stack.
+class StdevMethod(
+
+)
+
Stack the arrays and return the Standard Deviation value.
+cutline_mask
+
exit_when_filled
+
mosaic
+
data
+
Return STDDEV of the data stack.
+is_done
+
Check if the mosaic filling is done.
+def feed(
+ self,
+ array: Union[numpy.ma.core.MaskedArray, NoneType]
+)
+
Add array to the stack.
+ + + + + + + +rio_tiler.mosaic: create tile from multiple assets.
+MAX_THREADS
+
def mosaic_point_reader(
+ mosaic_assets: Sequence,
+ reader: Callable[..., rio_tiler.models.PointData],
+ *args: Any,
+ pixel_selection: Union[Type[rio_tiler.mosaic.methods.base.MosaicMethodBase], rio_tiler.mosaic.methods.base.MosaicMethodBase] = <class 'rio_tiler.mosaic.methods.defaults.FirstMethod'>,
+ chunk_size: Union[int, NoneType] = None,
+ threads: int = 10,
+ allowed_exceptions: Tuple = (<class 'rio_tiler.errors.PointOutsideBounds'>,),
+ **kwargs
+) -> Tuple[rio_tiler.models.PointData, List]
+
Merge multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mosaic_assets | +sequence | +List of assets. | +None | +
reader | +callable | +Reader function. The function MUST take (asset, *args, **kwargs) as arguments, and MUST return a PointData object. |
+None | +
args | +Any | +Argument to forward to the reader function. | +None | +
pixel_selection | +MosaicMethod | +Instance of MosaicMethodBase class. Defaults to rio_tiler.mosaic.methods.defaults.FirstMethod . |
+rio_tiler.mosaic.methods.defaults.FirstMethod |
+
chunk_size | +int | +Control the number of asset to process per loop. | +None | +
threads | +int | +Number of threads to use. If <= 1, runs single threaded without an event loop. By default reads from the MAX_THREADS environment variable, and if not found defaults to multiprocessing.cpu_count() * 5. | +None | +
allowed_exceptions |	+tuple |	+List of exceptions which will be ignored. Note: PointOutsideBounds is likely to be raised and should be included in the allowed_exceptions. Defaults to `(PointOutsideBounds, )`. |
	+`(PointOutsideBounds, )` |
+
kwargs | +optional | +Reader callable's keywords options. | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +PointData and assets (list). | +
def mosaic_reader(
+ mosaic_assets: Sequence,
+ reader: Callable[..., rio_tiler.models.ImageData],
+ *args: Any,
+ pixel_selection: Union[Type[rio_tiler.mosaic.methods.base.MosaicMethodBase], rio_tiler.mosaic.methods.base.MosaicMethodBase] = <class 'rio_tiler.mosaic.methods.defaults.FirstMethod'>,
+ chunk_size: Union[int, NoneType] = None,
+ threads: int = 10,
+ allowed_exceptions: Tuple = (<class 'rio_tiler.errors.TileOutsideBounds'>,),
+ **kwargs
+) -> Tuple[rio_tiler.models.ImageData, List]
+
Merge multiple assets.
+Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
mosaic_assets | +sequence | +List of assets. | +None | +
reader | +callable | +Reader function. The function MUST take (asset, *args, **kwargs) as arguments, and MUST return an ImageData. |
+None | +
args | +Any | +Argument to forward to the reader function. | +None | +
pixel_selection | +MosaicMethod | +Instance of MosaicMethodBase class. Defaults to rio_tiler.mosaic.methods.defaults.FirstMethod . |
+rio_tiler.mosaic.methods.defaults.FirstMethod |
+
chunk_size | +int | +Control the number of asset to process per loop. | +None | +
threads | +int | +Number of threads to use. If <= 1, runs single threaded without an event loop. By default reads from the MAX_THREADS environment variable, and if not found defaults to multiprocessing.cpu_count() * 5. | +None | +
allowed_exceptions | +tuple | +List of exceptions which will be ignored. Note: TileOutsideBounds is likely to be raised and should be included in the allowed_exceptions. Defaults to (TileOutsideBounds, ) . |
+(TileOutsideBounds, ) |
+
kwargs | +optional | +Reader callable's keywords options. | +None | +
Returns:
+Type | +Description | +
---|---|
tuple | +ImageData and assets (list). | +
Image file profiles.
+img_profiles
+
class ImagesProfiles(
+
+)
+
GDAL Image creation options.
+ref: github.com/mapnik/mapnik/wiki/Image-IO#default-output-details.
+def fromkeys(
+ iterable,
+ value=None
+)
+
def clear(
+ self
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ self
+)
+
def get(
+ self,
+ key,
+ default=None
+)
+
Like normal item access but return a copy of the value.
+def items(
+ self
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ self
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ self,
+ key,
+ default=<object object at 0x7f03e50de150>
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If key is not found, d is returned if given, otherwise KeyError is raised.
+def popitem(
+ self
+)
+
D.popitem() -> (k, v), remove and return some (key, value) pair
+as a 2-tuple; but raise KeyError if D is empty.
+def setdefault(
+ self,
+ key,
+ default=None
+)
+
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+def update(
+ self,
+ other=(),
+ /,
+ **kwds
+)
+
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+If E present and has a .keys() method, does: for k in E: D[k] = E[k] +If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v +In either case, this is followed by: for k, v in F.items(): D[k] = v
+def values(
+ self
+)
+
D.values() -> an object providing a view on D's values
+class JPEGProfile(
+ data={},
+ **kwds
+)
+
JPEG creation options ref: www.gdal.org/frmt_jpeg.html.
+defaults
+
def fromkeys(
+ iterable,
+ value=None
+)
+
def clear(
+ self
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ self
+)
+
def get(
+ self,
+ key,
+ default=None
+)
+
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+def items(
+ self
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ self
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ self,
+ key,
+ default=<object object at 0x7f03e50de150>
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If key is not found, d is returned if given, otherwise KeyError is raised.
+def popitem(
+ self
+)
+
D.popitem() -> (k, v), remove and return some (key, value) pair
+as a 2-tuple; but raise KeyError if D is empty.
+def setdefault(
+ self,
+ key,
+ default=None
+)
+
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+def update(
+ self,
+ other=(),
+ /,
+ **kwds
+)
+
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+If E present and has a .keys() method, does: for k in E: D[k] = E[k] +If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v +In either case, this is followed by: for k, v in F.items(): D[k] = v
+def values(
+ self
+)
+
D.values() -> an object providing a view on D's values
+class PNGProfile(
+ data={},
+ **kwds
+)
+
PNG creation options ref: www.gdal.org/frmt_png.html.
+defaults
+
def fromkeys(
+ iterable,
+ value=None
+)
+
def clear(
+ self
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ self
+)
+
def get(
+ self,
+ key,
+ default=None
+)
+
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+def items(
+ self
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ self
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ self,
+ key,
+ default=<object object at 0x7f03e50de150>
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If key is not found, d is returned if given, otherwise KeyError is raised.
+def popitem(
+ self
+)
+
D.popitem() -> (k, v), remove and return some (key, value) pair
+as a 2-tuple; but raise KeyError if D is empty.
+def setdefault(
+ self,
+ key,
+ default=None
+)
+
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+def update(
+ self,
+ other=(),
+ /,
+ **kwds
+)
+
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+If E present and has a .keys() method, does: for k in E: D[k] = E[k] +If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v +In either case, this is followed by: for k, v in F.items(): D[k] = v
+def values(
+ self
+)
+
D.values() -> an object providing a view on D's values
+class PNGRAWProfile(
+ data={},
+ **kwds
+)
+
PNG creation options ref: www.gdal.org/frmt_png.html.
+defaults
+
def fromkeys(
+ iterable,
+ value=None
+)
+
def clear(
+ self
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ self
+)
+
def get(
+ self,
+ key,
+ default=None
+)
+
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+def items(
+ self
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ self
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ self,
+ key,
+ default=<object object at 0x7f03e50de150>
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If key is not found, d is returned if given, otherwise KeyError is raised.
+def popitem(
+ self
+)
+
D.popitem() -> (k, v), remove and return some (key, value) pair
+as a 2-tuple; but raise KeyError if D is empty.
+def setdefault(
+ self,
+ key,
+ default=None
+)
+
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+def update(
+ self,
+ other=(),
+ /,
+ **kwds
+)
+
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+If E present and has a .keys() method, does: for k in E: D[k] = E[k] +If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v +In either case, this is followed by: for k, v in F.items(): D[k] = v
+def values(
+ self
+)
+
D.values() -> an object providing a view on D's values
+class WEBPProfile(
+ data={},
+ **kwds
+)
+
WEBP creation options ref: www.gdal.org/frmt_webp.html.
+defaults
+
def fromkeys(
+ iterable,
+ value=None
+)
+
def clear(
+ self
+)
+
D.clear() -> None. Remove all items from D.
+def copy(
+ self
+)
+
def get(
+ self,
+ key,
+ default=None
+)
+
D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+def items(
+ self
+)
+
D.items() -> a set-like object providing a view on D's items
+def keys(
+ self
+)
+
D.keys() -> a set-like object providing a view on D's keys
+def pop(
+ self,
+ key,
+ default=<object object at 0x7f03e50de150>
+)
+
D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+If key is not found, d is returned if given, otherwise KeyError is raised.
+def popitem(
+ self
+)
+
D.popitem() -> (k, v), remove and return some (key, value) pair
+as a 2-tuple; but raise KeyError if D is empty.
+def setdefault(
+ self,
+ key,
+ default=None
+)
+
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+def update(
+ self,
+ other=(),
+ /,
+ **kwds
+)
+
D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+If E present and has a .keys() method, does: for k in E: D[k] = E[k] +If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v +In either case, this is followed by: for k, v in F.items(): D[k] = v
+def values(
+ self
+)
+
D.values() -> an object providing a view on D's values
+ + + + + + + +