alessandro trinca tornidor
committed on
Commit · 49e8a51
1 Parent(s): c6839af
refactor: remove unused files, update samgis-lisa to version 1.0.1 also in pyproject.toml
- pyproject.toml +1 -1
- samgis_lisa_on_zero/__init__.py +0 -21
- samgis_lisa_on_zero/__version__.py +0 -4
- samgis_lisa_on_zero/io_package/__init__.py +0 -1
- samgis_lisa_on_zero/io_package/coordinates_pixel_conversion.py +0 -102
- samgis_lisa_on_zero/io_package/geo_helpers.py +0 -95
- samgis_lisa_on_zero/io_package/raster_helpers.py +0 -343
- samgis_lisa_on_zero/io_package/tms2geotiff.py +0 -187
- samgis_lisa_on_zero/io_package/wrappers_helpers.py +0 -195
- samgis_lisa_on_zero/prediction_api/__init__.py +0 -1
- samgis_lisa_on_zero/prediction_api/global_models.py +0 -6
- samgis_lisa_on_zero/prediction_api/lisa.py +0 -107
- samgis_lisa_on_zero/prediction_api/predictors.py +0 -77
- samgis_lisa_on_zero/utilities/__init__.py +0 -1
- samgis_lisa_on_zero/utilities/constants.py +0 -64
- samgis_lisa_on_zero/utilities/session_logger.py +0 -63
- samgis_lisa_on_zero/utilities/type_hints.py +0 -111
pyproject.toml
CHANGED
@@ -13,7 +13,7 @@ version = "1.5.0"
 
 [tool.poetry.dependencies]
 python = ">=3.10,<3.11"
-samgis-lisa = "1.0.
+samgis-lisa = "1.0.1"
 
 [tool.poetry.group.test]
 optional = true
samgis_lisa_on_zero/__init__.py
DELETED
@@ -1,21 +0,0 @@
-"""Get machine learning predictions from geodata raster images"""
-import logging
-import os
-
-# not used here but contextily_tile is imported in samgis_lisa_on_zero.io_package.tms2geotiff
-from contextily import tile as contextily_tile
-from pathlib import Path
-
-from lisa_on_cuda.utils import session_logger
-from samgis_lisa_on_zero.utilities.constants import SERVICE_NAME
-
-
-ROOT = Path(globals().get("__file__", "./_")).absolute().parent.parent
-PROJECT_ROOT_FOLDER = Path(os.getenv("PROJECT_ROOT_FOLDER", ROOT))
-WORKDIR = Path(os.getenv("WORKDIR", ROOT))
-MODEL_FOLDER_PROJECT_ROOT_FOLDER = Path(PROJECT_ROOT_FOLDER / "machine_learning_models")
-MODEL_FOLDER = Path(os.getenv("MODEL_FOLDER", MODEL_FOLDER_PROJECT_ROOT_FOLDER))
-
-loglevel = os.getenv('LOGLEVEL', 'INFO').upper()
-session_logger.change_logging(loglevel)
-app_logger = logging.getLogger(__name__)
samgis_lisa_on_zero/__version__.py
DELETED
@@ -1,4 +0,0 @@
-import importlib.metadata
-
-
-__version__ = importlib.metadata.version(__package__ or __name__)
samgis_lisa_on_zero/io_package/__init__.py
DELETED
@@ -1 +0,0 @@
-"""input/output helpers functions"""
samgis_lisa_on_zero/io_package/coordinates_pixel_conversion.py
DELETED
@@ -1,102 +0,0 @@
-"""functions useful to convert to/from latitude-longitude coordinates to pixel image coordinates"""
-from lisa_on_cuda.utils import session_logger
-from samgis_core.utilities.type_hints import TupleFloat, TupleFloatAny
-from samgis_lisa_on_zero import app_logger
-from samgis_lisa_on_zero.utilities.constants import TILE_SIZE, EARTH_EQUATORIAL_RADIUS
-from samgis_lisa_on_zero.utilities.type_hints import ImagePixelCoordinates
-from samgis_lisa_on_zero.utilities.type_hints import LatLngDict
-
-
-@session_logger.set_uuid_logging
-def _get_latlng2pixel_projection(latlng: LatLngDict) -> ImagePixelCoordinates:
-    from math import log, pi, sin
-
-    app_logger.debug(f"latlng: {type(latlng)}, value:{latlng}.")
-    app_logger.debug(f'latlng lat: {type(latlng.lat)}, value:{latlng.lat}.')
-    app_logger.debug(f'latlng lng: {type(latlng.lng)}, value:{latlng.lng}.')
-    try:
-        sin_y: float = sin(latlng.lat * pi / 180)
-        app_logger.debug(f"sin_y, #1:{sin_y}.")
-        sin_y = min(max(sin_y, -0.9999), 0.9999)
-        app_logger.debug(f"sin_y, #2:{sin_y}.")
-        x = TILE_SIZE * (0.5 + latlng.lng / 360)
-        app_logger.debug(f"x:{x}.")
-        y = TILE_SIZE * (0.5 - log((1 + sin_y) / (1 - sin_y)) / (4 * pi))
-        app_logger.debug(f"y:{y}.")
-
-        return {"x": x, "y": y}
-    except Exception as e_get_latlng2pixel_projection:
-        app_logger.error(f'args type:{type(latlng)}, {latlng}.')
-        app_logger.exception(f'e_get_latlng2pixel_projection:{e_get_latlng2pixel_projection}.', exc_info=True)
-        raise e_get_latlng2pixel_projection
-
-
-@session_logger.set_uuid_logging
-def _get_point_latlng_to_pixel_coordinates(latlng: LatLngDict, zoom: int | float) -> ImagePixelCoordinates:
-    from math import floor
-
-    try:
-        world_coordinate: ImagePixelCoordinates = _get_latlng2pixel_projection(latlng)
-        app_logger.debug(f"world_coordinate:{world_coordinate}.")
-        scale: int = pow(2, zoom)
-        app_logger.debug(f"scale:{scale}.")
-        return ImagePixelCoordinates(
-            x=floor(world_coordinate["x"] * scale),
-            y=floor(world_coordinate["y"] * scale)
-        )
-    except Exception as e_format_latlng_to_pixel_coordinates:
-        app_logger.error(f'latlng type:{type(latlng)}, {latlng}.')
-        app_logger.error(f'zoom type:{type(zoom)}, {zoom}.')
-        app_logger.exception(f'e_format_latlng_to_pixel_coordinates:{e_format_latlng_to_pixel_coordinates}.',
-                             exc_info=True)
-        raise e_format_latlng_to_pixel_coordinates
-
-
-@session_logger.set_uuid_logging
-def get_latlng_to_pixel_coordinates(
-        latlng_origin_ne: LatLngDict,
-        latlng_origin_sw: LatLngDict,
-        latlng_current_point: LatLngDict,
-        zoom: int | float,
-        k: str
-) -> ImagePixelCoordinates:
-    """
-    Parse the input request lambda event
-
-    Args:
-        latlng_origin_ne: NE latitude-longitude origin point
-        latlng_origin_sw: SW latitude-longitude origin point
-        latlng_current_point: latitude-longitude prompt point
-        zoom: Level of detail
-        k: prompt type
-
-    Returns:
-        ImagePixelCoordinates: pixel image coordinate point
-    """
-    app_logger.debug(f"latlng_origin - {k}: {type(latlng_origin_ne)}, value:{latlng_origin_ne}.")
-    app_logger.debug(f"latlng_current_point - {k}: {type(latlng_current_point)}, value:{latlng_current_point}.")
-    latlng_map_origin_ne = _get_point_latlng_to_pixel_coordinates(latlng_origin_ne, zoom)
-    latlng_map_origin_sw = _get_point_latlng_to_pixel_coordinates(latlng_origin_sw, zoom)
-    latlng_map_current_point = _get_point_latlng_to_pixel_coordinates(latlng_current_point, zoom)
-    diff_coord_x = abs(latlng_map_origin_sw["x"] - latlng_map_current_point["x"])
-    diff_coord_y = abs(latlng_map_origin_ne["y"] - latlng_map_current_point["y"])
-    point = ImagePixelCoordinates(x=diff_coord_x, y=diff_coord_y)
-    app_logger.debug(f"point type - {k}: {point}.")
-    return point
-
-
-def _from4326_to3857(lat: float, lon: float) -> TupleFloat or TupleFloatAny:
-    from math import radians, log, tan
-
-    x_tile: float = radians(lon) * EARTH_EQUATORIAL_RADIUS
-    y_tile: float = log(tan(radians(45 + lat / 2.0))) * EARTH_EQUATORIAL_RADIUS
-    return x_tile, y_tile
-
-
-def _deg2num(lat: float, lon: float, zoom: int):
-    from math import radians, pi, asinh, tan
-
-    n = 2 ** zoom
-    x_tile = ((lon + 180) / 360 * n)
-    y_tile = (1 - asinh(tan(radians(lat))) / pi) * n / 2
-    return x_tile, y_tile
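
For context, the removed module implements the standard Web Mercator latitude/longitude-to-pixel projection (a 256-pixel world plane scaled by 2**zoom). Below is a minimal, self-contained sketch of that formula; the function names are illustrative and are not part of the deleted package.

# Minimal sketch of the Web Mercator projection used by the deleted helpers above.
# TILE_SIZE matches the 256-pixel XYZ tile edge; function names are illustrative only.
from math import floor, log, pi, sin

TILE_SIZE = 256


def latlng_to_world_pixel(lat: float, lng: float) -> tuple[float, float]:
    """Project WGS84 lat/lng onto the 256x256 Web Mercator 'world' pixel plane."""
    sin_y = min(max(sin(lat * pi / 180), -0.9999), 0.9999)  # clamp to avoid infinities near the poles
    x = TILE_SIZE * (0.5 + lng / 360)
    y = TILE_SIZE * (0.5 - log((1 + sin_y) / (1 - sin_y)) / (4 * pi))
    return x, y


def latlng_to_pixel(lat: float, lng: float, zoom: int) -> tuple[int, int]:
    """Scale the world coordinate by 2**zoom and floor it, as the removed helper did."""
    x, y = latlng_to_world_pixel(lat, lng)
    scale = 2 ** zoom
    return floor(x * scale), floor(y * scale)


if __name__ == "__main__":
    print(latlng_to_pixel(45.464, 9.19, 10))  # a point near Milan at zoom 10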
samgis_lisa_on_zero/io_package/geo_helpers.py
DELETED
@@ -1,95 +0,0 @@
-"""handle geo-referenced raster images"""
-
-from affine import Affine
-from lisa_on_cuda.utils import session_logger
-from numpy import ndarray as np_ndarray
-
-from samgis_core.utilities.type_hints import ListFloat, TupleFloat, DictStrInt
-from samgis_lisa_on_zero import app_logger
-
-
-@session_logger.set_uuid_logging
-def load_affine_transformation_from_matrix(matrix_source_coefficients: ListFloat) -> Affine:
-    """
-    Wrapper for rasterio.Affine.from_gdal() method
-
-    Args:
-        matrix_source_coefficients: 6 floats ordered by GDAL.
-
-    Returns:
-        Affine transform
-    """
-
-    if len(matrix_source_coefficients) != 6:
-        raise ValueError(f"Expected 6 coefficients, found {len(matrix_source_coefficients)}; "
-                         f"argument type: {type(matrix_source_coefficients)}.")
-
-    try:
-        a, d, b, e, c, f = (float(x) for x in matrix_source_coefficients)
-        center = tuple.__new__(Affine, [a, b, c, d, e, f, 0.0, 0.0, 1.0])
-        return center * Affine.translation(-0.5, -0.5)
-    except Exception as e:
-        app_logger.exception(f"exception:{e}, check updates on https://github.com/rasterio/affine",
-                             extra=e,
-                             stack_info=True, exc_info=True)
-        raise e
-
-
-def get_affine_transform_from_gdal(matrix_source_coefficients: ListFloat or TupleFloat) -> Affine:
-    """wrapper for rasterio Affine from_gdal method
-
-    Args:
-        matrix_source_coefficients: 6 floats ordered by GDAL.
-
-    Returns:
-        Affine transform
-    """
-    return Affine.from_gdal(*matrix_source_coefficients)
-
-
-@session_logger.set_uuid_logging
-def get_vectorized_raster_as_geojson(mask: np_ndarray, transform: TupleFloat) -> DictStrInt:
-    """
-    Get shapes and values of connected regions in a dataset or array
-
-    Args:
-        mask: numpy mask
-        transform: tuple of float to transform into an Affine transform
-
-    Returns:
-        dict containing the output geojson and the predictions number
-    """
-    try:
-        from rasterio.features import shapes
-        from geopandas import GeoDataFrame
-
-        app_logger.debug(f"matrix to consume with rasterio.shapes: {type(transform)}, {transform}.")
-
-        # old value for mask => band != 0
-        shapes_generator = ({
-            'properties': {'raster_val': v}, 'geometry': s}
-            for i, (s, v)
-            # instead of `enumerate(shapes(mask, mask=(band != 0), transform=rio_src.transform))`
-            # use mask=None to avoid using source
-            in enumerate(shapes(mask, mask=None, transform=transform))
-        )
-        app_logger.info("created shapes_generator, transform it to a polygon list...")
-        shapes_list = list(shapes_generator)
-        app_logger.info(f"created {len(shapes_list)} polygons.")
-        gpd_polygonized_raster = GeoDataFrame.from_features(shapes_list, crs="EPSG:3857")
-        app_logger.info("created a GeoDataFrame, export to geojson...")
-        geojson = gpd_polygonized_raster.to_json(to_wgs84=True)
-        app_logger.info("created geojson, preparing API response...")
-        return {
-            "geojson": geojson,
-            "n_shapes_geojson": len(shapes_list)
-        }
-    except Exception as e_shape_band:
-        try:
-            app_logger.error(f"mask type:{type(mask)}.")
-            app_logger.error(f"transform type:{type(transform)}, {transform}.")
-            app_logger.error(f"mask shape:{mask.shape}, dtype:{mask.dtype}.")
-        except Exception as e_shape_dtype:
-            app_logger.exception(f"mask shape or dtype not found:{e_shape_dtype}.", exc_info=True)
-        app_logger.exception(f"e_shape_band:{e_shape_band}.", exc_info=True)
-        raise e_shape_band
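
The core of the removed get_vectorized_raster_as_geojson() is rasterio's shapes() generator fed into a GeoDataFrame. The standalone sketch below uses illustrative assumptions (a toy mask and an identity Affine); the real code also re-projected to WGS84 via to_json(to_wgs84=True), which is omitted here.

# Standalone sketch of the mask -> polygons -> GeoJSON flow removed above; the toy mask,
# the identity Affine and the EPSG code mirror the deleted function but are illustrative.
import numpy as np
from affine import Affine
from geopandas import GeoDataFrame
from rasterio.features import shapes

mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1  # a toy square "prediction"
transform = Affine.identity()  # the real code used the Affine of the downloaded tile mosaic

records = [
    {"properties": {"raster_val": value}, "geometry": geometry}
    for geometry, value in shapes(mask, mask=None, transform=transform)
]
gdf = GeoDataFrame.from_features(records, crs="EPSG:3857")
print(f"{len(records)} polygons")
print(gdf.to_json()[:120], "...")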
samgis_lisa_on_zero/io_package/raster_helpers.py
DELETED
@@ -1,343 +0,0 @@
-"""helpers for computer vision duties"""
-import logging
-
-import numpy as np
-from numpy import ndarray, bitwise_not
-from rasterio import open as rasterio_open
-
-from lisa_on_cuda.utils import session_logger
-from samgis_lisa_on_zero.utilities.constants import OUTPUT_CRS_STRING
-from samgis_lisa_on_zero.utilities.type_hints import XYZTerrainProvidersNames
-
-from samgis_lisa_on_zero import app_logger
-
-
-def get_nextzen_terrain_rgb_formula(red: ndarray, green: ndarray, blue: ndarray) -> ndarray:
-    """
-    Compute a 32-bits 2d digital elevation model from a nextzen 'terrarium' (terrain-rgb) raster.
-    'Terrarium' format PNG tiles contain raw elevation data in meters, in Mercator projection (EPSG:3857).
-    All values are positive with a 32,768 offset, split into the red, green, and blue channels,
-    with 16 bits of integer and 8 bits of fraction. To decode:
-
-        (red * 256 + green + blue / 256) - 32768
-
-    More details on https://www.mapzen.com/blog/elevation/
-
-    Args:
-        red: red-valued channel image array
-        green: green-valued channel image array
-        blue: blue-valued channel image array
-
-    Returns:
-        ndarray: nextzen 'terrarium' 2d digital elevation model raster at 32 bits
-
-    """
-    return (red * 256 + green + blue / 256) - 32768
-
-
-def get_mapbox__terrain_rgb_formula(red: ndarray, green: ndarray, blue: ndarray) -> ndarray:
-    return ((red * 256 * 256 + green * 256 + blue) * 0.1) - 10000
-
-
-providers_terrain_rgb_formulas = {
-    XYZTerrainProvidersNames.MAPBOX_TERRAIN_TILES_NAME: get_mapbox__terrain_rgb_formula,
-    XYZTerrainProvidersNames.NEXTZEN_TERRAIN_TILES_NAME: get_nextzen_terrain_rgb_formula
-}
-
-
-def _get_2d_array_from_3d(arr: ndarray) -> ndarray:
-    return arr.reshape(arr.shape[0], arr.shape[1])
-
-
-def _channel_split(arr: ndarray) -> list[ndarray]:
-    from numpy import dsplit
-
-    return dsplit(arr, arr.shape[-1])
-
-
-def get_raster_terrain_rgb_like(arr: ndarray, xyz_provider_name, nan_value_int: int = -12000):
-    """
-    Compute a 32-bits 2d digital elevation model from a terrain-rgb raster.
-
-    Args:
-        arr: rgb raster
-        xyz_provider_name: xyz provider
-        nan_value_int: threshold int value to replace NaN
-
-    Returns:
-        ndarray: 2d digital elevation model raster at 32 bits
-    """
-    red, green, blue = _channel_split(arr)
-    dem_rgb = providers_terrain_rgb_formulas[xyz_provider_name](red, green, blue)
-    output = _get_2d_array_from_3d(dem_rgb)
-    output[output < nan_value_int] = np.NaN
-    return output
-
-
-@session_logger.set_uuid_logging
-def get_rgb_prediction_image(raster_cropped: ndarray, slope_cellsize: int, invert_image: bool = True) -> ndarray:
-    """
-    Return an RGB image from input numpy array
-
-    Args:
-        raster_cropped: input numpy array
-        slope_cellsize: window size to calculate slope and curvature (1st and 2nd degree array derivative)
-        invert_image:
-
-    Returns:
-        tuple of str: image filename, image path (with filename)
-    """
-    from samgis_lisa_on_zero.utilities.constants import CHANNEL_EXAGGERATIONS_LIST
-
-    try:
-        slope, curvature = get_slope_curvature(raster_cropped, slope_cellsize=slope_cellsize)
-
-        channel0 = raster_cropped
-        channel1 = normalize_array_list(
-            [raster_cropped, slope, curvature], CHANNEL_EXAGGERATIONS_LIST, title="channel1_normlist")
-        channel2 = curvature
-
-        return get_rgb_image(channel0, channel1, channel2, invert_image=invert_image)
-    except ValueError as ve_get_rgb_prediction_image:
-        msg = f"ve_get_rgb_prediction_image:{ve_get_rgb_prediction_image}."
-        app_logger.error(msg)
-        raise ve_get_rgb_prediction_image
-
-
-@session_logger.set_uuid_logging
-def get_rgb_image(arr_channel0: ndarray, arr_channel1: ndarray, arr_channel2: ndarray,
-                  invert_image: bool = True) -> ndarray:
-    """
-    Return an RGB image from input R,G,B channel arrays
-
-    Args:
-        arr_channel0: channel image 0
-        arr_channel1: channel image 1
-        arr_channel2: channel image 2
-        invert_image: invert the RGB image channel order
-
-    Returns:
-        ndarray: RGB image
-
-    """
-    try:
-        # RED curvature, GREEN slope, BLUE dem, invert_image=True
-        if len(arr_channel0.shape) != 2:
-            msg = f"arr_size, wrong type:{type(arr_channel0)} or arr_size:{arr_channel0.shape}."
-            app_logger.error(msg)
-            raise ValueError(msg)
-        data_rgb = np.zeros((arr_channel0.shape[0], arr_channel0.shape[1], 3), dtype=np.uint8)
-        app_logger.debug(f"arr_container data_rgb, type:{type(data_rgb)}, arr_shape:{data_rgb.shape}.")
-        data_rgb[:, :, 0] = normalize_array(
-            arr_channel0.astype(float), high=1, norm_type="float", title="RGB:channel0") * 64
-        data_rgb[:, :, 1] = normalize_array(
-            arr_channel1.astype(float), high=1, norm_type="float", title="RGB:channel1") * 128
-        data_rgb[:, :, 2] = normalize_array(
-            arr_channel2.astype(float), high=1, norm_type="float", title="RGB:channel2") * 192
-        if invert_image:
-            app_logger.debug(f"data_rgb:{type(data_rgb)}, {data_rgb.dtype}.")
-            data_rgb = bitwise_not(data_rgb)
-        return data_rgb
-    except ValueError as ve_get_rgb_image:
-        msg = f"ve_get_rgb_image:{ve_get_rgb_image}."
-        app_logger.error(msg)
-        raise ve_get_rgb_image
-
-
-@session_logger.set_uuid_logging
-def get_slope_curvature(dem: ndarray, slope_cellsize: int, title: str = "") -> tuple[ndarray, ndarray]:
-    """
-    Return a tuple of two numpy arrays representing slope and curvature (1st grade derivative and 2nd grade derivative)
-
-    Args:
-        dem: input numpy array
-        slope_cellsize: window size to calculate slope and curvature
-        title: array name
-
-    Returns:
-        tuple of ndarrays: slope image, curvature image
-
-    """
-
-    app_logger.info(f"dem shape:{dem.shape}, slope_cellsize:{slope_cellsize}.")
-
-    try:
-        dem = dem.astype(float)
-        app_logger.debug("get_slope_curvature:: start")
-        slope = calculate_slope(dem, slope_cellsize)
-        app_logger.debug("get_slope_curvature:: created slope raster")
-        s2c = calculate_slope(slope, slope_cellsize)
-        curvature = normalize_array(s2c, norm_type="float", title=f"SC:curvature_{title}")
-        app_logger.debug("get_slope_curvature:: created curvature raster")
-
-        return slope, curvature
-    except ValueError as ve_get_slope_curvature:
-        msg = f"ve_get_slope_curvature:{ve_get_slope_curvature}."
-        app_logger.error(msg)
-        raise ve_get_slope_curvature
-
-
-@session_logger.set_uuid_logging
-def calculate_slope(dem_array: ndarray, cell_size: int, calctype: str = "degree") -> ndarray:
-    """
-    Return a numpy array representing slope (1st grade derivative)
-
-    Args:
-        dem_array: input numpy array
-        cell_size: window size to calculate slope
-        calctype: calculus type
-
-    Returns:
-        ndarray: slope image
-
-    """
-
-    try:
-        gradx, grady = np.gradient(dem_array, cell_size)
-        dem_slope = np.sqrt(gradx ** 2 + grady ** 2)
-        if calctype == "degree":
-            dem_slope = np.degrees(np.arctan(dem_slope))
-        app_logger.debug(f"extracted slope with calctype:{calctype}.")
-        return dem_slope
-    except ValueError as ve_calculate_slope:
-        msg = f"ve_calculate_slope:{ve_calculate_slope}."
-        app_logger.error(msg)
-        raise ve_calculate_slope
-
-
-@session_logger.set_uuid_logging
-def normalize_array(arr: ndarray, high: int = 255, norm_type: str = "float", invert: bool = False, title: str = "") -> ndarray:
-    """
-    Return normalized numpy array between 0 and 'high' value. Default normalization type is int
-
-    Args:
-        arr: input numpy array
-        high: max value to use for normalization
-        norm_type: type of normalization: could be 'float' or 'int'
-        invert: bool to choose if invert the normalized numpy array
-        title: array title name
-
-    Returns:
-        ndarray: normalized numpy array
-
-    """
-    np.seterr("raise")
-
-    h_min_arr = np.nanmin(arr)
-    h_arr_max = np.nanmax(arr)
-    try:
-        h_diff = h_arr_max - h_min_arr
-        app_logger.debug(
-            f"normalize_array:: '{title}',h_min_arr:{h_min_arr},h_arr_max:{h_arr_max},h_diff:{h_diff}, dtype:{arr.dtype}.")
-    except Exception as e_h_diff:
-        app_logger.error(f"e_h_diff:{e_h_diff}.")
-        raise ValueError(e_h_diff)
-
-    if check_empty_array(arr, high) or check_empty_array(arr, h_diff):
-        msg_ve = f"normalize_array::empty array '{title}',h_min_arr:{h_min_arr},h_arr_max:{h_arr_max},h_diff:{h_diff}, dtype:{arr.dtype}."
-        app_logger.error(msg_ve)
-        raise ValueError(msg_ve)
-    try:
-        normalized = high * (arr - h_min_arr) / h_diff
-        normalized = np.nanmax(normalized) - normalized if invert else normalized
-        return normalized.astype(int) if norm_type == "int" else normalized
-    except FloatingPointError as fe:
-        msg = f"normalize_array::{title}:h_arr_max:{h_arr_max},h_min_arr:{h_min_arr},fe:{fe}."
-        app_logger.error(msg)
-        raise ValueError(msg)
-
-
-@session_logger.set_uuid_logging
-def normalize_array_list(arr_list: list[ndarray], exaggerations_list: list[float] = None, title: str = "") -> ndarray:
-    """
-    Return a normalized numpy array from a list of numpy array and an optional list of exaggeration values.
-
-    Args:
-        arr_list: list of array to use for normalization
-        exaggerations_list: list of exaggeration values
-        title: array title name
-
-    Returns:
-        ndarray: normalized numpy array
-
-    """
-
-    if not arr_list:
-        msg = f"input list can't be empty:{arr_list}."
-        app_logger.error(msg)
-        raise ValueError(msg)
-    if exaggerations_list is None:
-        exaggerations_list = list(np.ones(len(arr_list)))
-    arr_tmp = np.zeros(arr_list[0].shape)
-    for a, exaggeration in zip(arr_list, exaggerations_list):
-        app_logger.debug(f"normalize_array_list::exaggeration:{exaggeration}.")
-        arr_tmp += normalize_array(a, norm_type="float", title=f"ARRLIST:{title}.") * exaggeration
-    return arr_tmp / len(arr_list)
-
-
-@session_logger.set_uuid_logging
-def check_empty_array(arr: ndarray, val: float) -> bool:
-    """
-    Return True if the input numpy array is empy. Check if
-    - all values are all the same value (0, 1 or given 'val' input float value)
-    - all values that are not NaN are a given 'val' float value
-
-    Args:
-        arr: input numpy array
-        val: value to use for check if array is empty
-
-    Returns:
-        bool: True if the input numpy array is empty, False otherwise
-
-    """
-
-    arr_check5_tmp = np.copy(arr)
-    arr_size = arr.shape[0]
-    arr_check3 = np.ones((arr_size, arr_size))
-    check1 = np.array_equal(arr, arr_check3)
-    check2 = np.array_equal(arr, np.zeros((arr_size, arr_size)))
-    arr_check3 *= val
-    check3 = np.array_equal(arr, arr_check3)
-    arr[np.isnan(arr)] = 0
-    check4 = np.array_equal(arr, np.zeros((arr_size, arr_size)))
-    arr_check5 = np.ones((arr_size, arr_size)) * val
-    arr_check5_tmp[np.isnan(arr_check5_tmp)] = val
-    check5 = np.array_equal(arr_check5_tmp, arr_check5)
-    app_logger.debug(f"array checks:{check1}, {check2}, {check3}, {check4}, {check5}.")
-    return check1 or check2 or check3 or check4 or check5
-
-
-@session_logger.set_uuid_logging
-def write_raster_png(arr, transform, prefix: str, suffix: str, folder_output_path="/tmp"):
-    from pathlib import Path
-    from rasterio.plot import reshape_as_raster
-
-    output_filename = Path(folder_output_path) / f"{prefix}_{suffix}.png"
-
-    with rasterio_open(
-            output_filename, 'w', driver='PNG',
-            height=arr.shape[0],
-            width=arr.shape[1],
-            count=3,
-            dtype=str(arr.dtype),
-            crs=OUTPUT_CRS_STRING,
-            transform=transform) as dst:
-        dst.write(reshape_as_raster(arr))
-    app_logger.info(f"written:{output_filename} as PNG, use {OUTPUT_CRS_STRING} as CRS.")
-
-
-@session_logger.set_uuid_logging
-def write_raster_tiff(arr, transform, prefix: str, suffix: str, folder_output_path="/tmp"):
-    from pathlib import Path
-    output_filename = Path(folder_output_path) / f"{prefix}_{suffix}.tiff"
-
-    with rasterio_open(
-            output_filename, 'w', driver='GTiff',
-            height=arr.shape[0],
-            width=arr.shape[1],
-            count=1,
-            dtype=str(arr.dtype),
-            crs=OUTPUT_CRS_STRING,
-            transform=transform) as dst:
-        dst.write(arr, 1)
-    app_logger.info(f"written:{output_filename} as TIFF, use {OUTPUT_CRS_STRING} as CRS.")
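
For reference, the two terrain-rgb decoding formulas from the removed module, shown standalone; the sample values below are illustrative.

# The nextzen 'terrarium' and Mapbox terrain-rgb decoding formulas from the deleted module.
import numpy as np


def terrarium_to_dem(red, green, blue):
    # nextzen 'terrarium' tiles: elevation in meters with a 32768 offset split across R, G, B
    return (red * 256 + green + blue / 256) - 32768


def mapbox_terrain_rgb_to_dem(red, green, blue):
    # Mapbox terrain-rgb tiles: 0.1 m resolution with a -10000 m offset
    return ((red * 256 * 256 + green * 256 + blue) * 0.1) - 10000


if __name__ == "__main__":
    r, g, b = np.array([128.0]), np.array([0.0]), np.array([0.0])
    print(terrarium_to_dem(r, g, b))  # [0.] -> sea level, since 128 * 256 == 32768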
samgis_lisa_on_zero/io_package/tms2geotiff.py
DELETED
@@ -1,187 +0,0 @@
-import os
-
-from lisa_on_cuda.utils import session_logger
-from numpy import ndarray
-from samgis_core.utilities.type_hints import TupleFloat
-from xyzservices import TileProvider
-
-from samgis_lisa_on_zero import app_logger
-from samgis_lisa_on_zero.utilities.constants import (OUTPUT_CRS_STRING, DRIVER_RASTERIO_GTIFF, N_MAX_RETRIES,
-                                                     N_CONNECTION, N_WAIT,
-                                                     ZOOM_AUTO, BOOL_USE_CACHE)
-from samgis_lisa_on_zero.utilities.type_hints import tuple_ndarray_transform
-
-
-bool_use_cache = int(os.getenv("BOOL_USE_CACHE", BOOL_USE_CACHE))
-n_connection = int(os.getenv("N_CONNECTION", N_CONNECTION))
-n_max_retries = int(os.getenv("N_MAX_RETRIES", N_MAX_RETRIES))
-n_wait = int(os.getenv("N_WAIT", N_WAIT))
-zoom_auto_string = os.getenv("ZOOM_AUTO", ZOOM_AUTO)
-
-
-@session_logger.set_uuid_logging
-def download_extent(w: float, s: float, e: float, n: float, zoom: int or str = zoom_auto_string,
-                    source: TileProvider or str = None,
-                    wait: int = n_wait, max_retries: int = n_max_retries, n_connections: int = n_connection,
-                    use_cache: bool = bool_use_cache) -> tuple_ndarray_transform:
-    """
-    Download, merge and crop a list of tiles into a single geo-referenced image or a raster geodata
-
-    Args:
-        w: West edge
-        s: South edge
-        e: East edge
-        n: North edge
-        zoom: Level of detail
-        source: The tile source: web tile provider or path to local file. The web tile provider can be in the form of
-            a :class:`xyzservices.TileProvider` object or a URL. The placeholders for the XYZ in the URL need to be
-            `{x}`, `{y}`, `{z}`, respectively. For local file paths, the file is read with `rasterio` and all bands are
-            loaded into the basemap. IMPORTANT: tiles are assumed to be in the Spherical Mercator projection
-            (EPSG:3857), unless the `crs` keyword is specified.
-        wait: if the tile API is rate-limited, the number of seconds to wait
-            between a failed request and the next try
-        max_retries: total number of rejected requests allowed before contextily will stop trying to fetch more tiles
-            from a rate-limited API.
-        n_connections: Number of connections for downloading tiles in parallel. Be careful not to overload the tile
-            server and to check the tile provider's terms of use before increasing this value. E.g., OpenStreetMap has
-            a max. value of 2 (https://operations.osmfoundation.org/policies/tiles/). If allowed to download in
-            parallel, a recommended value for n_connections is 16, and should never be larger than 64.
-        use_cache: If False, caching of the downloaded tiles will be disabled. This can be useful in resource
-            constrained environments, especially when using n_connections > 1, or when a tile provider's terms of use
-            don't allow caching.
-
-    Returns:
-        parsed request input
-    """
-    try:
-        from samgis_lisa_on_zero import contextily_tile
-        from samgis_lisa_on_zero.io_package.coordinates_pixel_conversion import _from4326_to3857
-
-        app_logger.info(f"connection number:{n_connections}, type:{type(n_connections)}.")
-        app_logger.info(f"zoom:{zoom}, type:{type(zoom)}.")
-        app_logger.debug(f"download raster from source:{source} with bounding box w:{w}, s:{s}, e:{e}, n:{n}.")
-        app_logger.debug(f"types w:{type(w)}, s:{type(s)}, e:{type(e)}, n:{type(n)}.")
-        downloaded_raster, bbox_raster = contextily_tile.bounds2img(
-            w, s, e, n, zoom=zoom, source=source, ll=True, wait=wait, max_retries=max_retries,
-            n_connections=n_connections, use_cache=use_cache)
-        xp0, yp0 = _from4326_to3857(n, e)
-        xp1, yp1 = _from4326_to3857(s, w)
-        cropped_image_ndarray, cropped_transform = crop_raster(yp1, xp1, yp0, xp0, downloaded_raster, bbox_raster)
-        return cropped_image_ndarray, cropped_transform
-    except Exception as e_download_extent:
-        app_logger.exception(f"e_download_extent:{e_download_extent}.", exc_info=True)
-        raise e_download_extent
-
-
-@session_logger.set_uuid_logging
-def crop_raster(w: float, s: float, e: float, n: float, raster: ndarray, raster_bbox: TupleFloat,
-                crs: str = OUTPUT_CRS_STRING, driver: str = DRIVER_RASTERIO_GTIFF) -> tuple_ndarray_transform:
-    """
-    Crop a raster using given bounding box (w, s, e, n) values
-
-    Args:
-        w: cropping west edge
-        s: cropping south edge
-        e: cropping east edge
-        n: cropping north edge
-        raster: raster image to crop
-        raster_bbox: bounding box of raster to crop
-        crs: The coordinate reference system. Required in 'w' or 'w+' modes, it is ignored in 'r' or 'r+' modes.
-        driver: A short format driver name (e.g. "GTiff" or "JPEG") or a list of such names (see GDAL docs at
-            https://gdal.org/drivers/raster/index.html ). In 'w' or 'w+' modes a single name is required. In 'r' or 'r+'
-            modes the driver can usually be omitted. Registered drivers will be tried sequentially until a match is
-            found. When multiple drivers are available for a format such as JPEG2000, one of them can be selected by
-            using this keyword argument.
-
-    Returns:
-        cropped raster with its Affine transform
-    """
-    try:
-        from rasterio.io import MemoryFile
-        from rasterio.mask import mask as rio_mask
-        from shapely.geometry import Polygon
-        from geopandas import GeoSeries
-
-        app_logger.debug(f"raster: type {type(raster)}, raster_ext:{type(raster_bbox)}, {raster_bbox}.")
-        img_to_save, transform = get_transform_raster(raster, raster_bbox)
-        img_height, img_width, number_bands = img_to_save.shape
-        # https://rasterio.readthedocs.io/en/latest/topics/memory-files.html
-        with MemoryFile() as rio_mem_file:
-            app_logger.debug("writing raster in-memory to crop it with rasterio.mask.mask()")
-            with rio_mem_file.open(
-                    driver=driver,
-                    height=img_height,
-                    width=img_width,
-                    count=number_bands,
-                    dtype=str(img_to_save.dtype.name),
-                    crs=crs,
-                    transform=transform,
-            ) as src_raster_rw:
-                for band in range(number_bands):
-                    src_raster_rw.write(img_to_save[:, :, band], band + 1)
-            app_logger.debug("cropping raster in-memory with rasterio.mask.mask()")
-            with rio_mem_file.open() as src_raster_ro:
-                shapes_crop_polygon = Polygon([(n, e), (s, e), (s, w), (n, w), (n, e)])
-                shapes_crop = GeoSeries([shapes_crop_polygon])
-                app_logger.debug(f"cropping with polygon::{shapes_crop_polygon}.")
-                cropped_image, cropped_transform = rio_mask(src_raster_ro, shapes=shapes_crop, crop=True)
-                cropped_image_ndarray = reshape_as_image(cropped_image)
-        app_logger.info(f"cropped image::{cropped_image_ndarray.shape}.")
-        return cropped_image_ndarray, cropped_transform
-    except Exception as e_crop_raster:
-        try:
-            app_logger.error(f"raster type:{type(raster)}.")
-            app_logger.error(f"raster shape:{raster.shape}, dtype:{raster.dtype}.")
-        except Exception as e_shape_dtype:
-            app_logger.exception(f"raster shape or dtype not found:{e_shape_dtype}.", exc_info=True)
-        app_logger.exception(f"e_crop_raster:{e_crop_raster}.", exc_info=True)
-        raise e_crop_raster
-
-
-@session_logger.set_uuid_logging
-def get_transform_raster(raster: ndarray, raster_bbox: TupleFloat) -> tuple_ndarray_transform:
-    """
-    Convert the input raster image to RGB and extract the Affine
-
-    Args:
-        raster: raster image to geo-reference
-        raster_bbox: bounding box of raster to crop
-
-    Returns:
-        rgb raster image and its Affine transform
-    """
-    try:
-        from rasterio.transform import from_origin
-        from numpy import array as np_array, linspace as np_linspace, uint8 as np_uint8
-        from PIL.Image import fromarray
-
-        app_logger.debug(f"raster: type {type(raster)}, raster_ext:{type(raster_bbox)}, {raster_bbox}.")
-        rgb = fromarray(np_uint8(raster)).convert('RGB')
-        np_rgb = np_array(rgb)
-        img_height, img_width, _ = np_rgb.shape
-
-        min_x, max_x, min_y, max_y = raster_bbox
-        app_logger.debug(f"raster rgb shape:{np_rgb.shape}, raster rgb bbox {raster_bbox}.")
-        x = np_linspace(min_x, max_x, img_width)
-        y = np_linspace(min_y, max_y, img_height)
-        res_x = (x[-1] - x[0]) / img_width
-        res_y = (y[-1] - y[0]) / img_height
-        transform = from_origin(x[0] - res_x / 2, y[-1] + res_y / 2, res_x, res_y)
-        return np_rgb, transform
-    except Exception as e_get_transform_raster:
-        app_logger.error(f"arguments raster: {type(raster)}, {raster}.")
-        app_logger.error(f"arguments raster_bbox: {type(raster_bbox)}, {raster_bbox}.")
-        app_logger.exception(f"e_get_transform_raster:{e_get_transform_raster}.", exc_info=True)
-        raise e_get_transform_raster
-
-
-@session_logger.set_uuid_logging
-def reshape_as_image(arr):
-    try:
-        from numpy import swapaxes
-
-        return swapaxes(swapaxes(arr, 0, 2), 0, 1)
-    except Exception as e_reshape_as_image:
-        app_logger.error(f"arguments: {type(arr)}, {arr}.")
-        app_logger.exception(f"e_reshape_as_image:{e_reshape_as_image}.", exc_info=True)
-        raise e_reshape_as_image
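
The removed download_extent() is a thin wrapper around contextily's bounds2img(). Below is a hedged usage sketch; the bounding box and tile provider are illustrative, and keyword availability can vary with the installed contextily version.

# Hedged usage sketch of the tile download that the deleted download_extent() wrapped.
# contextily.bounds2img() fetches and mosaics XYZ tiles; ll=True means the bounds are
# given as lon/lat (EPSG:4326) rather than Web Mercator.
import contextily as cx

west, south, east, north = 9.10, 45.40, 9.30, 45.55  # illustrative bbox around Milan
img, extent = cx.bounds2img(
    west, south, east, north, zoom=12,
    source=cx.providers.OpenStreetMap.Mapnik, ll=True,
)
print(img.shape)   # (height, width, bands) mosaic as a numpy array
print(extent)      # (min_x, max_x, min_y, max_y) in EPSG:3857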
samgis_lisa_on_zero/io_package/wrappers_helpers.py
DELETED
@@ -1,195 +0,0 @@
-"""lambda helper functions"""
-from typing import Dict
-
-from lisa_on_cuda.utils import session_logger
-from lisa_on_cuda.utils.app_helpers import get_cleaned_input
-from xyzservices import providers, TileProvider
-
-from samgis_lisa_on_zero import app_logger
-from samgis_lisa_on_zero.io_package.coordinates_pixel_conversion import get_latlng_to_pixel_coordinates
-from samgis_lisa_on_zero.utilities.constants import COMPLETE_URL_TILES_MAPBOX, COMPLETE_URL_TILES_NEXTZEN
-from samgis_lisa_on_zero.utilities.type_hints import (
-    ApiRequestBody, XYZTerrainProvidersNames, XYZDefaultProvidersNames, StringPromptApiRequestBody)
-
-
-@session_logger.set_uuid_logging
-def get_parsed_bbox_points_with_string_prompt(request_input: StringPromptApiRequestBody) -> Dict:
-    """
-    Parse the raw input request into bbox, prompt string and zoom
-
-    Args:
-        request_input: input dict
-
-    Returns:
-        dict with bounding box, prompt string and zoom
-    """
-
-    app_logger.info(f"try to parsing input request: {type(request_input)}, {request_input}...")
-    if isinstance(request_input, str):
-        app_logger.info(f"string/json input, parsing it to {type(StringPromptApiRequestBody)}...")
-        request_input = StringPromptApiRequestBody.model_validate_json(request_input)
-        app_logger.info(f"parsed input, now of type {type(request_input)}...")
-
-    bbox = request_input.bbox
-    app_logger.debug(f"request bbox: {type(bbox)}, value:{bbox}.")
-    ne = bbox.ne
-    sw = bbox.sw
-    app_logger.debug(f"request ne: {type(ne)}, value:{ne}.")
-    app_logger.debug(f"request sw: {type(sw)}, value:{sw}.")
-    ne_latlng = [float(ne.lat), float(ne.lng)]
-    sw_latlng = [float(sw.lat), float(sw.lng)]
-    new_zoom = int(request_input.zoom)
-    cleaned_prompt = get_cleaned_input(request_input.string_prompt)
-
-    app_logger.debug(f"bbox => {bbox}.")
-    app_logger.debug(f'request_input-prompt cleaned => {cleaned_prompt}.')
-
-    app_logger.info("unpacking elaborated request...")
-    return {
-        "bbox": [ne_latlng, sw_latlng],
-        "prompt": cleaned_prompt,
-        "zoom": new_zoom,
-        "source": get_url_tile(request_input.source_type),
-        "source_name": get_source_name(request_input.source_type)
-    }
-
-
-@session_logger.set_uuid_logging
-def get_parsed_bbox_points_with_dictlist_prompt(request_input: ApiRequestBody) -> Dict:
-    """
-    Parse the raw input request into bbox, prompt and zoom
-
-    Args:
-        request_input: input dict
-
-    Returns:
-        dict with bounding box, prompt and zoom
-    """
-
-    app_logger.info(f"try to parsing input request {request_input}...")
-
-    bbox = request_input.bbox
-    app_logger.debug(f"request bbox: {type(bbox)}, value:{bbox}.")
-    ne = bbox.ne
-    sw = bbox.sw
-    app_logger.debug(f"request ne: {type(ne)}, value:{ne}.")
-    app_logger.debug(f"request sw: {type(sw)}, value:{sw}.")
-    ne_latlng = [float(ne.lat), float(ne.lng)]
-    sw_latlng = [float(sw.lat), float(sw.lng)]
-    new_zoom = int(request_input.zoom)
-    new_prompt_list = _get_parsed_prompt_list(ne, sw, new_zoom, request_input.prompt)
-
-    app_logger.debug(f"bbox => {bbox}.")
-    app_logger.debug(f'request_input-prompt updated => {new_prompt_list}.')
-
-    app_logger.info("unpacking elaborated request...")
-    return {
-        "bbox": [ne_latlng, sw_latlng],
-        "prompt": new_prompt_list,
-        "zoom": new_zoom,
-        "source": get_url_tile(request_input.source_type),
-        "source_name": get_source_name(request_input.source_type)
-    }
-
-
-@session_logger.set_uuid_logging
-def _get_parsed_prompt_list(bbox_ne, bbox_sw, zoom, prompt_list):
-    new_prompt_list = []
-    for prompt in prompt_list:
-        app_logger.debug(f"current prompt: {type(prompt)}, value:{prompt}.")
-        new_prompt = {"type": prompt.type.value}
-        if prompt.type == "point":
-            new_prompt_data = _get_new_prompt_data_point(bbox_ne, bbox_sw, prompt, zoom)
-            new_prompt["label"] = prompt.label.value
-        elif prompt.type == "rectangle":
-            new_prompt_data = _get_new_prompt_data_rectangle(bbox_ne, bbox_sw, prompt, zoom)
-        else:
-            msg = "Valid prompt type: 'point' or 'rectangle', not '{}'. Check ApiRequestBody parsing/validation."
-            raise TypeError(msg.format(prompt.type))
-        app_logger.debug(f"new_prompt_data: {type(new_prompt_data)}, value:{new_prompt_data}.")
-        new_prompt["data"] = new_prompt_data
-        new_prompt_list.append(new_prompt)
-    return new_prompt_list
-
-
-@session_logger.set_uuid_logging
-def _get_new_prompt_data_point(bbox_ne, bbox_sw, prompt, zoom):
-    current_point = get_latlng_to_pixel_coordinates(bbox_ne, bbox_sw, prompt.data, zoom, prompt.type)
-    app_logger.debug(f"current prompt: {type(current_point)}, value:{current_point}, label: {prompt.label}.")
-    return [current_point['x'], current_point['y']]
-
-
-@session_logger.set_uuid_logging
-def _get_new_prompt_data_rectangle(bbox_ne, bbox_sw, prompt, zoom):
-    current_point_ne = get_latlng_to_pixel_coordinates(bbox_ne, bbox_sw, prompt.data.ne, zoom, prompt.type)
-    app_logger.debug(
-        f"rectangle:: current_point_ne prompt: {type(current_point_ne)}, value:{current_point_ne}.")
-    current_point_sw = get_latlng_to_pixel_coordinates(bbox_ne, bbox_sw, prompt.data.sw, zoom, prompt.type)
-    app_logger.debug(
-        f"rectangle:: current_point_sw prompt: {type(current_point_sw)}, value:{current_point_sw}.")
-    # correct order for rectangle prompt
-    return [
-        current_point_sw["x"],
-        current_point_ne["y"],
-        current_point_ne["x"],
-        current_point_sw["y"]
-    ]
-
-
-mapbox_terrain_rgb = TileProvider(
-    name=XYZTerrainProvidersNames.MAPBOX_TERRAIN_TILES_NAME,
-    url=COMPLETE_URL_TILES_MAPBOX,
-    attribution=""
-)
-nextzen_terrain_rgb = TileProvider(
-    name=XYZTerrainProvidersNames.NEXTZEN_TERRAIN_TILES_NAME,
-    url=COMPLETE_URL_TILES_NEXTZEN,
-    attribution=""
-)
-
-
-@session_logger.set_uuid_logging
-def get_url_tile(source_type: str):
-    try:
-        match source_type.lower():
-            case XYZDefaultProvidersNames.DEFAULT_TILES_NAME_SHORT:
-                return providers.query_name(XYZDefaultProvidersNames.DEFAULT_TILES_NAME)
-            case XYZTerrainProvidersNames.MAPBOX_TERRAIN_TILES_NAME:
-                return mapbox_terrain_rgb
-            case XYZTerrainProvidersNames.NEXTZEN_TERRAIN_TILES_NAME:
-                app_logger.info("nextzen_terrain_rgb:", nextzen_terrain_rgb)
-                return nextzen_terrain_rgb
-            case _:
-                return providers.query_name(source_type)
-    except ValueError as ve:
-        from pydantic_core import ValidationError
-
-        app_logger.error("ve:", str(ve))
-        raise ValidationError(ve)
-
-
-def check_source_type_is_terrain(source: str | TileProvider):
-    return isinstance(source, TileProvider) and source.name in list(XYZTerrainProvidersNames)
-
-
-@session_logger.set_uuid_logging
-def get_source_name(source: str | TileProvider) -> str | bool:
-    try:
-        match source.lower():
-            case XYZDefaultProvidersNames.DEFAULT_TILES_NAME_SHORT:
-                source_output = providers.query_name(XYZDefaultProvidersNames.DEFAULT_TILES_NAME)
-            case _:
-                source_output = providers.query_name(source)
-        if isinstance(source_output, str):
-            return source_output
-        try:
-            source_dict = dict(source_output)
-            app_logger.info(f"source_dict:{type(source_dict)}, {'name' in source_dict}, source_dict:{source_dict}.")
-            return source_dict["name"]
-        except KeyError as ke:
-            app_logger.error(f"ke:{ke}.")
-    except ValueError as ve:
-        app_logger.info(f"source name::{source}, ve:{ve}.")
-    app_logger.info(f"source name::{source}.")
-
-    return False
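
The removed module built custom XYZ providers with xyzservices.TileProvider and looked bundled ones up via providers.query_name(). A short sketch under illustrative assumptions (the provider name and URL below are placeholders, not the project's endpoints):

# Sketch of defining and querying XYZ tile providers with xyzservices, as the deleted
# wrappers_helpers did for its terrain-rgb endpoints; name and URL are placeholders.
from xyzservices import TileProvider, providers

custom_terrain_rgb = TileProvider(
    name="custom.terrain-rgb",                        # hypothetical provider name
    url="https://tiles.example.com/{z}/{x}/{y}.png",  # hypothetical endpoint
    attribution="",
)
osm = providers.query_name("OpenStreetMap.Mapnik")    # look up a bundled provider by name
print(custom_terrain_rgb.name, osm["name"])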
samgis_lisa_on_zero/prediction_api/__init__.py
DELETED
@@ -1 +0,0 @@
-"""functions useful to handle machine learning models"""
samgis_lisa_on_zero/prediction_api/global_models.py
DELETED
@@ -1,6 +0,0 @@
-models_dict = {
-    "fastsam": {"instance": None},
-    "lisa": {"inference": None}
-}
-embedding_dict = {}
-inference_fn_dict = {}
samgis_lisa_on_zero/prediction_api/lisa.py
DELETED
@@ -1,107 +0,0 @@
-from datetime import datetime
-
-from lisa_on_cuda.utils import session_logger
-from samgis_core.utilities.type_hints import LlistFloat, DictStrInt
-from spaces import GPU as SPACES_GPU
-
-from samgis_lisa_on_zero import app_logger
-from samgis_lisa_on_zero.io_package.geo_helpers import get_vectorized_raster_as_geojson
-from samgis_lisa_on_zero.io_package.raster_helpers import write_raster_png, write_raster_tiff
-from samgis_lisa_on_zero.io_package.tms2geotiff import download_extent
-from samgis_lisa_on_zero.utilities.constants import DEFAULT_URL_TILES, LISA_INFERENCE_FN
-
-msg_write_tmp_on_disk = "found option to write images and geojson output..."
-
-
-@session_logger.set_uuid_logging
-def load_model_and_inference_fn(inference_function_name_key: str):
-    from lisa_on_cuda.utils import app_helpers
-    from samgis_lisa_on_zero.prediction_api.global_models import models_dict
-
-    if models_dict[inference_function_name_key]["inference"] is None:
-        msg = f"missing inference function {inference_function_name_key}, "
-        msg += f"instantiating it now using inference_decorator {SPACES_GPU}!"
-        app_logger.info(msg)
-        parsed_args = app_helpers.parse_args([])
-        inference_fn = app_helpers.get_inference_model_by_args(
-            parsed_args,
-            internal_logger0=app_logger,
-            inference_decorator=SPACES_GPU
-        )
-        models_dict[inference_function_name_key]["inference"] = inference_fn
-
-
-@session_logger.set_uuid_logging
-def lisa_predict(
-        bbox: LlistFloat,
-        prompt: str,
-        zoom: float,
-        inference_function_name_key: str = LISA_INFERENCE_FN,
-        source: str = DEFAULT_URL_TILES,
-        source_name: str = None
-) -> DictStrInt:
-    """
-    Return predictions as a geojson from a geo-referenced image using the given input prompt.
-
-    1. if necessary instantiate a segment anything machine learning instance model
-    2. download a geo-referenced raster image delimited by the coordinates bounding box (bbox)
-    3. get a prediction image from the segment anything instance model using the input prompt
-    4. get a geo-referenced geojson from the prediction image
-
-    Args:
-        bbox: coordinates bounding box
-        prompt: machine learning input prompt
-        zoom: Level of detail
-        inference_function_name_key: machine learning model name
-        source: xyz
-        source_name: name of tile provider
-
-    Returns:
-        Affine transform
-    """
-    from os import getenv
-    from samgis_lisa_on_zero.prediction_api.global_models import models_dict
-
-    if source_name is None:
-        source_name = str(source)
-
-    app_logger.info("start lisa inference...")
-    app_logger.debug(f"type(source):{type(source)}, source:{source},")
-    app_logger.debug(f"type(source_name):{type(source_name)}, source_name:{source_name}.")
-
-    load_model_and_inference_fn(inference_function_name_key)
-    app_logger.debug(f"using a '{inference_function_name_key}' instance model...")
-    inference_fn = models_dict[inference_function_name_key]["inference"]
-    app_logger.info(f"loaded inference function '{inference_fn.__name__}'.")
-
-    pt0, pt1 = bbox
-    app_logger.info(f"tile_source: {source}: downloading geo-referenced raster with bbox {bbox}, zoom {zoom}.")
-    img, transform = download_extent(w=pt1[1], s=pt1[0], e=pt0[1], n=pt0[0], zoom=zoom, source=source)
-    app_logger.info(
-        f"img type {type(img)} with shape/size:{img.size}, transform type: {type(transform)}, transform:{transform}.")
-    folder_write_tmp_on_disk = getenv("WRITE_TMP_ON_DISK", "")
-    prefix = f"w{pt1[1]},s{pt1[0]},e{pt0[1]},n{pt0[0]}_"
-    if bool(folder_write_tmp_on_disk):
-        now = datetime.now().strftime('%Y%m%d_%H%M%S')
-        app_logger.info(msg_write_tmp_on_disk + f"with coords {prefix}, shape:{img.shape}, {len(img.shape)}.")
-        if img.shape and len(img.shape) == 2:
-            write_raster_tiff(img, transform, f"{source_name}_{prefix}_{now}_", f"raw_tiff", folder_write_tmp_on_disk)
-        if img.shape and len(img.shape) == 3 and img.shape[2] == 3:
-            write_raster_png(img, transform, f"{source_name}_{prefix}_{now}_", f"raw_img", folder_write_tmp_on_disk)
-    else:
-        app_logger.info("keep all temp data in memory...")
-
-    app_logger.info(f"lisa_zero, source_name:{source_name}, source_name type:{type(source_name)}.")
-    app_logger.info(f"lisa_zero, prompt type:{type(prompt)}.")
-    app_logger.info(f"lisa_zero, prompt:{prompt}.")
-    prompt_str = str(prompt)
-    app_logger.info(f"lisa_zero, img type:{type(img)}.")
-    embedding_key = f"{source_name}_z{zoom}_{prefix}"
-    _, mask, output_string = inference_fn(input_str=prompt_str, input_image=img, embedding_key=embedding_key)
-    app_logger.info(f"lisa_zero, output_string type:{type(output_string)}.")
-    app_logger.info(f"lisa_zero, mask_output type:{type(mask)}.")
-    app_logger.info(f"created output_string '{output_string}', preparing conversion to geojson...")
-    return {
-        "output_string": output_string,
-        **get_vectorized_raster_as_geojson(mask, transform)
-    }
samgis_lisa_on_zero/prediction_api/predictors.py
DELETED
@@ -1,77 +0,0 @@
-"""functions using machine learning instance model(s)"""
-
-from lisa_on_cuda.utils import session_logger
-from samgis_core.prediction_api import sam_onnx2, sam_onnx_inference
-from samgis_core.utilities.constants import MODEL_ENCODER_NAME, MODEL_DECODER_NAME, DEFAULT_INPUT_SHAPE
-from samgis_core.utilities.type_hints import LlistFloat, DictStrInt, ListDict
-
-from samgis_lisa_on_zero import MODEL_FOLDER
-from samgis_lisa_on_zero import app_logger
-from samgis_lisa_on_zero.io_package.geo_helpers import get_vectorized_raster_as_geojson
-from samgis_lisa_on_zero.io_package.raster_helpers import get_raster_terrain_rgb_like, get_rgb_prediction_image
-from samgis_lisa_on_zero.io_package.tms2geotiff import download_extent
-from samgis_lisa_on_zero.io_package.wrappers_helpers import check_source_type_is_terrain
-from samgis_lisa_on_zero.prediction_api.global_models import models_dict, embedding_dict
-from samgis_lisa_on_zero.utilities.constants import DEFAULT_URL_TILES, SLOPE_CELLSIZE
-
-
-@session_logger.set_uuid_logging
-def samexporter_predict(
-        bbox: LlistFloat,
-        prompt: ListDict,
-        zoom: float,
-        model_name_key: str = "fastsam",
-        source: str = DEFAULT_URL_TILES,
-        source_name: str = None
-) -> DictStrInt:
-    """
-    Return predictions as a geojson from a geo-referenced image using the given input prompt.
-
-    1. if necessary instantiate a segment anything machine learning instance model
-    2. download a geo-referenced raster image delimited by the coordinates bounding box (bbox)
-    3. get a prediction image from the segment anything instance model using the input prompt
-    4. get a geo-referenced geojson from the prediction image
-
-    Args:
-        bbox: coordinates bounding box
-        prompt: machine learning input prompt
-        zoom: level of detail
-        model_name_key: machine learning model name
-        source: xyz tile provider url template
-        source_name: name of the tile provider
-
-    Returns:
-        Dict with the number of predictions and the geojson vectorized from the prediction mask
-    """
-    if models_dict[model_name_key]["instance"] is None:
-        app_logger.info(f"missing instance model {model_name_key}, instantiating it now!")
-        model_instance = sam_onnx2.SegmentAnythingONNX2(
-            encoder_model_path=MODEL_FOLDER / MODEL_ENCODER_NAME,
-            decoder_model_path=MODEL_FOLDER / MODEL_DECODER_NAME
-        )
-        models_dict[model_name_key]["instance"] = model_instance
-    app_logger.debug(f"using a {model_name_key} instance model...")
-    models_instance = models_dict[model_name_key]["instance"]
-
-    pt0, pt1 = bbox
-    app_logger.info(f"tile_source: {source}: downloading geo-referenced raster with bbox {bbox}, zoom {zoom}.")
-    img, transform = download_extent(w=pt1[1], s=pt1[0], e=pt0[1], n=pt0[0], zoom=zoom, source=source)
-    if check_source_type_is_terrain(source):
-        app_logger.info("terrain-rgb like raster: transforms it into a DEM")
-        dem = get_raster_terrain_rgb_like(img, source.name)
-        # set a slope cell size proportional to the image width
-        slope_cellsize = int(img.shape[1] * SLOPE_CELLSIZE / DEFAULT_INPUT_SHAPE[1])
-        app_logger.info(f"terrain-rgb like raster: compute slope, curvature using {slope_cellsize} as cell size.")
-        img = get_rgb_prediction_image(dem, slope_cellsize)
-
-    app_logger.info(
-        f"img type {type(img)} with shape/size:{img.size}, transform type: {type(transform)}, transform:{transform}.")
-    app_logger.info(f"source_name:{source_name}, source_name type:{type(source_name)}.")
-    embedding_key = f"{source_name}_z{zoom}_w{pt1[1]},s{pt1[0]},e{pt0[1]},n{pt0[0]}"
-    mask, n_predictions = sam_onnx_inference.get_raster_inference_with_embedding_from_dict(
-        img, prompt, models_instance, model_name_key, embedding_key, embedding_dict)
-    app_logger.info(f"created {n_predictions} masks, preparing conversion to geojson...")
-    return {
-        "n_predictions": n_predictions,
-        **get_vectorized_raster_as_geojson(mask, transform)
-    }
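A minimal usage sketch for the removed samexporter_predict() above. The bbox reuses the coordinates from GRADIO_EXAMPLE_BODY in utilities/constants.py, while the point-prompt payload and the tile source are illustrative assumptions; after this refactor the equivalent entry point is expected to come from the samgis-lisa dependency rather than from this module.

# Sketch only: this import path is removed by this very commit; it is kept here
# just to show the call shape documented in the docstring above.
from samgis_lisa_on_zero.prediction_api.predictors import samexporter_predict

ne, sw = (46.17271333276639, 10.079505443573), (46.1677724417049, 10.068830251693727)
body = samexporter_predict(
    bbox=[ne, sw],  # pt0=ne, pt1=sw as (lat, lng) pairs, matching the unpacking above
    prompt=[{"type": "point", "data": {"lat": 46.170, "lng": 10.074}, "label": 1}],  # assumed prompt shape
    zoom=17,
    source="https://tile.openstreetmap.org/{z}/{x}/{y}.png",  # same value as DEFAULT_URL_TILES
    source_name="openstreetmap.mapnik",
)
print(body["n_predictions"])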
samgis_lisa_on_zero/utilities/__init__.py
DELETED
@@ -1 +0,0 @@
-"""various helper functions"""
samgis_lisa_on_zero/utilities/constants.py
DELETED
@@ -1,64 +0,0 @@
-"""Project constants"""
-INPUT_CRS_STRING = "EPSG:4326"
-OUTPUT_CRS_STRING = "EPSG:3857"
-DRIVER_RASTERIO_GTIFF = "GTiff"
-CUSTOM_RESPONSE_MESSAGES = {
-    200: "ok",
-    400: "Bad Request",
-    422: "Missing required parameter",
-    500: "Internal server error"
-}
-TILE_SIZE = 256
-EARTH_EQUATORIAL_RADIUS = 6378137.0
-WKT_3857 = 'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,'
-WKT_3857 += 'AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
-WKT_3857 += 'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],'
-WKT_3857 += 'PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],'
-WKT_3857 += 'PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],'
-WKT_3857 += 'AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 '
-WKT_3857 += '+x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]'
-SERVICE_NAME = "sam-gis"
-DEFAULT_LOG_LEVEL = 'INFO'
-RETRY_DOWNLOAD = 3
-TIMEOUT_DOWNLOAD = 60
-CALLBACK_INTERVAL_DOWNLOAD = 0.05
-BOOL_USE_CACHE = True
-N_WAIT = 0
-N_MAX_RETRIES = 2
-N_CONNECTION = 2
-ZOOM_AUTO = "auto"
-DEFAULT_URL_TILES = 'https://tile.openstreetmap.org/{z}/{x}/{y}.png'
-DOMAIN_URL_TILES_MAPBOX = "api.mapbox.com"
-RELATIVE_URL_TILES_MAPBOX = "v/mapbox.terrain-rgb/{zoom}/{x}/{y}{@2x}.pngraw?access_token={TOKEN}"
-COMPLETE_URL_TILES_MAPBOX = f"https://{DOMAIN_URL_TILES_MAPBOX}/{RELATIVE_URL_TILES_MAPBOX}"
-# https://s3.amazonaws.com/elevation-tiles-prod/terrarium/13/1308/3167.png
-DOMAIN_URL_TILES_NEXTZEN = "s3.amazonaws.com"
-RELATIVE_URL_TILES_NEXTZEN = "elevation-tiles-prod/terrarium/{z}/{x}/{y}.png"  # "terrarium/{z}/{x}/{y}.png"
-COMPLETE_URL_TILES_NEXTZEN = f"https://{DOMAIN_URL_TILES_NEXTZEN}/{RELATIVE_URL_TILES_NEXTZEN}"
-CHANNEL_EXAGGERATIONS_LIST = [2.5, 1.1, 2.0]
-SLOPE_CELLSIZE = 61
-LISA_INFERENCE_FN = "lisa"
-GRADIO_EXAMPLE_BODY = {
-    "bbox": {
-        "ne": {"lat": 46.17271333276639, "lng": 10.079505443573},
-        "sw": {"lat": 46.1677724417049, "lng": 10.068830251693727}
-    },
-    "string_prompt": "",
-    "zoom": 17,
-    "source_type": "Esri.WorldImagery"
-}
-GRADIO_EXAMPLES_TEXT_LIST = [
-    """You need to identify the areas with trees in this photogrammetric image. Please output segmentation mask.""",
-    """You need to identify the areas with streets in this photogrammetric image. Please output segmentation mask.""",
-    """You need to identify the houses in this photogrammetric image. Please output segmentation mask and explain why.""",
-    """Describe what you see in this image.""",
-]
-GRADIO_MARKDOWN = """# [LISA](https://github.com/dvlab-research/LISA) + [SamGIS](https://github.com/trincadev/samgis-be) on Zero GPU!
-
-This project aims to permit use of [LISA](https://github.com/dvlab-research/LISA) (Reasoning Segmentation via Large Language Model) applied to geospatial data thanks to [SamGIS](https://github.com/trincadev/samgis-be). In this space I adapted LISA to the HuggingFace [lisa-on-cuda](https://huggingface.co/spaces/aletrn/lisa-on-cuda) ZeroGPU space.
-
-This [home page project](https://huggingface.co/spaces/aletrn/samgis-lisa-on-zero) is a plain Gradio interface that takes a json as input and translates it into a geojson. More information about the API implementation is available [here](
-https://aletrn-samgis-lisa-on-zero.hf.space/docs). On this [blog page](https://trinca.tornidor.com/projects/lisa-adapted-for-samgis) you can find more details, including some request and response examples with the geojson map representations.
-
-You can also find the alternative map interface [here](https://aletrn-samgis-lisa-on-zero.hf.space/lisa/), useful for creating the request payloads on the fly and for displaying the geojson response.
-"""
samgis_lisa_on_zero/utilities/session_logger.py
DELETED
@@ -1,63 +0,0 @@
-import contextvars
-import logging
-from functools import wraps
-from typing import Callable, Tuple
-
-
-logging_uuid = contextvars.ContextVar("uuid")
-default_formatter = '%(asctime)s | %(uuid)s [%(pathname)s:%(module)s %(lineno)d] %(levelname)s | %(message)s'
-loggingType = logging.CRITICAL | logging.ERROR | logging.WARNING | logging.INFO | logging.DEBUG
-
-
-def setup_logging(log_level: loggingType, formatter: str = default_formatter, name: str = "logger"
-                  ) -> Tuple[logging, contextvars.ContextVar]:
-    """
-    Create a logging instance with log string formatter.
-
-    Args:
-        log_level: logging level
-        formatter: log string formatter
-        name: logger name
-
-    Returns:
-        Logger
-
-    """
-
-    old_factory = logging.getLogRecordFactory()
-
-    def record_factory(*args, **kwargs):
-        record = old_factory(*args, **kwargs)
-        record.uuid = logging_uuid.get("uuid")
-        if isinstance(record.msg, str):
-            record.msg = record.msg.replace("\\", "\\\\").replace("\n", "\\n")
-        return record
-
-    logging.setLogRecordFactory(record_factory)
-    logging.basicConfig(level="DEBUG", format=default_formatter, force=True)
-
-    logger = logging.getLogger(name=name)
-
-    # create a console handler
-    ch = logging.StreamHandler()
-    ch.setLevel("DEBUG")
-
-    # create formatter and add to the console
-    formatter = logging.Formatter(formatter)
-    ch.setFormatter(formatter)
-
-    # add the console handler to logger
-    logger.addHandler(ch)
-    return logger, logging_uuid
-
-
-def set_uuid_logging(func: Callable) -> Callable:
-    @wraps(func)
-    def wrapper(*args, **kwargs):
-        import uuid
-
-        current_uuid = f"{uuid.uuid4()}"
-        logging_uuid.set(current_uuid)
-        return func(*args, **kwargs)
-
-    return wrapper
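A minimal usage sketch for the two removed helpers above: setup_logging() installs the uuid-aware record factory, and set_uuid_logging() stamps a fresh uuid for every decorated call. The handler name and log level below are illustrative assumptions.

import logging

# assumes setup_logging and set_uuid_logging from the removed module above are in scope
logger, _uuid_var = setup_logging(logging.INFO, name="samgis-lisa-example")

@set_uuid_logging
def handle_request():
    # every record emitted inside this call carries the same per-request uuid
    logger.info("processing request...")

handle_request()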
samgis_lisa_on_zero/utilities/type_hints.py
DELETED
@@ -1,111 +0,0 @@
-"""custom type hints"""
-from enum import IntEnum, Enum
-from typing import TypedDict
-
-from affine import Affine
-from numpy import ndarray
-from pydantic import BaseModel
-
-
-tuple_ndarray_transform = tuple[ndarray, Affine]
-
-
-class XYZDefaultProvidersNames(str, Enum):
-    """Default xyz provider names"""
-    DEFAULT_TILES_NAME_SHORT = "openstreetmap"
-    DEFAULT_TILES_NAME = "openstreetmap.mapnik"
-
-
-class XYZTerrainProvidersNames(str, Enum):
-    """Custom xyz provider names for digital elevation models"""
-    MAPBOX_TERRAIN_TILES_NAME = "mapbox.terrain-rgb"
-    NEXTZEN_TERRAIN_TILES_NAME = "nextzen.terrarium"
-
-
-class LatLngDict(BaseModel):
-    """Generic geographic latitude-longitude type"""
-    lat: float
-    lng: float
-
-
-class ContentTypes(str, Enum):
-    """Supported response content types"""
-    APPLICATION_JSON = "application/json"
-    TEXT_PLAIN = "text/plain"
-    TEXT_HTML = "text/html"
-
-
-class PromptPointType(str, Enum):
-    """Segment Anything: validation point prompt type"""
-    point = "point"
-
-
-class PromptRectangleType(str, Enum):
-    """Segment Anything: validation rectangle prompt type"""
-    rectangle = "rectangle"
-
-
-class PromptLabel(IntEnum):
-    """Valid prompt label type"""
-    EXCLUDE = 0
-    INCLUDE = 1
-
-
-class ImagePixelCoordinates(TypedDict):
-    """Image pixel coordinates type"""
-    x: int
-    y: int
-
-
-class RawBBox(BaseModel):
-    """Input lambda bbox request type (not yet parsed)"""
-    ne: LatLngDict
-    sw: LatLngDict
-
-
-class RawPromptPoint(BaseModel):
-    """Input lambda prompt request of type 'PromptPointType' - point (not yet parsed)"""
-    type: PromptPointType
-    data: LatLngDict
-    label: PromptLabel
-
-
-class RawPromptRectangle(BaseModel):
-    """Input lambda prompt request of type 'PromptRectangleType' - rectangle (not yet parsed)"""
-    type: PromptRectangleType
-    data: RawBBox
-
-    def get_type_str(self):
-        return self.type
-
-
-class ApiRequestBody(BaseModel):
-    """Input lambda request validator type (not yet parsed)"""
-    id: str = ""
-    bbox: RawBBox
-    prompt: list[RawPromptPoint | RawPromptRectangle]
-    zoom: int | float
-    source_type: str = "OpenStreetMap.Mapnik"
-    debug: bool = False
-
-
-class StringPromptApiRequestBody(BaseModel):
-    """Input lambda request validator type (not yet parsed)"""
-    id: str = ""
-    bbox: RawBBox
-    string_prompt: str
-    zoom: int | float
-    source_type: str = "OpenStreetMap.Mapnik"
-    debug: bool = False
-
-
-class ApiResponseBodyFailure(BaseModel):
-    duration_run: float
-    message: str
-    request_id: str
-
-
-class ApiResponseBodySuccess(ApiResponseBodyFailure):
-    n_predictions: int
-    geojson: str
-    n_shapes_geojson: int
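A minimal validation sketch for the removed request models above. The payload mirrors GRADIO_EXAMPLE_BODY from utilities/constants.py with one of the example prompts filled in; model_validate() assumes pydantic v2 (use parse_obj() on v1).

# Sketch only: validating an incoming payload with the removed StringPromptApiRequestBody model.
payload = {
    "bbox": {
        "ne": {"lat": 46.17271333276639, "lng": 10.079505443573},
        "sw": {"lat": 46.1677724417049, "lng": 10.068830251693727},
    },
    "string_prompt": "You need to identify the houses in this photogrammetric image. Please output segmentation mask.",
    "zoom": 17,
    "source_type": "Esri.WorldImagery",
}
request = StringPromptApiRequestBody.model_validate(payload)  # parse_obj(payload) on pydantic v1
print(request.bbox.ne.lat, request.zoom)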