From d3295d844b4c594e7897e16497b3c22fe17e0c14 Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:05:49 +0100
Subject: [PATCH 1/8] =?UTF-8?q?docs:=20update=20all=20=F0=9F=93=96=20emoji?=
 =?UTF-8?q?s=20to=20be=20correct?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md     | 2 +-
 docs/about.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 5b1931f..3bd1196 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@

 ---

-🕮 **Documentation**: https://hotosm.github.io/osm-rawdata/
+📖 **Documentation**: https://hotosm.github.io/osm-rawdata/

 🖥️ **Source Code**: https://github.com/hotosm/osm-rawdata

diff --git a/docs/about.md b/docs/about.md
index bc4d84e..74f8189 100644
--- a/docs/about.md
+++ b/docs/about.md
@@ -1,6 +1,6 @@
 # osm-rawdata

-🕮 [Documentation](https://hotosm.github.io/osm-rawdata/)
+📖 [Documentation](https://hotosm.github.io/osm-rawdata/)

 This is a module to work with
 [OpenStreetMap](https://www.openstreetmap.org) data using postgres and

From c83ebe0a81c826c9b1340878de7c35f64bd7155e Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:10:37 +0100
Subject: [PATCH 2/8] ci: add emojis to workflow names

---
 .github/workflows/build-ci.yml | 2 +-
 .github/workflows/build.yml    | 2 +-
 .github/workflows/docs.yml     | 2 +-
 .github/workflows/pr_label.yml | 2 +-
 .github/workflows/publish.yml  | 2 +-
 .github/workflows/pytest.yml   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/build-ci.yml b/.github/workflows/build-ci.yml
index f71d4de..d81199d 100644
--- a/.github/workflows/build-ci.yml
+++ b/.github/workflows/build-ci.yml
@@ -1,4 +1,4 @@
-name: Build CI Img
+name: 🔧 Build CI Img

 on:
   # Push includes PR merge
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 72b2b3f..9f9ab23 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,4 +1,4 @@
-name: Build
+name: 🔧 Build

 on:
   push:
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 10d2bda..c7ba2af 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -1,4 +1,4 @@
-name: Publish Docs
+name: 📖 Publish Docs

 on:
   push:
diff --git a/.github/workflows/pr_label.yml b/.github/workflows/pr_label.yml
index c952171..c73a1f8 100644
--- a/.github/workflows/pr_label.yml
+++ b/.github/workflows/pr_label.yml
@@ -1,4 +1,4 @@
-name: PR Label
+name: 🏷️ PR Label

 on:
   pull_request_target:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index c560a2c..e6b4e90 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -1,4 +1,4 @@
-name: Publish to PyPi.org
+name: 🚩 Publish to PyPi.org

 on:
   release:
diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml
index 864975f..98d9427 100644
--- a/.github/workflows/pytest.yml
+++ b/.github/workflows/pytest.yml
@@ -1,4 +1,4 @@
-name: PyTest
+name: 🧪 PyTest

 on:
   # Run tests on all pushed branches

From 8fc32ff8a60034a97398c9bdeb8ddae0544d62c5 Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:12:57 +0100
Subject: [PATCH 3/8] ci: add wiki publish workflow

---
 .github/workflows/wiki.yml | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
 create mode 100644 .github/workflows/wiki.yml

diff --git a/.github/workflows/wiki.yml b/.github/workflows/wiki.yml
new file mode 100644
index 0000000..6e4762e
--- /dev/null
+++ b/.github/workflows/wiki.yml
@@ -0,0 +1,15 @@
+name: 📖 Publish Docs to Wiki
+
+on:
+  push:
+    paths:
+      - docs/**
+    branches: [main]
+  # Allow manual trigger (workflow_dispatch)
+  workflow_dispatch:
+
+jobs:
+  publish-docs-to-wiki:
+    uses: hotosm/gh-workflows/.github/workflows/wiki.yml@1.0.1
+    with:
+      homepage_path: "index.md"

From 323536c472a9224b3aa9d39de69a0b68c01bd356 Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:24:19 +0100
Subject: [PATCH 4/8] docs: add sidebar for wiki

---
 docs/_Sidebar.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 docs/_Sidebar.md

diff --git a/docs/_Sidebar.md b/docs/_Sidebar.md
new file mode 100644
index 0000000..8be9605
--- /dev/null
+++ b/docs/_Sidebar.md
@@ -0,0 +1,17 @@
+# [Home](https://github.com/hotosm/osm-fieldwork/wiki)
+
+[About](https://github.com/hotosm/osm-fieldwork/wiki/about)
+
+[Postgres](https://github.com/hotosm/osm-fieldwork/wiki/postgres)
+
+[YAML](https://github.com/hotosm/osm-fieldwork/wiki/yaml)
+
+[JSON](https://github.com/hotosm/osm-fieldwork/wiki/json)
+
+[Importer](https://github.com/hotosm/osm-fieldwork/wiki/importer)
+
+[Overture](https://github.com/hotosm/osm-fieldwork/wiki/overture)
+
+[Geofabrik](https://github.com/hotosm/osm-fieldwork/wiki/geofabrik)
+
+[API](https://github.com/hotosm/osm-fieldwork/wiki/api)

From 617f36029e43785bc4704744b1575d927f579de9 Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:27:15 +0100
Subject: [PATCH 5/8] ci: update pre-commit hooks

---
 .pre-commit-config.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 6d9743a..4119a63 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,14 +8,14 @@ repos:

   # Autoformat: Python code
   - repo: https://github.com/psf/black
-    rev: 23.9.1
+    rev: 23.10.0
     hooks:
       - id: black

   # Lint / autoformat: Python code
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: "v0.0.292"
+    rev: "v0.1.1"
     hooks:
       - id: ruff
         args: [--exit-non-zero-on-fix]

From 6a01ffb9f3ea0aeb1d00076a3bcef6eac331a484 Mon Sep 17 00:00:00 2001
From: spwoodcock
Date: Tue, 24 Oct 2023 03:28:49 +0100
Subject: [PATCH 6/8] ci: run pre-commit hooks on all files

---
 docs/geofabrik.md       |  12 ++---
 docs/overture.md        |  67 +++++++++++++-------------
 docs/postgres.md        |  18 +++----
 osm_rawdata/importer.py |  71 +++++++++++++--------------
 osm_rawdata/overture.py | 104 +++++++++++++++++++++-------------------
 osm_rawdata/postgres.py |   2 +-
 6 files changed, 141 insertions(+), 133 deletions(-)

diff --git a/docs/geofabrik.md b/docs/geofabrik.md
index 622ad65..1e3a7c8 100644
--- a/docs/geofabrik.md
+++ b/docs/geofabrik.md
@@ -1,10 +1,10 @@
 # Geofabrik

 This is a simple utility to download country data files from
-[GeoFabrik](https://download.geofabrik.de/). 
+[GeoFabrik](https://download.geofabrik.de/).

-    options:
-    --help(-h) show this help message and exit
-    --verbose(-v) verbose output
-    --file(-f) FILE The country or US state to download
-    --list(-l) List all files on GeoFabrik
+    options:
+    --help(-h) show this help message and exit
+    --verbose(-v) verbose output
+    --file(-f) FILE The country or US state to download
+    --list(-l) List all files on GeoFabrik
diff --git a/docs/overture.md b/docs/overture.md
index e6dead0..e3f6712 100644
--- a/docs/overture.md
+++ b/docs/overture.md
@@ -1,6 +1,6 @@
 # Overture Map Data

-The Overture Foundation (https://www.overturemaps.org) has been
+The Overture Foundation (<https://www.overturemaps.org>) has been
 recently formed to build a competitor to Google Maps.
 The plan is to use OpenStreetMap (OSM) data as a baselayer, and
 layer other datasets on top. The currently available data (July
 2023) has 13 different
@@ -40,28 +40,27 @@ less columns in it, and each data type had a schema
 oriented towards that data
 type. The new schema (Oct 2023) is larger, but all the data
 types are supported in the same schema.

-The schema used in the Overture data files is [documented here](
-https://docs.overturemaps.org/reference). This document is just a
+The schema used in the Overture data files is [documented here](https://docs.overturemaps.org/reference). This document is just a
 summary with some implementation details.

 ### Buildings

 The current list of buildings datasets is:

-* Austin Building Footprints Year 2013 2D Buildings
-* Boston BPDA 3D Buildings
-* City of Cambridge, MA Open Data 3D Buildings
-* Denver Regional Council of Governments 2D Buildings
-* Esri Buildings | Austin Building Footprints Year 2013 2D Buildings
-* Esri Buildings | Denver Regional Council of Governments 2D Buildings
-* Esri Community Maps
-* Miami-Dade County Open Data 3D Buildings
-* OpenStreetMap
-* Microsoft ML Buildings
-* NYC Open Data 3D Buildings
-* Portland Building Footprint 2D Buildings
-* USGS Lidar
-* Washington DC Open Data 3D Buildings
+- Austin Building Footprints Year 2013 2D Buildings
+- Boston BPDA 3D Buildings
+- City of Cambridge, MA Open Data 3D Buildings
+- Denver Regional Council of Governments 2D Buildings
+- Esri Buildings | Austin Building Footprints Year 2013 2D Buildings
+- Esri Buildings | Denver Regional Council of Governments 2D Buildings
+- Esri Community Maps
+- Miami-Dade County Open Data 3D Buildings
+- OpenStreetMap
+- Microsoft ML Buildings
+- NYC Open Data 3D Buildings
+- Portland Building Footprint 2D Buildings
+- USGS Lidar
+- Washington DC Open Data 3D Buildings

 Since the Microsoft ML Buildings and the OpenStreetMap data is
 available elsewhere, and is more up-to-date for global coverage, all

 accurate.

 ### Places

-The *places* data are POIs of places. This appears to be for
+The _places_ data are POIs of places. This appears to be for
 amenities, and contains tags related to that OSM category. This
 dataset is from Meta, and the data appears derived from Facebook.

 The columns that are of interest to OSM are:

-* freeform - The address of the amenity, although the format is not
+- freeform - The address of the amenity, although the format is not
   consistent
-* socials - An array of social media links for this amenity.
-* phone - The phone number if it has one
-* websites - The website URL if it has one
-* value - The name of the amenity if known
+- socials - An array of social media links for this amenity.
+- phone - The phone number if it has one
+- websites - The website URL if it has one
+- value - The name of the amenity if known

 ### Highways

-In the current highway *segment* data files, the only source is
+In the current highway _segment_ data files, the only source is
 OSM. In that case it's better to use up-to-date OSM data. It'll be
 interesting to see if Overture imports the publicly available
 highway datasets from the USGS, or some state governments. That
 would be very useful.

-The Overture *segments* data files are equivalent to an OSM way, with
+The Overture _segments_ data files are equivalent to an OSM way, with
 tags specific to that highway linestring. There are separate data
-files for *connections*, that are equivalent to an OSM relation.
+files for _connections_, that are equivalent to an OSM relation.
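The claim above that OSM is the only segments source is easy to spot-check. The following is a minimal sketch, not part of the patch series: the `segment.parquet` filename is an assumed local download, and the dataset/recordId/confidence layout of the `sources` column follows the schema notes in the sources section below.

```python
# Minimal sketch: tally which datasets an Overture segments file references.
# "segment.parquet" is an illustrative local path, and the shape of the
# "sources" column (a list of dicts with dataset/recordId/confidence)
# is assumed from the schema notes in this document.
from collections import Counter

import numpy as np
import pandas as pd

data = pd.read_parquet("segment.parquet")

counts = Counter()
for sources in data["sources"]:
    # Rows with no source metadata come back as None (or NaN)
    if not isinstance(sources, (list, np.ndarray)):
        continue
    for source in sources:
        counts[source.get("dataset", "unknown")] += 1

print(counts)  # for segments, expect everything to be OpenStreetMap
```

The same tally works on the buildings theme, which is where the Microsoft ML Buildings and OSM entries mentioned earlier would be filtered out.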
 ### Admin Boundaries

 reason to care about these files.

 The names column can have 4 variations on the name. Each may also
 have a language value as well.

-* common
-* official
-* alternate
-* short
+- common
+- official
+- alternate
+- short

 Each of these can have multiple values, each of which consists of a
 value and the language.

 ## sources

 The sources column is an array with two entries. The first entry is
 the name of the dataset, and where it exists, a _recordID_ to
 reference the source dataset. For OSM data, the recordID has 3
 sub-fields. The first character is the type, _w_ (way), _n_ (node), or
 _l_ (line). The second is the OSM ID, and the third with a _v_ is the
 version of the feature in OSM.

-For example: *w***123456**v2 is a way with ID 123456 and is version 2.
+For example: _w_**123456**v2 is a way with ID 123456 and is version 2.
diff --git a/docs/postgres.md b/docs/postgres.md
index 63e7c45..666d13d 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -6,12 +6,12 @@
 from a local postgres database, or the remote Underpass one. A boundary
 polygon is used to define the area to be covered in the extract.
 Optionally a data file can be used.

-    options:
-    --help(-h) show this help message and exit
-    --verbose(-v) verbose output
-    --uri(-u) URI Database URI
-    --boundary(-b) BOUNDARY Boundary polygon to limit the data size
-    --sql(-s) SQL Custom SQL query to execute against the database
-    --all(-a) ALL All the geometry or just centroids
-    --config(-c) CONFIG The config file for the query (json or yaml)
-    --outfile(-o) OUTFILE The output file
+    options:
+    --help(-h) show this help message and exit
+    --verbose(-v) verbose output
+    --uri(-u) URI Database URI
+    --boundary(-b) BOUNDARY Boundary polygon to limit the data size
+    --sql(-s) SQL Custom SQL query to execute against the database
+    --all(-a) ALL All the geometry or just centroids
+    --config(-c) CONFIG The config file for the query (json or yaml)
+    --outfile(-o) OUTFILE The output file
diff --git a/osm_rawdata/importer.py b/osm_rawdata/importer.py
index 36248ef..2a9b700 100755
--- a/osm_rawdata/importer.py
+++ b/osm_rawdata/importer.py
@@ -20,35 +20,31 @@
 #

 import argparse
+import concurrent.futures
 import logging
 import subprocess
 import sys
-import os
-import concurrent.futures
-import geojson
-from geojson import Feature, FeatureCollection
-from sys import argv
 from pathlib import Path
-from cpuinfo import get_cpu_info
-from shapely.geometry import shape
+from sys import argv

+import geojson
 import pyarrow.parquet as pq
 from codetiming import Timer
-from osm_rawdata.postgres import uriParser
+from cpuinfo import get_cpu_info
 from progress.spinner import PixelSpinner
 from shapely import wkb
+from shapely.geometry import shape
 from sqlalchemy import MetaData, cast, column, create_engine, select, table, text
 from sqlalchemy.dialects.postgresql import JSONB, insert
+from sqlalchemy.engine.base import Connection
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy_utils import create_database, database_exists
-from sqlalchemy.engine.base import Connection
-from shapely.geometry import Point, LineString, Polygon
-from shapely import wkt, wkb

 # Find the other files for this project
 import osm_rawdata as rw
 import osm_rawdata.db_models
 from 
osm_rawdata.db_models import Base +from osm_rawdata.postgres import uriParser rootdir = rw.__path__[0] @@ -57,12 +53,13 @@ # The number of threads is based on the CPU cores info = get_cpu_info() -cores = info['count'] +cores = info["count"] + def importThread( - data: list, - db: Connection, - ): + data: list, + db: Connection, +): """Thread to handle importing Args: @@ -70,15 +67,15 @@ def importThread( db (Connection): A database connection """ # log.debug(f"In importThread()") - #timer = Timer(text="importThread() took {seconds:.0f}s") - #timer.start() + # timer = Timer(text="importThread() took {seconds:.0f}s") + # timer.start() ways = table( "ways_poly", column("id"), column("user"), column("geom"), column("tags"), - ) + ) nodes = table( "nodes", @@ -86,7 +83,7 @@ def importThread( column("user"), column("geom"), column("tags"), - ) + ) index = 0 @@ -94,34 +91,35 @@ def importThread( # log.debug(feature) index -= 1 entry = dict() - tags = feature['properties'] - tags['building'] = 'yes' - entry['id'] = index + tags = feature["properties"] + tags["building"] = "yes" + entry["id"] = index ewkt = shape(feature["geometry"]) geom = wkb.dumps(ewkt) type = ewkt.geom_type scalar = select(cast(tags, JSONB)) - if type == 'Polygon': + if type == "Polygon": sql = insert(ways).values( # id = entry['id'], geom=geom, tags=scalar, - ) - elif type == 'Point': + ) + elif type == "Point": sql = insert(nodes).values( # id = entry['id'], geom=geom, tags=scalar, - ) + ) db.execute(sql) # db.commit() + def parquetThread( data: list, db: Connection, - ): +): """Thread to handle importing Args: @@ -136,7 +134,7 @@ def parquetThread( column("user"), column("geom"), column("tags"), - ) + ) nodes = table( "nodes", @@ -144,7 +142,7 @@ def parquetThread( column("user"), column("geom"), column("tags"), - ) + ) index = -1 log.debug(f"There are {len(data)} entries in the data") @@ -202,6 +200,7 @@ def parquetThread( # print(f"FIXME2: {entry}") timer.stop() + class MapImporter(object): def __init__( self, @@ -229,7 +228,7 @@ def __init__( "CREATE EXTENSION IF NOT EXISTS postgis; CREATE EXTENSION IF NOT EXISTS hstore;CREATE EXTENSION IF NOT EXISTS dblink;" ) self.db.execute(sql) - #self.db.commit() + # self.db.commit() Base.metadata.create_all(bind=engine) @@ -354,8 +353,8 @@ def importGeoJson( """ # load the GeoJson file file = open(infile, "r") - #size = os.path.getsize(infile) - #for line in file.readlines(): + # size = os.path.getsize(infile) + # for line in file.readlines(): # print(line) data = geojson.load(file) @@ -379,11 +378,11 @@ def importGeoJson( meta.create_all(engine) # A chunk is a group of threads - entries = len(data['features']) + entries = len(data["features"]) chunk = round(entries / cores) if entries <= chunk: - result = importThread(data['features'], connections[0]) + result = importThread(data["features"], connections[0]) timer.stop() return True @@ -391,7 +390,7 @@ def importGeoJson( block = 0 while block <= entries: log.debug("Dispatching Block %d:%d" % (block, block + chunk)) - result = executor.submit(importThread, data['features'][block : block + chunk], connections[index]) + result = executor.submit(importThread, data["features"][block : block + chunk], connections[index]) block += chunk index += 1 executor.shutdown() @@ -399,6 +398,7 @@ def importGeoJson( return True + def main(): """This main function lets this class be run standalone by a bash script.""" parser = argparse.ArgumentParser( @@ -441,6 +441,7 @@ def main(): mi.importParquet(args.infile) log.info(f"Imported {args.infile} into 
{args.uri}") + if __name__ == "__main__": """This is just a hook so this file can be run standalone during development.""" main() diff --git a/osm_rawdata/overture.py b/osm_rawdata/overture.py index 3ed246d..63e1616 100755 --- a/osm_rawdata/overture.py +++ b/osm_rawdata/overture.py @@ -14,57 +14,57 @@ # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . - + import argparse import logging +import math import sys -import os -from sys import argv -from geojson import Point, Feature, FeatureCollection, dump, Polygon + import geojson -from shapely.geometry import shape, Polygon, mapping -import shapely -from shapely import wkt, wkb -import pyarrow.parquet as pq -# import pyarrow as pa -from pandas import Series import pandas as pd -import math +from codetiming import Timer +from geojson import Feature, FeatureCollection from numpy import ndarray + +# import pyarrow as pa +from pandas import Series from progress.spinner import PixelSpinner -from codetiming import Timer +from shapely import wkb # Instantiate logger -log = logging.getLogger('osm-rawdata') +log = logging.getLogger("osm-rawdata") + class Overture(object): - def __init__(self, - filespec: str, - ): + def __init__( + self, + filespec: str, + ): """A class for parsing Overture V2 files. Args: data (list): The list of features """ - #pfile = pq.ParquetFile(filespec) + # pfile = pq.ParquetFile(filespec) # self.data = pfile.read() self.data = pd.read_parquet(filespec) self.filespec = filespec log.debug(f"Read {len(self.data)} entries from {filespec}") - def parse(self, - data: Series, - ): + def parse( + self, + data: Series, + ): # log.debug(data) entry = dict() # timer = Timer(text="importParquet() took {seconds:.0f}s") # timer.start() - for key,value in data.to_dict().items(): + for key, value in data.to_dict().items(): if value is None: continue if type(value) == float and math.isnan(value): continue - if key == 'geometry': + if key == "geometry": geom = wkb.loads(value) if type(value) == ndarray: # print(f"LIST: {key} = {value}") @@ -72,34 +72,42 @@ def parse(self, for k1, v1 in value[0].items(): if v1 is not None: if type(v1) == ndarray: - import epdb; epdb.st() + import epdb + + epdb.st() entry[k1] = v1 else: # FIXME: for now the data only has one entry in the array, # but this could change. 
                if type(value[0]) == ndarray:
-                    import epdb; epdb.st()
+                    import epdb
+
+                    epdb.st()
                 entry[key] = value[0]
                 continue
-            if key == 'sources' and type(value) == list:
+            if key == "sources" and type(value) == list:
                 if type(value) == ndarray:
-                    import epdb; epdb.st()
+                    import epdb
+
+                    epdb.st()
                 if type(value[0]) == ndarray:
-                    import epdb; epdb.st()
+                    import epdb
+
+                    epdb.st()
-                if 'dataset' in value[0]:
-                    entry['source'] = value[0]['dataset']
-                if 'recordId' in valve[0] and ['recordId'] is not None:
-                    entry['record'] = value[0]['recordId']
-                if value[0]['confidence'] is not None:
-                    entry['confidence'] = value[0]['confidence']
+                if "dataset" in value[0]:
+                    entry["source"] = value[0]["dataset"]
+                if "recordId" in value[0] and value[0]["recordId"] is not None:
+                    entry["record"] = value[0]["recordId"]
+                if value[0]["confidence"] is not None:
+                    entry["confidence"] = value[0]["confidence"]
                 else:
-                    entry['source'] = value['dataset']
-                    if value[0]['recordId'] is not None:
-                        entry['record'] = value[0]['recordId']
-                    if value[0]['confidence'] is not None:
-                        entry['confidence'] = value[0]['confidence']
+                    entry["source"] = value["dataset"]
+                    if value["recordId"] is not None:
+                        entry["record"] = value["recordId"]
+                    if value["confidence"] is not None:
+                        entry["confidence"] = value["confidence"]
             if type(value) == dict:
-                if key == 'bbox':
+                if key == "bbox":
                     continue
                 for k1, v1 in value.items():
                     if v1 is None:
                         continue
                         if v2 is None:
                             continue
                         if type(v2) == ndarray:
-                            for k3,v3 in v2.tolist()[0].items():
+                            for k3, v3 in v2.tolist()[0].items():
                                 if v3 is not None:
                                     entry[k3] = v3
                         elif type(v2) == str:
                             continue
                 # FIXME: we should use the language to adjust the name tag
                 # lang = v1[0]['language']
-        #timer.stop()
+        # timer.stop()
         return Feature(geometry=geom, properties=entry)

+
 def main():
     """This main function lets this class be run standalone by a bash script,
     primarily to assist in code development or debugging. This should really be
     a test case. 
""" - categories = ('buildings', 'places', 'highways', 'admin', 'localities') + categories = ("buildings", "places", "highways", "admin", "localities") parser = argparse.ArgumentParser( prog="conflateDB", formatter_class=argparse.RawDescriptionHelpFormatter, @@ -134,7 +143,7 @@ def main(): ) parser.add_argument("-v", "--verbose", action="store_true", help="verbose output") parser.add_argument("-i", "--infile", required=True, help="Input file") - parser.add_argument("-o", "--outfile", default='overture.geojson', help="Output file") + parser.add_argument("-o", "--outfile", default="overture.geojson", help="Output file") args = parser.parse_args() @@ -143,9 +152,7 @@ def main(): log.setLevel(logging.DEBUG) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.DEBUG) - formatter = logging.Formatter( - "%(threadName)10s - %(name)s - %(levelname)s - %(message)s" - ) + formatter = logging.Formatter("%(threadName)10s - %(name)s - %(levelname)s - %(message)s") ch.setFormatter(formatter) log.addHandler(ch) @@ -159,11 +166,11 @@ def main(): spin.next() feature = overture.data.loc[index] entry = overture.parse(feature) - if entry['properties']['dataset'] != 'OpenStreetMap': + if entry["properties"]["dataset"] != "OpenStreetMap": features.append(entry) if len(features) > 0: - file = open(args.outfile, 'w') + file = open(args.outfile, "w") geojson.dump(FeatureCollection(features), file) timer.stop() log.info(f"Wrote {args.outfile}") @@ -172,6 +179,7 @@ def main(): spin.finish() + if __name__ == "__main__": """This is just a hook so this file can be run standlone during development.""" main() diff --git a/osm_rawdata/postgres.py b/osm_rawdata/postgres.py index e48b207..b12af62 100755 --- a/osm_rawdata/postgres.py +++ b/osm_rawdata/postgres.py @@ -370,7 +370,7 @@ def queryLocal( return FeatureCollection(features) # If there is no config file, don't modify the results - if len(self.qc.config['where']['ways_poly']) == 0 and len(self.qc.config['where']['nodes']) == 0: + if len(self.qc.config["where"]["ways_poly"]) == 0 and len(self.qc.config["where"]["nodes"]) == 0: return result for item in result: From 7330843544e1f2c526a6e283ed6e8c9ff40c3320 Mon Sep 17 00:00:00 2001 From: spwoodcock Date: Tue, 24 Oct 2023 03:45:33 +0100 Subject: [PATCH 7/8] ci: rename incorrectly named stage --- .github/workflows/build-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-ci.yml b/.github/workflows/build-ci.yml index d81199d..53697b6 100644 --- a/.github/workflows/build-ci.yml +++ b/.github/workflows/build-ci.yml @@ -12,7 +12,7 @@ on: workflow_dispatch: jobs: - backend-ci-build: + ci-build: uses: hotosm/gh-workflows/.github/workflows/image_build.yml@1.0.1 with: build_target: ci From ff5dff3ad350129f1fc06f4d20e1ea1ec68868f7 Mon Sep 17 00:00:00 2001 From: spwoodcock Date: Tue, 24 Oct 2023 14:06:05 +0100 Subject: [PATCH 8/8] ci: update pr_label workflow to use v4 --- .github/workflows/pr_label.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr_label.yml b/.github/workflows/pr_label.yml index c73a1f8..1e5a389 100644 --- a/.github/workflows/pr_label.yml +++ b/.github/workflows/pr_label.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/labeler@v3 + - uses: actions/labeler@v4 # Uses .github/labeler.yml definitions with: repo-token: ${{ secrets.GITHUB_TOKEN }}