Implement DuckDB-based converter #198
Open

ivorbosloper wants to merge 16 commits into main from duckdb_converter
Changes from all commits (16 commits):
f6d9436 Implement DuckDB-based converter (ivorbosloper)
6f52d97 Not sure if this is right (ivorbosloper)
0a854ad Update project (ivorbosloper)
996cc38 Fix tests (ivorbosloper)
51bea87 Add collection to metadata (ivorbosloper)
2d3fc50 Add required to arrow definition (ivorbosloper)
852fb6a Support for sources with different schemas (m-mohr)
3cf3b8d Export collection and set compression (m-mohr)
c524142 Add caching and warnings (m-mohr)
0d5a9f9 Add todos (m-mohr)
07d7aac Fix tests (m-mohr)
78fb00d Implement rewrite to correct GeoParquet (ivorbosloper)
dd88ed0 Merge branch 'main' into duckdb_converter (m-mohr)
6017ef0 Update fiboa_cli/conversion/duckdb.py (ivorbosloper)
993824e Merge branch 'main' into duckdb_converter (ivorbosloper)
920b39d Update pixi.lock for duckdb (ivorbosloper)
New file: fiboa_cli/conversion/duckdb.py (+221 lines)

```python
import json
import os
from pathlib import Path
from tempfile import NamedTemporaryFile

import duckdb
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from geopandas.array import from_wkb
from pyarrow.lib import StructArray
from vecorel_cli.encoding.geojson import VecorelJSONEncoder

from .fiboa_converter import FiboaBaseConverter


# This converter is experimental, use with caution.
# Results may not be fully fiboa compliant yet.
# Use this primarily for datasets that are too large to be processed by the default converter.
class FiboaDuckDBBaseConverter(FiboaBaseConverter):
    def convert(
        self,
        output_file,
        cache=None,
        input_files=None,
        variant=None,
        compression=None,
        geoparquet_version=None,
        original_geometries=False,
        **kwargs,
    ) -> str:
        if not original_geometries:
            self.warning(
                "original_geometries is not supported for DuckDB-based converters and will always write original geometries"
            )

        geoparquet_version = geoparquet_version or "1.1.0"
        compression = compression or "brotli"

        self.variant = variant
        cid = self.id.strip()
        if self.bbox is not None and len(self.bbox) != 4:
            raise ValueError("If provided, the bounding box must consist of 4 numbers")

        # Create output folder if it doesn't exist
        directory = os.path.dirname(output_file)
        if directory:
            os.makedirs(directory, exist_ok=True)

        if input_files is not None and isinstance(input_files, dict) and len(input_files) > 0:
            self.warning("Using user provided input file(s) instead of the pre-defined file(s)")
            urls = input_files
        else:
            urls = self.get_urls()
            if urls is None:
                raise ValueError("No input files provided")

        self.info("Getting file(s) if not cached yet")
        if cache:
            request_args = {}
            if self.avoid_range_request:
                request_args["block_size"] = 0
            urls = self.download_files(urls, cache, **request_args)
        elif self.avoid_range_request:
            self.warning(
                "avoid_range_request is set, but cache is not used, so this setting has no effect"
            )
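        # Note: block_size=0 is presumably forwarded to fsspec, where it disables
        # random-access (range) reads and fetches each file in full instead.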

        selections = []
        geom_column = None
        for k, v in self.columns.items():
            if k in self.column_migrations:
                selections.append(f'{self.column_migrations.get(k)} as "{v}"')
            else:
                selections.append(f'"{k}" as "{v}"')
            if v == "geometry":
                geom_column = k
        selection = ", ".join(selections)
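        # For illustration (hypothetical mapping, not part of this PR): with
        # self.columns = {"ID": "id", "geom": "geometry"} and no migrations, this
        # renders as '"ID" as "id", "geom" as "geometry"' and geom_column = "geom".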

        filters = []
        where = ""
        if self.bbox is not None:
            filters.append(
                f"ST_Intersects(geometry, ST_MakeEnvelope({self.bbox[0]}, {self.bbox[1]}, {self.bbox[2]}, {self.bbox[3]}))"
            )
        for k, v in self.column_filters.items():
            filters.append(v)
        if len(filters) > 0:
            where = f"WHERE {' AND '.join(filters)}"

        if isinstance(urls, str):
            sources = f'"{urls}"'
        else:
            paths = []
            for url in urls:
                if isinstance(url, tuple):
                    paths.append(f'"{url[0]}"')
                else:
                    paths.append(f'"{url}"')
            sources = "[" + ",".join(paths) + "]"
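        # For illustration (hypothetical inputs): two cached files a.parquet and
        # b.parquet render as the DuckDB list literal ["a.parquet","b.parquet"].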

        collection = self.create_collection(cid)
        collection.update(self.column_additions)
        collection["collection"] = self.id

        if isinstance(output_file, Path):
            output_file = str(output_file)

        collection_json = json.dumps(collection, cls=VecorelJSONEncoder).encode("utf-8")

        con = duckdb.connect()
        con.install_extension("spatial")
        con.load_extension("spatial")
        con.execute(
            f"""
            COPY (
                SELECT {selection}
                FROM read_parquet({sources}, union_by_name=true)
                {where}
                ORDER BY ST_Hilbert({geom_column})
            ) TO ? (
                FORMAT parquet,
                compression ?,
                KV_METADATA {{
                    collection: ?,
                }}
            )
            """,
            [output_file, compression, collection_json],
        )
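        # Sketch of the intended rendered statement (hypothetical values, with the
        # placeholders bound from the parameter list above):
        #   COPY (
        #       SELECT "ID" as "id", "geom" as "geometry"
        #       FROM read_parquet(["a.parquet"], union_by_name=true)
        #       ORDER BY ST_Hilbert("geom")
        #   ) TO 'out.parquet' (FORMAT parquet, compression 'brotli', KV_METADATA {collection: '...'})
        # Ordering by ST_Hilbert clusters spatially close features into the same
        # row groups, which helps row-group pruning in later spatial queries.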

        # Post-process the written Parquet to proper GeoParquet v1.1 with bbox and nullability
        try:
            pq_file = pq.ParquetFile(output_file)

            existing_schema = pq_file.schema_arrow
            col_names = existing_schema.names
            assert "geometry" in col_names, "Missing geometry column in output parquet file"

            schemas = collection.merge_schemas({})
            collection_only = {k for k, v in schemas.get("collection", {}).items() if v}
            required_columns = {"geometry"} | {
                r
                for r in schemas.get("required", [])
                if r in col_names and r not in collection_only
            }
            if "id" in col_names:
                required_columns.add("id")

            # Update for version 1.1.0
            metadata = existing_schema.metadata
            if geoparquet_version > "1.0.0":
                geo_meta = json.loads(existing_schema.metadata[b"geo"])
                geo_meta["version"] = geoparquet_version
                metadata[b"geo"] = json.dumps(geo_meta).encode("utf-8")

            # Build a new Arrow schema with adjusted nullability
            new_fields = []
            for field in existing_schema:
                if field.name in required_columns and field.nullable:
                    new_fields.append(
                        pa.field(field.name, field.type, nullable=False, metadata=field.metadata)
                    )
                else:
                    new_fields.append(field)

            add_bbox = geoparquet_version > "1.0.0" and "bbox" not in col_names
            if add_bbox:
                new_fields.append(
                    pa.field(
                        "bbox",
                        pa.struct(
                            [
                                ("xmin", pa.float64()),
                                ("ymin", pa.float64()),
                                ("xmax", pa.float64()),
                                ("ymax", pa.float64()),
                            ]
                        ),
                    )
                )
            new_schema = pa.schema(new_fields, metadata=metadata)

            # Stream-rewrite the file to a temp file and replace it atomically
            with NamedTemporaryFile(
                "wb", delete=False, dir=os.path.dirname(output_file), suffix=".parquet"
            ) as tmp:
                tmp_path = tmp.name

            writer = pq.ParquetWriter(
                tmp_path,
                new_schema,
                compression=compression,
                use_dictionary=True,
                write_statistics=True,
            )
            try:
                bbox_names = ["ymax", "xmax", "ymin", "xmin"]
                for rg in range(pq_file.num_row_groups):
                    tbl = pq_file.read_row_group(rg)
                    if add_bbox:
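                        # The (n, 4) bounds array has columns (minx, miny, maxx, maxy);
                        # np.rot90 rotates it counterclockwise into four rows ordered
                        # (maxy, maxx, miny, minx), matching bbox_names above.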
                        # determine bounds, change to StructArray type
                        bounds = from_wkb(tbl["geometry"]).bounds
                        bbox_array = StructArray.from_arrays(
                            np.rot90(bounds),
                            names=bbox_names,
                        )
                        tbl = tbl.append_column("bbox", bbox_array)
                    # Ensure table adheres to the new schema (mainly nullability); cast if needed
                    if tbl.schema != new_schema:
                        # Align field order/types; this does not materialize data beyond the batch
                        tbl = tbl.cast(new_schema, safe=False)
                    writer.write_table(tbl)
            finally:
                writer.close()

            os.replace(tmp_path, output_file)
        except Exception as e:
            self.warning(f"GeoParquet 1.1 post-processing failed: {e}")

        return output_file
```
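For context, here is a minimal sketch of how a dataset converter could opt into this base class. The class name, column mapping, and URL are hypothetical, and the attributes used (id, columns, get_urls, etc.) are assumed to come from the existing converter base classes in fiboa_cli/vecorel_cli:

```python
# Hypothetical sketch, not part of this PR.
from fiboa_cli.conversion.duckdb import FiboaDuckDBBaseConverter


class MyLargeDatasetConverter(FiboaDuckDBBaseConverter):
    id = "my_dataset"  # hypothetical collection id
    columns = {
        "ID": "id",  # source column -> fiboa property
        "geom": "geometry",
    }

    def get_urls(self):
        # One or more Parquet sources, read via read_parquet(..., union_by_name=true)
        return ["https://example.com/fields.parquet"]


# MyLargeDatasetConverter().convert("out.parquet") would then write a
# Hilbert-ordered, brotli-compressed GeoParquet 1.1 file with the collection
# metadata embedded under the "collection" key.
```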