
Commit 0b599c6

Add constraint to avoid adding files with conflicting field IDs
1 parent 677f196 commit 0b599c6

File tree: 3 files changed (+167 / -10 lines)


mkdocs/docs/api.md

Lines changed: 8 additions & 2 deletions
@@ -1006,8 +1006,14 @@ Expert Iceberg users may choose to commit existing parquet files to the Iceberg

 <!-- prettier-ignore-start -->

-!!! note "Name Mapping"
-    Because `add_files` uses existing files without writing new parquet files that are aware of the Iceberg's schema, it requires the Iceberg's table to have a [Name Mapping](https://iceberg.apache.org/spec/?h=name+mapping#name-mapping-serialization) (The Name mapping maps the field names within the parquet files to the Iceberg field IDs). Hence, `add_files` requires that there are no field IDs in the parquet file's metadata, and creates a new Name Mapping based on the table's current schema if the table doesn't already have one.
+!!! note "Name Mapping and Field IDs"
+    `add_files` can work with Parquet files both with and without field IDs in their metadata:
+
+    - **Files with field IDs**: When field IDs are present in the Parquet metadata, they must match the corresponding field IDs in the Iceberg table schema. This is common for files generated by tools like Spark or other libraries that write explicit field ID metadata.
+
+    - **Files without field IDs**: When field IDs are absent, the table must have a [Name Mapping](https://iceberg.apache.org/spec/?h=name+mapping#name-mapping-serialization) to map field names to Iceberg field IDs. `add_files` will automatically create a Name Mapping based on the table's current schema if one doesn't already exist.
+
+    In both cases, a Name Mapping is created if the table doesn't have one, ensuring compatibility with various readers.

 !!! note "Partitions"
     `add_files` only requires the client to read the existing parquet files' metadata footer to infer the partition value of each file. This implementation also supports adding files to Iceberg tables with partition transforms like `MonthTransform`, and `TruncateTransform` which preserve the order of the values after the transformation (Any Transform that has the `preserves_order` property set to True is supported). Please note that if the column statistics of the `PartitionField`'s source column are not present in the parquet metadata, the partition value is inferred as `None`.
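
A minimal sketch of the two cases described in the note above, using public pyiceberg and pyarrow APIs; the catalog name, table identifier, column names and field IDs (foo=1, bar=2), and local file paths are illustrative assumptions, not taken from this commit.

    import pyarrow as pa
    import pyarrow.parquet as pq
    from pyiceberg.catalog import load_catalog

    # Hypothetical table with columns foo (field ID 1, int) and bar (field ID 2, string).
    catalog = load_catalog("default")
    tbl = catalog.load_table("default.my_table")

    # Case 1: a file without field IDs. Columns are matched by name through the table's
    # Name Mapping, which add_files creates from the current schema if it is missing.
    plain = pa.table({"foo": pa.array([1, 2], pa.int32()), "bar": pa.array(["a", "b"])})
    pq.write_table(plain, "/tmp/no_ids.parquet")
    tbl.add_files(file_paths=["/tmp/no_ids.parquet"])

    # Case 2: a file with explicit field IDs. Every PARQUET:field_id must equal the ID of
    # the same column in the Iceberg schema, otherwise add_files raises a ValueError.
    schema_with_ids = pa.schema([
        pa.field("foo", pa.int32(), metadata={"PARQUET:field_id": "1"}),
        pa.field("bar", pa.string(), metadata={"PARQUET:field_id": "2"}),
    ])
    with_ids = pa.table({"foo": pa.array([3, 4], pa.int32()), "bar": pa.array(["c", "d"])}, schema=schema_with_ids)
    pq.write_table(with_ids, "/tmp/with_ids.parquet")
    tbl.add_files(file_paths=["/tmp/with_ids.parquet"])

    # A Name Mapping exists on the table after either call.
    assert tbl.name_mapping() is not None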

pyiceberg/io/pyarrow.py

Lines changed: 38 additions & 1 deletion
@@ -2610,7 +2610,10 @@ def _check_pyarrow_schema_compatible(
     Raises:
         ValueError: If the schemas are not compatible.
     """
+    # Check if the PyArrow schema has explicit field IDs
+    has_field_ids = visit_pyarrow(provided_schema, _HasIds())
     name_mapping = requested_schema.name_mapping
+
     try:
         provided_schema = pyarrow_to_schema(
             provided_schema,
@@ -2624,8 +2627,42 @@
         )
         additional_names = set(provided_schema._name_to_id.keys()) - set(requested_schema._name_to_id.keys())
         raise ValueError(
-            f"PyArrow table contains more columns: {', '.join(sorted(additional_names))}. Update the schema first (hint, use union_by_name)."
+            f"PyArrow table contains more columns: {', '.join(sorted(additional_names))}. "
+            "Update the schema first (hint, use union_by_name)."
         ) from e
+
+    # If the file has explicit field IDs, validate they match the table schema exactly
+    if has_field_ids:
+        # Build mappings for both schemas (including nested fields)
+        requested_id_to_name = requested_schema._lazy_id_to_name
+        provided_id_to_name = provided_schema._lazy_id_to_name
+
+        # Also build reverse mapping: path -> field_id for the table
+        requested_name_to_id = {path: field_id for field_id, path in requested_id_to_name.items()}
+
+        # Check that all field paths in the file have matching field IDs in the table
+        mismatched_fields = []
+        for field_id, provided_path in provided_id_to_name.items():
+            # Check if this path exists in the table schema
+            expected_field_id = requested_name_to_id.get(provided_path)
+            if expected_field_id is None:
+                # The file has a field path that doesn't exist in the table at all
+                # This will be caught by _check_schema_compatible later, so skip it here
+                continue
+            elif expected_field_id != field_id:
+                # Same path, different field ID - this is the critical error
+                mismatched_fields.append(
+                    f"'{provided_path}': table expects field_id={expected_field_id}, "
+                    f"file has field_id={field_id}"
+                )
+
+        if mismatched_fields:
+            raise ValueError(
+                "Field IDs in Parquet file do not match table schema. When field IDs are explicitly set in the "
+                "Parquet metadata, they must match the Iceberg table schema.\nMismatched fields:\n"
+                + "\n".join(f" - {field}" for field in mismatched_fields)
+            )
+
     _check_schema_compatible(requested_schema, provided_schema)
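
To make the new validation concrete, the following self-contained sketch reproduces the comparison it performs, with the id-to-column mappings written out as plain dictionaries; the column names and IDs mirror the mismatched-field-IDs test below and are otherwise arbitrary.

    # Stand-ins for the id -> column-path mappings that the patch derives from the
    # table schema and from the file's Parquet metadata (values are illustrative).
    table_id_to_path = {1: "foo", 2: "bar", 3: "baz", 4: "qux"}
    file_id_to_path = {1: "foo", 2: "bar", 5: "baz", 6: "qux"}  # baz and qux carry wrong IDs

    # Reverse mapping for the table: column path -> expected field ID.
    table_path_to_id = {path: field_id for field_id, path in table_id_to_path.items()}

    mismatched = []
    for field_id, path in file_id_to_path.items():
        expected = table_path_to_id.get(path)
        if expected is None:
            continue  # column unknown to the table; reported later by the regular schema check
        if expected != field_id:
            mismatched.append(f"'{path}': table expects field_id={expected}, file has field_id={field_id}")

    if mismatched:
        raise ValueError(
            "Field IDs in Parquet file do not match table schema.\nMismatched fields:\n"
            + "\n".join(f" - {m}" for m in mismatched)
        )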

tests/integration/test_add_files.py

Lines changed: 121 additions & 7 deletions
@@ -47,6 +47,7 @@
     LongType,
     NestedField,
     StringType,
+    StructType,
     TimestampType,
     TimestamptzType,
 )
@@ -216,23 +217,136 @@ def test_add_files_to_unpartitioned_table_raises_file_not_found(
 
 
 @pytest.mark.integration
-def test_add_files_to_unpartitioned_table_raises_has_field_ids(
+def test_add_files_to_unpartitioned_table_with_field_ids(
     spark: SparkSession, session_catalog: Catalog, format_version: int
 ) -> None:
-    identifier = f"default.unpartitioned_raises_field_ids_v{format_version}"
+    identifier = f"default.unpartitioned_with_field_ids_v{format_version}"
     tbl = _create_table(session_catalog, identifier, format_version)
 
-    file_paths = [f"s3://warehouse/default/unpartitioned_raises_field_ids/v{format_version}/test-{i}.parquet" for i in range(5)]
-    # write parquet files
+    file_paths = [
+        f"s3://warehouse/default/unpartitioned_with_field_ids/v{format_version}/test-{i}.parquet" for i in range(5)
+    ]
+    # write parquet files with field IDs matching the table schema
     for file_path in file_paths:
         fo = tbl.io.new_output(file_path)
         with fo.create(overwrite=True) as fos:
             with pq.ParquetWriter(fos, schema=ARROW_SCHEMA_WITH_IDS) as writer:
                 writer.write_table(ARROW_TABLE_WITH_IDS)
 
-    # add the parquet files as data files
-    with pytest.raises(NotImplementedError):
-        tbl.add_files(file_paths=file_paths)
+    # add the parquet files as data files - should succeed now that field IDs are supported
+    tbl.add_files(file_paths=file_paths)
+
+    # NameMapping should still be set even though files have field IDs
+    assert tbl.name_mapping() is not None
+
+    # Verify files were added successfully
+    rows = spark.sql(
+        f"""
+        SELECT added_data_files_count, existing_data_files_count, deleted_data_files_count
+        FROM {identifier}.all_manifests
+        """
+    ).collect()
+
+    assert [row.added_data_files_count for row in rows] == [5]
+    assert [row.existing_data_files_count for row in rows] == [0]
+    assert [row.deleted_data_files_count for row in rows] == [0]
+
+    # Verify data can be read back correctly
+    df = spark.table(identifier).toPandas()
+    assert len(df) == 5
+    assert all(df["foo"] == True)  # noqa: E712
+    assert all(df["bar"] == "bar_string")
+    assert all(df["baz"] == 123)
+    assert all(df["qux"] == date(2024, 3, 7))
+
+
+@pytest.mark.integration
+def test_add_files_with_mismatched_field_ids(
+    spark: SparkSession, session_catalog: Catalog, format_version: int
+) -> None:
+    identifier = f"default.unpartitioned_mismatched_field_ids_v{format_version}"
+    tbl = _create_table(session_catalog, identifier, format_version)
+
+    # Create schema with field IDs that don't match the table schema
+    # Table has: 1=foo, 2=bar, 3=baz, 4=qux (assigned by catalog)
+    # This file has: 1=foo, 2=bar, 5=baz, 6=qux (wrong IDs for baz and qux)
+    mismatched_schema = pa.schema(
+        [
+            pa.field("foo", pa.bool_(), nullable=False, metadata={"PARQUET:field_id": "1"}),
+            pa.field("bar", pa.string(), nullable=False, metadata={"PARQUET:field_id": "2"}),
+            pa.field("baz", pa.int32(), nullable=False, metadata={"PARQUET:field_id": "5"}),  # Wrong: should be 3
+            pa.field("qux", pa.date32(), nullable=False, metadata={"PARQUET:field_id": "6"}),  # Wrong: should be 4
+        ]
+    )
+
+    file_path = f"s3://warehouse/default/unpartitioned_mismatched_field_ids/v{format_version}/test.parquet"
+    fo = tbl.io.new_output(file_path)
+    with fo.create(overwrite=True) as fos:
+        with pq.ParquetWriter(fos, schema=mismatched_schema) as writer:
+            writer.write_table(ARROW_TABLE_WITH_IDS)
+
+    # Adding files with mismatched field IDs should fail
+    with pytest.raises(ValueError, match="Field IDs in Parquet file do not match table schema"):
+        tbl.add_files(file_paths=[file_path])
+
+
+@pytest.mark.integration
+def test_add_files_with_mismatched_nested_field_ids(
+    spark: SparkSession, session_catalog: Catalog, format_version: int
+) -> None:
+    """Test that files with mismatched nested (struct) field IDs are rejected."""
+    identifier = f"default.nested_mismatched_field_ids_v{format_version}"
+
+    # Create a table with a nested struct field
+    try:
+        session_catalog.drop_table(identifier=identifier)
+    except NoSuchTableError:
+        pass
+
+    nested_schema = Schema(
+        NestedField(1, "id", IntegerType(), required=False),
+        NestedField(2, "user", StructType(
+            NestedField(3, "name", StringType(), required=False),
+            NestedField(4, "age", IntegerType(), required=False),
+        ), required=False),
+        schema_id=0
+    )
+
+    tbl = session_catalog.create_table(
+        identifier=identifier,
+        schema=nested_schema,
+        properties={"format-version": str(format_version)},
+    )
+
+    # Create PyArrow schema with MISMATCHED nested field IDs
+    # The table expects: id=1, user=2, user.name=3, user.age=4
+    # This file has: id=1, user=2, user.name=99, user.age=100 (wrong nested IDs)
+    pa_schema_mismatched = pa.schema([
+        pa.field('id', pa.int32(), nullable=True, metadata={b'PARQUET:field_id': b'1'}),
+        pa.field('user', pa.struct([
+            pa.field('name', pa.string(), nullable=True, metadata={b'PARQUET:field_id': b'99'}),  # Wrong!
+            pa.field('age', pa.int32(), nullable=True, metadata={b'PARQUET:field_id': b'100'}),  # Wrong!
+        ]), nullable=True, metadata={b'PARQUET:field_id': b'2'}),
+    ])
+
+    pa_table = pa.table({
+        'id': pa.array([1, 2, 3], type=pa.int32()),
+        'user': pa.array([
+            {'name': 'Alice', 'age': 30},
+            {'name': 'Bob', 'age': 25},
+            {'name': 'Charlie', 'age': 35},
+        ], type=pa_schema_mismatched.field('user').type),
+    }, schema=pa_schema_mismatched)
+
+    file_path = f"s3://warehouse/default/nested_mismatched_field_ids/v{format_version}/test.parquet"
+    fo = tbl.io.new_output(file_path)
+    with fo.create(overwrite=True) as fos:
+        with pq.ParquetWriter(fos, schema=pa_schema_mismatched) as writer:
+            writer.write_table(pa_table)
+
+    # Adding files with mismatched nested field IDs should fail
+    with pytest.raises(ValueError, match="Field IDs in Parquet file do not match table schema"):
+        tbl.add_files(file_paths=[file_path])
 
 
 @pytest.mark.integration
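
When the new error is raised, it can help to check which field IDs a file actually carries before retrying add_files. PyArrow exposes Parquet field IDs through the PARQUET:field_id key in each field's metadata; the file path below is a placeholder.

    import pyarrow.parquet as pq

    # Print the top-level field IDs stored in a Parquet file's schema metadata.
    schema = pq.read_schema("/tmp/with_ids.parquet")  # placeholder path
    for field in schema:
        metadata = field.metadata or {}
        field_id = metadata.get(b"PARQUET:field_id")
        print(field.name, int(field_id) if field_id is not None else "no field ID")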
