From b12bbc306823c75e0d888f5a2ff314d13816e966 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Mon, 27 Apr 2026 11:08:33 +0200 Subject: [PATCH 1/3] Cherry-pick of https://github.com/Altinity/ClickHouse/pull/1631 with unresolved conflict markers (resolution in next commit) --- Original cherry-pick message follows: Merge pull request #1631 from Altinity/arthurpassos-patch-11 Fix condition for using parquet metadata cache # Conflicts: # src/Storages/ObjectStorage/StorageObjectStorageSource.cpp # tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py --- .../StorageObjectStorageSource.cpp | 4 ++++ ...test_read_constant_columns_optimization.py | 21 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index ab31ec4db751..03b55dec237f 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -884,7 +884,11 @@ StorageObjectStorageSource::ReaderHolder StorageObjectStorageSource::createReade InputFormatPtr input_format; if (context_->getSettingsRef()[Setting::use_parquet_metadata_cache] && use_native_reader_v3 +<<<<<<< HEAD && (object_info->getFileFormat().value_or(configuration->format) == "Parquet") +======= + && (Poco::toLower(object_info->getFileFormat().value_or(configuration->getFormat())) == "parquet") +>>>>>>> ba1e4f5f6dd (Merge pull request #1631 from Altinity/arthurpassos-patch-11) && !object_info->getObjectMetadata()->etag.empty()) { const std::optional object_with_metadata = object_info->relative_path_with_metadata; diff --git a/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py b/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py index d2f016a04b58..481b588d3714 100644 --- 
a/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py +++ b/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py @@ -169,6 +169,18 @@ def execute_spark_query(query: str): for replica in started_cluster_iceberg_with_spark.instances.values(): replica.query("SYSTEM FLUSH LOGS") + # Number of object-get requests per data file that are NOT served from caches + # after the warmup query above. The parquet metadata cache (enabled by default) + # caches the parquet footer keyed by the object's etag; the warmup query then + # populates it, so any subsequent read of the same file skips one object-get + # (the footer read). However, AzureObjectStorage::getObjectMetadata does NOT + # populate etag, so the cache guard `!etag.empty()` in + # StorageObjectStorageSource::createReader always fails for Azure, and the + # cache path is never taken there. As a result the multiplier is: + # S3: 2 (footer served from cache, data-only gets remain) + # Azure: 3 (cache never engaged, footer + data gets) + per_file_gets = 2 if storage_type == "s3" else 3 + def check_events(query_id, event, is_cluster, expected): res = instance.query( f""" @@ -185,9 +197,16 @@ def check_events(query_id, event, is_cluster, expected): """) # Weird, but looks like ReadFileMetadata does not used local file cache in 26.1 # metadata.json always downloaded in 26.1, once per query or subquery +<<<<<<< HEAD # In 25.8 count was equal to expected, in 26.1 it is expected * 2 + 1 for Local case # expected * 2 + 4 for Cluster case, because each subquery loads metadata.json assert int(res) == expected * 2 + (4 if is_cluster else 1) +======= + # In 25.8 count was equal to expected, in 26.1 it is expected * N + 1 for Local + # case and expected * N + 4 for Cluster case (each subquery loads metadata.json). + # N = per_file_gets (see comment above the function). 
+ assert int(res) == expected * per_file_gets + (4 if is_cluster else 1) +>>>>>>> ba1e4f5f6dd (Merge pull request #1631 from Altinity/arthurpassos-patch-11) event = "S3GetObject" if storage_type == "s3" else "AzureGetObject" @@ -212,4 +231,4 @@ def compare_selects(query): compare_selects(f"SELECT _path,* FROM {creation_expression} ORDER BY ALL") compare_selects(f"SELECT _path,* FROM {creation_expression} WHERE name_old='vasily' ORDER BY ALL") - compare_selects(f"SELECT _path,* FROM {creation_expression} WHERE ((tag + length(name_old)) % 2 = 1) ORDER BY ALL") \ No newline at end of file + compare_selects(f"SELECT _path,* FROM {creation_expression} WHERE ((tag + length(name_old)) % 2 = 1) ORDER BY ALL") From 304b2986adfa4a7d435ed7e91a618d3183ec2a84 Mon Sep 17 00:00:00 2001 From: Andrey Zvonov <32552679+zvonand@users.noreply.github.com> Date: Thu, 7 May 2026 13:51:18 +0200 Subject: [PATCH 2/3] Resolve conflicts in cherry-pick of #1631 --- src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 6 +----- .../test_read_constant_columns_optimization.py | 6 ------ 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 03b55dec237f..28a8b36d1a1d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -884,11 +884,7 @@ StorageObjectStorageSource::ReaderHolder StorageObjectStorageSource::createReade InputFormatPtr input_format; if (context_->getSettingsRef()[Setting::use_parquet_metadata_cache] && use_native_reader_v3 -<<<<<<< HEAD - && (object_info->getFileFormat().value_or(configuration->format) == "Parquet") -======= - && (Poco::toLower(object_info->getFileFormat().value_or(configuration->getFormat())) == "parquet") ->>>>>>> ba1e4f5f6dd (Merge pull request #1631 from Altinity/arthurpassos-patch-11) + && (Poco::toLower(object_info->getFileFormat().value_or(configuration->getFormat())) == "Parquet") && 
!object_info->getObjectMetadata()->etag.empty()) { const std::optional object_with_metadata = object_info->relative_path_with_metadata; diff --git a/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py b/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py index 481b588d3714..814352d566be 100644 --- a/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py +++ b/tests/integration/test_storage_iceberg_with_spark/test_read_constant_columns_optimization.py @@ -197,16 +197,10 @@ def check_events(query_id, event, is_cluster, expected): """) # Weird, but looks like ReadFileMetadata does not used local file cache in 26.1 # metadata.json always downloaded in 26.1, once per query or subquery -<<<<<<< HEAD - # In 25.8 count was equal to expected, in 26.1 it is expected * 2 + 1 for Local case - # expected * 2 + 4 for Cluster case, because each subquery loads metadata.json - assert int(res) == expected * 2 + (4 if is_cluster else 1) -======= # In 25.8 count was equal to expected, in 26.1 it is expected * N + 1 for Local # case and expected * N + 4 for Cluster case (each subquery loads metadata.json). # N = per_file_gets (see comment above the function). 
assert int(res) == expected * per_file_gets + (4 if is_cluster else 1) ->>>>>>> ba1e4f5f6dd (Merge pull request #1631 from Altinity/arthurpassos-patch-11) event = "S3GetObject" if storage_type == "s3" else "AzureGetObject" From 4263d1491f2adbec8f7b9632e00570c5e13c95c1 Mon Sep 17 00:00:00 2001 From: Arthur Passos Date: Wed, 13 May 2026 09:19:15 -0300 Subject: [PATCH 3/3] Fix conditional check for Parquet file format --- src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index a69efbcf03ae..aa8f364800c8 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -884,7 +884,7 @@ StorageObjectStorageSource::ReaderHolder StorageObjectStorageSource::createReade InputFormatPtr input_format; if (context_->getSettingsRef()[Setting::use_parquet_metadata_cache] && use_native_reader_v3 - && (Poco::toLower(object_info->getFileFormat().value_or(configuration->getFormat())) == "Parquet") + && (Poco::toLower(object_info->getFileFormat().value_or(configuration->getFormat())) == "parquet") && !object_info->getObjectMetadata()->etag.empty()) { const std::optional object_with_metadata = object_info->relative_path_with_metadata;