From 5fa021c2eb452478971d9ce894f0300b60270c01 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 10 Feb 2026 10:32:12 +0000 Subject: [PATCH 01/53] Backport #96479 to 26.1: Fix null pointer dereference in `DataTypeFunction::updateHashImpl` --- src/DataTypes/DataTypeFunction.cpp | 8 +++++++- .../03913_data_type_function_null_arg_hash.reference | 0 .../03913_data_type_function_null_arg_hash.sql | 3 +++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03913_data_type_function_null_arg_hash.reference create mode 100644 tests/queries/0_stateless/03913_data_type_function_null_arg_hash.sql diff --git a/src/DataTypes/DataTypeFunction.cpp b/src/DataTypes/DataTypeFunction.cpp index 51eb20f023b5..6c2386610cfd 100644 --- a/src/DataTypes/DataTypeFunction.cpp +++ b/src/DataTypes/DataTypeFunction.cpp @@ -36,10 +36,16 @@ bool DataTypeFunction::equals(const IDataType & rhs) const void DataTypeFunction::updateHashImpl(SipHash & hash) const { + /// Argument types and return type can be nullptr when the lambda is not yet resolved. 
hash.update(argument_types.size()); for (const auto & arg_type : argument_types) - arg_type->updateHash(hash); + { + hash.update(arg_type != nullptr); + if (arg_type) + arg_type->updateHash(hash); + } + hash.update(return_type != nullptr); if (return_type) return_type->updateHash(hash); } diff --git a/tests/queries/0_stateless/03913_data_type_function_null_arg_hash.reference b/tests/queries/0_stateless/03913_data_type_function_null_arg_hash.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/03913_data_type_function_null_arg_hash.sql b/tests/queries/0_stateless/03913_data_type_function_null_arg_hash.sql new file mode 100644 index 000000000000..956fce12ae8d --- /dev/null +++ b/tests/queries/0_stateless/03913_data_type_function_null_arg_hash.sql @@ -0,0 +1,3 @@ +-- Regression test: DataTypeFunction::updateHashImpl must handle null argument types +-- https://s3.amazonaws.com/clickhouse-test-reports/json.html?REF=master&sha=b9e68f4b9b0b33c7db43b00afb3eff4ff2050694&name_0=MasterCI&name_1=AST%20fuzzer%20%28amd_ubsan%29 +SELECT arrayFold((acc, x) -> plus(acc, toString(NULL, toLowCardinality(toUInt128(4)), materialize(4), 'aaaa', materialize(4), 4, 4, 1), x), range(number), ((acc, x) -> if(x % 2, arrayPushFront(acc, x), arrayPushBack(acc, x)))) FROM system.numbers LIMIT 0; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } From 88345b3bcbf20ac7af8b334915ab137d24e5c3f4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 16 Feb 2026 21:19:13 +0000 Subject: [PATCH 02/53] Backport #96995 to 26.1: Fix missing move of query_metadata_cache in BlockIO::operator= --- src/QueryPipeline/BlockIO.cpp | 22 ++-- src/QueryPipeline/BlockIO.h | 2 + src/Server/TCPHandler.cpp | 3 +- ...lockio_move_query_metadata_cache.reference | 1 + ...03917_blockio_move_query_metadata_cache.sh | 108 ++++++++++++++++++ 5 files changed, 126 insertions(+), 10 deletions(-) create mode 100644 
tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.reference create mode 100755 tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.sh diff --git a/src/QueryPipeline/BlockIO.cpp b/src/QueryPipeline/BlockIO.cpp index 7783ee27ba07..4a6c0ca5481f 100644 --- a/src/QueryPipeline/BlockIO.cpp +++ b/src/QueryPipeline/BlockIO.cpp @@ -4,6 +4,15 @@ namespace DB { +void BlockIO::resetPipeline(bool cancel) +{ + if (cancel) + pipeline.cancel(); + /// May use storage that is protected by pipeline, so should be destroyed first + query_metadata_cache.reset(); + pipeline.reset(); +} + void BlockIO::reset() { /** process_list_entries should be destroyed after in, after out and after pipeline, @@ -17,7 +26,7 @@ void BlockIO::reset() /// TODO simplify it all releaseQuerySlot(); - pipeline.reset(); + resetPipeline(/*cancel=*/false); process_list_entries.clear(); /// TODO Do we need also reset callbacks? In which order? @@ -32,6 +41,7 @@ BlockIO & BlockIO::operator= (BlockIO && rhs) noexcept reset(); process_list_entries = std::move(rhs.process_list_entries); + query_metadata_cache = std::move(rhs.query_metadata_cache); pipeline = std::move(rhs.pipeline); finalize_query_pipeline = std::move(rhs.finalize_query_pipeline); @@ -60,9 +70,7 @@ void BlockIO::onFinish(std::chrono::system_clock::time_point finish_time) } } else - { - pipeline.reset(); - } + resetPipeline(/*cancel=*/false); } void BlockIO::onException(bool log_as_error) @@ -73,15 +81,13 @@ void BlockIO::onException(bool log_as_error) for (const auto & callback : exception_callbacks) callback(log_as_error); - pipeline.cancel(); - pipeline.reset(); + resetPipeline(/*cancel=*/true); } void BlockIO::onCancelOrConnectionLoss() { releaseQuerySlot(); - pipeline.cancel(); - pipeline.reset(); + resetPipeline(/*cancel=*/true); } void BlockIO::setAllDataSent() const diff --git a/src/QueryPipeline/BlockIO.h b/src/QueryPipeline/BlockIO.h index 056ba14f5396..41cddd969ab0 100644 --- a/src/QueryPipeline/BlockIO.h +++ 
b/src/QueryPipeline/BlockIO.h @@ -88,6 +88,8 @@ struct BlockIO /// Release query slot early to allow client to reuse it for his next query. void releaseQuerySlot() const; + void resetPipeline(bool cancel); + private: void reset(); }; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 05fdfdd997b7..581b8f84c0a2 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1322,8 +1322,7 @@ void TCPHandler::processInsertQuery(QueryState & state, CurrentThread::QueryScop if (result.status == AsynchronousInsertQueue::PushResult::OK) { /// Reset pipeline because it may hold write lock for some storages. - state.io.pipeline.cancel(); - state.io.pipeline.reset(); + state.io.resetPipeline(/*cancel=*/true); if (settings[Setting::wait_for_async_insert]) { size_t timeout_ms = settings[Setting::wait_for_async_insert_timeout].totalMilliseconds(); diff --git a/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.reference b/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.sh b/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.sh new file mode 100755 index 000000000000..99652e516b70 --- /dev/null +++ b/tests/queries/0_stateless/03917_blockio_move_query_metadata_cache.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +# Tags: long + +# Regression test for https://github.com/ClickHouse/ClickHouse/issues/95742 +# +# BlockIO::operator= was not moving query_metadata_cache, causing premature +# destruction of cached StorageSnapshots. When combined with concurrent +# DROP TABLE or DETACH/ATTACH, the storage could be freed while parts still +# reference it, leading to SEGFAULT in clearCaches. 
+# +# The MSan trace shows the storage freed by DatabaseCatalog::dropTablesParallel +# on a background thread while TCPHandler's pipeline is still being destroyed +# via BlockIO::onException, so we exercise both DROP and DETACH paths. + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +TABLE="test_cache_race_${CLICKHOUSE_DATABASE}" + +function create_and_fill() +{ + $CLICKHOUSE_CLIENT --query " + CREATE TABLE IF NOT EXISTS ${TABLE} (key UInt64, value String) + ENGINE = MergeTree ORDER BY key + " + # Create multiple parts so snapshots have non-trivial data + for i in $(seq 1 10); do + $CLICKHOUSE_CLIENT --query "INSERT INTO ${TABLE} SELECT number, toString(number) FROM numbers($((i * 100)), 100)" 2>/dev/null + done +} + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ${TABLE}" +create_and_fill + +function mutation_thread() +{ + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do + # ALTER TABLE ... UPDATE goes through MutationsInterpreter::validate() + # which caches a StorageSnapshot in QueryMetadataCache, then destroys + # the validation pipeline. The cache entry becomes the only ref. + $CLICKHOUSE_CLIENT --query \ + "ALTER TABLE ${TABLE} UPDATE value = 'x' WHERE key > $RANDOM SETTINGS mutations_sync = 0" \ + 2>/dev/null + sleep 0.0$RANDOM + done +} + +function detach_attach_thread() +{ + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do + # DETACH removes the storage from the database, dropping its StoragePtr. + $CLICKHOUSE_CLIENT --query "DETACH TABLE ${TABLE}" 2>/dev/null + sleep 0.0$RANDOM + $CLICKHOUSE_CLIENT --query "ATTACH TABLE ${TABLE}" 2>/dev/null + sleep 0.0$RANDOM + done +} + +function drop_create_thread() +{ + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do + # DROP TABLE triggers DatabaseCatalog::dropTablesParallel on a background + # thread, which frees the storage. 
If a concurrent query's pipeline still + # holds parts referencing the storage via bare pointers, clearCaches will + # access freed memory. + $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ${TABLE}" 2>/dev/null + sleep 0.0$RANDOM + create_and_fill + sleep 0.0$RANDOM + done +} + +function select_thread() +{ + local TIMELIMIT=$((SECONDS+$1)) + while [ $SECONDS -lt "$TIMELIMIT" ]; do + # Subquery on the same table exercises snapshot cache sharing: + # both the outer and inner query hit getStorageSnapshot, and the + # second call returns the cached snapshot. + $CLICKHOUSE_CLIENT --query \ + "SELECT count() FROM ${TABLE} WHERE value IN (SELECT value FROM ${TABLE} WHERE key > $RANDOM)" \ + >/dev/null 2>&1 + sleep 0.0$RANDOM + done +} + +TIMEOUT=15 + +mutation_thread $TIMEOUT & +mutation_thread $TIMEOUT & +select_thread $TIMEOUT & +select_thread $TIMEOUT & +detach_attach_thread $TIMEOUT & +drop_create_thread $TIMEOUT & + +wait + +# Re-attach in case the table was left detached +$CLICKHOUSE_CLIENT --query "ATTACH TABLE ${TABLE}" 2>/dev/null + +# Verify the server is still alive (the original bug caused SEGFAULT) +$CLICKHOUSE_CLIENT --query "SELECT 1" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS ${TABLE}" From 01fa75352e94fed34dc7e7a2ff326d10cab0c1eb Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 18 Feb 2026 07:34:12 +0000 Subject: [PATCH 03/53] Backport #97239 to 26.1: Fix exception when arrayJoin in WHERE is pushed below JOIN --- .../partialJoinFilterPushDown.cpp | 9 +++ ...join_filter_push_down_cross_join.reference | 22 ++++++++ ...array_join_filter_push_down_cross_join.sql | 55 +++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.reference create mode 100644 tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.sql diff --git a/src/Processors/QueryPlan/Optimizations/partialJoinFilterPushDown.cpp 
b/src/Processors/QueryPlan/Optimizations/partialJoinFilterPushDown.cpp index ed361764ddd6..04ad32e79840 100644 --- a/src/Processors/QueryPlan/Optimizations/partialJoinFilterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/partialJoinFilterPushDown.cpp @@ -194,6 +194,15 @@ std::optional tryToExtractPartialPredicate( full_dag.addOrReplaceInOutputs(*predicate_node); full_dag.removeUnusedActions(); + /// removeUnusedActions unconditionally keeps ARRAY_JOIN nodes because they change + /// the number of rows. This can bring back INPUT nodes from the other side of the + /// JOIN that are not available in the target stream. Since extractPartialPredicate + /// already correctly rejects predicates depending on ARRAY_JOIN (via + /// onlyDependsOnAvailableColumns), any ARRAY_JOIN surviving here is an artifact + /// and pushing it below a JOIN would cause duplicate rows. + if (full_dag.hasArrayJoin()) + return {}; + return full_dag; } diff --git a/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.reference b/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.reference new file mode 100644 index 000000000000..8cc07445e2cf --- /dev/null +++ b/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.reference @@ -0,0 +1,22 @@ + +0 0 0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +3 +1 +1 +4 +1 +1 +1 +3 diff --git a/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.sql b/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.sql new file mode 100644 index 000000000000..a4e9b1e36795 --- /dev/null +++ b/tests/queries/0_stateless/03832_array_join_filter_push_down_cross_join.sql @@ -0,0 +1,55 @@ +-- The partial filter pushdown optimization (tryToExtractPartialPredicate) could +-- push a filter containing ARRAY_JOIN nodes below a JOIN. 
removeUnusedActions +-- unconditionally keeps ARRAY_JOIN nodes (they change row count), which pulls +-- in INPUT nodes from the other side of the JOIN that are not available in the +-- target stream, causing a LOGICAL_ERROR: +-- "In Filter cannot be more inputs in the DAG than columns in the input header" + +SET enable_analyzer = 1; + +-- Original fuzzer-found query +SELECT DISTINCT anyLastDistinct(r.number), l.number, grouping(l.number) +FROM numbers(1) AS l INNER JOIN numbers(2, assumeNotNull(isNull(13))) AS r +ON not(l.number < r.number) +WHERE and(equals(toNullable(2), arrayJoin([13, 13, 13, isZeroOrNull(materialize(13)), *, toLowCardinality(materialize(13)), 13, 13])), 13, 13, 13) +GROUP BY ALL WITH TOTALS; + +-- Simplified reproducer: cross join with arrayJoin([*, 1]) in WHERE +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE and(arrayJoin([*, 1]), 1); + +-- arrayJoin referencing both sides in WHERE +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE arrayJoin([l.number, r.number, 1]); + +-- arrayJoin with expression combining both sides +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE and(arrayJoin([l.number + r.number]), 1); + +-- arrayJoin with additional single-side filter +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE and(arrayJoin([*, 1]), l.number = 0); + +-- arrayJoin with product and constant in WHERE +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE and(arrayJoin([l.number * r.number, 1]), 1, 1); + +-- count with arrayJoin across both sides +SELECT count() FROM numbers(1) AS l, numbers(2) AS r WHERE arrayJoin([*, 1]); + +-- Three-way cross join with arrayJoin from all sides +SELECT 1 FROM numbers(1) AS l, numbers(1) AS r, numbers(1) AS t WHERE and(arrayJoin([l.number, r.number, t.number, 1]), 1); + +-- INNER JOIN with condition and arrayJoin in WHERE +SELECT 1 FROM numbers(1) AS l INNER JOIN numbers(2) AS r ON l.number = r.number WHERE arrayJoin([l.number, r.number]); + +-- LEFT JOIN with arrayJoin referencing left side +SELECT 
1 FROM numbers(1) AS l LEFT JOIN numbers(2) AS r ON l.number = r.number WHERE arrayJoin([l.number, 1]); + +-- Cross join with arrayJoin and single-side filters +SELECT 1 FROM numbers(3) AS l, numbers(3) AS r WHERE and(arrayJoin([l.number, r.number, 1]), l.number > 0) FORMAT Null; +SELECT 1 FROM numbers(3) AS l, numbers(3) AS r WHERE and(arrayJoin([l.number, r.number, 1]), r.number > 0) FORMAT Null; + +-- Subquery with arrayJoin from both sides +SELECT sum(x) FROM (SELECT arrayJoin([l.number, r.number]) AS x FROM numbers(2) AS l, numbers(2) AS r WHERE 1); + +-- Same tests with legacy join step +SET query_plan_use_logical_join_step = 0; + +SELECT 1 FROM numbers(1) AS l, numbers(2) AS r WHERE and(arrayJoin([*, 1]), 1); +SELECT count() FROM numbers(1) AS l, numbers(2) AS r WHERE arrayJoin([*, 1]); From dd8b3a643291f3bb29a726db61af25db1fd5de38 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 19 Feb 2026 13:43:34 +0000 Subject: [PATCH 04/53] Backport #97336 to 26.1: Fix use-after-free in StorageKeeperMap backup --- src/Storages/StorageKeeperMap.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 1510ef96b3e1..54a77c0df110 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -1162,10 +1162,11 @@ void StorageKeeperMap::backupData(BackupEntriesCollector & backup_entries_collec auto tmp_data = std::make_shared(backup_entries_collector.getContext()->getTempDataOnDisk(), tmp_data_settings); + auto self = std::static_pointer_cast(shared_from_this()); auto with_retries = std::make_shared ( getLogger(fmt::format("StorageKeeperMapBackup ({})", getStorageID().getNameForLogs())), - [&] { return getClient(); }, + [self] { return self->getClient(); }, BackupKeeperSettings(backup_entries_collector.getContext()), backup_entries_collector.getContext()->getProcessListElement() ); From 6e9bd59112ab01c92b33e4419815197ba489c7f5 Mon Sep 17 00:00:00 
2001 From: robot-clickhouse Date: Sun, 22 Feb 2026 13:32:23 +0000 Subject: [PATCH 05/53] Backport #97520 to 26.1: fix a possible use after free in StorageKafka2::activate() --- src/Storages/Kafka/StorageKafka2.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Storages/Kafka/StorageKafka2.cpp b/src/Storages/Kafka/StorageKafka2.cpp index 33cc7e35612a..edbd811d0fa0 100644 --- a/src/Storages/Kafka/StorageKafka2.cpp +++ b/src/Storages/Kafka/StorageKafka2.cpp @@ -210,6 +210,11 @@ void StorageKafka2::partialShutdown() task->holder->deactivate(); } is_active = false; + /// Reset the active node holder while the old ZooKeeper session is still alive (even if expired). + /// EphemeralNodeHolder stores a raw ZooKeeper reference, so resetting it here prevents a + /// use-after-free: setZooKeeper() called afterwards may free the old session, and the holder's + /// destructor would then access a dangling reference when checking zookeeper.expired(). + replica_is_active_node = nullptr; } bool StorageKafka2::activate() From 4abf78930b41c284be2d466fd6437e7779e2e7af Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 26 Feb 2026 04:47:56 +0000 Subject: [PATCH 06/53] Backport #97826 to 26.1: Fix crash with mapContainsKey/mapContainsKeyLike --- .../MergeTreeIndexBloomFilterText.cpp | 4 +- ...88_map_contains_key_like_tokenbf.reference | 18 +++++ .../03988_map_contains_key_like_tokenbf.sql | 65 +++++++++++++++++++ 3 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.reference create mode 100644 tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.sql diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp index 9a1dbe7843a7..393e23d873ea 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp @@ -499,7 +499,7 @@ bool 
MergeTreeConditionBloomFilterText::traverseTreeEquals( { if (function_name == "has" || function_name == "mapContainsKey" || function_name == "mapContains") { - out.key_column = *key_index; + out.key_column = *map_key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); auto & value = const_value.safeGet(); @@ -508,7 +508,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( } if (function_name == "mapContainsKeyLike") { - out.key_column = *key_index; + out.key_column = *map_key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); auto & value = const_value.safeGet(); diff --git a/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.reference b/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.reference new file mode 100644 index 000000000000..599d91218700 --- /dev/null +++ b/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.reference @@ -0,0 +1,18 @@ +1 +0 +1 +1 +1 +1 +1 +1 +0 +hostname +Verify skip index is used +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.sql b/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.sql new file mode 100644 index 000000000000..d14c3fc91371 --- /dev/null +++ b/tests/queries/0_stateless/03988_map_contains_key_like_tokenbf.sql @@ -0,0 +1,65 @@ +-- Test for issue https://github.com/ClickHouse/ClickHouse/issues/97792 + +SET parallel_replicas_local_plan = 1; + +DROP TABLE IF EXISTS t_map_tokenbf; + +CREATE TABLE t_map_tokenbf +( + metadata Map(String, String), + created_at DateTime64(3), + INDEX index_metadata_keys mapKeys(metadata) TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 1, + INDEX index_metadata_vals mapValues(metadata) TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY created_at; + +INSERT INTO t_map_tokenbf VALUES ({'hostname': 'myhost', 'env': 'prod'}, now()); + +SELECT count() FROM t_map_tokenbf WHERE 
mapContainsKeyLike(metadata, '%host%'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE mapContainsKeyLike(metadata, '%bad%'); -- 0 +SELECT count() FROM t_map_tokenbf WHERE mapContains(metadata, 'hostname'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE mapContainsKey(metadata, 'env'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE has(mapKeys(metadata), 'env'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE has(metadata, 'hostname'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE mapContainsValue(metadata, 'prod'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE mapContainsValueLike(metadata, '%host%'); -- 1 +SELECT count() FROM t_map_tokenbf WHERE mapContainsValueLike(metadata, '%random%'); -- 0 + +SELECT arrayJoin(mapKeys(mapExtractKeyLike(metadata, '%host%'))) as extracted_metadata +FROM t_map_tokenbf +WHERE mapContainsKeyLike(metadata, '%host%') +GROUP BY extracted_metadata; + +-- Verify that skip index was used - all should return 1 +SELECT 'Verify skip index is used'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE mapContainsKeyLike(metadata, '%host%') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE mapContains(metadata, 'hostname') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE mapContainsKey(metadata, 'env') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE has(mapKeys(metadata), 'env') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE has(metadata, 'hostname') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 SELECT count() FROM t_map_tokenbf WHERE mapContainsValue(metadata, 'prod') + ) WHERE explain LIKE '%index_metadata%'; + +SELECT COUNT(*) FROM ( + EXPLAIN indexes=1 
SELECT count() FROM t_map_tokenbf WHERE mapContainsValueLike(metadata, '%random%') + ) WHERE explain LIKE '%index_metadata%'; + +DROP TABLE t_map_tokenbf; From 8597b3e87cfd5bdc8ad1b9add13bb7de147ed740 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 26 Feb 2026 06:37:52 +0000 Subject: [PATCH 07/53] Backport #97546 to 26.1: Remove incorrect replaceRegexpOne to extract rewrite; fix exception with group_by_use_nulls --- .../Passes/RegexpFunctionRewritePass.cpp | 78 +------------------ ...regexp_rewrite_nullable_group_by.reference | 4 + ...03389_regexp_rewrite_nullable_group_by.sql | 4 + ...ptimize_rewrite_regexp_functions.reference | 71 ++--------------- ...3538_optimize_rewrite_regexp_functions.sql | 55 +------------ 5 files changed, 20 insertions(+), 192 deletions(-) create mode 100644 tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.reference create mode 100644 tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.sql diff --git a/src/Analyzer/Passes/RegexpFunctionRewritePass.cpp b/src/Analyzer/Passes/RegexpFunctionRewritePass.cpp index 860c3fb71d29..e898105c69f3 100644 --- a/src/Analyzer/Passes/RegexpFunctionRewritePass.cpp +++ b/src/Analyzer/Passes/RegexpFunctionRewritePass.cpp @@ -1,13 +1,12 @@ #include -#include -#include #include #include #include #include #include #include +#include #include #include #include @@ -35,28 +34,13 @@ class RegexpFunctionRewriteVisitor : public InDepthQueryTreeVisitorWithContextas(); - if (!function_node || !function_node->isOrdinaryFunction() || !isString(function_node->getResultType())) + if (!function_node || !function_node->isOrdinaryFunction() || !isString(removeNullable(function_node->getResultType()))) return; /// If a regular expression without alternatives starts with ^ or ends with an unescaped $, rewrite /// replaceRegexpAll with replaceRegexpOne. 
if (function_node->getFunctionName() == "replaceRegexpAll" || Poco::toLower(function_node->getFunctionName()) == "regexp_replace") - { - if (!handleReplaceRegexpAll(*function_node)) - return; - - /// After optimization, function_node might now be "replaceRegexpOne", so continue processing - } - - /// If a replaceRegexpOne function has a regexp that matches entire haystack, and a replacement of nothing other - /// than \1 and some subpatterns in the regexp, or \0 and no subpatterns in the regexp, rewrite it with extract. - if (function_node->getFunctionName() == "replaceRegexpOne") - { - if (!handleReplaceRegexpOne(*function_node)) - return; - - /// After optimization, function_node might now be "extract", so continue processing - } + handleReplaceRegexpAll(*function_node); /// If an extract function has a regexp with some subpatterns and the regexp starts with ^.* or ending with an /// unescaped .*$, remove this prefix and/or suffix. @@ -114,62 +98,6 @@ class RegexpFunctionRewriteVisitor : public InDepthQueryTreeVisitorWithContextas(); - if (!constant_node) - return false; - - if (auto constant_type = constant_node->getResultType(); !isString(constant_type)) - return false; - - String replacement = constant_node->getValue().safeGet(); - bool replacement_zero = replacement == "\\0"; - bool replacement_one = replacement == "\\1"; - if (!replacement_zero && !replacement_one) - return false; - - const auto * regexp_node = function_node_arguments_nodes[1]->as(); - if (!regexp_node) - return false; - - if (auto regexp_type = regexp_node->getResultType(); !isString(regexp_type)) - return false; - - String regexp = regexp_node->getValue().safeGet(); - - /// Currently only look for ^...$ patterns without alternatives. 
- bool starts_with_caret = regexp.front() == '^'; - if (!starts_with_caret) - return false; - - bool ends_with_unescaped_dollar = false; - if (!regexp.empty() && regexp.back() == '$') - ends_with_unescaped_dollar = isUnescaped(regexp, regexp.size() - 1); - - if (!ends_with_unescaped_dollar) - return false; - - /// Analyze the regular expression to detect presence of alternatives (e.g., 'a|b'). If any alternatives are - /// found, return false to indicate the regexp is not suitable for optimization. - RegexpAnalysisResult result = OptimizedRegularExpression::analyze(regexp); - if (!result.alternatives.empty()) - return false; - - if ((replacement_one && result.has_capture) || (replacement_zero && !result.has_capture)) - { - function_node_arguments_nodes.resize(2); - resolveOrdinaryFunctionNodeByName(function_node, "extract", getContext()); - return true; - } - - return false; - } - void handleExtract(FunctionNode & function_node) { auto & function_node_arguments_nodes = function_node.getArguments().getNodes(); diff --git a/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.reference b/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.reference new file mode 100644 index 000000000000..d099bc72639f --- /dev/null +++ b/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.reference @@ -0,0 +1,4 @@ +abc123 +abc123 +\N +\N diff --git a/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.sql b/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.sql new file mode 100644 index 000000000000..47b44703fbff --- /dev/null +++ b/tests/queries/0_stateless/03389_regexp_rewrite_nullable_group_by.sql @@ -0,0 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/88218 +-- RegexpFunctionRewritePass must handle Nullable result types from group_by_use_nulls +SET enable_analyzer = 1; +SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\\1') GROUP BY 1, toLowCardinality(9), 1 WITH CUBE SETTINGS 
group_by_use_nulls=1; diff --git a/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.reference b/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.reference index 54c9b5d8fda7..12102a657fa5 100644 --- a/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.reference +++ b/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.reference @@ -38,62 +38,9 @@ FROM system.one AS __table1 EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '^123|456$', ''); SELECT replaceRegexpAll(identity(\'abc123\'), \'^123|456$\', \'\') AS `replaceRegexpAll(identity(\'abc123\'), \'^123|456$\', \'\')` FROM system.one AS __table1 --- Rule 2: If a replaceRegexpOne function has a replacement of nothing other than \1 and some subpatterns in the regexp, or \0 and no subpatterns in the regexp, rewrite it with extract. +-- Rule 2 (replaceRegexpOne -> extract) was removed because extract returns empty string on non-match, +-- while replaceRegexpOne returns the original string, making them semantically different. --- NOTE: \0 is specially treated as NUL instead of capture group reference. Need to use \\0 instead. 
- --- Only \0, no capture group (should rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', '\\0'); -SELECT extract(identity(\'abc123\'), \'^abc123$\') AS `replaceRegexpOne(identity(\'abc123\'), \'^abc123$\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- Only \1, with one capture group (should rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\1'); -SELECT extract(identity(\'abc123\'), \'^(abc)$\') AS `replaceRegexpOne(identity(\'abc123\'), \'^(abc)$\', \'\\\\\\\\1\')` -FROM system.one AS __table1 --- Only \1, no capture group (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc$', '\1'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^abc$\', \'\\\\1\') AS `replaceRegexpOne(identity(\'abc123\'), \'^abc$\', \'\\\\\\\\1\')` -FROM system.one AS __table1 --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc', '\\0'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^abc\', \'\\\\0\') AS `replaceRegexpOne(identity(\'abc123\'), \'^abc\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc$', '\\0'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'abc$\', \'\\\\0\') AS `replaceRegexpOne(identity(\'abc123\'), \'abc$\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc', '\\0'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'abc\', \'\\\\0\') AS `replaceRegexpOne(identity(\'abc123\'), \'abc\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT 
replaceRegexpOne(identity('abc123'), '^abc\\$', '\\0'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^abc\\\\$\', \'\\\\0\') AS `replaceRegexpOne(identity(\'abc123\'), \'^abc\\\\\\\\$\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^ab|c$', '\\0'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^ab|c$\', \'\\\\0\') AS `replaceRegexpOne(identity(\'abc123\'), \'^ab|c$\', \'\\\\\\\\0\')` -FROM system.one AS __table1 --- \0 with extra characters (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', 'pre\\0post'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^abc123$\', \'pre\\\\0post\') AS `replaceRegexpOne(identity(\'abc123\'), \'^abc123$\', \'pre\\\\\\\\0post\')` -FROM system.one AS __table1 --- \1 with two capture groups (should rewrite — only \1 used) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\1'); -SELECT extract(identity(\'abc123\'), \'^(a)(b)$\') AS `replaceRegexpOne(identity(\'abc123\'), \'^(a)(b)$\', \'\\\\\\\\1\')` -FROM system.one AS __table1 --- \2 used (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\2'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^(a)(b)$\', \'\\\\2\') AS `replaceRegexpOne(identity(\'abc123\'), \'^(a)(b)$\', \'\\\\\\\\2\')` -FROM system.one AS __table1 --- Mixed content in replacement (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', 'X\1Y'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^(abc)$\', \'X\\\\1Y\') AS `replaceRegexpOne(identity(\'abc123\'), \'^(abc)$\', \'X\\\\\\\\1Y\')` -FROM system.one AS __table1 --- Escaped backslash in replacement (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, 
dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\\\\1'); -SELECT replaceRegexpOne(identity(\'abc123\'), \'^(abc)$\', \'\\\\\\\\1\') AS `replaceRegexpOne(identity(\'abc123\'), \'^(abc)$\', \'\\\\\\\\\\\\\\\\1\')` -FROM system.one AS __table1 -- Rule 3: If an extract function has a regexp with some subpatterns and the regexp starts with ^.* or ending with an unescaped .*$, remove this prefix and/or suffix. -- Starts with ^.* (should strip prefix) @@ -134,19 +81,11 @@ SELECT extract(identity(\'abc123\'), \'(abc).*\') AS `extract(identity(\'abc123\ FROM system.one AS __table1 -- Cascade tests --- Rule 1 + Rule 2: replaceRegexpAll to replaceRegexpOne to extract +-- Rule 1 only: replaceRegexpAll to replaceRegexpOne (Rule 2 removed) EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^(abc)', '\1'); SELECT replaceRegexpOne(identity(\'abc\'), \'^(abc)\', \'\\\\1\') AS `replaceRegexpAll(identity(\'abc\'), \'^(abc)\', \'\\\\\\\\1\')` FROM system.one AS __table1 --- Rule 2 + 3: replaceRegexpOne -> extract -> simplified extract -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc'), '^.*(abc).*$','\1'); -SELECT extract(identity(\'abc\'), \'(abc)\') AS `replaceRegexpOne(identity(\'abc\'), \'^.*(abc).*$\', \'\\\\\\\\1\')` -FROM system.one AS __table1 --- Rule 1 + 2 + 3: replaceRegexpAll -> replaceRegexpOne -> extract -> simplified extract -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^.*(abc).*$','\1'); -SELECT extract(identity(\'abc\'), \'(abc)\') AS `replaceRegexpAll(identity(\'abc\'), \'^.*(abc).*$\', \'\\\\\\\\1\')` -FROM system.one AS __table1 --- ClickBench Q28 +-- ClickBench Q28: Rule 1 only: regexp_replace to replaceRegexpOne EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT REGEXP_REPLACE(identity('some referer'), '^https?://(?:www\.)?([^/]+)/.*$', '\1'); -SELECT extract(identity(\'some referer\'), 
\'^https?://(?:www\\\\.)?([^/]+)/\') AS `REGEXP_REPLACE(identity(\'some referer\'), \'^https?://(?:www\\\\\\\\.)?([^/]+)/.*$\', \'\\\\\\\\1\')` +SELECT replaceRegexpOne(identity(\'some referer\'), \'^https?://(?:www\\\\.)?([^/]+)/.*$\', \'\\\\1\') AS `REGEXP_REPLACE(identity(\'some referer\'), \'^https?://(?:www\\\\\\\\.)?([^/]+)/.*$\', \'\\\\\\\\1\')` FROM system.one AS __table1 diff --git a/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.sql b/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.sql index e5f37eb54c9e..3e0e3194442d 100644 --- a/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.sql +++ b/tests/queries/0_stateless/03538_optimize_rewrite_regexp_functions.sql @@ -28,49 +28,8 @@ EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity( -- Pattern with alternatives (should NOT rewrite) EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '^123|456$', ''); --- Rule 2: If a replaceRegexpOne function has a replacement of nothing other than \1 and some subpatterns in the regexp, or \0 and no subpatterns in the regexp, rewrite it with extract. - --- NOTE: \0 is specially treated as NUL instead of capture group reference. Need to use \\0 instead. 
- --- Only \0, no capture group (should rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', '\\0'); - --- Only \1, with one capture group (should rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\1'); - --- Only \1, no capture group (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc$', '\1'); - --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc', '\\0'); - --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc$', '\\0'); - --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc', '\\0'); - --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc\\$', '\\0'); - --- Pattern not full (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^ab|c$', '\\0'); - --- \0 with extra characters (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', 'pre\\0post'); - --- \1 with two capture groups (should rewrite — only \1 used) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\1'); - --- \2 used (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\2'); - --- Mixed content in replacement (should NOT rewrite) -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', 'X\1Y'); - --- Escaped backslash in replacement (should NOT rewrite) -EXPLAIN QUERY TREE 
dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\\\\1'); - +-- Rule 2 (replaceRegexpOne -> extract) was removed because extract returns empty string on non-match, +-- while replaceRegexpOne returns the original string, making them semantically different. -- Rule 3: If an extract function has a regexp with some subpatterns and the regexp starts with ^.* or ending with an unescaped .*$, remove this prefix and/or suffix. @@ -104,14 +63,8 @@ EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123') -- Cascade tests --- Rule 1 + Rule 2: replaceRegexpAll to replaceRegexpOne to extract +-- Rule 1 only: replaceRegexpAll to replaceRegexpOne (Rule 2 removed) EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^(abc)', '\1'); --- Rule 2 + 3: replaceRegexpOne -> extract -> simplified extract -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc'), '^.*(abc).*$','\1'); - --- Rule 1 + 2 + 3: replaceRegexpAll -> replaceRegexpOne -> extract -> simplified extract -EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^.*(abc).*$','\1'); - --- ClickBench Q28 +-- ClickBench Q28: Rule 1 only: regexp_replace to replaceRegexpOne EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT REGEXP_REPLACE(identity('some referer'), '^https?://(?:www\.)?([^/]+)/.*$', '\1'); From e0a7f62c93eea5d78aa3ad6021567832431adda7 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 1 Mar 2026 03:16:16 +0000 Subject: [PATCH 08/53] Backport #98276 to 26.1: Fix off-by-one in ToDateMonotonicity boundary check --- src/Functions/FunctionsConversion.h | 6 +++--- ...3835_todate_monotonicity_boundary.reference | 1 + .../03835_todate_monotonicity_boundary.sql | 18 ++++++++++++++++++ 3 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03835_todate_monotonicity_boundary.reference create mode 100644 
tests/queries/0_stateless/03835_todate_monotonicity_boundary.sql diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 0f8063bbaaa5..e63eec98f584 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -3676,13 +3676,13 @@ struct ToDateMonotonicity } else if ( ((left.getType() == Field::Types::UInt64 || left.isNull()) && (right.getType() == Field::Types::UInt64 || right.isNull()) - && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() <= DATE_LUT_MAX_DAY_NUM) && (right.isNull() || right.safeGet() > DATE_LUT_MAX_DAY_NUM))) || ((left.getType() == Field::Types::Int64 || left.isNull()) && (right.getType() == Field::Types::Int64 || right.isNull()) - && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() <= DATE_LUT_MAX_DAY_NUM) && (right.isNull() || right.safeGet() > DATE_LUT_MAX_DAY_NUM))) || (( (left.getType() == Field::Types::Float64 || left.isNull()) && (right.getType() == Field::Types::Float64 || right.isNull()) - && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF)))) + && ((left.isNull() || left.safeGet() <= DATE_LUT_MAX_DAY_NUM) && (right.isNull() || right.safeGet() > DATE_LUT_MAX_DAY_NUM)))) || !isNativeNumber(type)) { return {}; diff --git a/tests/queries/0_stateless/03835_todate_monotonicity_boundary.reference b/tests/queries/0_stateless/03835_todate_monotonicity_boundary.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/03835_todate_monotonicity_boundary.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03835_todate_monotonicity_boundary.sql b/tests/queries/0_stateless/03835_todate_monotonicity_boundary.sql new file mode 100644 index 000000000000..4460c5da7b68 --- /dev/null +++ 
b/tests/queries/0_stateless/03835_todate_monotonicity_boundary.sql @@ -0,0 +1,18 @@ +-- Regression test for off-by-one in ToDateMonotonicity boundary check. +-- The toDate function treats values <= DATE_LUT_MAX_DAY_NUM (65535) as day numbers +-- and values > 65535 as unix timestamps. The monotonicity check must correctly +-- identify ranges crossing this boundary as non-monotonic. +-- Previously caused LOGICAL_ERROR "Invalid binary search result in MergeTreeSetIndex" in debug builds. +-- https://github.com/ClickHouse/ClickHouse/issues/90461 + +DROP TABLE IF EXISTS t_todate_mono; + +CREATE TABLE t_todate_mono (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity = 1; +INSERT INTO t_todate_mono SELECT number FROM numbers(100000); + +-- With index_granularity=1, mark 65535 covers the range [65535, 65536], +-- which crosses the DATE_LUT_MAX_DAY_NUM boundary. +-- The toDate conversion in the key condition chain must report this range as non-monotonic. +SELECT count() > 0 FROM t_todate_mono WHERE toDate(x) IN (toDate(12345), toDate(67890)); + +DROP TABLE t_todate_mono; From 947ff2b9045a155500f6f1d8643aaa71ace96dd4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sun, 1 Mar 2026 04:51:54 +0000 Subject: [PATCH 09/53] Backport #97682 to 26.1: Fix row policy bypass via `loop` table function --- src/Processors/QueryPlan/ReadFromLoopStep.cpp | 334 +++++++++++------- .../03928_loop_row_policy.reference | 7 + .../0_stateless/03928_loop_row_policy.sh | 34 ++ 3 files changed, 247 insertions(+), 128 deletions(-) create mode 100644 tests/queries/0_stateless/03928_loop_row_policy.reference create mode 100755 tests/queries/0_stateless/03928_loop_row_policy.sh diff --git a/src/Processors/QueryPlan/ReadFromLoopStep.cpp b/src/Processors/QueryPlan/ReadFromLoopStep.cpp index e90c41de5700..4750226add83 100644 --- a/src/Processors/QueryPlan/ReadFromLoopStep.cpp +++ b/src/Processors/QueryPlan/ReadFromLoopStep.cpp @@ -1,5 +1,13 @@ #include +#include #include +#include 
+#include +#include +#include +#include +#include +#include #include #include #include @@ -11,168 +19,238 @@ #include #include #include +#include namespace DB { - namespace ErrorCodes - { - extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS; - } - class PullingPipelineExecutor; - class LoopSource : public ISource +namespace Setting +{ + extern const SettingsBool allow_experimental_analyzer; +} + +namespace ErrorCodes +{ + extern const int TOO_MANY_RETRIES_TO_FETCH_PARTS; +} + +class PullingPipelineExecutor; + +namespace +{ + void buildInterpreterQueryPlan( + QueryPlan & plan, + const String & database, + const String & table, + const Names & column_names, + const SelectQueryInfo & query_info, + ContextPtr context) { - public: - - LoopSource( - const Names & column_names_, - const SelectQueryInfo & query_info_, - const StorageSnapshotPtr & storage_snapshot_, - ContextPtr & context_, - QueryProcessingStage::Enum processed_stage_, - StoragePtr inner_storage_, - size_t max_block_size_, - size_t num_streams_) - : ISource(std::make_shared(storage_snapshot_->getSampleBlockForColumns(column_names_))) - , column_names(column_names_) - , query_info(query_info_) - , storage_snapshot(storage_snapshot_) - , processed_stage(processed_stage_) - , context(context_) - , inner_storage(std::move(inner_storage_)) - , max_block_size(max_block_size_) - , num_streams(num_streams_) - { - } + auto select_query = make_intrusive(); - String getName() const override { return "Loop"; } + auto select_expr_list = make_intrusive(); + for (const auto & col_name : column_names) + select_expr_list->children.push_back(make_intrusive(col_name)); + select_query->setExpression(ASTSelectQuery::Expression::SELECT, std::move(select_expr_list)); - Chunk generate() override - { - while (true) - { - if (!loop) - { - QueryPlan plan; - auto storage_snapshot_ = inner_storage->getStorageSnapshot(inner_storage->getInMemoryMetadataPtr(), context); - inner_storage->read( - plan, - column_names, - storage_snapshot_, - 
query_info, - context, - processed_stage, - max_block_size, - num_streams); - if (plan.isInitialized()) - { - auto builder = plan.buildQueryPipeline(QueryPlanOptimizationSettings(context), BuildQueryPipelineSettings(context)); - QueryPlanResourceHolder resources; - auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); - query_pipeline = QueryPipeline(std::move(pipe)); - executor = std::make_unique(query_pipeline); - } - loop = true; - } - Chunk chunk; - - if (query_info.trivial_limit > 0 && rows_read >= query_info.trivial_limit) - return chunk; + select_query->replaceDatabaseAndTable(database, table); - if (executor && executor->pull(chunk)) - { - rows_read += chunk.getNumRows(); - retries_count = 0; - if (query_info.trivial_limit == 0 || rows_read <= query_info.trivial_limit) - return chunk; - - size_t remaining_rows = query_info.trivial_limit + chunk.getNumRows() - rows_read; - auto columns = chunk.detachColumns(); - for (auto & col : columns) - { - col = col->cut(0, remaining_rows); - } - return {std::move(columns), remaining_rows}; - } - else - { - ++retries_count; - if (retries_count > max_retries_count) - throw Exception(ErrorCodes::TOO_MANY_RETRIES_TO_FETCH_PARTS, "Too many retries to pull from storage"); - loop = false; - executor.reset(); - query_pipeline.reset(); - } - } - } + auto select_ast = make_intrusive(); + select_ast->list_of_selects = make_intrusive(); + select_ast->list_of_selects->children.push_back(select_query); + select_ast->children.push_back(select_ast->list_of_selects); - private: - - const Names column_names; - SelectQueryInfo query_info; - const StorageSnapshotPtr storage_snapshot; - QueryProcessingStage::Enum processed_stage; - ContextPtr context; - StoragePtr inner_storage; - size_t max_block_size; - size_t num_streams; - // add retries. 
If inner_storage failed to pull X times in a row we'd better to fail here not to hang - size_t retries_count = 0; - size_t max_retries_count = 3; - size_t rows_read = 0; - bool loop = false; - QueryPipeline query_pipeline; - std::unique_ptr executor; - }; - - static ContextPtr disableParallelReplicas(ContextPtr context) - { - auto modified_context = Context::createCopy(context); - modified_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); - return modified_context; + auto options = SelectQueryOptions(QueryProcessingStage::Complete, 0, false); + + if (context->getSettingsRef()[Setting::allow_experimental_analyzer]) + { + InterpreterSelectQueryAnalyzer interpreter(select_ast, context, options, column_names); + if (query_info.storage_limits) + interpreter.addStorageLimits(*query_info.storage_limits); + plan = std::move(interpreter).extractQueryPlan(); + } + else + { + InterpreterSelectWithUnionQuery interpreter(select_ast, context, options, column_names); + if (query_info.storage_limits) + interpreter.addStorageLimits(*query_info.storage_limits); + interpreter.buildQueryPlan(plan); + } } +} - ReadFromLoopStep::ReadFromLoopStep( +class LoopSource : public ISource +{ +public: + + LoopSource( const Names & column_names_, const SelectQueryInfo & query_info_, const StorageSnapshotPtr & storage_snapshot_, - const ContextPtr & context_, + ContextPtr & context_, QueryProcessingStage::Enum processed_stage_, StoragePtr inner_storage_, size_t max_block_size_, size_t num_streams_) - : SourceStepWithFilter( - std::make_shared(storage_snapshot_->getSampleBlockForColumns(column_names_)), - column_names_, - query_info_, - storage_snapshot_, - disableParallelReplicas(context_)) + : ISource(std::make_shared(storage_snapshot_->getSampleBlockForColumns(column_names_))) , column_names(column_names_) + , query_info(query_info_) + , storage_snapshot(storage_snapshot_) , processed_stage(processed_stage_) + , context(context_) , 
inner_storage(std::move(inner_storage_)) , max_block_size(max_block_size_) , num_streams(num_streams_) { } - Pipe ReadFromLoopStep::makePipe() + String getName() const override { return "Loop"; } + + void initLoop() { - return Pipe(std::make_shared( - column_names, query_info, storage_snapshot, context, processed_stage, inner_storage, max_block_size, num_streams)); + if (loop) + return; + + QueryPlan plan; + + if (DatabaseCatalog::instance().isTableExist(inner_storage->getStorageID(), context)) + { + inner_context = Context::createCopy(context); + const auto & storage_id = inner_storage->getStorageID(); + buildInterpreterQueryPlan( + plan, storage_id.database_name, storage_id.table_name, + column_names, query_info, inner_context); + } + else + { + auto storage_snapshot_ = inner_storage->getStorageSnapshot(inner_storage->getInMemoryMetadataPtr(), context); + inner_storage->read( + plan, + column_names, + storage_snapshot_, + query_info, + context, + processed_stage, + max_block_size, + num_streams); + } + + if (plan.isInitialized()) + { + auto builder = plan.buildQueryPipeline(QueryPlanOptimizationSettings(context), BuildQueryPipelineSettings(context)); + QueryPlanResourceHolder resources; + auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); + query_pipeline = QueryPipeline(std::move(pipe)); + query_pipeline.addResources(std::move(resources)); + executor = std::make_unique(query_pipeline); + } + loop = true; } - void ReadFromLoopStep::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) + Chunk generate() override { - auto pipe = makePipe(); - - if (pipe.empty()) + while (true) { - chassert(output_header != nullptr); - pipe = Pipe(std::make_shared(output_header)); + if (!loop) + initLoop(); + + Chunk chunk; + + if (query_info.trivial_limit > 0 && rows_read >= query_info.trivial_limit) + return chunk; + + if (executor && executor->pull(chunk)) + { + rows_read += chunk.getNumRows(); + retries_count = 0; + if 
(query_info.trivial_limit == 0 || rows_read <= query_info.trivial_limit) + return chunk; + + size_t remaining_rows = query_info.trivial_limit + chunk.getNumRows() - rows_read; + auto columns = chunk.detachColumns(); + for (auto & col : columns) + col = col->cut(0, remaining_rows); + + return {std::move(columns), remaining_rows}; + } + + ++retries_count; + if (retries_count > max_retries_count) + throw Exception(ErrorCodes::TOO_MANY_RETRIES_TO_FETCH_PARTS, "Too many retries to pull from storage"); + + loop = false; + executor.reset(); + query_pipeline.reset(); } + } - pipeline.init(std::move(pipe)); +private: + const Names column_names; + SelectQueryInfo query_info; + const StorageSnapshotPtr storage_snapshot; + QueryProcessingStage::Enum processed_stage; + ContextPtr context; + StoragePtr inner_storage; + size_t max_block_size; + size_t num_streams; + ContextPtr inner_context; + // add retries. If inner_storage failed to pull X times in a row we'd better to fail here not to hang + size_t retries_count = 0; + size_t max_retries_count = 3; + size_t rows_read = 0; + bool loop = false; + QueryPipeline query_pipeline; + std::unique_ptr executor; +}; + +static ContextPtr disableParallelReplicas(ContextPtr context) +{ + auto modified_context = Context::createCopy(context); + modified_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + return modified_context; +} + +ReadFromLoopStep::ReadFromLoopStep( + const Names & column_names_, + const SelectQueryInfo & query_info_, + const StorageSnapshotPtr & storage_snapshot_, + const ContextPtr & context_, + QueryProcessingStage::Enum processed_stage_, + StoragePtr inner_storage_, + size_t max_block_size_, + size_t num_streams_) + : SourceStepWithFilter( + std::make_shared(storage_snapshot_->getSampleBlockForColumns(column_names_)), + column_names_, + query_info_, + storage_snapshot_, + disableParallelReplicas(context_)) + , column_names(column_names_) + , processed_stage(processed_stage_) + , 
inner_storage(std::move(inner_storage_)) + , max_block_size(max_block_size_) + , num_streams(num_streams_) +{ +} + +Pipe ReadFromLoopStep::makePipe() +{ + return Pipe(std::make_shared( + column_names, query_info, storage_snapshot, context, processed_stage, inner_storage, max_block_size, num_streams)); +} + +void ReadFromLoopStep::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) +{ + auto pipe = makePipe(); + + if (pipe.empty()) + { + chassert(output_header != nullptr); + pipe = Pipe(std::make_shared(output_header)); } + pipeline.init(std::move(pipe)); +} + } diff --git a/tests/queries/0_stateless/03928_loop_row_policy.reference b/tests/queries/0_stateless/03928_loop_row_policy.reference new file mode 100644 index 000000000000..912cf48bc76c --- /dev/null +++ b/tests/queries/0_stateless/03928_loop_row_policy.reference @@ -0,0 +1,7 @@ +--- Direct SELECT (row policy should filter to one row) --- +1 flag1 +--- loop() must also respect row policy --- +1 flag1 +1 flag1 +1 flag1 +1 flag1 diff --git a/tests/queries/0_stateless/03928_loop_row_policy.sh b/tests/queries/0_stateless/03928_loop_row_policy.sh new file mode 100755 index 000000000000..862f7a4605c7 --- /dev/null +++ b/tests/queries/0_stateless/03928_loop_row_policy.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# Tags: no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +user="user03928_${CLICKHOUSE_DATABASE}_$RANDOM" +db=${CLICKHOUSE_DATABASE} + +${CLICKHOUSE_CLIENT} < Date: Sun, 1 Mar 2026 13:05:15 +0000 Subject: [PATCH 10/53] Update autogenerated version to 26.1.4.35 and contributors --- cmake/autogenerated_versions.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index ca6b2787b41c..35b7873e140e 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. -SET(VERSION_REVISION 54509) +SET(VERSION_REVISION 54510) SET(VERSION_MAJOR 26) SET(VERSION_MINOR 1) -SET(VERSION_PATCH 4) -SET(VERSION_GITHASH 5549f2acae95c6d627654f50e212a85d059a55f9) -SET(VERSION_DESCRIBE v26.1.4.1-stable) -SET(VERSION_STRING 26.1.4.1) +SET(VERSION_PATCH 5) +SET(VERSION_GITHASH 94d63f06ae951bac9413c31cb91ebd5ea02b5066) +SET(VERSION_DESCRIBE v26.1.5.1-stable) +SET(VERSION_STRING 26.1.5.1) # end of autochange From a0675b246f444e528c393526d2696466cdcd7f5c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 2 Mar 2026 22:18:55 +0000 Subject: [PATCH 11/53] Backport #97748 to 26.1: Fix `ActiveDataPartSet::hasPartitionId` --- src/Storages/MergeTree/ActiveDataPartSet.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ActiveDataPartSet.cpp b/src/Storages/MergeTree/ActiveDataPartSet.cpp index 702fd8ccbe90..794e9bf2d554 100644 --- a/src/Storages/MergeTree/ActiveDataPartSet.cpp +++ b/src/Storages/MergeTree/ActiveDataPartSet.cpp @@ -321,7 +321,13 @@ bool ActiveDataPartSet::hasPartitionId(const String & partition_id) const { MergeTreePartInfo info; info.setPartitionId(partition_id); - return part_info_to_name.lower_bound(info) != part_info_to_name.end(); + + if (auto it = 
part_info_to_name.lower_bound(info); it == part_info_to_name.end()) + return false; + else if (it->first.getPartitionId() != partition_id) + return false; + else + return true; } } From 6adf8adac0e278e3a06fe2553ac19e19f6e69873 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 3 Mar 2026 11:22:54 +0000 Subject: [PATCH 12/53] Backport #98306 to 26.1: Use `postgres` REL_18_3 --- contrib/postgres | 2 +- contrib/postgres-cmake/pg_config.h | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/contrib/postgres b/contrib/postgres index 5ad0c31d0c3a..c37596dd61c5 160000 --- a/contrib/postgres +++ b/contrib/postgres @@ -1 +1 @@ -Subproject commit 5ad0c31d0c3a76ed64655f4d397934b5ecc9696f +Subproject commit c37596dd61c5f2b8b7521fdbcdabc651bd9412c4 diff --git a/contrib/postgres-cmake/pg_config.h b/contrib/postgres-cmake/pg_config.h index 169b0af039ea..12767588b94d 100644 --- a/contrib/postgres-cmake/pg_config.h +++ b/contrib/postgres-cmake/pg_config.h @@ -593,7 +593,7 @@ #define PACKAGE_NAME "PostgreSQL" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "PostgreSQL 18.0" +#define PACKAGE_STRING "PostgreSQL 18.3" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "postgresql" @@ -602,7 +602,7 @@ #define PACKAGE_URL "https://www.postgresql.org/" /* Define to the version of this package. */ -#define PACKAGE_VERSION "18.0" +#define PACKAGE_VERSION "18.3" /* Define to the name of a signed 128-bit integer type. */ #define PG_INT128_TYPE __int128 @@ -618,19 +618,19 @@ #define PG_MAJORVERSION_NUM 18 /* PostgreSQL minor version number */ -#define PG_MINORVERSION_NUM 0 +#define PG_MINORVERSION_NUM 3 /* Define to best printf format archetype, usually gnu_printf if available. 
*/ #define PG_PRINTF_ATTRIBUTE gnu_printf /* PostgreSQL version as a string */ -#define PG_VERSION "18.0" +#define PG_VERSION "18.3" /* PostgreSQL version as a number */ -#define PG_VERSION_NUM 180000 +#define PG_VERSION_NUM 180003 /* A string containing the version number, platform, and C compiler */ -#define PG_VERSION_STR "PostgreSQL 18.0 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 15.2.1 20250813, 64-bit" +#define PG_VERSION_STR "PostgreSQL 18.3 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 15.2.1 20250813, 64-bit" /* Define to 1 to allow profiling output to be saved separately for each process. */ From 2a35cac81782d63bafdf7b699da911714c005d24 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 3 Mar 2026 15:29:08 +0000 Subject: [PATCH 13/53] Backport #98555 to 26.1: Fix usage of text index with other skip indexes --- src/Storages/MergeTree/MergeTreeReadTask.cpp | 11 +++- .../MergeTree/MergeTreeReaderIndex.cpp | 4 +- src/Storages/MergeTree/MergeTreeReaderIndex.h | 10 ++-- .../MergeTree/MergeTreeReaderTextIndex.cpp | 3 +- ..._text_index_incompelete_granules.reference | 1 + .../04010_text_index_incompelete_granules.sql | 53 +++++++++++++++++++ 6 files changed, 73 insertions(+), 9 deletions(-) create mode 100644 tests/queries/0_stateless/04010_text_index_incompelete_granules.reference create mode 100644 tests/queries/0_stateless/04010_text_index_incompelete_granules.sql diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index b49d01315c70..3128556d662b 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -302,7 +302,16 @@ void MergeTreeReadTask::initializeIndexReader(const MergeTreeIndexBuildContextPt } if (index_read_result || lazy_materializing_rows) - readers.prepared_index = std::make_unique(readers.main.get(), std::move(index_read_result), part_rows); + { + bool can_read_incomplete_granules = readers.main->canReadIncompleteGranules() + && 
std::ranges::all_of(readers.prewhere, [](const auto & reader) + { + return reader->canReadIncompleteGranules(); + }); + + readers.prepared_index = std::make_unique(readers.main.get(), std::move(index_read_result), part_rows, can_read_incomplete_granules); + } + } UInt64 MergeTreeReadTask::estimateNumRows() const diff --git a/src/Storages/MergeTree/MergeTreeReaderIndex.cpp b/src/Storages/MergeTree/MergeTreeReaderIndex.cpp index 23deb34ae992..623d5468b4a3 100644 --- a/src/Storages/MergeTree/MergeTreeReaderIndex.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderIndex.cpp @@ -10,7 +10,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_) +MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_, bool can_read_incomplete_granules_) : IMergeTreeReader( main_reader_->data_part_info_for_read, {}, @@ -21,9 +21,9 @@ MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_ nullptr, main_reader_->all_mark_ranges, main_reader_->settings) - , main_reader(main_reader_) , index_read_result(std::move(index_read_result_)) , lazy_materializing_rows(lazy_materializing_rows_) + , can_read_incomplete_granules(can_read_incomplete_granules_) { chassert(lazy_materializing_rows || index_read_result); chassert(lazy_materializing_rows || index_read_result->skip_index_read_result || index_read_result->projection_index_read_result); diff --git a/src/Storages/MergeTree/MergeTreeReaderIndex.h b/src/Storages/MergeTree/MergeTreeReaderIndex.h index 6a407b3bdb4d..476b7aacad51 100644 --- a/src/Storages/MergeTree/MergeTreeReaderIndex.h +++ b/src/Storages/MergeTree/MergeTreeReaderIndex.h @@ -20,7 +20,7 @@ class MergeTreeReaderIndex : public IMergeTreeReader public: using 
MatchingMarks = std::vector; - MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_); + MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_, bool can_read_incomplete_granules_); size_t readRows( size_t from_mark, @@ -30,7 +30,7 @@ class MergeTreeReaderIndex : public IMergeTreeReader size_t offset, Columns & res_columns) override; - bool canReadIncompleteGranules() const override { return main_reader->canReadIncompleteGranules(); } + bool canReadIncompleteGranules() const override { return can_read_incomplete_granules; } bool canSkipMark(size_t mark, size_t current_task_last_mark) override; @@ -40,14 +40,14 @@ class MergeTreeReaderIndex : public IMergeTreeReader bool mustApplyFilter() const override { return lazy_materializing_rows != nullptr; } private: - /// Delegates to the main reader to determine if reading incomplete index granules is supported. - const IMergeTreeReader * main_reader; - /// Used to filter data during merge tree reading. MergeTreeIndexReadResultPtr index_read_result; const PaddedPODArray * lazy_materializing_rows = nullptr; + /// Determines if reading incomplete index granules is supported. + bool can_read_incomplete_granules; + /// Current row position used when continuing reads across multiple calls. 
size_t current_row = 0; const UInt64 * next_lazy_row_it = nullptr; diff --git a/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp b/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp index 1cab212ac837..8ca135430e3d 100644 --- a/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp @@ -273,8 +273,9 @@ size_t MergeTreeReaderTextIndex::readRows( size_t read_rows = 0; createEmptyColumns(res_columns); + size_t total_marks = data_part_info_for_read->getIndexGranularity().getMarksCountWithoutFinal(); - while (read_rows < max_rows_to_read) + while (read_rows < max_rows_to_read && from_mark < total_marks) { /// When the number of rows in a part is smaller than `index_granularity`, /// `MergeTreeReaderTextIndex` must ensure that the virtual column it reads diff --git a/tests/queries/0_stateless/04010_text_index_incompelete_granules.reference b/tests/queries/0_stateless/04010_text_index_incompelete_granules.reference new file mode 100644 index 000000000000..8bd1af11bf28 --- /dev/null +++ b/tests/queries/0_stateless/04010_text_index_incompelete_granules.reference @@ -0,0 +1 @@ +2000 diff --git a/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql b/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql new file mode 100644 index 000000000000..3b25185693b7 --- /dev/null +++ b/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql @@ -0,0 +1,53 @@ +-- Tags: no-fasttest +-- Reproduces a bug where MergeTreeReaderTextIndex::readRows overruns past the +-- final mark when the text index reader is non-first in the reader chain +-- (pushed there by a prepared_index reader created from a minmax skip index). +-- +-- The root cause: each batch boundary that falls mid-mark causes the text index +-- reader's internal current_row to drift behind getMarkStartingRow(current_mark). 
+-- After enough batches, max_rows_to_read exceeds remaining mark rows, the while +-- loop hits the final mark (0 rows), makes no progress, and increments past the +-- end -> "Trying to get non existing mark N, while size is N". + +DROP TABLE IF EXISTS t_text_index_skip_bug; + +CREATE TABLE t_text_index_skip_bug +( + id UInt64, + body String, + created_at DateTime, + INDEX fts_body body TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1, + INDEX idx_minmax created_at TYPE minmax GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 8192, + index_granularity_bytes = '10M', + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0; + +INSERT INTO t_text_index_skip_bug +SELECT + number, + concat('document ', toString(number), if(number % 100 = 0, ' vector', '')), + toDateTime('2024-01-01 00:00:00') - toIntervalSecond(number) +FROM numbers(200000); + +OPTIMIZE TABLE t_text_index_skip_bug FINAL; + +-- Full scan that reaches the end of the part. +-- Without the fix this crashes with "Trying to get non existing mark 26, while size is 26". 
+SELECT count() +FROM t_text_index_skip_bug +WHERE hasToken(body, 'vector') AND created_at >= (toDateTime('2024-01-01 00:00:00') - toIntervalMonth(1)) +SETTINGS + enable_full_text_index = 1, + use_skip_indexes = 1, + use_query_condition_cache = 0, + query_plan_direct_read_from_text_index = 1, + use_skip_indexes_on_data_read = 1, + max_threads = 1, + max_block_size = 65505; + +DROP TABLE t_text_index_skip_bug; From c1adaa1b9bc6f854ba605030d3c3145e308a8a06 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 3 Mar 2026 17:28:12 +0000 Subject: [PATCH 14/53] Backport #95408 to 26.1: Fix reading subcolumns of ALIAS columns --- .../Passes/FunctionToSubcolumnsPass.cpp | 2 +- src/Analyzer/Resolve/IdentifierResolver.cpp | 10 ++- src/Storages/ColumnsDescription.cpp | 55 +++++++------ src/Storages/ColumnsDescription.h | 4 +- .../MergeTree/MergeTreeWhereOptimizer.cpp | 2 +- src/Storages/ProjectionsDescription.cpp | 2 +- .../RocksDB/StorageEmbeddedRocksDB.cpp | 2 +- src/Storages/StorageMerge.cpp | 2 +- .../03594_alias_subcolumns.reference | 81 +++++++++++++++++++ .../0_stateless/03594_alias_subcolumns.sql | 40 +++++++++ ...12_reading_subcolumns_of_aliases.reference | 2 + .../03812_reading_subcolumns_of_aliases.sql | 21 +++++ ...03832_log_engine_alias_subcolumn.reference | 1 + .../03832_log_engine_alias_subcolumn.sql | 6 ++ 14 files changed, 195 insertions(+), 35 deletions(-) create mode 100644 tests/queries/0_stateless/03594_alias_subcolumns.reference create mode 100644 tests/queries/0_stateless/03594_alias_subcolumns.sql create mode 100644 tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.reference create mode 100644 tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.sql create mode 100644 tests/queries/0_stateless/03832_log_engine_alias_subcolumn.reference create mode 100644 tests/queries/0_stateless/03832_log_engine_alias_subcolumn.sql diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp 
index 6d26a7839927..3837aed994c5 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -436,7 +436,7 @@ std::tuple getTypedNodesForOptimizati return {}; auto * first_argument_column_node = function_arguments_nodes.front()->as(); - if (!first_argument_column_node || first_argument_column_node->getColumnName() == "__grouping_set") + if (!first_argument_column_node || first_argument_column_node->getColumnName() == "__grouping_set" || first_argument_column_node->hasExpression()) return {}; auto column_source = first_argument_column_node->getColumnSource(); diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index 3f0472b49876..efedf97393e0 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -366,7 +366,10 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromTableColumns(const /// Check if it's a subcolumn if (auto subcolumn_info = scope.table_expression_data_for_alias_resolution->tryGetSubcolumnInfo(identifier_full_name)) { - if (scope.table_expression_data_for_alias_resolution->supports_subcolumns) + /// Don't read subcolumn of aliases directly, only using getSubcolumn, + /// because aliases don't have real subcolumns, they should be extracted + /// after alias expression evaluation. 
+ if (scope.table_expression_data_for_alias_resolution->supports_subcolumns && !subcolumn_info->column_node->hasExpression()) return std::make_shared(NameAndTypePair{identifier_full_name, subcolumn_info->subcolumn_type}, subcolumn_info->column_node->getColumnSource()); return wrapExpressionNodeInSubcolumn(subcolumn_info->column_node, String(subcolumn_info->subcolumn_name), scope.context); @@ -507,7 +510,10 @@ IdentifierResolveResult IdentifierResolver::tryResolveIdentifierFromStorage( { if (auto subcolumn_info = table_expression_data.tryGetSubcolumnInfo(identifier_full_name)) { - if (table_expression_data.supports_subcolumns) + /// Don't read subcolumn of aliases directly, only using getSubcolumn, + /// because aliases don't have real subcolumns, they should be extracted + /// after alias expression evaluation. + if (table_expression_data.supports_subcolumns && !subcolumn_info->column_node->hasExpression()) result_expression = std::make_shared(NameAndTypePair{identifier_full_name, subcolumn_info->subcolumn_type}, subcolumn_info->column_node->getColumnSource()); else result_expression = wrapExpressionNodeInSubcolumn(subcolumn_info->column_node, String(subcolumn_info->subcolumn_name), scope.context); diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index 003348901c5c..10b7543e3624 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -355,7 +355,9 @@ void ColumnsDescription::add(ColumnDescription column, const String & after_colu insert_it = range.second; } - if (add_subcolumns) + /// Aliases don't have real subcolumns, they should be extracted + /// using getSubcolumn after expression evaluation. 
+ if (add_subcolumns && column.default_desc.kind != ColumnDefaultKind::Alias) addSubcolumns(column.name, column.type); columns.get<0>().insert(insert_it, std::move(column)); } @@ -629,13 +631,31 @@ bool ColumnsDescription::hasNested(const String & column_name) const return range.first != range.second && range.first->name.length() > column_name.length(); } -bool ColumnsDescription::hasSubcolumn(const String & column_name) const +static GetColumnsOptions::Kind defaultKindToGetKind(ColumnDefaultKind kind) +{ + switch (kind) + { + case ColumnDefaultKind::Default: + return GetColumnsOptions::Ordinary; + case ColumnDefaultKind::Materialized: + return GetColumnsOptions::Materialized; + case ColumnDefaultKind::Alias: + return GetColumnsOptions::Aliases; + case ColumnDefaultKind::Ephemeral: + return GetColumnsOptions::Ephemeral; + } + + return GetColumnsOptions::None; +} + +bool ColumnsDescription::hasSubcolumn(GetColumnsOptions::Kind kind, const String & column_name) const { - if (subcolumns.get<0>().count(column_name)) + auto jt = subcolumns.get<0>().find(column_name); + if (jt != subcolumns.get<0>().end() && (defaultKindToGetKind(columns.get<1>().find(jt->getNameInStorage())->default_desc.kind) & kind)) return true; /// Check for dynamic subcolumns - if (tryGetDynamicSubcolumn(column_name)) + if (tryGetDynamicSubcolumn(column_name, kind)) return true; return false; @@ -656,23 +676,6 @@ const ColumnDescription * ColumnsDescription::tryGet(const String & column_name) return it == columns.get<1>().end() ? 
nullptr : &(*it); } -static GetColumnsOptions::Kind defaultKindToGetKind(ColumnDefaultKind kind) -{ - switch (kind) - { - case ColumnDefaultKind::Default: - return GetColumnsOptions::Ordinary; - case ColumnDefaultKind::Materialized: - return GetColumnsOptions::Materialized; - case ColumnDefaultKind::Alias: - return GetColumnsOptions::Aliases; - case ColumnDefaultKind::Ephemeral: - return GetColumnsOptions::Ephemeral; - } - - return GetColumnsOptions::None; -} - NamesAndTypesList ColumnsDescription::getByNames(const GetColumnsOptions & options, const Names & names) const { NamesAndTypesList res; @@ -709,13 +712,13 @@ std::optional ColumnsDescription::tryGetColumn(const GetColumns if (options.with_subcolumns) { auto jt = subcolumns.get<0>().find(column_name); - if (jt != subcolumns.get<0>().end()) + if (jt != subcolumns.get<0>().end() && (defaultKindToGetKind(columns.get<1>().find(jt->getNameInStorage())->default_desc.kind) & options.kind)) return *jt; if (options.with_dynamic_subcolumns) { /// Check for dynamic subcolumns. 
- if (auto dynamic_subcolumn = tryGetDynamicSubcolumn(column_name)) + if (auto dynamic_subcolumn = tryGetDynamicSubcolumn(column_name, options)) return dynamic_subcolumn; } } @@ -806,7 +809,7 @@ bool ColumnsDescription::hasAlias(const String & column_name) const bool ColumnsDescription::hasColumnOrSubcolumn(GetColumnsOptions::Kind kind, const String & column_name) const { auto it = columns.get<1>().find(column_name); - if ((it != columns.get<1>().end() && (defaultKindToGetKind(it->default_desc.kind) & kind)) || hasSubcolumn(column_name)) + if ((it != columns.get<1>().end() && (defaultKindToGetKind(it->default_desc.kind) & kind)) || hasSubcolumn(kind, column_name)) return true; return false; @@ -966,12 +969,12 @@ std::vector ColumnsDescription::getAllRegisteredNames() const return names; } -std::optional ColumnsDescription::tryGetDynamicSubcolumn(const String & column_name) const +std::optional ColumnsDescription::tryGetDynamicSubcolumn(const String & column_name, const GetColumnsOptions & options) const { for (auto [ordinary_column_name, dynamic_subcolumn_name] : Nested::getAllColumnAndSubcolumnPairs(column_name)) { auto it = columns.get<1>().find(String(ordinary_column_name)); - if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns()) + if (it != columns.get<1>().end() && it->type->hasDynamicSubcolumns() && (defaultKindToGetKind(it->default_desc.kind) & options.kind)) { if (auto dynamic_subcolumn_type = it->type->tryGetSubcolumnType(dynamic_subcolumn_name)) return NameAndTypePair(String(ordinary_column_name), String(dynamic_subcolumn_name), it->type, dynamic_subcolumn_type); diff --git a/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h index 2a6d5038766c..c60215bad489 100644 --- a/src/Storages/ColumnsDescription.h +++ b/src/Storages/ColumnsDescription.h @@ -167,7 +167,7 @@ class ColumnsDescription : public IHints<> bool has(const String & column_name) const; bool hasNested(const String & column_name) const; - bool 
hasSubcolumn(const String & column_name) const; + bool hasSubcolumn(GetColumnsOptions::Kind kind, const String & column_name) const; const ColumnDescription & get(const String & column_name) const; const ColumnDescription * tryGet(const String & column_name) const; @@ -270,7 +270,7 @@ class ColumnsDescription : public IHints<> void addSubcolumns(const String & name_in_storage, const DataTypePtr & type_in_storage); void removeSubcolumns(const String & name_in_storage); - std::optional tryGetDynamicSubcolumn(const String & column_name) const; + std::optional tryGetDynamicSubcolumn(const String & column_name, const GetColumnsOptions & options) const; }; class ASTColumnDeclaration; diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index b3810ff8a43f..5b53885b47f5 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -76,7 +76,7 @@ static NameSet getTableColumns(const StorageSnapshotPtr & storage_snapshot, cons /// Add also requested subcolumns to known table columns. for (const auto & column : queried_columns) { - if (storage_columns.hasSubcolumn(column)) + if (storage_columns.hasSubcolumn(options.kind, column)) table_columns.insert(column); } diff --git a/src/Storages/ProjectionsDescription.cpp b/src/Storages/ProjectionsDescription.cpp index b2573de41502..99c2fcc7cd8c 100644 --- a/src/Storages/ProjectionsDescription.cpp +++ b/src/Storages/ProjectionsDescription.cpp @@ -384,7 +384,7 @@ void ProjectionDescription::fillProjectionDescriptionByQuery( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Projections cannot contain constant columns: {}", column_with_type_name.name); /// Subcolumns can be used in projection only when the original column is used. 
- if (columns.hasSubcolumn(column_with_type_name.name)) + if (columns.hasSubcolumn(GetColumnsOptions::All, column_with_type_name.name)) { auto subcolumn = columns.getColumnOrSubcolumn(GetColumnsOptions::All, column_with_type_name.name); if (!block.has(subcolumn.getNameInStorage())) diff --git a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp index e8d09472e170..e84b792edf50 100644 --- a/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp +++ b/src/Storages/RocksDB/StorageEmbeddedRocksDB.cpp @@ -797,7 +797,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) auto primary_key_names = metadata.getColumnsRequiredForPrimaryKey(); for (const auto & primary_key_name : primary_key_names) { - if (metadata.getColumns().hasSubcolumn(primary_key_name)) + if (metadata.getColumns().hasSubcolumn(GetColumnsOptions::All, primary_key_name)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "StorageEmbeddedRocksDB doesn't support subcolumns in primary key"); } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 515c2c1f4eeb..e0f1898d3abc 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -788,7 +788,7 @@ std::vector ReadFromMerge::createChildrenPlans(SelectQ Names column_names_to_read = column_names_as_aliases.empty() ? 
std::move(real_column_names) : std::move(column_names_as_aliases); - std::erase_if(column_names_to_read, [existing_columns = nested_storage_snapshot->getAllColumnsDescription()](const auto & column_name){ return !existing_columns.has(column_name) && !existing_columns.hasSubcolumn(column_name); }); + std::erase_if(column_names_to_read, [existing_columns = nested_storage_snapshot->getAllColumnsDescription()](const auto & column_name){ return !existing_columns.has(column_name) && !existing_columns.hasSubcolumn(GetColumnsOptions::All, column_name); }); auto child = createPlanForTable( nested_storage_snapshot, diff --git a/tests/queries/0_stateless/03594_alias_subcolumns.reference b/tests/queries/0_stateless/03594_alias_subcolumns.reference new file mode 100644 index 000000000000..6c284f56f092 --- /dev/null +++ b/tests/queries/0_stateless/03594_alias_subcolumns.reference @@ -0,0 +1,81 @@ +2 +1 +1 +0 +QUERY id: 0 + PROJECTION COLUMNS + aa.size0 UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: getSubcolumn, function_type: ordinary, result_type: UInt64 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: aa, result_type: Array(UInt64), source_id: 5 + EXPRESSION + COLUMN id: 6, column_name: a, result_type: Array(UInt64), source_id: 5 + CONSTANT id: 7, constant_value: \'size0\', constant_value_type: String + JOIN TREE + TABLE id: 5, alias: __table1, table_name: default.t_alias_subcolumns + ORDER BY + LIST id: 8, nodes: 1 + SORT id: 9, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION id + COLUMN id: 10, column_name: id, result_type: UInt64, source_id: 5 +0 +1 +0 +1 +QUERY id: 0 + PROJECTION COLUMNS + na.null UInt8 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: getSubcolumn, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: na, result_type: Nullable(String), source_id: 5 + EXPRESSION + COLUMN id: 6, column_name: n, result_type: Nullable(String), source_id: 
5 + CONSTANT id: 7, constant_value: \'null\', constant_value_type: String + JOIN TREE + TABLE id: 5, alias: __table1, table_name: default.t_alias_subcolumns + ORDER BY + LIST id: 8, nodes: 1 + SORT id: 9, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION id + COLUMN id: 10, column_name: id, result_type: UInt64, source_id: 5 +2 +1 +QUERY id: 0 + PROJECTION COLUMNS + count() UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: count, function_type: aggregate, result_type: UInt64 + JOIN TREE + TABLE id: 3, alias: __table1, table_name: default.t_alias_subcolumns + WHERE + FUNCTION id: 4, function_name: not, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 1 + FUNCTION id: 6, function_name: empty, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 7, nodes: 1 + COLUMN id: 8, column_name: aa, result_type: Array(UInt64), source_id: 3 + EXPRESSION + COLUMN id: 9, column_name: a, result_type: Array(UInt64), source_id: 3 +1 +1 +QUERY id: 0 + PROJECTION COLUMNS + count(na) UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: count, function_type: aggregate, result_type: UInt64 + ARGUMENTS + LIST id: 3, nodes: 1 + COLUMN id: 4, column_name: na, result_type: Nullable(String), source_id: 5 + EXPRESSION + COLUMN id: 6, column_name: n, result_type: Nullable(String), source_id: 5 + JOIN TREE + TABLE id: 5, alias: __table1, table_name: default.t_alias_subcolumns diff --git a/tests/queries/0_stateless/03594_alias_subcolumns.sql b/tests/queries/0_stateless/03594_alias_subcolumns.sql new file mode 100644 index 000000000000..844581224bd1 --- /dev/null +++ b/tests/queries/0_stateless/03594_alias_subcolumns.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS t_alias_subcolumns; + +SET enable_analyzer = 1; +SET optimize_functions_to_subcolumns = 1; + +CREATE TABLE t_alias_subcolumns +( + id UInt64, + a Array(UInt64), + n Nullable(String), + aa Array(UInt64) ALIAS a, + ab Array(UInt64) ALIAS arrayFilter(x -> 
x % 2 = 0, a), + na Nullable(String) ALIAS n, + nb Nullable(String) ALIAS substring(n, 1, 3) +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_alias_subcolumns VALUES (0, [1, 2], 'ffffffff') (1, [3], NULL); + +SELECT aa.size0 FROM t_alias_subcolumns ORDER BY id; +SELECT ab.size0 FROM t_alias_subcolumns ORDER BY id; + +EXPLAIN QUERY TREE SELECT aa.size0 FROM t_alias_subcolumns ORDER BY id; + +SELECT na.null FROM t_alias_subcolumns ORDER BY id; +SELECT nb.null FROM t_alias_subcolumns ORDER BY id; + +EXPLAIN QUERY TREE SELECT na.null FROM t_alias_subcolumns ORDER BY id; + +SELECT count() FROM t_alias_subcolumns WHERE NOT empty(aa); +SELECT count() FROM t_alias_subcolumns WHERE NOT empty(ab); + +EXPLAIN QUERY TREE SELECT count() FROM t_alias_subcolumns WHERE NOT empty(aa); + +SELECT count(na) FROM t_alias_subcolumns; +SELECT count(nb) FROM t_alias_subcolumns; + +EXPLAIN QUERY TREE SELECT count(na) FROM t_alias_subcolumns; + +DROP TABLE t_alias_subcolumns; diff --git a/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.reference b/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.reference new file mode 100644 index 000000000000..ab9a465591d8 --- /dev/null +++ b/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.reference @@ -0,0 +1,2 @@ +42 3 3 3 3 3 +42 3 3 3 3 3 diff --git a/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.sql b/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.sql new file mode 100644 index 000000000000..c6e2aca3e039 --- /dev/null +++ b/tests/queries/0_stateless/03812_reading_subcolumns_of_aliases.sql @@ -0,0 +1,21 @@ +set enable_analyzer=1; +set optimize_functions_to_subcolumns=1; + +drop table if exists test; +create table test +( + id UInt64, + array Array(UInt32), + tuple Tuple(array Array(UInt32)), + json JSON, + array_alias_1 Array(UInt32) alias array, + array_alias_2 Array(String) alias array::Array(String), + array_alias_3 Array(UInt32) alias tuple.array, + array_alias_4 
Array(String) alias tuple.array::Array(String), + array_alias_5 Array(UInt32) alias json.array::Array(UInt32) +); + +insert into test select 42, [1,2,3], tuple([1,2,3]), '{"array" : [1,2,3]}'; +select id, array_alias_1.size0, array_alias_2.size0, array_alias_3.size0, array_alias_4.size0, array_alias_5.size0 from test; +select id, length(array_alias_1), length(array_alias_2), length(array_alias_3), length(array_alias_4), length(array_alias_5) from test; +drop table test; diff --git a/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.reference b/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.reference new file mode 100644 index 000000000000..573541ac9702 --- /dev/null +++ b/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.sql b/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.sql new file mode 100644 index 000000000000..4783b5d5295f --- /dev/null +++ b/tests/queries/0_stateless/03832_log_engine_alias_subcolumn.sql @@ -0,0 +1,6 @@ +SET enable_analyzer=1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int, c1 Nullable(Int) ALIAS 1) ENGINE = Log(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT t0.c1.null FROM t0; +DROP TABLE t0; From 3512928d58982cd538df69563d2787b0ec31df79 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 3 Mar 2026 18:28:25 +0000 Subject: [PATCH 15/53] Backport #98551 to 26.1: Fix column rollback in StorageBuffer::appendBlock --- src/Storages/StorageBuffer.cpp | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index c5637afda521..09f699869835 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -564,7 +565,6 @@ void StorageBuffer::read( static void appendBlock(LoggerPtr log, const Block & 
from, Block & to) { size_t rows = from.rows(); - size_t old_rows = to.rows(); size_t old_bytes = to.bytes(); if (to.empty()) @@ -575,7 +575,15 @@ static void appendBlock(LoggerPtr log, const Block & from, Block & to) from.checkNumberOfRows(); to.checkNumberOfRows(); + /// Take checkpoints of all destination columns before any modifications + /// to be able to rollback in case of an exception in the middle of insertion. + ColumnCheckpoints checkpoints; + checkpoints.reserve(to.columns()); + for (size_t column_no = 0; column_no < to.columns(); ++column_no) + checkpoints.push_back(to.getByPosition(column_no).column->getCheckpoint()); + MutableColumnPtr last_col; + size_t mutated_columns = 0; try { MemoryTrackerBlockerInThread temporarily_disable_memory_tracker; @@ -601,6 +609,7 @@ static void appendBlock(LoggerPtr log, const Block & from, Block & to) LockMemoryExceptionInThread temporarily_ignore_any_memory_limits(VariableContext::Global); last_col = IColumn::mutate(std::move(to.getByPosition(column_no).column)); } + ++mutated_columns; /// In case of ColumnAggregateFunction aggregate states will /// be allocated from the query context but can be destroyed from the @@ -633,10 +642,11 @@ static void appendBlock(LoggerPtr log, const Block & from, Block & to) try { - for (size_t column_no = 0, columns = to.columns(); column_no < columns; ++column_no) + for (size_t column_no = 0; column_no < mutated_columns; ++column_no) { ColumnPtr & col_to = to.getByPosition(column_no).column; - /// If there is no column, then the exception was thrown in the middle of append, in the insertRangeFrom() + /// If there is no column, the exception was thrown in the middle of append, + /// during insertRangeFrom() — move last_col back so we can roll it back. 
if (!col_to) { col_to = std::move(last_col); @@ -646,8 +656,11 @@ static void appendBlock(LoggerPtr log, const Block & from, Block & to) /// But if there is still nothing, abort if (!col_to) throw Exception(ErrorCodes::LOGICAL_ERROR, "No column to rollback"); - if (col_to->size() != old_rows) - col_to = col_to->cut(0, old_rows); + + /// Rollback to the state before the exception. + auto mutable_col = IColumn::mutate(std::move(col_to)); + mutable_col->rollback(*checkpoints[column_no]); + col_to = std::move(mutable_col); } } catch (...) From 54116009e18a0f47edcb9ea82dd1c0851bea0d86 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 3 Mar 2026 18:30:30 +0000 Subject: [PATCH 16/53] Backport #98115 to 26.1: Enforce READ ON FILE checks for scalar file() and DESCRIBE TABLE file(). --- src/Functions/FunctionFile.cpp | 9 ++++++- src/TableFunctions/TableFunctionFile.cpp | 3 +++ ...file_function_read_on_file_grant.reference | 4 ++++ .../03822_file_function_read_on_file_grant.sh | 24 +++++++++++++++++++ 4 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03822_file_function_read_on_file_grant.reference create mode 100755 tests/queries/0_stateless/03822_file_function_read_on_file_grant.sh diff --git a/src/Functions/FunctionFile.cpp b/src/Functions/FunctionFile.cpp index b7f154877876..697345fb06d1 100644 --- a/src/Functions/FunctionFile.cpp +++ b/src/Functions/FunctionFile.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,13 @@ class FunctionFile : public IFunction { public: static constexpr auto name = "file"; - static FunctionPtr create(ContextPtr) { return std::make_shared(); } + static FunctionPtr create(ContextPtr context) + { + if (context && context->getApplicationType() != Context::ApplicationType::LOCAL) + context->checkAccess(AccessType::READ, toStringSource(AccessTypeObjects::Source::FILE)); + + return std::make_shared(); + } bool isVariadic() const override { return true; 
} String getName() const override { return name; } diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 8466ef914240..8a163381a74f 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -107,6 +107,9 @@ ColumnsDescription TableFunctionFile::getActualTableStructure(ContextPtr context if (fd >= 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Schema inference is not supported for table function '{}' with file descriptor", getName()); + if (context->getApplicationType() != Context::ApplicationType::LOCAL) + context->checkAccess(AccessType::READ, toStringSource(AccessTypeObjects::Source::FILE)); + chassert(file_source); /// TableFunctionFile::parseFirstArguments() initializes either `fd` or `file_source`. ColumnsDescription columns; diff --git a/tests/queries/0_stateless/03822_file_function_read_on_file_grant.reference b/tests/queries/0_stateless/03822_file_function_read_on_file_grant.reference new file mode 100644 index 000000000000..865d4ff5230b --- /dev/null +++ b/tests/queries/0_stateless/03822_file_function_read_on_file_grant.reference @@ -0,0 +1,4 @@ +ACCESS_DENIED +ACCESS_DENIED +FILE_DOESNT_EXIST +CANNOT_STAT diff --git a/tests/queries/0_stateless/03822_file_function_read_on_file_grant.sh b/tests/queries/0_stateless/03822_file_function_read_on_file_grant.sh new file mode 100755 index 000000000000..6db574849a79 --- /dev/null +++ b/tests/queries/0_stateless/03822_file_function_read_on_file_grant.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +user="user_03822_${CLICKHOUSE_DATABASE}_$RANDOM" +missing_txt="missing_03822_${CLICKHOUSE_DATABASE}_$RANDOM.txt" +missing_csv="missing_03822_${CLICKHOUSE_DATABASE}_$RANDOM.csv" + +${CLICKHOUSE_CLIENT} <&1 | grep -c "ACCESS_DENIED") >= 1 )) && echo "ACCESS_DENIED" || echo "UNEXPECTED"; +(( $(${CLICKHOUSE_CLIENT} --user $user --query "DESCRIBE TABLE file('$missing_csv', 'CSV')" 2>&1 | grep -c "ACCESS_DENIED") >= 1 )) && echo "ACCESS_DENIED" || echo "UNEXPECTED"; + +${CLICKHOUSE_CLIENT} --query "GRANT READ ON FILE TO $user"; + +(( $(${CLICKHOUSE_CLIENT} --user $user --query "SELECT file('$missing_txt')" 2>&1 | grep -c "FILE_DOESNT_EXIST") >= 1 )) && echo "FILE_DOESNT_EXIST" || echo "UNEXPECTED"; +(( $(${CLICKHOUSE_CLIENT} --user $user --query "DESCRIBE TABLE file('$missing_csv', 'CSV')" 2>&1 | grep -c "CANNOT_STAT") >= 1 )) && echo "CANNOT_STAT" || echo "UNEXPECTED"; + +${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS $user"; From b3e1ac83f0a79dfb054870233970fc1e4c56c860 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 4 Mar 2026 02:17:19 +0100 Subject: [PATCH 17/53] Update 04010_text_index_incompelete_granules.sql --- .../0_stateless/04010_text_index_incompelete_granules.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql b/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql index 3b25185693b7..9609c8421ec2 100644 --- a/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql +++ b/tests/queries/0_stateless/04010_text_index_incompelete_granules.sql @@ -9,6 +9,8 @@ -- loop hits the final mark (0 rows), makes no progress, and increments past the -- end -> "Trying to get non existing mark N, while size is N". 
+SET enable_full_text_index = 1; + DROP TABLE IF EXISTS t_text_index_skip_bug; CREATE TABLE t_text_index_skip_bug From a39165638d98256fa2252ea188993e39180a0159 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 4 Mar 2026 13:36:24 +0000 Subject: [PATCH 18/53] Backport #98304 to 26.1: Use `mongo-c-driver` 2.2.2 --- contrib/mongo-c-driver | 2 +- contrib/mongo-c-driver-cmake/CMakeLists.txt | 14 ++++++++++---- contrib/mongo-cxx-driver-cmake/CMakeLists.txt | 15 +++++++++++++++ 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/contrib/mongo-c-driver b/contrib/mongo-c-driver index 06a09de5578f..ba0d1dbf2b74 160000 --- a/contrib/mongo-c-driver +++ b/contrib/mongo-c-driver @@ -1 +1 @@ -Subproject commit 06a09de5578f33d2f1c9ab120969af9254e4432a +Subproject commit ba0d1dbf2b743a5a96609e7fe6b642876f0900ed diff --git a/contrib/mongo-c-driver-cmake/CMakeLists.txt b/contrib/mongo-c-driver-cmake/CMakeLists.txt index 3ffc7867ecde..0b608517607c 100644 --- a/contrib/mongo-c-driver-cmake/CMakeLists.txt +++ b/contrib/mongo-c-driver-cmake/CMakeLists.txt @@ -5,13 +5,13 @@ if(NOT USE_MONGODB) endif() set(libbson_VERSION_MAJOR 2) -set(libbson_VERSION_MINOR 1) +set(libbson_VERSION_MINOR 2) set(libbson_VERSION_PATCH 2) -set(libbson_VERSION 2.1.2) +set(libbson_VERSION 2.2.2) set(libmongoc_VERSION_MAJOR 2) -set(libmongoc_VERSION_MINOR 1) +set(libmongoc_VERSION_MINOR 2) set(libmongoc_VERSION_PATCH 2) -set(libmongoc_VERSION 2.1.2) +set(libmongoc_VERSION 2.2.2) set(LIBBSON_SOURCES_ROOT "${ClickHouse_SOURCE_DIR}/contrib/mongo-c-driver/src") set(LIBBSON_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/libbson/src") @@ -136,6 +136,8 @@ set(UTF8PROC_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/utf8proc-2.8.0") set(UTF8PROC_SOURCES "${UTF8PROC_SOURCE_DIR}/utf8proc.c") set(UTHASH_SOURCE_DIR "${LIBBSON_SOURCES_ROOT}/uthash") +set(MONGOC_CXX_COMPILER_ID "${CMAKE_CXX_COMPILER_ID}") +set(MONGOC_CXX_COMPILER_VERSION "${CMAKE_CXX_COMPILER_VERSION}") configure_file( 
${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config.h.in ${LIBMONGOC_BINARY_DIR}/mongoc/mongoc-config.h @@ -144,6 +146,10 @@ configure_file( ${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-version.h.in ${LIBMONGOC_BINARY_DIR}/mongoc/mongoc-version.h ) +configure_file( + ${LIBMONGOC_SOURCE_DIR}/mongoc/mongoc-config-private.h.in + ${LIBMONGOC_BINARY_DIR}/mongoc/mongoc-config-private.h +) add_library(_libmongoc ${LIBMONGOC_SOURCES} ${COMMON_SOURCES} ${UTF8PROC_SOURCES}) add_library(ch_contrib::libmongoc ALIAS _libmongoc) target_include_directories(_libmongoc SYSTEM PUBLIC ${LIBMONGOC_SOURCE_DIR} ${LIBMONGOC_BINARY_DIR} ${LIBMONGOC_SOURCE_DIR}/mongoc ${LIBMONGOC_BINARY_DIR}/mongoc ${COMMON_SOURCE_DIR} ${UTF8PROC_SOURCE_DIR} ${UTHASH_SOURCE_DIR} ) diff --git a/contrib/mongo-cxx-driver-cmake/CMakeLists.txt b/contrib/mongo-cxx-driver-cmake/CMakeLists.txt index 84058b56b929..8c750e1082a4 100644 --- a/contrib/mongo-cxx-driver-cmake/CMakeLists.txt +++ b/contrib/mongo-cxx-driver-cmake/CMakeLists.txt @@ -12,12 +12,19 @@ include(GenerateExportHeader) set(BSONCXX_SOURCES ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/private/itoa.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/private/version.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v1/config/config.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v1/config/export.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v1/config/version.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v1/detail/postlude.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v1/detail/prelude.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/element.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/value.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/array/view.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/builder/core.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/config.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/export.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/config/version.cpp 
${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/decimal128.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/element.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/document/value.cpp @@ -31,6 +38,7 @@ set(BSONCXX_SOURCES ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/value.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/types/bson_value/view.cpp ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/validate.cpp + ${BSONCXX_SOURCES_DIR}/lib/bsoncxx/v_noabi/bsoncxx/vector.cpp ) set(BSONCXX_POLY_USE_IMPLS ON) @@ -95,6 +103,9 @@ set(MONGOCXX_SOURCES ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/private/conversions.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/private/mongoc.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/private/numeric_casting.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v1/config/config.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v1/config/export.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v1/config/version.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v1/detail/postlude.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v1/detail/prelude.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/bulk_write.cpp @@ -103,6 +114,9 @@ set(MONGOCXX_SOURCES ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_encryption.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/client_session.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/collection.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/config.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/export.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/config/version.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/cursor.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/database.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/command_failed_event.cpp @@ -119,6 +133,7 @@ set(MONGOCXX_SOURCES ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_closed_event.cpp 
${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_description.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/events/topology_opening_event.cpp + ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/authentication_exception.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/bulk_write_exception.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/error_code.cpp ${MONGOCXX_SOURCES_DIR}/lib/mongocxx/v_noabi/mongocxx/exception/exception.cpp From 01de41f7c4dd6e9e9a5702eafbc31d51fd50513d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 4 Mar 2026 16:24:26 +0000 Subject: [PATCH 19/53] Backport #98740 to 26.1: CI: skip all jobs for release PRs in filter_job hook --- ci/jobs/scripts/workflow_hooks/filter_job.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ci/jobs/scripts/workflow_hooks/filter_job.py b/ci/jobs/scripts/workflow_hooks/filter_job.py index 8e4239a4f892..e3459dbb086a 100644 --- a/ci/jobs/scripts/workflow_hooks/filter_job.py +++ b/ci/jobs/scripts/workflow_hooks/filter_job.py @@ -58,6 +58,15 @@ def should_skip_job(job_name): _info_cache = Info() print(f"INFO: PR labels: {_info_cache.pr_labels}") + # There is no way to prevent GitHub Actions from running the PR workflow on + # release branches, so we skip all jobs here. The ReleaseCI workflow is used + # for testing on release branches instead. 
+ if ( + Labels.RELEASE in _info_cache.pr_labels + or Labels.RELEASE_LTS in _info_cache.pr_labels + ): + return True, "Skipped for release PR" + changed_files = _info_cache.get_kv_data("changed_files") if not changed_files: print("WARNING: no changed files found for PR - do not filter jobs") From 207c33721fd31b8e00ee485294d279cf8d17f210 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 5 Mar 2026 08:23:24 +0000 Subject: [PATCH 20/53] Backport #97502 to 26.1: Fix `sumCount` aggregate function not being able to read older serialized states after introduction of `Nullable(Tuple)` --- .../AggregateFunctionSumCount.cpp | 20 ++ tests/integration/helpers/cluster.py | 5 + ...regate_function_state_tuple_return_type.py | 269 +++++++++++++++++ .../0_stateless/02119_sumcount.reference | 12 +- .../03927_sumcount_compatibility.reference | 87 ++++++ .../03927_sumcount_compatibility.sql | 83 ++++++ ..._tuple_return_type_compatibility.reference | 277 +++++++++++++++++ ...nction_tuple_return_type_compatibility.sql | 279 ++++++++++++++++++ 8 files changed, 1026 insertions(+), 6 deletions(-) create mode 100644 tests/integration/test_backward_compatibility/test_aggregate_function_state_tuple_return_type.py create mode 100644 tests/queries/0_stateless/03927_sumcount_compatibility.reference create mode 100644 tests/queries/0_stateless/03927_sumcount_compatibility.sql create mode 100644 tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.reference create mode 100644 tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.sql diff --git a/src/AggregateFunctions/AggregateFunctionSumCount.cpp b/src/AggregateFunctions/AggregateFunctionSumCount.cpp index c6000b3abe35..0bec0cebc475 100644 --- a/src/AggregateFunctions/AggregateFunctionSumCount.cpp +++ b/src/AggregateFunctions/AggregateFunctionSumCount.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -45,6 +46,25 @@ class AggregateFunctionSumCount 
final : public AggregateFunctionAvg String getName() const final { return "sumCount"; } + /// Keep the legacy nullable behavior/state layout for sumCount. + /// After `Nullable(Tuple)` was introduced, for Nullable arguments in the + /// generic Null-combinator path it started using `AggregateFunctionNullUnary` + /// instead of `AggregateFunctionNullUnary` for sumCount. + /// This adds a leading null-flag byte during serialization and expects that + /// byte during deserialization, which breaks compatibility with previously + /// serialized sumCount states. + /// So we force the legacy adapter for sumCount to preserve compatibility. + /// The extra null-flag is also redundant for sumCount: "has non-NULL rows" + /// can be inferred from `count` (`count > 0` means at least one row was seen). + AggregateFunctionPtr getOwnNullAdapter( + const AggregateFunctionPtr & nested_function, + const DataTypes & arguments, + const Array & params, + const AggregateFunctionProperties & /*properties*/) const final + { + return std::make_shared>(nested_function, arguments, params); + } + #if USE_EMBEDDED_COMPILER bool isCompilable() const override diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 74a80ad5bfa1..2cb419bb3ea0 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -120,6 +120,11 @@ def find_default_config_path(): # This means that this minimum need to be, at least, 1 year older than the current release CLICKHOUSE_CI_MIN_TESTED_VERSION = "23.3" +# `Nullable(Tuple)` experimental feature is introduced in 26.1. This has lead to changes in the output return type +# of many aggregate functions from `Tuple(...)` to `Nullable(Tuple(...))`. This version can be used as baseline to do +# compatibility checks for features that are affected by this experimental feature. 
+CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION = "25.12" + ZOOKEEPER_CONTAINERS = ("zoo1", "zoo2", "zoo3") NET_LOCK_PATH = "/tmp/docker_net.lock" diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state_tuple_return_type.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state_tuple_return_type.py new file mode 100644 index 000000000000..e61ebec892d2 --- /dev/null +++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state_tuple_return_type.py @@ -0,0 +1,269 @@ +import pytest + +from helpers.cluster import ( + CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION, + ClickHouseCluster, +) + +cluster = ClickHouseCluster(__file__) +pre_nullable_tuple_node_1 = cluster.add_instance( + "pre_nullable_tuple_node_1", + with_zookeeper=False, + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION, + stay_alive=True, + with_installed_binary=True, +) +pre_nullable_tuple_node_2 = cluster.add_instance( + "pre_nullable_tuple_node_2", + with_zookeeper=False, + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION, + stay_alive=True, + with_installed_binary=True, +) +pre_nullable_tuple_node_3 = cluster.add_instance( + "pre_nullable_tuple_node_3", + with_zookeeper=False, + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION, + stay_alive=True, + with_installed_binary=True, +) +pre_nullable_tuple_node_4 = cluster.add_instance( + "pre_nullable_tuple_node_4", + with_zookeeper=False, + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_PRE_NULLABLE_TUPLE_VERSION, + stay_alive=True, + with_installed_binary=True, +) + +node3 = cluster.add_instance("node3", with_zookeeper=False, use_old_analyzer=True) +node4 = cluster.add_instance("node4", with_zookeeper=False, use_old_analyzer=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + 
+@pytest.fixture(autouse=True) +def cleanup(): + yield + pre_nullable_tuple_node_1.restart_with_original_version(clear_data_dir=True) + + +# Compatibility check for aggregate functions that return tuples. +# Before 26.1 many of these functions returned `Tuple(...)`. +# Since 26.1 they may return `Nullable(Tuple(...))` when nullable arguments are used. +# This test checks that mixed-version distributed execution stays compatible across that change. +def test_backward_compatibility_for_tuple_return_type(start_cluster): + mixed_nodes = (pre_nullable_tuple_node_1, pre_nullable_tuple_node_2, node3, node4) + test_nodes = mixed_nodes + (pre_nullable_tuple_node_3, pre_nullable_tuple_node_4) + for node in test_nodes: + node.query( + "CREATE TABLE tab_tuple_return" + "(x Nullable(Float64), y Nullable(Float64), g Nullable(UInt8), " + "a Nullable(Int32), b Nullable(Int32), c Nullable(Int32), m Nullable(Float64), " + "keys Array(UInt8), vals64 Array(UInt64), vals8 Array(UInt8)) " + "ENGINE = Log" + ) + + rowset = ( + "(1, 3, 0, 1, 2, 3, 2, [1], [5], [5]), " + "(2, 5, 0, NULL, 1, 0, 2, [1], [7], [7]), " + "(NULL, 7, 1, 3, 4, 5, 2, [2], [11], [11]), " + "(4, 9, 1, 2, 0, 4, 2, [1], [13], [13]), " + "(5, 11, 1, 5, 3, 2, 2, [2], [17], [17])" + ) + for node in test_nodes: + node.query( + "INSERT INTO tab_tuple_return " + "(x, y, g, a, b, c, m, keys, vals64, vals8) VALUES " + rowset + ) + + mixed_remote_tab = "remote('pre_nullable_tuple_node_1,pre_nullable_tuple_node_2,node3,node4', default, tab_tuple_return)" + pre_nullable_only_remote_tab = "remote('pre_nullable_tuple_node_1,pre_nullable_tuple_node_2,pre_nullable_tuple_node_3,pre_nullable_tuple_node_4', default, tab_tuple_return)" + + def build_checks(remote_tab): + return [ + ( + "simpleLinearRegression", + f"SELECT tuple(" + f"roundBankers(tupleElement(simpleLinearRegression(x, y), 1), 4), " + f"roundBankers(tupleElement(simpleLinearRegression(x, y), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "analysisOfVariance", + f"SELECT 
tuple(" + f"roundBankers(tupleElement(analysisOfVariance(x, g), 1), 4), " + f"roundBankers(tupleElement(analysisOfVariance(x, g), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "kolmogorovSmirnovTest", + f"SELECT tuple(" + f"roundBankers(tupleElement(kolmogorovSmirnovTest('two-sided')(x, g), 1), 4), " + f"roundBankers(tupleElement(kolmogorovSmirnovTest('two-sided')(x, g), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "mannWhitneyUTest", + f"SELECT tuple(" + f"roundBankers(tupleElement(mannWhitneyUTest('two-sided')(x, g), 1), 4), " + f"roundBankers(tupleElement(mannWhitneyUTest('two-sided')(x, g), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "studentTTest", + f"SELECT tuple(" + f"roundBankers(tupleElement(studentTTest(x, g), 1), 4), " + f"roundBankers(tupleElement(studentTTest(x, g), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "welchTTest", + f"SELECT tuple(" + f"roundBankers(tupleElement(welchTTest(x, g), 1), 4), " + f"roundBankers(tupleElement(welchTTest(x, g), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "meanZTest", + f"SELECT tuple(" + f"roundBankers(tupleElement(meanZTest(1., 1., 0.95)(x, g), 1), 4), " + f"roundBankers(tupleElement(meanZTest(1., 1., 0.95)(x, g), 2), 4), " + f"roundBankers(tupleElement(meanZTest(1., 1., 0.95)(x, g), 3), 4), " + f"roundBankers(tupleElement(meanZTest(1., 1., 0.95)(x, g), 4), 4)) " + f"FROM {remote_tab}", + ), + ( + "studentTTestOneSample", + f"SELECT tuple(" + f"roundBankers(tupleElement(studentTTestOneSample(x, m), 1), 4), " + f"roundBankers(tupleElement(studentTTestOneSample(x, m), 2), 4)) " + f"FROM {remote_tab}", + ), + ( + "argAndMin", + f"SELECT argAndMin(a, b) FROM {remote_tab}", + ), + ( + "argAndMax", + f"SELECT argAndMax(a, b) FROM {remote_tab}", + ), + ( + "argMin", + f"SELECT argMin(tuple(a, b), c) FROM {remote_tab}", + ), + ( + "argMax", + f"SELECT argMax(tuple(a, b), c) FROM {remote_tab}", + ), + ( + "sumMap", + f"SELECT sumMap(tuple(keys, vals64)) FROM {remote_tab}", + ), + ( + "sumMappedArrays", + f"SELECT 
sumMappedArrays(tuple(keys, vals64)) FROM {remote_tab}", + ), + ( + "sumMapWithOverflow", + f"SELECT sumMapWithOverflow(tuple(keys, vals8)) FROM {remote_tab}", + ), + ( + "sumCount", + f"SELECT sumCount(x) FROM {remote_tab}", + ), + ] + + def build_empty_checks(remote_tab): + return [ + ("sumCount_empty", f"SELECT sumCount(x) FROM {remote_tab} WHERE 0"), + ("sumMap_empty", f"SELECT sumMap(tuple(keys, vals64)) FROM {remote_tab} WHERE 0"), + ( + "sumMappedArrays_empty", + f"SELECT sumMappedArrays(tuple(keys, vals64)) FROM {remote_tab} WHERE 0", + ), + ( + "sumMapWithOverflow_empty", + f"SELECT sumMapWithOverflow(tuple(keys, vals8)) FROM {remote_tab} WHERE 0", + ), + ] + + mixed_checks = build_checks(mixed_remote_tab) + pre_nullable_only_checks = build_checks(pre_nullable_only_remote_tab) + + mixed_baseline_results = {} + for name, query in mixed_checks: + mixed_baseline_results[name] = pre_nullable_tuple_node_1.query(query) + + assert mixed_baseline_results["sumCount"] == "(48,16)\n" + + for name, query in mixed_checks: + assert pre_nullable_tuple_node_2.query(query) == mixed_baseline_results[name], name + assert node3.query(query) == mixed_baseline_results[name], name + assert node4.query(query) == mixed_baseline_results[name], name + + # Extra baseline with 4x pre-26.1 nodes. + pre_nullable_only_baseline_results = {} + for name, query in pre_nullable_only_checks: + pre_nullable_only_baseline_results[name] = pre_nullable_tuple_node_1.query(query) + + for name, _ in mixed_checks: + assert pre_nullable_only_baseline_results[name] == mixed_baseline_results[name], name + + # Cover empty aggregate states with deterministic tuple-return functions. 
+ mixed_empty_checks = build_empty_checks(mixed_remote_tab) + pre_nullable_only_empty_checks = build_empty_checks(pre_nullable_only_remote_tab) + + mixed_empty_baseline_results = {} + for name, query in mixed_empty_checks: + mixed_empty_baseline_results[name] = pre_nullable_tuple_node_1.query(query) + + assert mixed_empty_baseline_results["sumCount_empty"] == "(0,0)\n" + + for name, query in mixed_empty_checks: + assert pre_nullable_tuple_node_2.query(query) == mixed_empty_baseline_results[name], name + assert node3.query(query) == mixed_empty_baseline_results[name], name + assert node4.query(query) == mixed_empty_baseline_results[name], name + + pre_nullable_only_empty_baseline_results = {} + for name, query in pre_nullable_only_empty_checks: + pre_nullable_only_empty_baseline_results[name] = pre_nullable_tuple_node_1.query(query) + + for name, _ in mixed_empty_checks: + assert ( + pre_nullable_only_empty_baseline_results[name] + == mixed_empty_baseline_results[name] + ), name + + # Upgrade one pre-26.1 node to latest and re-check compatibility from all coordinators. 
+ pre_nullable_tuple_node_1.restart_with_latest_version(fix_metadata=True) + + for name, query in mixed_checks: + assert pre_nullable_tuple_node_1.query(query) == mixed_baseline_results[name], name + assert pre_nullable_tuple_node_2.query(query) == mixed_baseline_results[name], name + assert node3.query(query) == mixed_baseline_results[name], name + assert node4.query(query) == mixed_baseline_results[name], name + + for name, query in mixed_empty_checks: + assert ( + pre_nullable_tuple_node_1.query(query) == mixed_empty_baseline_results[name] + ), name + assert ( + pre_nullable_tuple_node_2.query(query) == mixed_empty_baseline_results[name] + ), name + assert node3.query(query) == mixed_empty_baseline_results[name], name + assert node4.query(query) == mixed_empty_baseline_results[name], name + + for node in test_nodes: + node.query("DROP TABLE IF EXISTS tab_tuple_return") diff --git a/tests/queries/0_stateless/02119_sumcount.reference b/tests/queries/0_stateless/02119_sumcount.reference index 919791bbc9ec..437c52c899a1 100644 --- a/tests/queries/0_stateless/02119_sumcount.reference +++ b/tests/queries/0_stateless/02119_sumcount.reference @@ -1,15 +1,15 @@ Tuple(UInt64, UInt64) (9007199254740994,3) -Nullable(Tuple(UInt64, UInt64)) (9007199254740994,3) Tuple(UInt64, UInt64) (9007199254740994,3) -Nullable(Tuple(UInt64, UInt64)) (9007199254740994,3) +Tuple(UInt64, UInt64) (9007199254740994,3) +Tuple(UInt64, UInt64) (9007199254740994,3) +Tuple(Float64, UInt64) (9007199254740994,3) Tuple(Float64, UInt64) (9007199254740994,3) -Nullable(Tuple(Float64, UInt64)) (9007199254740994,3) Tuple(Float64, UInt64) (9007199254740994,3) -Nullable(Tuple(Float64, UInt64)) (9007199254740994,3) +Tuple(Float64, UInt64) (9007199254740994,3) +Tuple(Float64, UInt64) (16777218,3) +Tuple(Float64, UInt64) (16777218,3) Tuple(Float64, UInt64) (16777218,3) -Nullable(Tuple(Float64, UInt64)) (16777218,3) Tuple(Float64, UInt64) (16777218,3) -Nullable(Tuple(Float64, UInt64)) (16777218,3) Tuple(Int64, 
UInt64) (7140,120) Tuple(UInt64, UInt64) (31125,250) Tuple(Int128, UInt64) (100,100) diff --git a/tests/queries/0_stateless/03927_sumcount_compatibility.reference b/tests/queries/0_stateless/03927_sumcount_compatibility.reference new file mode 100644 index 000000000000..8f3f798866da --- /dev/null +++ b/tests/queries/0_stateless/03927_sumcount_compatibility.reference @@ -0,0 +1,87 @@ +-- { echo } + +SET allow_suspicious_low_cardinality_types = 1; +SELECT + toTypeName(sumCount(NULL)), + sumCount(NULL); +Nullable(Nothing) \N +SELECT + toTypeName(sumCount(CAST(NULL, 'Nullable(Float32)'))), + sumCount(CAST(NULL, 'Nullable(Float32)')); +Tuple(Float64, UInt64) (0,0) +SELECT + toTypeName(sumCount(CAST(NULL, 'LowCardinality(Nullable(Float32))'))), + sumCount(CAST(NULL, 'LowCardinality(Nullable(Float32))')); +Tuple(Float64, UInt64) (0,0) +SELECT hex(sumCountState(CAST(NULL, 'Nullable(Float32)'))); +000000000000000000 +SELECT hex(sumCountState(CAST(NULL, 'LowCardinality(Nullable(Float32))'))); +000000000000000000 +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(arrayJoin([NULL, NULL, NULL]), 'Nullable(Float32)') AS x +); +(0,0) 000000000000000000 +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(arrayJoin([NULL, NULL, NULL]), 'LowCardinality(Nullable(Float32))') AS x +); +(0,0) 000000000000000000 +SELECT finalizeAggregation(CAST(unhex('000000000000000000'), 'AggregateFunction(sumCount, Nullable(Float32))')); +(0,0) +SELECT finalizeAggregation(CAST(unhex('000000000000000000'), 'AggregateFunction(sumCount, LowCardinality(Nullable(Float32)))')); +(0,0) +SELECT finalizeAggregation(CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))')); +(2.363335573687322e38,9) +SELECT finalizeAggregation(CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, LowCardinality(Nullable(Float32)))')); +(2.363335573687322e38,9) +SELECT finalizeAggregation(CAST(unhex('0000000064789'), 'AggregateFunction(sumCount, 
Nullable(Float32))')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('000000008639E64789'), 'AggregateFunction(sumCount, Nullable(Float32))')); -- { serverError ATTEMPT_TO_READ_AFTER_EOF } +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(number % 2, 'Nullable(UInt8)') AS x + FROM numbers(130) +); +(65,130) 41000000000000008201 +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(number % 2, 'LowCardinality(Nullable(UInt8))') AS x + FROM numbers(130) +); +(65,130) 41000000000000008201 +SELECT finalizeAggregation(CAST(unhex('41000000000000008201'), 'AggregateFunction(sumCount, Nullable(UInt8))')); +(65,130) +SELECT finalizeAggregation(CAST(unhex('41000000000000008201'), 'AggregateFunction(sumCount, LowCardinality(Nullable(UInt8)))')); +(65,130) +SELECT sumCountMerge(st) +FROM +( + SELECT CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))') AS st + UNION ALL + SELECT CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))') AS st +); +(4.726671147374644e38,18) +SELECT hex(sumCountState(x)) +FROM +( + SELECT CAST( + arrayJoin( + [3.4028235e38, -3.4028235e38, NULL, 2.3835032e38, -6.857267e37, -4.4835215e37, -2.6999879e38, 1.9934134e38, 2.362745e38, -5.4225925e37]), + 'LowCardinality(Nullable(Float32))') + AS x +); +000000008639E64709 +SELECT hex(sumCountState(x)) +FROM +( + SELECT CAST( + arrayJoin( + [3.4028235e38, -3.4028235e38, NULL, 2.3835032e38, -6.857267e37, -4.4835215e37, -2.6999879e38, 1.9934134e38, 2.362745e38, -5.4225925e37]), + 'Nullable(Float32)') + AS x +); +000000008639E64709 diff --git a/tests/queries/0_stateless/03927_sumcount_compatibility.sql b/tests/queries/0_stateless/03927_sumcount_compatibility.sql new file mode 100644 index 000000000000..61a2cec30147 --- /dev/null +++ b/tests/queries/0_stateless/03927_sumcount_compatibility.sql @@ -0,0 +1,83 @@ +-- { echo } + +SET allow_suspicious_low_cardinality_types = 1; + +SELECT + 
toTypeName(sumCount(NULL)), + sumCount(NULL); + +SELECT + toTypeName(sumCount(CAST(NULL, 'Nullable(Float32)'))), + sumCount(CAST(NULL, 'Nullable(Float32)')); + +SELECT + toTypeName(sumCount(CAST(NULL, 'LowCardinality(Nullable(Float32))'))), + sumCount(CAST(NULL, 'LowCardinality(Nullable(Float32))')); + +SELECT hex(sumCountState(CAST(NULL, 'Nullable(Float32)'))); +SELECT hex(sumCountState(CAST(NULL, 'LowCardinality(Nullable(Float32))'))); + +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(arrayJoin([NULL, NULL, NULL]), 'Nullable(Float32)') AS x +); + +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(arrayJoin([NULL, NULL, NULL]), 'LowCardinality(Nullable(Float32))') AS x +); + +SELECT finalizeAggregation(CAST(unhex('000000000000000000'), 'AggregateFunction(sumCount, Nullable(Float32))')); +SELECT finalizeAggregation(CAST(unhex('000000000000000000'), 'AggregateFunction(sumCount, LowCardinality(Nullable(Float32)))')); + +SELECT finalizeAggregation(CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))')); +SELECT finalizeAggregation(CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, LowCardinality(Nullable(Float32)))')); +SELECT finalizeAggregation(CAST(unhex('0000000064789'), 'AggregateFunction(sumCount, Nullable(Float32))')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('000000008639E64789'), 'AggregateFunction(sumCount, Nullable(Float32))')); -- { serverError ATTEMPT_TO_READ_AFTER_EOF } + +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(number % 2, 'Nullable(UInt8)') AS x + FROM numbers(130) +); + +SELECT sumCount(x), hex(sumCountState(x)) +FROM +( + SELECT CAST(number % 2, 'LowCardinality(Nullable(UInt8))') AS x + FROM numbers(130) +); + +SELECT finalizeAggregation(CAST(unhex('41000000000000008201'), 'AggregateFunction(sumCount, Nullable(UInt8))')); +SELECT finalizeAggregation(CAST(unhex('41000000000000008201'), 'AggregateFunction(sumCount, 
LowCardinality(Nullable(UInt8)))')); + +SELECT sumCountMerge(st) +FROM +( + SELECT CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))') AS st + UNION ALL + SELECT CAST(unhex('000000008639E64709'), 'AggregateFunction(sumCount, Nullable(Float32))') AS st +); + +SELECT hex(sumCountState(x)) +FROM +( + SELECT CAST( + arrayJoin( + [3.4028235e38, -3.4028235e38, NULL, 2.3835032e38, -6.857267e37, -4.4835215e37, -2.6999879e38, 1.9934134e38, 2.362745e38, -5.4225925e37]), + 'LowCardinality(Nullable(Float32))') + AS x +); + +SELECT hex(sumCountState(x)) +FROM +( + SELECT CAST( + arrayJoin( + [3.4028235e38, -3.4028235e38, NULL, 2.3835032e38, -6.857267e37, -4.4835215e37, -2.6999879e38, 1.9934134e38, 2.362745e38, -5.4225925e37]), + 'Nullable(Float32)') + AS x +); diff --git a/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.reference b/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.reference new file mode 100644 index 000000000000..f5c3e2ab365a --- /dev/null +++ b/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.reference @@ -0,0 +1,277 @@ +-- { echo } + +-- Compatibility coverage check for tuple-returning aggregate functions with Nullable arguments. +-- sumCount is covered separately in 03927_sumcount_compatibility. +-- Make sure serialized states of these functions can be deserialized. + +-- simpleLinearRegression non-empty state. +SELECT finalizeAggregation(CAST(unhex('01040000000000000000000000000028400000000000003C4000000000000047400000000000005A40'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); +(2,1) +-- analysisOfVariance non-empty state. 
+SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('0102000000000000224000000000000028400200000000008041400000000000005A400203000000000000000200000000000000'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')) AS res +); +(0.81,0.4345) +-- kolmogorovSmirnovTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010302000000000000F03F0000000000000840000000000000144000000000000000400000000000002440'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')) AS res +); +(0.5,0.9) +-- mannWhitneyUTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010302000000000000F03F0000000000000840000000000000144000000000000000400000000000002440'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')) AS res +); +(2,0.7728) +-- studentTTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('01000000000000084000000000000000400000000000002240000000000000284000000000008041400000000000005A40'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')) AS res +); +(-0.9,0.4345) +-- welchTTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('01000000000000084000000000000000400000000000002240000000000000284000000000008041400000000000005A40'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')) AS res +); +(-0.7206,0.5877) +-- meanZTest non-empty state. 
+SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4), roundBankers(res.3, 4), roundBankers(res.4, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010000000000000840000000000000004000000000000022400000000000002840'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')) AS res +); +(-3.2863,0.001,-4.7892,-1.2108) +-- studentTTestOneSample non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010000000000000840000000000000224000000000008041400000000000000040'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')) AS res +); +(2.5981,0.1217) +-- argAndMin non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101030000000100000000'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); +(3,0) +-- argAndMax non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101010000000102000000'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); +(1,2) +-- argMin(tuple, val) non-empty state. +SELECT finalizeAggregation(CAST(unhex('01010100010000000100000000'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +(NULL,1) +-- argMax(tuple, val) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101000300000000040000000105000000'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +(3,4) +-- sumMap(nullable tuple) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); +([5],[7]) +-- sumMappedArrays(nullable tuple) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); +([5],[7]) +-- sumMapWithOverflow(nullable tuple) non-empty state. 
+SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))')); +([5],[7]) +-- All null states are now different after we have introduced `Nullable(Tuple)`. However the following test will ensure that +-- we can still decode the old all-null states (which had a different layout) without problems, and that the new all-null states are decodable as well. + +-- Decode legacy all-null states for nullable signatures. + +-- simpleLinearRegression legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('0100000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); +(nan,nan) +-- analysisOfVariance legacy all-null state should keep historical BAD_ARGUMENTS behavior. +SELECT finalizeAggregation(CAST(unhex('01000000'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } +-- kolmogorovSmirnovTest legacy all-null state should keep historical BAD_ARGUMENTS behavior. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } +-- mannWhitneyUTest legacy all-null state should keep historical BAD_ARGUMENTS behavior. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } +-- studentTTest legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')); +(nan,nan) +-- welchTTest legacy all-null state. 
+SELECT finalizeAggregation(CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')); +(nan,nan) +-- meanZTest legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')); +(nan,0,nan,nan) +-- studentTTestOneSample legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')); +(nan,nan) +-- argAndMin legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); +(0,0) +-- argAndMax legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); +(0,0) +-- argMin(tuple, val) legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +(NULL,NULL) +-- argMax(tuple, val) legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +(NULL,NULL) +-- sumMap/sumMappedArrays/sumMapWithOverflow nullable-tuple signatures have no legacy all-null states: +-- older versions did not support Nullable(Tuple(...)) arguments for these functions. + +-- Decode legacy tuple-signature states for sumMap/sumMappedArrays/sumMapWithOverflow. +-- These are non-nullable tuple signatures, so `00` below means empty state, not nullable all-null state. + +-- sumMap(old tuple signature) non-empty legacy state. 
+SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMap, Tuple(Array(UInt8), Array(UInt64)))')); +([5],[7]) +-- sumMappedArrays(old tuple signature) non-empty legacy state. +SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMappedArrays, Tuple(Array(UInt8), Array(UInt64)))')); +([5],[7]) +-- sumMapWithOverflow(old tuple signature) non-empty legacy state. +SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Tuple(Array(UInt8), Array(UInt8)))')); +([5],[7]) +-- sumMap(old tuple signature) empty legacy state (non-nullable signature). +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMap, Tuple(Array(UInt8), Array(UInt64)))')); +([],[]) +-- sumMappedArrays(old tuple signature) empty legacy state (non-nullable signature). +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMappedArrays, Tuple(Array(UInt8), Array(UInt64)))')); +([],[]) +-- sumMapWithOverflow(old tuple signature) empty legacy state (non-nullable signature). +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMapWithOverflow, Tuple(Array(UInt8), Array(UInt8)))')); +([],[]) +-- Decode current all-null states. + +-- simpleLinearRegression current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); +\N +-- analysisOfVariance current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')); +\N +-- kolmogorovSmirnovTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); +\N +-- mannWhitneyUTest current all-null state. 
+SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); +\N +-- studentTTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')); +\N +-- welchTTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')); +\N +-- meanZTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')); +\N +-- studentTTestOneSample current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')); +\N +-- argAndMin current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); +\N +-- argAndMax current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); +\N +-- argMin(tuple, val) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +\N +-- argMax(tuple, val) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); +\N +-- sumMap(nullable tuple) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); +\N +-- sumMappedArrays(nullable tuple) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); +\N +-- sumMapWithOverflow(nullable tuple) current all-null state. 
+SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))')); +\N +-- studentTTest merge behavior for legacy/current all-null states. + +-- Merge legacy all-null state with legacy all-null state. +SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); +(nan,nan) +-- Merge current all-null state with current all-null state. +SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); +\N +-- Merge legacy all-null state with current all-null state. +SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); +(nan,nan) +-- Return type drift for Nullable arguments. +-- We return Nullable(Tuple) now for all these functions now which is semantically more correct + +-- simpleLinearRegression: legacy type was Tuple(k Float64, b Float64). +SELECT toTypeName(simpleLinearRegression(x, y)) +FROM values('x Nullable(Float64), y Nullable(Float64)', (1, 3), (2, 5), (NULL, 7), (4, 9), (5, 11)); +Nullable(Tuple(k Float64, b Float64)) +-- analysisOfVariance: legacy type was Tuple(f_statistic Float64, p_value Float64). 
+SELECT toTypeName(analysisOfVariance(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(f_statistic Float64, p_value Float64)) +-- kolmogorovSmirnovTest: legacy type was Tuple(d_statistic Float64, p_value Float64). +SELECT toTypeName(kolmogorovSmirnovTest('two-sided')(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(d_statistic Float64, p_value Float64)) +-- mannWhitneyUTest: legacy type was Tuple(u_statistic Float64, p_value Float64). +SELECT toTypeName(mannWhitneyUTest('two-sided')(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(u_statistic Float64, p_value Float64)) +-- studentTTest: legacy type was Tuple(t_statistic Float64, p_value Float64). +SELECT toTypeName(studentTTest(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(t_statistic Float64, p_value Float64)) +-- welchTTest: legacy type was Tuple(t_statistic Float64, p_value Float64). +SELECT toTypeName(welchTTest(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(t_statistic Float64, p_value Float64)) +-- meanZTest: legacy type was Tuple(z_statistic Float64, p_value Float64, confidence_interval_low Float64, confidence_interval_high Float64). +SELECT toTypeName(meanZTest(1., 1., 0.95)(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); +Nullable(Tuple(z_statistic Float64, p_value Float64, confidence_interval_low Float64, confidence_interval_high Float64)) +-- studentTTestOneSample: legacy type was Tuple(t_statistic Float64, p_value Float64). 
+SELECT toTypeName(studentTTestOneSample(v, m)) +FROM values('v Nullable(Float64), m Nullable(Float64)', (1, 2), (3, 2), (NULL, 2), (5, 2)); +Nullable(Tuple(t_statistic Float64, p_value Float64)) +-- argAndMin: legacy type was Tuple(Int32, Int32). +SELECT toTypeName(argAndMin(a, b)) +FROM values('a Nullable(Int32), b Nullable(Int32)', (1, 2), (NULL, 1), (3, 0)); +Nullable(Tuple(Int32, Int32)) +-- argAndMax: legacy type was Tuple(Int32, Int32). +SELECT toTypeName(argAndMax(a, b)) +FROM values('a Nullable(Int32), b Nullable(Int32)', (1, 2), (NULL, 1), (3, 0)); +Nullable(Tuple(Int32, Int32)) +-- argMin(tuple, val): legacy type was Tuple(Nullable(Int32), Nullable(Int32)). +SELECT toTypeName(argMin(tuple(a, b), c)) +FROM values('a Nullable(Int32), b Nullable(Int32), c Nullable(Int32)', (1, 2, 3), (NULL, 1, 0), (3, 4, 5)); +Nullable(Tuple(Nullable(Int32), Nullable(Int32))) +-- argMax(tuple, val): legacy type was Tuple(Nullable(Int32), Nullable(Int32)). +SELECT toTypeName(argMax(tuple(a, b), c)) +FROM values('a Nullable(Int32), b Nullable(Int32), c Nullable(Int32)', (1, 2, 3), (NULL, 1, 0), (3, 4, 5)); +Nullable(Tuple(Nullable(Int32), Nullable(Int32))) +-- sumMap(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt64)). +SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))'))); +Nullable(Tuple(Array(UInt8), Array(UInt64))) +-- sumMappedArrays(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt64)). +SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))'))); +Nullable(Tuple(Array(UInt8), Array(UInt64))) +-- sumMapWithOverflow(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt8)). 
+SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))'))); +Nullable(Tuple(Array(UInt8), Array(UInt8))) diff --git a/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.sql b/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.sql new file mode 100644 index 000000000000..112b2fec9944 --- /dev/null +++ b/tests/queries/0_stateless/03928_aggregate_function_tuple_return_type_compatibility.sql @@ -0,0 +1,279 @@ +-- { echo } + +-- Compatibility coverage check for tuple-returning aggregate functions with Nullable arguments. +-- sumCount is covered separately in 03927_sumcount_compatibility. +-- Make sure serialized states of these functions can be deserialized. + +-- simpleLinearRegression non-empty state. +SELECT finalizeAggregation(CAST(unhex('01040000000000000000000000000028400000000000003C4000000000000047400000000000005A40'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); + +-- analysisOfVariance non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('0102000000000000224000000000000028400200000000008041400000000000005A400203000000000000000200000000000000'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- kolmogorovSmirnovTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010302000000000000F03F0000000000000840000000000000144000000000000000400000000000002440'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- mannWhitneyUTest non-empty state. 
+SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010302000000000000F03F0000000000000840000000000000144000000000000000400000000000002440'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- studentTTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('01000000000000084000000000000000400000000000002240000000000000284000000000008041400000000000005A40'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- welchTTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('01000000000000084000000000000000400000000000002240000000000000284000000000008041400000000000005A40'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- meanZTest non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4), roundBankers(res.3, 4), roundBankers(res.4, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010000000000000840000000000000004000000000000022400000000000002840'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')) AS res +); + +-- studentTTestOneSample non-empty state. +SELECT tuple(roundBankers(res.1, 4), roundBankers(res.2, 4)) +FROM +( + SELECT finalizeAggregation(CAST(unhex('010000000000000840000000000000224000000000008041400000000000000040'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')) AS res +); + +-- argAndMin non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101030000000100000000'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); + +-- argAndMax non-empty state. 
+SELECT finalizeAggregation(CAST(unhex('0101010000000102000000'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); + +-- argMin(tuple, val) non-empty state. +SELECT finalizeAggregation(CAST(unhex('01010100010000000100000000'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- argMax(tuple, val) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101000300000000040000000105000000'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- sumMap(nullable tuple) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); + +-- sumMappedArrays(nullable tuple) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); + +-- sumMapWithOverflow(nullable tuple) non-empty state. +SELECT finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))')); + +-- All null states are now different after we have introduced `Nullable(Tuple)`. However the following test will ensure that +-- we can still decode the old all-null states (which had a different layout) without problems, and that the new all-null states are decodable as well. + +-- Decode legacy all-null states for nullable signatures. + +-- simpleLinearRegression legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('0100000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); + +-- analysisOfVariance legacy all-null state should keep historical BAD_ARGUMENTS behavior. 
+SELECT finalizeAggregation(CAST(unhex('01000000'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } + +-- kolmogorovSmirnovTest legacy all-null state should keep historical BAD_ARGUMENTS behavior. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } + +-- mannWhitneyUTest legacy all-null state should keep historical BAD_ARGUMENTS behavior. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); -- { serverError BAD_ARGUMENTS } + +-- studentTTest legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')); + +-- welchTTest legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')); + +-- meanZTest legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')); + +-- studentTTestOneSample legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')); + +-- argAndMin legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); + +-- argAndMax legacy all-null state. 
+SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); + +-- argMin(tuple, val) legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- argMax(tuple, val) legacy all-null state. +SELECT finalizeAggregation(CAST(unhex('010000'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- sumMap/sumMappedArrays/sumMapWithOverflow nullable-tuple signatures have no legacy all-null states: +-- older versions did not support Nullable(Tuple(...)) arguments for these functions. + +-- Decode legacy tuple-signature states for sumMap/sumMappedArrays/sumMapWithOverflow. +-- These are non-nullable tuple signatures, so `00` below means empty state, not nullable all-null state. + +-- sumMap(old tuple signature) non-empty legacy state. +SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMap, Tuple(Array(UInt8), Array(UInt64)))')); + +-- sumMappedArrays(old tuple signature) non-empty legacy state. +SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMappedArrays, Tuple(Array(UInt8), Array(UInt64)))')); + +-- sumMapWithOverflow(old tuple signature) non-empty legacy state. +SELECT finalizeAggregation(CAST(unhex('01050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Tuple(Array(UInt8), Array(UInt8)))')); + +-- sumMap(old tuple signature) empty legacy state (non-nullable signature). +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMap, Tuple(Array(UInt8), Array(UInt64)))')); + +-- sumMappedArrays(old tuple signature) empty legacy state (non-nullable signature). +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMappedArrays, Tuple(Array(UInt8), Array(UInt64)))')); + +-- sumMapWithOverflow(old tuple signature) empty legacy state (non-nullable signature). 
+SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMapWithOverflow, Tuple(Array(UInt8), Array(UInt8)))')); + +-- Decode current all-null states. + +-- simpleLinearRegression current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(simpleLinearRegression, Nullable(Float64), Nullable(Float64))')); + +-- analysisOfVariance current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(analysisOfVariance, Nullable(Float64), Nullable(UInt8))')); + +-- kolmogorovSmirnovTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(kolmogorovSmirnovTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); + +-- mannWhitneyUTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(mannWhitneyUTest(''two-sided''), Nullable(Float64), Nullable(UInt8))')); + +-- studentTTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))')); + +-- welchTTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(welchTTest, Nullable(Float64), Nullable(UInt8))')); + +-- meanZTest current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(meanZTest(1., 1., 0.95), Nullable(Float64), Nullable(UInt8))')); + +-- studentTTestOneSample current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(studentTTestOneSample, Nullable(Float64), Nullable(Float64))')); + +-- argAndMin current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argAndMin, Nullable(Int32), Nullable(Int32))')); + +-- argAndMax current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argAndMax, Nullable(Int32), Nullable(Int32))')); + +-- argMin(tuple, val) current all-null state. 
+SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argMin, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- argMax(tuple, val) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(argMax, Tuple(Nullable(Int32), Nullable(Int32)), Nullable(Int32))')); + +-- sumMap(nullable tuple) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); + +-- sumMappedArrays(nullable tuple) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))')); + +-- sumMapWithOverflow(nullable tuple) current all-null state. +SELECT finalizeAggregation(CAST(unhex('00'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))')); + +-- studentTTest merge behavior for legacy/current all-null states. + +-- Merge legacy all-null state with legacy all-null state. +SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); + +-- Merge current all-null state with current all-null state. +SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); + +-- Merge legacy all-null state with current all-null state. 
+SELECT studentTTestMerge(st) +FROM +( + SELECT CAST(unhex('01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st + UNION ALL + SELECT CAST(unhex('00'), 'AggregateFunction(studentTTest, Nullable(Float64), Nullable(UInt8))') AS st +); + +-- Return type drift for Nullable arguments. +-- We return Nullable(Tuple) now for all these functions now which is semantically more correct + +-- simpleLinearRegression: legacy type was Tuple(k Float64, b Float64). +SELECT toTypeName(simpleLinearRegression(x, y)) +FROM values('x Nullable(Float64), y Nullable(Float64)', (1, 3), (2, 5), (NULL, 7), (4, 9), (5, 11)); + +-- analysisOfVariance: legacy type was Tuple(f_statistic Float64, p_value Float64). +SELECT toTypeName(analysisOfVariance(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- kolmogorovSmirnovTest: legacy type was Tuple(d_statistic Float64, p_value Float64). +SELECT toTypeName(kolmogorovSmirnovTest('two-sided')(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- mannWhitneyUTest: legacy type was Tuple(u_statistic Float64, p_value Float64). +SELECT toTypeName(mannWhitneyUTest('two-sided')(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- studentTTest: legacy type was Tuple(t_statistic Float64, p_value Float64). +SELECT toTypeName(studentTTest(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- welchTTest: legacy type was Tuple(t_statistic Float64, p_value Float64). 
+SELECT toTypeName(welchTTest(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- meanZTest: legacy type was Tuple(z_statistic Float64, p_value Float64, confidence_interval_low Float64, confidence_interval_high Float64). +SELECT toTypeName(meanZTest(1., 1., 0.95)(v, g)) +FROM values('v Nullable(Float64), g Nullable(UInt8)', (1, 0), (3, 0), (5, 0), (2, 1), (10, 1), (NULL, 1)); + +-- studentTTestOneSample: legacy type was Tuple(t_statistic Float64, p_value Float64). +SELECT toTypeName(studentTTestOneSample(v, m)) +FROM values('v Nullable(Float64), m Nullable(Float64)', (1, 2), (3, 2), (NULL, 2), (5, 2)); + +-- argAndMin: legacy type was Tuple(Int32, Int32). +SELECT toTypeName(argAndMin(a, b)) +FROM values('a Nullable(Int32), b Nullable(Int32)', (1, 2), (NULL, 1), (3, 0)); + +-- argAndMax: legacy type was Tuple(Int32, Int32). +SELECT toTypeName(argAndMax(a, b)) +FROM values('a Nullable(Int32), b Nullable(Int32)', (1, 2), (NULL, 1), (3, 0)); + +-- argMin(tuple, val): legacy type was Tuple(Nullable(Int32), Nullable(Int32)). +SELECT toTypeName(argMin(tuple(a, b), c)) +FROM values('a Nullable(Int32), b Nullable(Int32), c Nullable(Int32)', (1, 2, 3), (NULL, 1, 0), (3, 4, 5)); + +-- argMax(tuple, val): legacy type was Tuple(Nullable(Int32), Nullable(Int32)). +SELECT toTypeName(argMax(tuple(a, b), c)) +FROM values('a Nullable(Int32), b Nullable(Int32), c Nullable(Int32)', (1, 2, 3), (NULL, 1, 0), (3, 4, 5)); + +-- sumMap(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt64)). +SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMap, Nullable(Tuple(Array(UInt8), Array(UInt64))))'))); + +-- sumMappedArrays(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt64)). 
+SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMappedArrays, Nullable(Tuple(Array(UInt8), Array(UInt64))))'))); + +-- sumMapWithOverflow(nullable tuple): legacy type was Tuple(Array(UInt8), Array(UInt8)). +SELECT toTypeName(finalizeAggregation(CAST(unhex('0101050700000000000000'), 'AggregateFunction(sumMapWithOverflow, Nullable(Tuple(Array(UInt8), Array(UInt8))))'))); From 811578137a349ebb913e3874847c56da3fb0a1d4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 5 Mar 2026 12:26:05 +0000 Subject: [PATCH 21/53] Backport #98776 to 26.1: Fix detecting set skip index usefulness with OR with false (i.e. or(x, 0)) predicate --- src/Storages/MergeTree/MergeTreeIndexSet.cpp | 10 ++++- ..._set_index_or_non_indexed_column.reference | 12 ++++++ .../04027_set_index_or_non_indexed_column.sql | 38 +++++++++++++++++++ 3 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/04027_set_index_or_non_indexed_column.reference create mode 100644 tests/queries/0_stateless/04027_set_index_or_non_indexed_column.sql diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index ab1bf72f4b20..eec57657d606 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -730,6 +730,14 @@ bool MergeTreeIndexConditionSet::checkDAGUseless(const ActionsDAG::Node & node, bool all_useless = true; for (const auto & arg : arguments) { + /// For OR, skip constant false children — they are identity elements + /// of OR and don't affect filtering. Without this, the constant + /// check above returns false (not useless) for `getBool(0) == 0`, + /// which would incorrectly make the entire OR appear non-useless + /// even when no indexed columns are referenced. 
+ if (function_name == "or" && arg->column && isColumnConst(*arg->column) && !arg->column->getBool(0)) + continue; + bool u = checkDAGUseless(*arg, context, sets_to_prepare, atomic); all_useless = all_useless && u; } @@ -740,7 +748,7 @@ bool MergeTreeIndexConditionSet::checkDAGUseless(const ActionsDAG::Node & node, return std::any_of( arguments.begin(), arguments.end(), - [&](const auto & arg) { return checkDAGUseless(*arg, context, sets_to_prepare, true /*atomic*/); }); + [&](const auto & arg) { return checkDAGUseless(*arg, context, sets_to_prepare, /*atomic=*/ true); }); } auto column_name = tree_node.getColumnName(); diff --git a/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.reference b/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.reference new file mode 100644 index 000000000000..eb7086d27f4f --- /dev/null +++ b/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.reference @@ -0,0 +1,12 @@ +-- { echoOn } + +-- Predicate with or(non_indexed, 0): set index should NOT be used. +SELECT count() FROM t_set_index_or WHERE or(a = 5, 0); +1 +SELECT count() FROM t_set_index_or WHERE or(a = 5, 0) SETTINGS force_data_skipping_indices='idx_b'; -- { serverError INDEX_NOT_USED } +-- Predicate on indexed column: set index should be used. +SELECT count() FROM t_set_index_or WHERE b = 5 SETTINGS force_data_skipping_indices='idx_b'; +100 +-- Indexed column inside or(..., 0): set index should still be used. 
+SELECT count() FROM t_set_index_or WHERE or(b = 5, 0) SETTINGS force_data_skipping_indices='idx_b'; +100 diff --git a/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.sql b/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.sql new file mode 100644 index 000000000000..60e9ccd1ade5 --- /dev/null +++ b/tests/queries/0_stateless/04027_set_index_or_non_indexed_column.sql @@ -0,0 +1,38 @@ +-- Tags: no-random-merge-tree-settings + +-- Test that set index is not incorrectly used when the predicate doesn't +-- reference any indexed columns but contains `or(..., 0)`. +-- The analyzer may produce such expressions when rewriting OR of equalities +-- on LowCardinality columns: `lc = 'a' OR lc = 'b'` → `or(in(lc, Set), 0)`. +-- The constant `0` (identity element of OR) was incorrectly making +-- `checkDAGUseless` return false. + +DROP TABLE IF EXISTS t_set_index_or; + +CREATE TABLE t_set_index_or +( + a UInt64, + b UInt64, + INDEX idx_b (b) TYPE set(100) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY a +SETTINGS index_granularity = 100, index_granularity_bytes = '10Mi', add_minmax_index_for_numeric_columns = 0; + +INSERT INTO t_set_index_or SELECT number, number / 100 FROM numbers(10000); + +-- { echoOn } + +-- Predicate with or(non_indexed, 0): set index should NOT be used. +SELECT count() FROM t_set_index_or WHERE or(a = 5, 0); +SELECT count() FROM t_set_index_or WHERE or(a = 5, 0) SETTINGS force_data_skipping_indices='idx_b'; -- { serverError INDEX_NOT_USED } + +-- Predicate on indexed column: set index should be used. +SELECT count() FROM t_set_index_or WHERE b = 5 SETTINGS force_data_skipping_indices='idx_b'; + +-- Indexed column inside or(..., 0): set index should still be used. 
+SELECT count() FROM t_set_index_or WHERE or(b = 5, 0) SETTINGS force_data_skipping_indices='idx_b'; + +-- { echoOff } + +DROP TABLE t_set_index_or; From 84bbaba3420fe27fb8d56c3eb4649c5d264ce073 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 5 Mar 2026 14:29:02 +0000 Subject: [PATCH 22/53] Backport #98797 to 26.1: Fix `SYSTEM START REPLICATED VIEW` not waking up the refresh task --- src/Storages/MaterializedView/RefreshTask.cpp | 1 + ...027_system_start_replicated_view.reference | 4 ++ .../04027_system_start_replicated_view.sh | 70 +++++++++++++++++++ 3 files changed, 75 insertions(+) create mode 100644 tests/queries/0_stateless/04027_system_start_replicated_view.reference create mode 100755 tests/queries/0_stateless/04027_system_start_replicated_view.sh diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index ee960f6b4eb3..41ec719e4060 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -163,6 +163,7 @@ OwnedRefreshTask RefreshTask::create( task->refresh_task_watch_callback = std::make_shared([w = task->coordination.watches, task_waker = task->refresh_task->getWatchCallback()](const Coordination::WatchResponse & response) { w->root_watch_active.store(false); + w->children_watch_active.store(false); w->should_reread_znodes.store(true); (*task_waker)(response); }); diff --git a/tests/queries/0_stateless/04027_system_start_replicated_view.reference b/tests/queries/0_stateless/04027_system_start_replicated_view.reference new file mode 100644 index 000000000000..ad2345f00608 --- /dev/null +++ b/tests/queries/0_stateless/04027_system_start_replicated_view.reference @@ -0,0 +1,4 @@ +<1: running> 1 +<2: stopped> Disabled +<3: restarted> 1 +<4: new rows> 1 diff --git a/tests/queries/0_stateless/04027_system_start_replicated_view.sh b/tests/queries/0_stateless/04027_system_start_replicated_view.sh new file mode 100755 index 
000000000000..41401cf96bff --- /dev/null +++ b/tests/queries/0_stateless/04027_system_start_replicated_view.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Tags: zookeeper + +# Regression test: SYSTEM START REPLICATED VIEW did not wake the refresh task +# because the ZooKeeper children watch was not re-registered after the first fire. + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +CLICKHOUSE_CLIENT=$(echo "$CLICKHOUSE_CLIENT" | sed 's/--session_timezone[= ][^ ]*//g') +CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --session_timezone Etc/UTC" + +db="rdb_$CLICKHOUSE_DATABASE" + +function cleanup() +{ + $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -q "drop database if exists $db" 2>/dev/null +} +trap cleanup EXIT + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -nq " + create database $db engine=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', 's1', 'r1'); + create view ${db}.refreshes as + select * from system.view_refreshes where database = '$db' order by view; + create materialized view ${db}.rmv + refresh after 1 second append + (x Int64) engine MergeTree order by x + empty + as select 1 as x; +" + +# Wait for the first refresh to succeed. +for _ in $(seq 1 60); do + if [ "$($CLICKHOUSE_CLIENT -q "select last_success_time is null from ${db}.refreshes -- $LINENO" | xargs)" = '0' ]; then + break + fi + sleep 0.5 +done +$CLICKHOUSE_CLIENT -q "select '<1: running>', status != 'Disabled' from ${db}.refreshes" + +# Stop the view globally via Keeper. +$CLICKHOUSE_CLIENT -q "system stop replicated view ${db}.rmv" +for _ in $(seq 1 60); do + if [ "$($CLICKHOUSE_CLIENT -q "select status from ${db}.refreshes -- $LINENO" | xargs)" = 'Disabled' ]; then + break + fi + sleep 0.5 +done +$CLICKHOUSE_CLIENT -q "select '<2: stopped>', status from ${db}.refreshes" + +# Remember the current row count. +cnt_before=$($CLICKHOUSE_CLIENT -q "select count() from ${db}.rmv") + +# Start the view back up. 
+$CLICKHOUSE_CLIENT -q "system start replicated view ${db}.rmv" + +# The bug: the view stayed Disabled forever here because the children watch +# was not re-registered after the stop event consumed it. +# Wait for the view to leave Disabled state and complete at least one more refresh. +for _ in $(seq 1 30); do + cnt_after=$($CLICKHOUSE_CLIENT -q "select count() from ${db}.rmv") + if [ "$cnt_after" -gt "$cnt_before" ]; then + break + fi + sleep 0.5 +done + +$CLICKHOUSE_CLIENT -q "select '<3: restarted>', status != 'Disabled' from ${db}.refreshes" +$CLICKHOUSE_CLIENT -q "select '<4: new rows>', count() > $cnt_before from ${db}.rmv" From 48d74f9fee0b90e3946655ef9018bcdc579a44cd Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 5 Mar 2026 18:49:10 +0000 Subject: [PATCH 23/53] Backport #98829 to 26.1: Fix adjusting RLIMIT_SIGPENDING (via pending_signals) --- src/Daemon/BaseDaemon.cpp | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 689c55d2eec9..447d131db219 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -290,10 +290,23 @@ void BaseDaemon::initialize(Application & self) struct rlimit rlim; if (getrlimit(RLIMIT_SIGPENDING, &rlim)) throw Poco::Exception("Cannot getrlimit"); - rlim.rlim_cur = pending_signals; - if (setrlimit(RLIMIT_SIGPENDING, &rlim)) + + /// Only adjust if the current soft limit is below the requested value. + if (rlim.rlim_cur < pending_signals) { - std::cerr << "Cannot set pending signals to " + std::to_string(rlim.rlim_cur) << std::endl; + rlim_t old_cur = rlim.rlim_cur; + rlim_t old_max = rlim.rlim_max; + + /// Raise hard limit only if needed (requires CAP_SYS_RESOURCE). 
+ /// (Note it is "unlimited" compatible, since it is rlim_t(-1)) + rlim.rlim_max = std::max(rlim.rlim_max, pending_signals); + + rlim.rlim_cur = pending_signals; + + if (setrlimit(RLIMIT_SIGPENDING, &rlim)) + std::cerr << "Cannot set RLIMIT_SIGPENDING (soft=" << old_cur << ", hard=" << old_max << ") to " << pending_signals << std::endl; + else + std::cerr << "Set RLIMIT_SIGPENDING from (soft=" << old_cur << ", hard=" << old_max << ") to " << pending_signals << std::endl; } } #endif From b47aad2a1150e0c741adb0e225d34897ff7b5ca7 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 5 Mar 2026 19:39:35 +0000 Subject: [PATCH 24/53] Backport #97299 to 26.1: Add restart-only `allow_nullable_tuple_in_extracted_subcolumns` setting for `Tuple/Variant/Dynamic/JSON` tuple subcolumns compatibility --- .../experimental_settings_ignore.txt | 1 + src/Core/Settings.cpp | 16 + src/Core/SettingsChangesHistory.cpp | 1 + src/DataTypes/DataTypeDynamic.cpp | 17 +- src/DataTypes/NullableUtils.cpp | 49 +- src/DataTypes/NullableUtils.h | 23 + .../SerializationDynamicElement.cpp | 3 +- .../Serializations/SerializationVariant.cpp | 6 +- src/DataTypes/transformTypesRecursively.cpp | 21 +- src/DataTypes/transformTypesRecursively.h | 8 +- src/Formats/SchemaInferenceUtils.cpp | 19 +- src/Formats/SchemaInferenceUtils.h | 6 + src/Functions/dynamicElement.cpp | 3 +- src/Functions/tupleElement.cpp | 5 +- src/Functions/variantElement.cpp | 3 +- .../__init__.py | 0 .../allow_nullable_tuple_subcolumns_off.xml | 7 + .../allow_nullable_tuple_subcolumns_on.xml | 7 + ...analyzer_join_resolve_nested.off.reference | 24 + ..._analyzer_join_resolve_nested.on.reference | 24 + ...variant_text_deserialization.off.reference | 9 + ..._variant_text_deserialization.on.reference | 9 + .../02941_variant_type_1.off.reference | 165 ++ .../02941_variant_type_1.on.reference | 165 ++ ...read_shared_subcolumns_small.off.reference | 606 +++++++ ..._read_shared_subcolumns_small.on.reference | 606 +++++++ 
...ynamic_read_subcolumns_small.off.reference | 546 +++++++ ...dynamic_read_subcolumns_small.on.reference | 546 +++++++ ..._alters_1_compact_merge_tree.off.reference | 152 ++ ...e_alters_1_compact_merge_tree.on.reference | 152 ++ ...dynamic_type_alters_1_memory.off.reference | 152 ++ ..._dynamic_type_alters_1_memory.on.reference | 152 ++ ...ype_alters_1_wide_merge_tree.off.reference | 152 ++ ...type_alters_1_wide_merge_tree.on.reference | 152 ++ ..._alters_2_compact_merge_tree.off.reference | 75 + ...e_alters_2_compact_merge_tree.on.reference | 75 + ...ype_alters_2_wide_merge_tree.off.reference | 75 + ...type_alters_2_wide_merge_tree.on.reference | 75 + ...041_dynamic_type_check_table.off.reference | 56 + ...3041_dynamic_type_check_table.on.reference | 56 + .../03162_dynamic_type_nested.off.reference | 4 + .../03162_dynamic_type_nested.on.reference | 4 + .../03290_nullable_json.off.reference | 39 + .../expected/03290_nullable_json.on.reference | 39 + ...t_escape_filename_merge_tree.off.reference | 2 + ...nt_escape_filename_merge_tree.on.reference | 2 + ...e_inside_nullable_subcolumns.off.reference | 28 + ...le_inside_nullable_subcolumns.on.reference | 28 + ...able_variant_dynamic_element.off.reference | 8 + ...lable_variant_dynamic_element.on.reference | 8 + ...ide_nullable_json_subcolumns.off.reference | 8 + ...side_nullable_json_subcolumns.on.reference | 8 + ...de_nullable_tuple_subcolumns.off.reference | 4 + ...ide_nullable_tuple_subcolumns.on.reference | 4 + ...nullable_subcolumns_off_only.off.reference | 0 ...e_nullable_subcolumns_on_only.on.reference | 4 + .../02731_analyzer_join_resolve_nested.sql | 44 + .../02940_variant_text_deserialization.sql | 9 + .../queries/02941_variant_type_1.sql | 59 + ...6_dynamic_read_shared_subcolumns_small.sql | 78 + .../03036_dynamic_read_subcolumns_small.sql | 75 + ...namic_type_alters_1_compact_merge_tree.sql | 55 + .../03040_dynamic_type_alters_1_memory.sql | 53 + ..._dynamic_type_alters_1_wide_merge_tree.sql | 55 + 
...namic_type_alters_2_compact_merge_tree.sql | 43 + ..._dynamic_type_alters_2_wide_merge_tree.sql | 45 + .../03041_dynamic_type_check_table.sql | 69 + .../queries/03162_dynamic_type_nested.sql | 25 + .../queries/03290_nullable_json.sql | 29 + ...369_variant_escape_filename_merge_tree.sql | 34 + ...03913_tuple_inside_nullable_subcolumns.sql | 53 + ...nside_nullable_variant_dynamic_element.sql | 23 + ..._tuple_inside_nullable_json_subcolumns.sql | 43 + ...tuple_inside_nullable_tuple_subcolumns.sql | 26 + ...le_inside_nullable_subcolumns_off_only.sql | 12 + ...ple_inside_nullable_subcolumns_on_only.sql | 12 + .../test_nullable_tuple_subcolumns/test.py | 164 ++ ...731_analyzer_join_resolve_nested.reference | 54 +- ...940_variant_text_deserialization.reference | 10 +- .../02941_variant_type_1.reference | 1410 ++++++++--------- ..._read_shared_subcolumns_small.reference.j2 | 600 +++---- ...dynamic_read_subcolumns_small.reference.j2 | 540 +++---- ...type_alters_1_compact_merge_tree.reference | 286 ++-- ...040_dynamic_type_alters_1_memory.reference | 286 ++-- ...ic_type_alters_1_wide_merge_tree.reference | 286 ++-- ...type_alters_2_compact_merge_tree.reference | 138 +- ...ic_type_alters_2_wide_merge_tree.reference | 138 +- .../03041_dynamic_type_check_table.reference | 72 +- .../03162_dynamic_type_nested.reference | 2 +- ...9_json_null_as_default_for_tuple.reference | 9 + .../03229_json_null_as_default_for_tuple.sql | 8 + .../0_stateless/03290_nullable_json.reference | 24 +- ...riant_escape_filename_merge_tree.reference | 4 +- ..._nullable_function_tuple_element.reference | 6 +- ...tuple_inside_nullable_subcolumns.reference | 76 + ...03913_tuple_inside_nullable_subcolumns.sql | 60 + .../03914_dynamic_illegal_subcolumn.reference | 0 .../03914_dynamic_illegal_subcolumn.sql | 14 + ...nullable_variant_dynamic_element.reference | 31 + ...nside_nullable_variant_dynamic_element.sql | 26 + ..._inside_nullable_json_subcolumns.reference | 40 + 
..._tuple_inside_nullable_json_subcolumns.sql | 42 + ...inside_nullable_tuple_subcolumns.reference | 4 + ...tuple_inside_nullable_tuple_subcolumns.sql | 24 + ...racted_subcolumns_not_changeable.reference | 2 + ...in_extracted_subcolumns_not_changeable.sql | 22 + 106 files changed, 7677 insertions(+), 1952 deletions(-) create mode 100644 tests/integration/test_nullable_tuple_subcolumns/__init__.py create mode 100644 tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_off.xml create mode 100644 tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_on.xml create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.on.reference create mode 100644 
tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.off.reference create mode 100644 
tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_off_only.off.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_on_only.on.reference create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/02731_analyzer_join_resolve_nested.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/02940_variant_text_deserialization.sql create mode 100644 
tests/integration/test_nullable_tuple_subcolumns/queries/02941_variant_type_1.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_shared_subcolumns_small.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_subcolumns_small.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_compact_merge_tree.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_memory.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_wide_merge_tree.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_compact_merge_tree.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_wide_merge_tree.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03041_dynamic_type_check_table.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03162_dynamic_type_nested.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03290_nullable_json.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03369_variant_escape_filename_merge_tree.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03913_tuple_inside_nullable_subcolumns.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03915_tuple_inside_nullable_variant_dynamic_element.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03916_tuple_inside_nullable_json_subcolumns.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/03917_tuple_inside_nullable_tuple_subcolumns.sql create mode 100644 
tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_off_only.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_on_only.sql create mode 100644 tests/integration/test_nullable_tuple_subcolumns/test.py create mode 100644 tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.reference create mode 100644 tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.sql create mode 100644 tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.reference create mode 100644 tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.sql create mode 100644 tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.reference create mode 100644 tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.sql create mode 100644 tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.reference create mode 100644 tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.sql create mode 100644 tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.reference create mode 100644 tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.sql create mode 100644 tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.reference create mode 100644 tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.sql diff --git a/ci/jobs/scripts/check_style/experimental_settings_ignore.txt b/ci/jobs/scripts/check_style/experimental_settings_ignore.txt index 696f970b2d12..61053a2045ba 100644 --- a/ci/jobs/scripts/check_style/experimental_settings_ignore.txt +++ b/ci/jobs/scripts/check_style/experimental_settings_ignore.txt @@ -62,6 +62,7 @@ allow_experimental_dynamic_type allow_experimental_json_type allow_experimental_vector_similarity_index enable_vector_similarity_index 
+allow_nullable_tuple_in_extracted_subcolumns allow_experimental_live_view allow_special_serialization_kinds_in_output_formats allow_not_comparable_types_in_comparison_functions diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index e72653747d54..757514fbc26d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -7424,7 +7424,23 @@ Allow to create database with Engine=MaterializedPostgreSQL(...). \ DECLARE(Bool, allow_experimental_nullable_tuple_type, false, R"( Allows creation of [Nullable](../../sql-reference/data-types/nullable) [Tuple](../../sql-reference/data-types/tuple.md) columns in tables. + +This setting does not control whether extracted tuple subcolumns can be `Nullable` (for example, from Dynamic, Variant, JSON, or Tuple columns). +Use `allow_nullable_tuple_in_extracted_subcolumns` to control whether extracted tuple subcolumns can be `Nullable`. )", EXPERIMENTAL) \ + DECLARE(Bool, allow_nullable_tuple_in_extracted_subcolumns, false, R"( +Controls whether extracted subcolumns of type `Tuple(...)` can be typed as `Nullable(Tuple(...))`. + +- `false`: Return `Tuple(...)` and use default tuple values for rows where the subcolumn is missing. +- `true`: Return `Nullable(Tuple(...))` and use `NULL` for rows where the subcolumn is missing. + +This setting controls extracted subcolumn behavior only. +It does not control whether `Nullable(Tuple(...))` columns can be created in tables; that is controlled by `allow_experimental_nullable_tuple_type`. + +ClickHouse uses the value for this setting loaded at server startup. +Changes made with `SET` or query-level `SETTINGS` do not change extracted subcolumn behavior. +To change extracted subcolumn behavior, update `allow_nullable_tuple_in_extracted_subcolumns` in startup profile configuration (for example, users.xml) and restart the server. +)", 0) \ \ /** Experimental feature for moving data between shards. 
*/ \ DECLARE(Bool, allow_experimental_query_deduplication, false, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index e8686e2a43ea..5c8e0d8095b4 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -63,6 +63,7 @@ const VersionToSettingsChangesMap & getSettingsChangesHistory() {"join_runtime_bloom_filter_max_ratio_of_set_bits", 0.7, 0.7, "New setting"}, {"check_conversion_from_numbers_to_enum", false, true, "New setting"}, {"allow_experimental_nullable_tuple_type", false, false, "New experimental setting"}, + {"allow_nullable_tuple_in_extracted_subcolumns", false, false, "New setting controlling whether extracted Tuple subcolumns can be nullable."}, {"use_skip_indexes_on_data_read", false, true, "Default enable"}, {"check_conversion_from_numbers_to_enum", false, false, "New setting"}, {"archive_adaptive_buffer_max_size_bytes", 8 * 1024 * 1024, 8 * 1024 * 1024, "New setting"}, diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index d1ae4682e6ef..29fe4588a77f 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_COLUMN; + extern const int LOGICAL_ERROR; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int UNEXPECTED_AST_STRUCTURE; } @@ -215,20 +217,31 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa bool is_null_map_subcolumn = subcolumn_nested_name == "null"; if (is_null_map_subcolumn) { - if (!subcolumn_type->canBeInsideNullable()) + if (!canExtractedSubcolumnsBeInsideNullable(subcolumn_type)) + { + if (throw_if_null) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", subcolumn_name); return nullptr; + } res->type = std::make_shared(); } else if (!subcolumn_nested_name.empty()) { res = 
getSubcolumnData(subcolumn_nested_name, *res, throw_if_null); if (!res) + { + if (throw_if_null) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected getSubcolumnData() to throw for subcolumn '{}' in throw_if_null mode", + subcolumn_name); return nullptr; + } } res->serialization = std::make_shared(res->serialization, subcolumn_type->getName(), String(subcolumn_nested_name), is_null_map_subcolumn); /// Make resulting subcolumn Nullable only if type subcolumn can be inside Nullable or can be LowCardinality(Nullable()). - bool make_subcolumn_nullable = subcolumn_type->canBeInsideNullable() || subcolumn_type->lowCardinality(); + bool make_subcolumn_nullable = canExtractedSubcolumnsBeInsideNullableOrLowCardinalityNullable(subcolumn_type); if (!is_null_map_subcolumn && make_subcolumn_nullable) res->type = makeNullableOrLowCardinalityNullableSafe(res->type); diff --git a/src/DataTypes/NullableUtils.cpp b/src/DataTypes/NullableUtils.cpp index d0b0a4f9be62..9fd10c68c9aa 100644 --- a/src/DataTypes/NullableUtils.cpp +++ b/src/DataTypes/NullableUtils.cpp @@ -1,14 +1,57 @@ #include #include +#include #include #include #include +#include +#include #include namespace DB { +namespace Setting +{ +extern const SettingsBool allow_nullable_tuple_in_extracted_subcolumns; +} + +static bool isNullableTupleInExtractedSubcolumnsEnabledByGlobalSetting() +{ + auto context = Context::getGlobalContextInstance(); + return context && context->getSettingsRef()[Setting::allow_nullable_tuple_in_extracted_subcolumns]; +} + +static bool canExtractedSubcolumnsBeInsideNullable(const ColumnPtr & column) +{ + if (checkAndGetColumn(column.get())) + return isNullableTupleInExtractedSubcolumnsEnabledByGlobalSetting(); + + return column->canBeInsideNullable(); +} + +bool canExtractedSubcolumnsBeInsideNullable(const DataTypePtr & type) +{ + if (isTuple(type)) + return isNullableTupleInExtractedSubcolumnsEnabledByGlobalSetting(); + + return type->canBeInsideNullable(); +} + +bool 
canExtractedSubcolumnsBeInsideNullableOrLowCardinalityNullable(const DataTypePtr & type) +{ + return canExtractedSubcolumnsBeInsideNullable(removeLowCardinality(type)); +} + +DataTypePtr makeExtractedSubcolumnsNullableOrLowCardinalityNullableSafe(const DataTypePtr & type) +{ + if (!canExtractedSubcolumnsBeInsideNullableOrLowCardinalityNullable(type)) + return type; + + return makeNullableOrLowCardinalityNullableSafe(type); +} + ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map) { ColumnPtr null_map_holder; @@ -69,19 +112,21 @@ ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullM DataTypePtr NullableSubcolumnCreator::create(const DataTypePtr & prev) const { + if (!canExtractedSubcolumnsBeInsideNullable(prev)) + return prev; return makeNullableSafe(prev); } SerializationPtr NullableSubcolumnCreator::create(const SerializationPtr & prev_serialization, const DataTypePtr & prev_type) const { - if (prev_type && !prev_type->canBeInsideNullable()) + if (prev_type && !canExtractedSubcolumnsBeInsideNullable(prev_type)) return prev_serialization; return std::make_shared(prev_serialization); } ColumnPtr NullableSubcolumnCreator::create(const ColumnPtr & prev) const { - if (prev->canBeInsideNullable()) + if (canExtractedSubcolumnsBeInsideNullable(prev)) return ColumnNullable::create(prev, null_map); return prev; } diff --git a/src/DataTypes/NullableUtils.h b/src/DataTypes/NullableUtils.h index 7e6d5b291b9e..b73d733d198c 100644 --- a/src/DataTypes/NullableUtils.h +++ b/src/DataTypes/NullableUtils.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -13,6 +14,28 @@ namespace DB */ ColumnPtr extractNestedColumnsAndNullMap(ColumnRawPtrs & key_columns, ConstNullMapPtr & null_map); +/** Returns whether `type` can be wrapped into `Nullable(...)` with current + * `allow_nullable_tuple_in_extracted_subcolumns` setting value from global context. 
+ * Config changes for global context settings are applied after server restart. + * For non-tuple types this matches `IDataType::canBeInsideNullable()`. + */ +bool canExtractedSubcolumnsBeInsideNullable(const DataTypePtr & type); + +/** Same check as `canExtractedSubcolumnsBeInsideNullable()`, but for + * `LowCardinality(T)` checks whether nested `T` can be nullable by + * settings, i.e. whether wrapping into `LowCardinality(Nullable(T))` is + * possible. + */ +bool canExtractedSubcolumnsBeInsideNullableOrLowCardinalityNullable(const DataTypePtr & type); + +/** Wraps `type` into `Nullable(...)` or `LowCardinality(Nullable(...))` when + * allowed by type capabilities and current + * `allow_nullable_tuple_in_extracted_subcolumns` setting value from global context. + * Config changes for global context settings are applied after server restart. + * Returns `type` unchanged when wrapping is not allowed. + */ +DataTypePtr makeExtractedSubcolumnsNullableOrLowCardinalityNullableSafe(const DataTypePtr & type); + struct NullableSubcolumnCreator : public ISerialization::ISubcolumnCreator { const ColumnPtr null_map; diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.cpp b/src/DataTypes/Serializations/SerializationDynamicElement.cpp index 8d419b862d26..d93752b65236 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.cpp +++ b/src/DataTypes/Serializations/SerializationDynamicElement.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -176,7 +177,7 @@ void SerializationDynamicElement::deserializeBinaryBulkWithMultipleStreams( /// If we need to read a subcolumn from variant column, create an empty variant column, fill it and extract subcolumn. 
auto variant_type = DataTypeFactory::instance().get(dynamic_element_name); - auto result_type = makeNullableOrLowCardinalityNullableSafe(variant_type); + auto result_type = makeExtractedSubcolumnsNullableOrLowCardinalityNullableSafe(variant_type); MutableColumnPtr variant_column = nested_subcolumn.empty() || is_null_map_subcolumn ? result_column->assumeMutable() : result_type->createColumn(); variant_column->reserve(variant_column->size() + limit); MutableColumnPtr non_nullable_variant_column = variant_column->assumeMutable(); diff --git a/src/DataTypes/Serializations/SerializationVariant.cpp b/src/DataTypes/Serializations/SerializationVariant.cpp index 5689c25bbba0..e0d639c96613 100644 --- a/src/DataTypes/Serializations/SerializationVariant.cpp +++ b/src/DataTypes/Serializations/SerializationVariant.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -106,12 +107,13 @@ void SerializationVariant::enumerateStreams( for (ColumnVariant::Discriminator i = 0; i < static_cast(variant_serializations.size()); ++i) { DataTypePtr type = type_variant ? type_variant->getVariant(i) : nullptr; + const bool make_subcolumn_nullable = !type || canExtractedSubcolumnsBeInsideNullableOrLowCardinalityNullable(type); settings.path.back().creator = std::make_shared( local_discriminators, variant_names[i], i, column_variant ? 
column_variant->localDiscriminatorByGlobal(i) : i, - !type || type->canBeInsideNullable() || type->lowCardinality()); + make_subcolumn_nullable); auto variant_data = SubstreamData(variant_serializations[i]) .withType(type) @@ -136,7 +138,7 @@ void SerializationVariant::enumerateStreams( chassert(variant_serializations.size() <= std::numeric_limits::max()); for (ColumnVariant::Discriminator i = 0; i < static_cast(variant_serializations.size()); ++i) { - if (!variant_types[i]->canBeInsideNullable()) + if (!canExtractedSubcolumnsBeInsideNullable(variant_types[i])) continue; settings.path.back().creator = std::make_shared(local_discriminators, variant_names[i], i, column_variant ? column_variant->localDiscriminatorByGlobal(i) : i); diff --git a/src/DataTypes/transformTypesRecursively.cpp b/src/DataTypes/transformTypesRecursively.cpp index 9b34ab9b414a..f46be351dafb 100644 --- a/src/DataTypes/transformTypesRecursively.cpp +++ b/src/DataTypes/transformTypesRecursively.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB @@ -16,7 +17,11 @@ TypeIndexesSet getTypesIndexes(const DataTypes & types) return type_indexes; } -void transformTypesRecursively(DataTypes & types, std::function transform_simple_types, std::function transform_complex_types) +void transformTypesRecursively( + DataTypes & types, + std::function transform_simple_types, + std::function transform_complex_types, + const FormatSettings * format_settings) { TypeIndexesSet type_indexes = getTypesIndexes(types); @@ -41,11 +46,13 @@ void transformTypesRecursively(DataTypes & types, std::functioncanBeInsideNullable()) + const bool can_make_nullable = format_settings ? 
canBeInsideNullableBySchemaSettings(nested_types[i], *format_settings) + : nested_types[i]->canBeInsideNullable(); + if (is_nullable[i] && can_make_nullable) types[i] = makeNullable(nested_types[i]); else types[i] = nested_types[i]; @@ -71,7 +78,7 @@ void transformTypesRecursively(DataTypes & types, std::function(type.get())->getNestedType()); - transformTypesRecursively(nested_types, transform_simple_types, transform_complex_types); + transformTypesRecursively(nested_types, transform_simple_types, transform_complex_types, format_settings); for (size_t i = 0; i != types.size(); ++i) types[i] = std::make_shared(nested_types[i]); } @@ -124,7 +131,7 @@ void transformTypesRecursively(DataTypes & types, std::function transposed_nested_types(types.size()); for (size_t elem_idx = 0; elem_idx < tuple_size; ++elem_idx) { - transformTypesRecursively(nested_types[elem_idx], transform_simple_types, transform_complex_types); + transformTypesRecursively(nested_types[elem_idx], transform_simple_types, transform_complex_types, format_settings); for (size_t i = 0; i != types.size(); ++i) transposed_nested_types[i].push_back(nested_types[elem_idx][i]); } @@ -162,8 +169,8 @@ void transformTypesRecursively(DataTypes & types, std::functiongetValueType()); } - transformTypesRecursively(key_types, transform_simple_types, transform_complex_types); - transformTypesRecursively(value_types, transform_simple_types, transform_complex_types); + transformTypesRecursively(key_types, transform_simple_types, transform_complex_types, format_settings); + transformTypesRecursively(value_types, transform_simple_types, transform_complex_types, format_settings); for (size_t i = 0; i != types.size(); ++i) types[i] = std::make_shared(key_types[i], value_types[i]); diff --git a/src/DataTypes/transformTypesRecursively.h b/src/DataTypes/transformTypesRecursively.h index f9c776b42053..b3f53e253519 100644 --- a/src/DataTypes/transformTypesRecursively.h +++ b/src/DataTypes/transformTypesRecursively.h @@ -6,13 
+6,19 @@ namespace DB { +struct FormatSettings; + /// Function that applies custom transformation functions to provided types recursively. /// Implementation is similar to function getLeastSuperType: /// If all types are Array/Map/Tuple/Nullable, this function will be called to nested types. /// If not all types are the same complex type (Array/Map/Tuple), this function won't be called to nested types. /// Function transform_simple_types will be applied to resulting simple types after all recursive calls. /// Function transform_complex_types will be applied to complex types (Array/Map/Tuple) after recursive call to their nested types. -void transformTypesRecursively(DataTypes & types, std::function transform_simple_types, std::function transform_complex_types); +void transformTypesRecursively( + DataTypes & types, + std::function transform_simple_types, + std::function transform_complex_types, + const FormatSettings * format_settings = nullptr); void callOnNestedSimpleTypes(DataTypePtr & type, std::function callback); diff --git a/src/Formats/SchemaInferenceUtils.cpp b/src/Formats/SchemaInferenceUtils.cpp index d4ee53db6b98..499ec5341a9e 100644 --- a/src/Formats/SchemaInferenceUtils.cpp +++ b/src/Formats/SchemaInferenceUtils.cpp @@ -470,15 +470,16 @@ namespace updateTypeIndexes(data_types, type_indexes); } - /// If we have both Nullable and non Nullable types, make all types Nullable - void transformNullableTypes(DataTypes & data_types, TypeIndexesSet & type_indexes) + /// If we have both Nullable and non Nullable types, make all types Nullable. + /// Nullable(Tuple(...)) is controlled by schema_inference_allow_nullable_tuple_type. 
+ void transformNullableTypes(DataTypes & data_types, TypeIndexesSet & type_indexes, const FormatSettings & settings) { if (!type_indexes.contains(TypeIndex::Nullable)) return; for (auto & type : data_types) { - if (type->canBeInsideNullable()) + if (canBeInsideNullableBySchemaSettings(type, settings)) type = makeNullable(type); } @@ -737,7 +738,7 @@ namespace auto transform_complex_types = [&](DataTypes & data_types, TypeIndexesSet & type_indexes) { /// Make types Nullable if needed. - transformNullableTypes(data_types, type_indexes); + transformNullableTypes(data_types, type_indexes, settings); /// If we have type Nothing, it means that we had empty Array/Map while inference. /// If there is at least one non Nothing type, change all Nothing types to it. @@ -770,7 +771,7 @@ namespace transformVariant(data_types, type_indexes); }; - transformTypesRecursively(types, transform_simple_types, transform_complex_types); + transformTypesRecursively(types, transform_simple_types, transform_complex_types, &settings); } template @@ -1384,6 +1385,14 @@ namespace } } +bool canBeInsideNullableBySchemaSettings(const DataTypePtr & type, const FormatSettings & settings) +{ + if (isTuple(type) && !settings.schema_inference_allow_nullable_tuple_type) + return false; + + return type->canBeInsideNullable(); +} + bool checkIfTypesAreEqual(const DataTypes & types) { if (types.empty()) diff --git a/src/Formats/SchemaInferenceUtils.h b/src/Formats/SchemaInferenceUtils.h index 0f056e6bc7e2..004eae7775e2 100644 --- a/src/Formats/SchemaInferenceUtils.h +++ b/src/Formats/SchemaInferenceUtils.h @@ -33,6 +33,12 @@ struct JSONInferenceInfo bool allow_merging_named_tuples = false; }; +/// Check whether a type can be wrapped into Nullable according to schema inference settings. +/// Currently, only Tuple is setting-dependent: +/// If `schema_inference_allow_nullable_tuple_type` is disabled, Tuple cannot be wrapped into Nullable. 
+/// Otherwise this check is equivalent to type->canBeInsideNullable(). +bool canBeInsideNullableBySchemaSettings(const DataTypePtr & type, const FormatSettings & settings); + /// Try to determine datatype of the value in buffer/string. If the type cannot be inferred, return nullptr. /// In general, it tries to parse a type using the following logic: /// If we see '[', we try to parse an array of values and recursively determine datatype for each element. diff --git a/src/Functions/dynamicElement.cpp b/src/Functions/dynamicElement.cpp index f75e75fcb26e..549e14a287ba 100644 --- a/src/Functions/dynamicElement.cpp +++ b/src/Functions/dynamicElement.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -66,7 +67,7 @@ class FunctionDynamicElement : public IFunction getName(), arguments[0].type->getName()); - auto return_type = makeNullableOrLowCardinalityNullableSafe(getRequestedType(arguments[1].column)); + auto return_type = makeExtractedSubcolumnsNullableOrLowCardinalityNullableSafe(getRequestedType(arguments[1].column)); for (; count_arrays; --count_arrays) return_type = std::make_shared(return_type); diff --git a/src/Functions/tupleElement.cpp b/src/Functions/tupleElement.cpp index 9225f5c1762b..2706864b99c7 100644 --- a/src/Functions/tupleElement.cpp +++ b/src/Functions/tupleElement.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -106,7 +107,7 @@ class FunctionTupleElement : public IFunction { DataTypePtr element_type = tuple->getElements()[index.value()]; - if (is_input_type_nullable && element_type->canBeInsideNullable()) + if (is_input_type_nullable && canExtractedSubcolumnsBeInsideNullable(element_type)) element_type = std::make_shared(element_type); return wrapInArrays(std::move(element_type), count_arrays); @@ -217,7 +218,7 @@ class FunctionTupleElement : public IFunction ColumnPtr merged_null_map = mergeNullMaps(null_map_column, res_nullable->getNullMapColumnPtr()); res = 
ColumnNullable::create(res_nullable->getNestedColumnPtr(), merged_null_map); } - else if (element_type->canBeInsideNullable()) + else if (canExtractedSubcolumnsBeInsideNullable(element_type)) { res = ColumnNullable::create(res, null_map_column); } diff --git a/src/Functions/variantElement.cpp b/src/Functions/variantElement.cpp index d91bbeb1fac5..59dd45c15c67 100644 --- a/src/Functions/variantElement.cpp +++ b/src/Functions/variantElement.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -72,7 +73,7 @@ class FunctionVariantElement : public IFunction std::optional variant_global_discr = getVariantGlobalDiscriminator(arguments[1].column, *variant_type, number_of_arguments); if (variant_global_discr.has_value()) { - DataTypePtr return_type = makeNullableOrLowCardinalityNullableSafe(variant_type->getVariant(variant_global_discr.value())); + DataTypePtr return_type = makeExtractedSubcolumnsNullableOrLowCardinalityNullableSafe(variant_type->getVariant(variant_global_discr.value())); for (; count_arrays; --count_arrays) return_type = std::make_shared(return_type); diff --git a/tests/integration/test_nullable_tuple_subcolumns/__init__.py b/tests/integration/test_nullable_tuple_subcolumns/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_off.xml b/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_off.xml new file mode 100644 index 000000000000..6899e8dd6202 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_off.xml @@ -0,0 +1,7 @@ + + + + 0 + + + diff --git a/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_on.xml b/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_on.xml new file mode 100644 index 000000000000..85a346b5755c --- /dev/null +++ 
b/tests/integration/test_nullable_tuple_subcolumns/configs/allow_nullable_tuple_subcolumns_on.xml @@ -0,0 +1,7 @@ + + + + 1 + + + diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.off.reference new file mode 100644 index 000000000000..c1826168bd5a --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.off.reference @@ -0,0 +1,24 @@ +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +(((3,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +((1,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +((2,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +((0,''),'') Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +(1,'s') s Tuple(\n t UInt32,\n s String) String +(2,'s') s Tuple(\n t UInt32,\n s String) String +(0,'') Tuple(\n t UInt32,\n s String) String +1 s UInt32 String +2 s UInt32 String +0 UInt32 String +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) Nullable(String) +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) Nullable(String) +(((3,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) Nullable(String) +((1,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((2,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((0,''),'') \N Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +(1,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) 
+(2,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(0,'') \N Tuple(\n t UInt32,\n s String) Nullable(String) +1 s Nullable(UInt32) Nullable(String) +2 s Nullable(UInt32) Nullable(String) +\N \N Nullable(UInt32) Nullable(String) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.on.reference new file mode 100644 index 000000000000..d20a06fc820a --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02731_analyzer_join_resolve_nested.on.reference @@ -0,0 +1,24 @@ +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +(((3,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) String +((1,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +((2,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +((0,''),'') Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) String +(1,'s') s Tuple(\n t UInt32,\n s String) String +(2,'s') s Tuple(\n t UInt32,\n s String) String +(0,'') Tuple(\n t UInt32,\n s String) String +1 s UInt32 String +2 s UInt32 String +0 UInt32 String +(((1,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) +(((2,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) +(((3,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) +((1,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) +((2,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) +\N \N Nullable(Tuple(t Tuple(t UInt32, s String), s String)) 
Nullable(String) +(1,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) +(2,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) +\N \N Nullable(Tuple(t UInt32, s String)) Nullable(String) +1 s Nullable(UInt32) Nullable(String) +2 s Nullable(UInt32) Nullable(String) +\N \N Nullable(UInt32) Nullable(String) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.off.reference new file mode 100644 index 000000000000..9e6b6ea7fb18 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.off.reference @@ -0,0 +1,9 @@ +Tuple +{"v":null,"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":0,"b":0}} +{"v":"string","variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":0,"b":0}} +{"v":{"a":42,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":42,"b":0}} +{"v":{"a":44,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":44,"b":0}} +\N (0,0) +string (0,0) +(42,0) (42,0) +{"a" : 44, "d" : 32} (0,0) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.on.reference new file mode 100644 index 000000000000..12e493dc6451 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02940_variant_text_deserialization.on.reference @@ -0,0 +1,9 @@ +Tuple +{"v":null,"variantElement(v, 'Tuple(a UInt64, b UInt64)')":null} +{"v":"string","variantElement(v, 'Tuple(a UInt64, b UInt64)')":null} +{"v":{"a":42,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":42,"b":0}} +{"v":{"a":44,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":44,"b":0}} +\N \N +string \N +(42,0) (42,0) +{"a" : 44, "d" : 32} \N diff --git 
a/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.off.reference new file mode 100644 index 000000000000..57add1029200 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.off.reference @@ -0,0 +1,165 @@ +test1 +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,1) +(1,2) +(2,3) +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +3 +0 +0 +0 +test2 +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,1) +(0,0) +(2,3) +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +0 +3 +0 +0 +0 +test3 +(0,0) +(0,0) +(0,0) +(0,0) +(4,5) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(10,11) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(16,17) +(0,0) +0 +0 +0 +0 +4 +0 +0 +0 +0 +0 +10 +0 +0 +0 +0 +0 +16 +0 +0 +0 +0 +0 +5 +0 +0 +0 +0 +0 +11 +0 +0 +0 +0 +0 +17 +0 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.on.reference new file mode 100644 index 000000000000..34a6e69cf8c9 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/02941_variant_type_1.on.reference @@ -0,0 +1,165 @@ +test1 +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +(0,1) +(1,2) +(2,3) +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +0 +1 +2 +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +1 +2 +3 +\N +\N +\N +test2 +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +(0,1) +\N +(2,3) +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +0 +\N +2 +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +1 +\N +3 +\N +\N +\N +test3 +\N +\N +\N +\N +(4,5) +\N +\N +\N +\N +\N +(10,11) 
+\N +\N +\N +\N +\N +(16,17) +\N +\N +\N +\N +\N +4 +\N +\N +\N +\N +\N +10 +\N +\N +\N +\N +\N +16 +\N +\N +\N +\N +\N +5 +\N +\N +\N +\N +\N +11 +\N +\N +\N +\N +\N +17 +\N diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.off.reference new file mode 100644 index 000000000000..e687a4878bdf --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.off.reference @@ -0,0 +1,606 @@ +Memory +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] 
[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] 
+str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] 
+[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] 
[] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] 
[] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.on.reference new file mode 100644 index 000000000000..3496f0da851f --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_shared_subcolumns_small.on.reference @@ -0,0 +1,606 @@ +Memory +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +str_10 \N [] [] +10 \N [] [] +10 \N [] [] +[[0,1]] \N [] [] +str_11 \N [] [] +str_11 \N [] [] +str_12 \N [] [] +12 \N [] [] +12 \N [] [] +str_13 \N [] [] +13 \N [] [] +13 \N [] [] +str_14 \N [] [] +14 \N [] [] +14 \N [] [] +str_15 \N [] [] +15 \N [] [] +15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +str_16 \N [] [] +str_17 \N [] [] +17 \N [] [] +17 \N [] [] +str_18 \N [] [] +18 \N [] [] +18 \N [] [] +str_19 \N [] [] +19 \N [] [] +19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N 
\N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] +[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +str_10 \N [] [] +10 \N [] [] +10 \N [] [] +[[0,1]] \N [] [] +str_11 \N [] [] +str_11 \N [] [] 
+str_12 \N [] [] +12 \N [] [] +12 \N [] [] +str_13 \N [] [] +13 \N [] [] +13 \N [] [] +str_14 \N [] [] +14 \N [] [] +14 \N [] [] +str_15 \N [] [] +15 \N [] [] +15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +str_16 \N [] [] +str_17 \N [] [] +17 \N [] [] +17 \N [] [] +str_18 \N [] [] +18 \N [] [] +18 \N [] [] +str_19 \N [] [] +19 \N [] [] +19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N \N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] +[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] 
[] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +str_10 \N [] [] +10 \N [] [] +10 \N [] [] +[[0,1]] \N [] [] +str_11 \N [] [] +str_11 \N [] [] +str_12 \N [] [] +12 \N [] [] +12 \N [] [] +str_13 \N [] [] +13 \N [] [] +13 \N [] [] +str_14 \N [] [] +14 \N [] [] +14 \N [] [] +str_15 \N [] [] +15 \N [] [] +15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +str_16 \N [] [] +str_17 \N [] [] +17 \N [] [] +17 \N [] [] +str_18 \N [] [] +18 \N [] [] +18 \N [] [] +str_19 \N [] [] +19 \N [] [] +19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 
+[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N \N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] +[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] 
[] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.off.reference new file mode 100644 index 000000000000..fd06a9a87820 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.off.reference @@ -0,0 +1,546 @@ +Memory +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] 
+['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings 
min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 
[] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 +0 +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] 
+[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] 
[] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.on.reference new file mode 100644 index 000000000000..9fdca35f0a49 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03036_dynamic_read_subcolumns_small.on.reference @@ -0,0 +1,546 @@ +Memory +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +[[0]] \N [] [] +str_10 \N [] [] +[[0,1]] \N [] [] +str_11 \N [] [] +[[0,1,2]] \N [] [] +str_12 \N [] [] +[[0,1,2,3]] \N [] [] +str_13 \N [] [] +[[0,1,2,3,4]] \N [] [] +str_14 \N [] [] +[[0,1,2,3,4,5]] \N [] [] +str_15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +[[0,1,2,3,4,5,6,7]] \N [] [] +str_17 \N [] [] +[[0,1,2,3,4,5,6,7,8]] \N [] [] +str_18 \N [] [] +[[0,1,2,3,4,5,6,7,8,9]] \N [] [] +str_19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] 
+[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N \N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] +[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] 
[] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +[[0]] \N [] [] +str_10 \N [] [] +[[0,1]] \N [] [] +str_11 \N [] [] +[[0,1,2]] \N [] [] +str_12 \N [] [] +[[0,1,2,3]] \N [] [] +str_13 \N [] [] +[[0,1,2,3,4]] \N [] [] +str_14 \N [] [] +[[0,1,2,3,4,5]] \N [] [] +str_15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +[[0,1,2,3,4,5,6,7]] \N [] [] +str_17 \N [] [] +[[0,1,2,3,4,5,6,7,8]] \N [] [] +str_18 \N [] [] +[[0,1,2,3,4,5,6,7,8,9]] \N [] [] +str_19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N \N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] 
+[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 +0 +0 \N [] [] +1 \N [] [] +2 \N [] [] +3 \N [] [] +4 \N [] [] +5 \N [] [] +6 \N [] [] +7 \N [] [] +8 \N [] [] +9 \N [] [] +[[0]] \N [] [] +str_10 \N [] [] +[[0,1]] \N [] [] +str_11 
\N [] [] +[[0,1,2]] \N [] [] +str_12 \N [] [] +[[0,1,2,3]] \N [] [] +str_13 \N [] [] +[[0,1,2,3,4]] \N [] [] +str_14 \N [] [] +[[0,1,2,3,4,5]] \N [] [] +str_15 \N [] [] +[[0,1,2,3,4,5,6]] \N [] [] +str_16 \N [] [] +[[0,1,2,3,4,5,6,7]] \N [] [] +str_17 \N [] [] +[[0,1,2,3,4,5,6,7,8]] \N [] [] +str_18 \N [] [] +[[0,1,2,3,4,5,6,7,8,9]] \N [] [] +str_19 \N [] [] +[20] \N [] [20] +['str_21','str_21'] \N [] [NULL,NULL] +[22,22,22] \N [] [22,22,22] +[23,23,23,23] \N [] [23,23,23,23] +[24,24,24,24,24] \N [] [24,24,24,24,24] +[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +\N \N [] [] +[40] \N [] [40] +41 \N [] [] +\N \N [] [] +str_43 \N [] [] +[44,44,44,44,44] \N [] [44,44,44,44,44] +45 \N [] [] +\N \N [] [] +str_47 \N [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 \N [] [] +\N \N [] [] +str_51 \N [] [] +[52,52,52] \N [] [52,52,52] +53 \N [] [] +\N \N [] [] +str_55 \N [] [] +[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] +57 \N [] [] +\N \N [] [] +str_59 \N [] [] +[60] \N [] [60] +61 \N [] [] +\N \N [] [] +str_63 \N [] [] +[64,64,64,64,64] \N [] [64,64,64,64,64] +65 \N [] [] +\N \N [] [] +str_67 \N [] [] +[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] +69 \N [] [] +\N \N [] [] +str_71 \N [] [] +[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] +73 \N [] [] +\N \N [] [] +str_75 \N [] [] +[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] +77 \N [] [] +\N \N [] [] +str_79 \N [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] 
[] +[1] [[0]] [[[]]] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[3] [[0,1,2]] [[[],[],[]]] +[] [] [] +[4] [[0,1,2,3]] [[[],[],[],[]]] +[] [] [] +[5] [[0,1,2,3,4]] [[[],[],[],[],[]]] +[] [] [] +[6] [[0,1,2,3,4,5]] [[[],[],[],[],[],[]]] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[8] [[0,1,2,3,4,5,6,7]] [[[],[],[],[],[],[],[],[]]] +[] [] [] +[9] [[0,1,2,3,4,5,6,7,8]] [[[],[],[],[],[],[],[],[],[]]] +[] [] [] +[10] [[0,1,2,3,4,5,6,7,8,9]] [[[],[],[],[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.off.reference new file mode 100644 index 000000000000..2f77f462617d --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.off.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +insert after alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 
12 12 0 +13 13 str_13 0 +14 14 \N 0 +insert after alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +alter modify column 2 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +insert after alter modify column 2 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +19 19 \N 0 +20 20 20 0 +21 21 str_21 0 +22 22 1970-01-23 0 +alter modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +insert after alter modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +23 \N 0 0 +24 24 0 0 +25 str_25 0 0 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.on.reference new file mode 100644 index 000000000000..c6253f897cb3 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_compact_merge_tree.on.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +insert after alter add 
column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +insert after alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +insert after alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +19 19 \N \N +20 20 20 \N +21 21 str_21 \N +22 22 1970-01-23 \N +alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +insert after alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +23 \N \N \N +24 24 \N \N +25 str_25 \N \N diff --git 
a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.off.reference new file mode 100644 index 000000000000..2f77f462617d --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.off.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +insert after alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +insert after alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +alter modify column 2 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +insert after alter modify column 2 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +19 19 \N 0 +20 20 20 0 +21 21 str_21 0 +22 22 1970-01-23 0 +alter modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +insert after alter 
modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +23 \N 0 0 +24 24 0 0 +25 str_25 0 0 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.on.reference new file mode 100644 index 000000000000..c6253f897cb3 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_memory.on.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +insert after alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +insert after alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +insert after alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N 
+16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +19 19 \N \N +20 20 20 \N +21 21 str_21 \N +22 22 1970-01-23 \N +alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +insert after alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +23 \N \N \N +24 24 \N \N +25 str_25 \N \N diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.off.reference new file mode 100644 index 000000000000..2f77f462617d --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.off.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +insert after alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +insert after alter modify column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +alter modify column 2 +0 0 \N 0 +1 1 \N 
0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +insert after alter modify column 2 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +15 15 \N 0 +16 16 16 0 +17 17 str_17 0 +18 18 1970-01-19 0 +19 19 \N 0 +20 20 20 0 +21 21 str_21 0 +22 22 1970-01-23 0 +alter modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +insert after alter modify column 3 +0 0 0 0 +1 1 0 0 +2 2 0 0 +3 3 0 0 +4 4 0 0 +5 5 0 0 +6 6 0 0 +7 7 0 0 +8 8 0 0 +9 9 0 0 +10 10 0 0 +11 11 0 0 +12 12 0 0 +13 13 0 0 +14 14 0 0 +15 15 0 0 +16 16 0 0 +17 17 0 0 +18 18 0 0 +19 19 0 0 +20 20 0 0 +21 21 0 0 +22 22 0 0 +23 \N 0 0 +24 24 0 0 +25 str_25 0 0 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.on.reference new file mode 100644 index 000000000000..c6253f897cb3 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_1_wide_merge_tree.on.reference @@ -0,0 +1,152 @@ +initial insert +alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +insert after alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 
\N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +insert after alter modify column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +insert after alter modify column 2 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +15 15 \N \N +16 16 16 \N +17 17 str_17 \N +18 18 1970-01-19 \N +19 19 \N \N +20 20 20 \N +21 21 str_21 \N +22 22 1970-01-23 \N +alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +insert after alter modify column 3 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 \N \N +4 4 \N \N +5 5 \N \N +6 6 \N \N +7 7 \N \N +8 8 \N \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 \N \N +13 13 \N \N +14 14 \N \N +15 15 \N \N +16 16 \N \N +17 17 \N \N +18 18 \N \N +19 19 \N \N +20 20 \N \N +21 21 \N \N +22 22 \N \N +23 \N \N \N +24 24 \N \N +25 str_25 \N \N diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.off.reference new file mode 100644 index 000000000000..a5286e39e642 --- /dev/null +++ 
b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.off.reference @@ -0,0 +1,75 @@ +initial insert +alter add column +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +insert after alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +alter rename column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +insert nested dynamic +0 0 \N 0 [] [] [] +1 1 \N 0 [] [] [] +2 2 \N 0 [] [] [] +3 3 3 0 [] [] [] +4 4 4 0 [] [] [] +5 5 5 0 [] [] [] +6 6 str_6 0 [] [] [] +7 7 str_7 0 [] [] [] +8 8 str_8 0 [] [] [] +9 9 \N 0 [] [] [] +10 10 \N 0 [] [] [] +11 11 \N 0 [] [] [] +12 12 12 0 [] [] [] +13 13 str_13 0 [] [] [] +14 14 \N 0 [] [] [] +15 15 [15] 0 [15] [NULL] [NULL] +16 16 ['str_16'] 0 [NULL] ['str_16'] [NULL] +17 17 [17] 0 [17] [NULL] [NULL] +alter rename column 2 +0 0 \N 0 [] [] [] +1 1 \N 0 [] [] [] +2 2 \N 0 [] [] [] +3 3 3 0 [] [] [] +4 4 4 0 [] [] [] +5 5 5 0 [] [] [] +6 6 str_6 0 [] [] [] +7 7 str_7 0 [] [] [] +8 8 str_8 0 [] [] [] +9 9 \N 0 [] [] [] +10 10 \N 0 [] [] [] +11 11 \N 0 [] [] [] +12 12 12 0 [] [] [] +13 13 str_13 0 [] [] [] +14 14 \N 0 [] [] [] +15 15 [15] 0 [15] [NULL] [NULL] +16 16 ['str_16'] 0 [NULL] ['str_16'] [NULL] +17 17 [17] 0 [17] [NULL] [NULL] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.on.reference new file mode 100644 index 000000000000..28db8876fb0c --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_compact_merge_tree.on.reference @@ -0,0 +1,75 @@ +initial insert +alter add column +0 0 \N \N +1 1 \N \N +2 2 \N 
\N +insert after alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +alter rename column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +insert nested dynamic +0 0 \N \N [] [] [] +1 1 \N \N [] [] [] +2 2 \N \N [] [] [] +3 3 3 \N [] [] [] +4 4 4 \N [] [] [] +5 5 5 \N [] [] [] +6 6 str_6 \N [] [] [] +7 7 str_7 \N [] [] [] +8 8 str_8 \N [] [] [] +9 9 \N \N [] [] [] +10 10 \N \N [] [] [] +11 11 \N \N [] [] [] +12 12 12 \N [] [] [] +13 13 str_13 \N [] [] [] +14 14 \N \N [] [] [] +15 15 [15] \N [15] [NULL] [NULL] +16 16 ['str_16'] \N [NULL] ['str_16'] [NULL] +17 17 [17] \N [17] [NULL] [NULL] +alter rename column 2 +0 0 \N \N [] [] [] +1 1 \N \N [] [] [] +2 2 \N \N [] [] [] +3 3 3 \N [] [] [] +4 4 4 \N [] [] [] +5 5 5 \N [] [] [] +6 6 str_6 \N [] [] [] +7 7 str_7 \N [] [] [] +8 8 str_8 \N [] [] [] +9 9 \N \N [] [] [] +10 10 \N \N [] [] [] +11 11 \N \N [] [] [] +12 12 12 \N [] [] [] +13 13 str_13 \N [] [] [] +14 14 \N \N [] [] [] +15 15 [15] \N [15] [NULL] [NULL] +16 16 ['str_16'] \N [NULL] ['str_16'] [NULL] +17 17 [17] \N [17] [NULL] [NULL] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.off.reference new file mode 100644 index 000000000000..a5286e39e642 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.off.reference @@ -0,0 +1,75 @@ +initial insert +alter add column +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +insert after alter add column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 
+10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +alter rename column 1 +0 0 \N 0 +1 1 \N 0 +2 2 \N 0 +3 3 3 0 +4 4 4 0 +5 5 5 0 +6 6 str_6 0 +7 7 str_7 0 +8 8 str_8 0 +9 9 \N 0 +10 10 \N 0 +11 11 \N 0 +12 12 12 0 +13 13 str_13 0 +14 14 \N 0 +insert nested dynamic +0 0 \N 0 [] [] [] +1 1 \N 0 [] [] [] +2 2 \N 0 [] [] [] +3 3 3 0 [] [] [] +4 4 4 0 [] [] [] +5 5 5 0 [] [] [] +6 6 str_6 0 [] [] [] +7 7 str_7 0 [] [] [] +8 8 str_8 0 [] [] [] +9 9 \N 0 [] [] [] +10 10 \N 0 [] [] [] +11 11 \N 0 [] [] [] +12 12 12 0 [] [] [] +13 13 str_13 0 [] [] [] +14 14 \N 0 [] [] [] +15 15 [15] 0 [15] [NULL] [NULL] +16 16 ['str_16'] 0 [NULL] ['str_16'] [NULL] +17 17 [17] 0 [17] [NULL] [NULL] +alter rename column 2 +0 0 \N 0 [] [] [] +1 1 \N 0 [] [] [] +2 2 \N 0 [] [] [] +3 3 3 0 [] [] [] +4 4 4 0 [] [] [] +5 5 5 0 [] [] [] +6 6 str_6 0 [] [] [] +7 7 str_7 0 [] [] [] +8 8 str_8 0 [] [] [] +9 9 \N 0 [] [] [] +10 10 \N 0 [] [] [] +11 11 \N 0 [] [] [] +12 12 12 0 [] [] [] +13 13 str_13 0 [] [] [] +14 14 \N 0 [] [] [] +15 15 [15] 0 [15] [NULL] [NULL] +16 16 ['str_16'] 0 [NULL] ['str_16'] [NULL] +17 17 [17] 0 [17] [NULL] [NULL] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.on.reference new file mode 100644 index 000000000000..28db8876fb0c --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03040_dynamic_type_alters_2_wide_merge_tree.on.reference @@ -0,0 +1,75 @@ +initial insert +alter add column +0 0 \N \N +1 1 \N \N +2 2 \N \N +insert after alter add column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N +9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +alter rename column 1 +0 0 \N \N +1 1 \N \N +2 2 \N \N +3 3 3 \N +4 4 4 \N +5 5 5 \N +6 6 str_6 \N +7 7 str_7 \N +8 8 str_8 \N 
+9 9 \N \N +10 10 \N \N +11 11 \N \N +12 12 12 \N +13 13 str_13 \N +14 14 \N \N +insert nested dynamic +0 0 \N \N [] [] [] +1 1 \N \N [] [] [] +2 2 \N \N [] [] [] +3 3 3 \N [] [] [] +4 4 4 \N [] [] [] +5 5 5 \N [] [] [] +6 6 str_6 \N [] [] [] +7 7 str_7 \N [] [] [] +8 8 str_8 \N [] [] [] +9 9 \N \N [] [] [] +10 10 \N \N [] [] [] +11 11 \N \N [] [] [] +12 12 12 \N [] [] [] +13 13 str_13 \N [] [] [] +14 14 \N \N [] [] [] +15 15 [15] \N [15] [NULL] [NULL] +16 16 ['str_16'] \N [NULL] ['str_16'] [NULL] +17 17 [17] \N [17] [NULL] [NULL] +alter rename column 2 +0 0 \N \N [] [] [] +1 1 \N \N [] [] [] +2 2 \N \N [] [] [] +3 3 3 \N [] [] [] +4 4 4 \N [] [] [] +5 5 5 \N [] [] [] +6 6 str_6 \N [] [] [] +7 7 str_7 \N [] [] [] +8 8 str_8 \N [] [] [] +9 9 \N \N [] [] [] +10 10 \N \N [] [] [] +11 11 \N \N [] [] [] +12 12 12 \N [] [] [] +13 13 str_13 \N [] [] [] +14 14 \N \N [] [] [] +15 15 [15] \N [15] [NULL] [NULL] +16 16 ['str_16'] \N [NULL] ['str_16'] [NULL] +17 17 [17] \N [17] [NULL] [NULL] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.off.reference new file mode 100644 index 000000000000..0dab4ea0d207 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.off.reference @@ -0,0 +1,56 @@ +MergeTree compact +initial insert +alter add column +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 +insert after alter add column +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +check table +1 +MergeTree wide +initial insert +alter add column +3 None +0 0 \N \N \N 0 +1 1 \N \N \N 0 
+2 2 \N \N \N 0 +insert after alter add column +4 String +4 UInt64 +7 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +check table +1 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.on.reference new file mode 100644 index 000000000000..b1ea186a9171 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03041_dynamic_type_check_table.on.reference @@ -0,0 +1,56 @@ +MergeTree compact +initial insert +alter add column +3 None +0 0 \N \N \N \N +1 1 \N \N \N \N +2 2 \N \N \N \N +insert after alter add column +4 String +4 UInt64 +7 None +0 0 \N \N \N \N \N +1 1 \N \N \N \N \N +2 2 \N \N \N \N \N +3 3 3 \N 3 \N \N +4 4 4 \N 4 \N \N +5 5 5 \N 5 \N \N +6 6 str_6 str_6 \N \N \N +7 7 str_7 str_7 \N \N \N +8 8 str_8 str_8 \N \N \N +9 9 \N \N \N \N \N +10 10 \N \N \N \N \N +11 11 \N \N \N \N \N +12 12 12 \N 12 \N \N +13 13 str_13 str_13 \N \N \N +14 14 \N \N \N \N \N +check table +1 +MergeTree wide +initial insert +alter add column +3 None +0 0 \N \N \N \N +1 1 \N \N \N \N +2 2 \N \N \N \N +insert after alter add column +4 String +4 UInt64 +7 None +0 0 \N \N \N \N \N +1 1 \N \N \N \N \N +2 2 \N \N \N \N \N +3 3 3 \N 3 \N \N +4 4 4 \N 4 \N \N +5 5 5 \N 5 \N \N +6 6 str_6 str_6 \N \N \N +7 7 str_7 str_7 \N \N \N +8 8 str_8 str_8 \N \N \N +9 9 \N \N \N \N \N +10 10 \N \N \N \N \N +11 11 \N \N \N \N \N +12 12 12 \N 12 \N \N +13 13 str_13 str_13 \N \N \N +14 14 \N \N \N \N \N +check table +1 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.off.reference 
b/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.off.reference new file mode 100644 index 000000000000..45861bf4d8d8 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.off.reference @@ -0,0 +1,4 @@ + ┌─dynamicType(d)──────────────┬─d─────────────────────────────────────────┬─d.Nested(x U⋯ Dynamic).x─┬─d.Nested(x UInt32, y Dynamic).y───┬─dynamicType(a⋯namic).y, 1))─┬─d.Nested(x U⋯c).y.String─┬─d.Nested(x UInt3⋯, Array(String))─┐ +1. │ Nested(x UInt32, y Dynamic) │ [(1,'aa'),(2,'bb')] │ [1,2] │ ['aa','bb'] │ String │ ['aa','bb'] │ [(0,[]),(0,[])] │ +2. │ Nested(x UInt32, y Dynamic) │ [(1,(2,['aa','bb'])),(5,(6,['ee','ff']))] │ [1,5] │ [(2,['aa','bb']),(6,['ee','ff'])] │ Tuple(Int64, Array(String)) │ [NULL,NULL] │ [(2,['aa','bb']),(6,['ee','ff'])] │ + └─────────────────────────────┴───────────────────────────────────────────┴──────────────────────────┴───────────────────────────────────┴─────────────────────────────┴──────────────────────────┴───────────────────────────────────┘ diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.on.reference new file mode 100644 index 000000000000..327b065ea9b0 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03162_dynamic_type_nested.on.reference @@ -0,0 +1,4 @@ + ┌─dynamicType(d)──────────────┬─d─────────────────────────────────────────┬─d.Nested(x U⋯ Dynamic).x─┬─d.Nested(x UInt32, y Dynamic).y───┬─dynamicType(a⋯namic).y, 1))─┬─d.Nested(x U⋯c).y.String─┬─d.Nested(x UInt3⋯, Array(String))─┐ +1. │ Nested(x UInt32, y Dynamic) │ [(1,'aa'),(2,'bb')] │ [1,2] │ ['aa','bb'] │ String │ ['aa','bb'] │ [NULL,NULL] │ +2. 
│ Nested(x UInt32, y Dynamic) │ [(1,(2,['aa','bb'])),(5,(6,['ee','ff']))] │ [1,5] │ [(2,['aa','bb']),(6,['ee','ff'])] │ Tuple(Int64, Array(String)) │ [NULL,NULL] │ [(2,['aa','bb']),(6,['ee','ff'])] │ + └─────────────────────────────┴───────────────────────────────────────────┴──────────────────────────┴───────────────────────────────────┴─────────────────────────────┴──────────────────────────┴───────────────────────────────────┘ diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.off.reference new file mode 100644 index 000000000000..bb97cee11ea0 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.off.reference @@ -0,0 +1,39 @@ +--- +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +--- +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +--- +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) diff --git 
a/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.on.reference new file mode 100644 index 000000000000..3925aef4a948 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03290_nullable_json.on.reference @@ -0,0 +1,39 @@ +--- +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +--- +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +--- +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +1 Nullable(UInt32) +\N Nullable(UInt32) +1 Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) +\N Nullable(UInt32) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.off.reference new file mode 100644 index 000000000000..7bb1758fd6be --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.off.reference @@ -0,0 +1,2 @@ 
+['v.variant_discr','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Ea','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Eb'] +['v.variant_discr','v.Tuple(a UInt32, b UInt32)%2Ea','v.Tuple(a UInt32, b UInt32)%2Eb'] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.on.reference new file mode 100644 index 000000000000..3e7ba3ec7ff3 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03369_variant_escape_filename_merge_tree.on.reference @@ -0,0 +1,2 @@ +['v.variant_discr','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Ea','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Eb',''] +['v.variant_discr','v.Tuple(a UInt32, b UInt32)%2Ea','v.Tuple(a UInt32, b UInt32)%2Eb',''] diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.off.reference new file mode 100644 index 000000000000..bb5e6213fbe8 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.off.reference @@ -0,0 +1,28 @@ +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Nullable(UInt64) 5 +Nullable(UInt64) \N +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Nullable(UInt64) 5 +Nullable(UInt64) \N +Tuple(Nullable(UInt64), Nullable(String)) (1,'x') +Tuple(Nullable(UInt64), Nullable(String)) (NULL,NULL) +Nullable(UInt64) 10 +Nullable(UInt64) \N +Tuple(Nullable(UInt64), Nullable(String)) (1,'x') +Tuple(Nullable(UInt64), Nullable(String)) (NULL,NULL) 
+Nullable(UInt64) 10 +Nullable(UInt64) \N +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.on.reference new file mode 100644 index 000000000000..3eeab4e173f7 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03913_tuple_inside_nullable_subcolumns.on.reference @@ -0,0 +1,28 @@ +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(UInt64) 5 +Nullable(UInt64) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(UInt64) 5 +Nullable(UInt64) \N +Nullable(Tuple(Nullable(UInt64), Nullable(String))) (1,'x') +Nullable(Tuple(Nullable(UInt64), Nullable(String))) \N +Nullable(UInt64) 10 +Nullable(UInt64) \N +Nullable(Tuple(Nullable(UInt64), Nullable(String))) (1,'x') +Nullable(Tuple(Nullable(UInt64), Nullable(String))) \N +Nullable(UInt64) 10 +Nullable(UInt64) \N +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x +LowCardinality(Nullable(String)) x diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.off.reference new file mode 100644 index 000000000000..1d4e6edc5cf9 --- /dev/null +++ 
b/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.off.reference @@ -0,0 +1,8 @@ +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.on.reference new file mode 100644 index 000000000000..452b0139ddab --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03915_tuple_inside_nullable_variant_dynamic_element.on.reference @@ -0,0 +1,8 @@ +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') +Nullable(Tuple(UInt64, String)) \N +Nullable(Tuple(UInt64, String)) (1,'x') diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.off.reference new file mode 100644 index 000000000000..807737157d4b --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.off.reference @@ -0,0 +1,8 @@ +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e 
UInt32, f Nullable(UInt32))) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.on.reference new file mode 100644 index 000000000000..7968a9e5314f --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03916_tuple_inside_nullable_json_subcolumns.on.reference @@ -0,0 +1,8 @@ +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.off.reference new file mode 100644 index 000000000000..67af267f1621 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.off.reference @@ -0,0 +1,4 @@ +Tuple(\n x UInt32,\n y String) (1,'aa') Nullable(UInt32) 1 Nullable(String) aa +Tuple(\n x UInt32,\n y String) (0,'') Nullable(UInt32) \N Nullable(String) \N +Tuple(\n x UInt32,\n y String) (1,'aa') UInt32 1 String aa +Tuple(\n x UInt32,\n y String) (0,'') UInt32 0 String diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.on.reference new file mode 100644 index 000000000000..89ce3d018982 --- /dev/null +++ 
b/tests/integration/test_nullable_tuple_subcolumns/expected/03917_tuple_inside_nullable_tuple_subcolumns.on.reference @@ -0,0 +1,4 @@ +Nullable(Tuple(x UInt32, y String)) (1,'aa') Nullable(UInt32) 1 Nullable(String) aa +Nullable(Tuple(x UInt32, y String)) \N Nullable(UInt32) \N Nullable(String) \N +Nullable(Tuple(x UInt32, y String)) (1,'aa') Nullable(UInt32) 1 Nullable(String) aa +Nullable(Tuple(x UInt32, y String)) \N Nullable(UInt32) \N Nullable(String) \N diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_off_only.off.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_off_only.off.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_on_only.on.reference b/tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_on_only.on.reference new file mode 100644 index 000000000000..6799396b3f3a --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/expected/special/03913_tuple_inside_nullable_subcolumns_on_only.on.reference @@ -0,0 +1,4 @@ +UInt8 1 +UInt8 1 +UInt8 1 +UInt8 1 diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/02731_analyzer_join_resolve_nested.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/02731_analyzer_join_resolve_nested.sql new file mode 100644 index 000000000000..a1ad540053db --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/02731_analyzer_join_resolve_nested.sql @@ -0,0 +1,44 @@ +-- Tuple-related queries from tests/queries/0_stateless/02731_analyzer_join_resolve_nested.sql.j2. 
+ +DROP TABLE IF EXISTS ttta; +DROP TABLE IF EXISTS tttb; + +CREATE TABLE ttta +( + x Int32, + t Tuple(t Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String), s String) +) ENGINE = MergeTree ORDER BY x; + +INSERT INTO ttta VALUES + (1, ((((1, 's'), 's'), 's'), 's')), + (2, ((((2, 's'), 's'), 's'), 's')); + +CREATE TABLE tttb +( + x Int32, + t Tuple(t Tuple(t Tuple(t Tuple(t Int32, s String), s String), s String), s String) +) ENGINE = MergeTree ORDER BY x; + +INSERT INTO tttb VALUES + (2, ((((2, 's'), 's'), 's'), 's')), + (3, ((((3, 's'), 's'), 's'), 's')); + +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +SET join_use_nulls = 0; + +SELECT t.*, t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.*, t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.t.*, t.t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.t.t.*, t.t.t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); + +SET join_use_nulls = 1; + +SELECT t.*, t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.*, t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.t.*, t.t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); +SELECT t.t.t.t.*, t.t.t.t.* APPLY toTypeName FROM ttta FULL JOIN tttb USING (t); + +DROP TABLE IF EXISTS ttta; +DROP TABLE IF EXISTS tttb; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/02940_variant_text_deserialization.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/02940_variant_text_deserialization.sql new file mode 100644 index 000000000000..a6601e12bb4e --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/02940_variant_text_deserialization.sql @@ -0,0 +1,9 @@ +-- Tuple-related queries from tests/queries/0_stateless/02940_variant_text_deserialization.sql. 
+ +SET allow_experimental_variant_type = 1; +SET allow_suspicious_variant_types = 1; +SET session_timezone = 'UTC'; + +SELECT 'Tuple'; +SELECT v, variantElement(v, 'Tuple(a UInt64, b UInt64)') FROM format(JSONEachRow, 'v Variant(String, Tuple(a UInt64, b UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : {"a" : 42, "b" : null}}, {"v" : {"a" : 44, "d" : 32}}') FORMAT JSONEachRow; +SELECT v, variantElement(v, 'Tuple(a UInt64, b UInt64)') FROM format(JSONEachRow, 'v Variant(String, Tuple(a UInt64, b UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : {"a" : 42, "b" : null}}, {"v" : {"a" : 44, "d" : 32}}') SETTINGS input_format_json_defaults_for_missing_elements_in_named_tuple=0; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/02941_variant_type_1.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/02941_variant_type_1.sql new file mode 100644 index 000000000000..e914b2c67391 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/02941_variant_type_1.sql @@ -0,0 +1,59 @@ +-- Tuple-related queries from tests/queries/0_stateless/02941_variant_type_1.sh. 
+ +SET allow_experimental_variant_type = 1; +SET allow_suspicious_variant_types = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + id UInt64, + v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64)) +) ENGINE = Memory; + +INSERT INTO test SELECT number, NULL FROM numbers(3); +INSERT INTO test SELECT number + 3, number FROM numbers(3); +INSERT INTO test SELECT number + 6, ('str_' || toString(number))::Variant(String) FROM numbers(3); +INSERT INTO test SELECT number + 9, ('lc_str_' || toString(number))::LowCardinality(String) FROM numbers(3); +INSERT INTO test SELECT number + 12, tuple(number, number + 1)::Tuple(a UInt32, b UInt32) FROM numbers(3); +INSERT INTO test SELECT number + 15, range(number + 1)::Array(UInt64) FROM numbers(3); + +SELECT 'test1'; +SELECT v.`Tuple(a UInt32, b UInt32)` FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.a FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.b FROM test ORDER BY id; + +TRUNCATE TABLE test; + +INSERT INTO test SELECT number, NULL FROM numbers(3); +INSERT INTO test SELECT number + 3, number % 2 ? NULL : number FROM numbers(3); +INSERT INTO test SELECT number + 6, number % 2 ? NULL : ('str_' || toString(number))::Variant(String) FROM numbers(3); +INSERT INTO test SELECT number + 9, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(('lc_str_' || toString(number))::LowCardinality(String), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') FROM numbers(3); +INSERT INTO test SELECT number + 12, number % 2 ? CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(tuple(number, number + 1)::Tuple(a UInt32, b UInt32), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') FROM numbers(3); +INSERT INTO test SELECT number + 15, number % 2 ? 
CAST(NULL, 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') : CAST(range(number + 1)::Array(UInt64), 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))') FROM numbers(3); + +SELECT 'test2'; +SELECT v.`Tuple(a UInt32, b UInt32)` FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.a FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.b FROM test ORDER BY id; + +TRUNCATE TABLE test; + +INSERT INTO test WITH 'Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))' AS type +SELECT + number, + multiIf( + number % 6 == 0, CAST(NULL, type), + number % 6 == 1, CAST(('str_' || toString(number))::Variant(String), type), + number % 6 == 2, CAST(number, type), + number % 6 == 3, CAST(('lc_str_' || toString(number))::LowCardinality(String), type), + number % 6 == 4, CAST(tuple(number, number + 1)::Tuple(a UInt32, b UInt32), type), + CAST(range(number + 1)::Array(UInt64), type) + ) AS res +FROM numbers(18); + +SELECT 'test3'; +SELECT v.`Tuple(a UInt32, b UInt32)` FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.a FROM test ORDER BY id; +SELECT v.`Tuple(a UInt32, b UInt32)`.b FROM test ORDER BY id; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_shared_subcolumns_small.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_shared_subcolumns_small.sql new file mode 100644 index 000000000000..76623264117e --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_shared_subcolumns_small.sql @@ -0,0 +1,78 @@ +-- Tuple-related queries from tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2. 
+ +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; + +DROP TABLE IF EXISTS test; + +SELECT 'Memory'; +CREATE TABLE test +( + id UInt64, + d Dynamic(max_types=2) +) +ENGINE = Memory; + +INSERT INTO test SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, if(number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) FROM numbers(10, 10); +INSERT INTO test SELECT number, if(number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) FROM numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; + +SELECT 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000'; +CREATE TABLE test +( + id UInt64, + d Dynamic(max_types=2) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1000000000, min_bytes_for_wide_part = 10000000000; + +INSERT INTO test 
SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, if(number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) FROM numbers(10, 10); +INSERT INTO test SELECT number, if(number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) FROM numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; + +SELECT 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1'; +CREATE TABLE test +( + id UInt64, + d Dynamic(max_types=2) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +INSERT INTO test SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 
10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, if(number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) FROM numbers(10, 10); +INSERT INTO test SELECT number, if(number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) FROM numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_subcolumns_small.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_subcolumns_small.sql new file mode 100644 index 000000000000..cd6996f4a779 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03036_dynamic_read_subcolumns_small.sql @@ -0,0 +1,75 @@ +-- Tuple-related queries from tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2. 
+ +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; + +DROP TABLE IF EXISTS test; + +SELECT 'Memory'; +CREATE TABLE test +( + id UInt64, + d Dynamic +) +ENGINE = Memory; + +INSERT INTO test SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) FROM numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; + +SELECT 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000'; +CREATE TABLE test +( + id UInt64, + d Dynamic +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1000000000, min_bytes_for_wide_part = 10000000000; + +INSERT INTO test SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, 
number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) FROM numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; + +SELECT 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1'; +CREATE TABLE test +( + id UInt64, + d Dynamic +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +INSERT INTO test SELECT number, number FROM numbers(10); +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10, 10); +INSERT INTO test SELECT number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) FROM numbers(20, 10); +INSERT INTO test SELECT number, NULL FROM numbers(30, 10); +INSERT INTO test SELECT number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) FROM numbers(40, 40); +INSERT INTO test SELECT number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) FROM 
numbers(10, 10); + +SELECT count() FROM test WHERE not empty(d.`Tuple(a Array(Dynamic))`.a.String); +SELECT d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 FROM test ORDER BY id, d; +SELECT d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a FROM test ORDER BY id, d; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_compact_merge_tree.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_compact_merge_tree.sql new file mode 100644 index 000000000000..288424258bf9 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_compact_merge_tree.sql @@ -0,0 +1,55 @@ +-- Tuple-related queries from tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql. + +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 100000000, min_bytes_for_wide_part = 1000000000; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column 1'; +ALTER TABLE test ADD COLUMN d Dynamic(max_types = 3) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column 1'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT x, y, d, 
d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 1'; +ALTER TABLE test MODIFY COLUMN d Dynamic(max_types = 0) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 1'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(15, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 2'; +ALTER TABLE test MODIFY COLUMN d Dynamic(max_types = 2) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 2'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(19, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 3'; +ALTER TABLE test MODIFY COLUMN y Dynamic SETTINGS mutations_sync = 1; +SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 3'; +INSERT INTO test SELECT number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL FROM numbers(23, 3); +SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_memory.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_memory.sql new file mode 100644 index 000000000000..22be698767cb --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_memory.sql @@ -0,0 +1,53 @@ +-- Tuple-related queries from tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.sql. 
+ +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = Memory; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column 1'; +ALTER TABLE test ADD COLUMN d Dynamic(max_types = 3) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column 1'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 1'; +ALTER TABLE test MODIFY COLUMN d Dynamic(max_types = 1) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 1'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(15, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 2'; +ALTER TABLE test MODIFY COLUMN d Dynamic(max_types = 3) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 2'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(19, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 3'; +ALTER TABLE test MODIFY COLUMN y Dynamic SETTINGS mutations_sync = 1; 
+SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 3'; +INSERT INTO test SELECT number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL FROM numbers(23, 3); +SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_wide_merge_tree.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_wide_merge_tree.sql new file mode 100644 index 000000000000..3d6ee5ba0087 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_1_wide_merge_tree.sql @@ -0,0 +1,55 @@ +-- Tuple-related queries from tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.sql. + +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column 1'; +ALTER TABLE test ADD COLUMN d Dynamic(max_types = 3) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column 1'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 1'; +ALTER TABLE test MODIFY 
COLUMN d Dynamic(max_types = 1) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 1'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(15, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 2'; +ALTER TABLE test MODIFY COLUMN d Dynamic(max_types = 3) SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 2'; +INSERT INTO test SELECT number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) FROM numbers(19, 4); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter modify column 3'; +ALTER TABLE test MODIFY COLUMN y Dynamic SETTINGS mutations_sync = 1; +SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter modify column 3'; +INSERT INTO test SELECT number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL FROM numbers(23, 3); +SELECT x, y, y.`Tuple(a UInt64)`.a, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_compact_merge_tree.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_compact_merge_tree.sql new file mode 100644 index 000000000000..6d7dbcce9768 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_compact_merge_tree.sql @@ -0,0 +1,43 @@ +-- Tuple-related queries from tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.sql. 
+ +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 100000000, min_bytes_for_wide_part = 1000000000; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column'; +ALTER TABLE test ADD COLUMN d Dynamic SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column 1'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter rename column 1'; +ALTER TABLE test RENAME COLUMN d TO d1 SETTINGS mutations_sync = 1; +SELECT x, y, d1, d1.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert nested dynamic'; +INSERT INTO test SELECT number, number, [number % 2 ? 
number : 'str_' || toString(number)]::Array(Dynamic) FROM numbers(15, 3); +SELECT x, y, d1, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date FROM test ORDER BY x; + +SELECT 'alter rename column 2'; +ALTER TABLE test RENAME COLUMN d1 TO d2 SETTINGS mutations_sync = 1; +SELECT x, y, d2, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date FROM test ORDER BY x; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_wide_merge_tree.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_wide_merge_tree.sql new file mode 100644 index 000000000000..a03ada8e1656 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03040_dynamic_type_alters_2_wide_merge_tree.sql @@ -0,0 +1,45 @@ +-- Tuple-related queries from tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.sql. 
+ +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column'; +ALTER TABLE test ADD COLUMN d Dynamic SETTINGS mutations_sync = 1; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column 1'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +OPTIMIZE TABLE test FINAL; +SELECT x, y, d, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'alter rename column 1'; +ALTER TABLE test RENAME COLUMN d TO d1 SETTINGS mutations_sync = 1; +SELECT x, y, d1, d1.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert nested dynamic'; +INSERT INTO test SELECT number, number, [number % 2 ? 
number : 'str_' || toString(number)]::Array(Dynamic) FROM numbers(15, 3); +OPTIMIZE TABLE test FINAL; +SELECT x, y, d1, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date FROM test ORDER BY x; + +SELECT 'alter rename column 2'; +ALTER TABLE test RENAME COLUMN d1 TO d2 SETTINGS mutations_sync = 1; +SELECT x, y, d2, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date FROM test ORDER BY x; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03041_dynamic_type_check_table.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03041_dynamic_type_check_table.sql new file mode 100644 index 000000000000..e860bab8240f --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03041_dynamic_type_check_table.sql @@ -0,0 +1,69 @@ +-- Tuple-related queries from tests/queries/0_stateless/03041_dynamic_type_check_table.sh. + +SET allow_experimental_dynamic_type = 1; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; + +DROP TABLE IF EXISTS test; + +SELECT 'MergeTree compact'; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 100000000, min_bytes_for_wide_part = 1000000000; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column'; +ALTER TABLE test ADD COLUMN d Dynamic(max_types = 2) SETTINGS mutations_sync = 1; +SELECT count(), dynamicType(d) FROM test GROUP BY dynamicType(d) ORDER BY count(), dynamicType(d); +SELECT x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO 
test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT count(), dynamicType(d) FROM test GROUP BY dynamicType(d) ORDER BY count(), dynamicType(d); +SELECT x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'check table'; +CHECK TABLE test SETTINGS check_query_single_value_result = 1; + +DROP TABLE test; + +SELECT 'MergeTree wide'; +CREATE TABLE test +( + x UInt64, + y UInt64 +) +ENGINE = MergeTree +ORDER BY x +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +SELECT 'initial insert'; +INSERT INTO test SELECT number, number FROM numbers(3); + +SELECT 'alter add column'; +ALTER TABLE test ADD COLUMN d Dynamic(max_types = 2) SETTINGS mutations_sync = 1; +SELECT count(), dynamicType(d) FROM test GROUP BY dynamicType(d) ORDER BY count(), dynamicType(d); +SELECT x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'insert after alter add column'; +INSERT INTO test SELECT number, number, number FROM numbers(3, 3); +INSERT INTO test SELECT number, number, 'str_' || toString(number) FROM numbers(6, 3); +INSERT INTO test SELECT number, number, NULL FROM numbers(9, 3); +INSERT INTO test SELECT number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) FROM numbers(12, 3); +SELECT count(), dynamicType(d) FROM test GROUP BY dynamicType(d) ORDER BY count(), dynamicType(d); +SELECT x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a FROM test ORDER BY x; + +SELECT 'check table'; +CHECK TABLE test SETTINGS check_query_single_value_result = 1; + +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03162_dynamic_type_nested.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03162_dynamic_type_nested.sql new file mode 100644 index 000000000000..a439bc861565 --- /dev/null +++ 
b/tests/integration/test_nullable_tuple_subcolumns/queries/03162_dynamic_type_nested.sql @@ -0,0 +1,25 @@ +-- Tuple-related queries from tests/queries/0_stateless/03162_dynamic_type_nested.sql. + +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; +SET output_format_pretty_named_tuples_as_json = 0; + +DROP TABLE IF EXISTS t; +CREATE TABLE t (d Dynamic) ENGINE = Memory; + +INSERT INTO t VALUES ([(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y Dynamic)); +INSERT INTO t VALUES ([(1, (2, ['aa', 'bb'])), (5, (6, ['ee', 'ff']))]::Nested(x UInt32, y Dynamic)); + +SELECT + dynamicType(d), + d, + d.`Nested(x UInt32, y Dynamic)`.x, + d.`Nested(x UInt32, y Dynamic)`.y, + dynamicType(d.`Nested(x UInt32, y Dynamic)`.y[1]), + d.`Nested(x UInt32, y Dynamic)`.y.`String`, + d.`Nested(x UInt32, y Dynamic)`.y.`Tuple(Int64, Array(String))` +FROM t +ORDER BY d +FORMAT PrettyCompactMonoBlock; + +DROP TABLE t; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03290_nullable_json.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03290_nullable_json.sql new file mode 100644 index 000000000000..abc44d4c8e6f --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03290_nullable_json.sql @@ -0,0 +1,29 @@ +-- Tuple-related queries from tests/queries/0_stateless/03290_nullable_json.sql.j2. + +SET enable_json_type = 1; + +DROP TABLE IF EXISTS test; + +SELECT '---'; +CREATE TABLE test (json Nullable(JSON(a UInt32, b Array(UInt32), c Nullable(UInt32), d Tuple(e UInt32, f Nullable(UInt32))))) ENGINE=Memory; +INSERT INTO test SELECT number % 2 ? 
NULL : '{"a" : 1, "b" : [1, 2, 3], "c" : null, "d" : {"e" : 1, "f" : null}, "x" : 42, "y" : [1, 2, 3]}' FROM numbers(4); +SELECT json.d AS path, toTypeName(path) FROM test; +SELECT json.d.e AS path, toTypeName(path) FROM test; +SELECT json.d.f AS path, toTypeName(path) FROM test; +DROP TABLE test; + +SELECT '---'; +CREATE TABLE test (json Nullable(JSON(a UInt32, b Array(UInt32), c Nullable(UInt32), d Tuple(e UInt32, f Nullable(UInt32))))) ENGINE=MergeTree ORDER BY tuple() SETTINGS min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000; +INSERT INTO test SELECT number % 2 ? NULL : '{"a" : 1, "b" : [1, 2, 3], "c" : null, "d" : {"e" : 1, "f" : null}, "x" : 42, "y" : [1, 2, 3]}' FROM numbers(4); +SELECT json.d AS path, toTypeName(path) FROM test; +SELECT json.d.e AS path, toTypeName(path) FROM test; +SELECT json.d.f AS path, toTypeName(path) FROM test; +DROP TABLE test; + +SELECT '---'; +CREATE TABLE test (json Nullable(JSON(a UInt32, b Array(UInt32), c Nullable(UInt32), d Tuple(e UInt32, f Nullable(UInt32))))) ENGINE=MergeTree ORDER BY tuple() SETTINGS min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +INSERT INTO test SELECT number % 2 ? NULL : '{"a" : 1, "b" : [1, 2, 3], "c" : null, "d" : {"e" : 1, "f" : null}, "x" : 42, "y" : [1, 2, 3]}' FROM numbers(4); +SELECT json.d AS path, toTypeName(path) FROM test; +SELECT json.d.e AS path, toTypeName(path) FROM test; +SELECT json.d.f AS path, toTypeName(path) FROM test; +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03369_variant_escape_filename_merge_tree.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03369_variant_escape_filename_merge_tree.sql new file mode 100644 index 000000000000..7165df8b85ec --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03369_variant_escape_filename_merge_tree.sql @@ -0,0 +1,34 @@ +-- Tuple-related queries from tests/queries/0_stateless/03369_variant_escape_filename_merge_tree.sql. 
+ +SET enable_variant_type = 1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + v Variant(Tuple(a UInt32, b UInt32)) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS + min_rows_for_wide_part = 0, + min_bytes_for_wide_part = 0, + escape_variant_subcolumn_filenames = 1, + replace_long_file_name_to_hash = 0; +INSERT INTO test SELECT tuple(1, 2)::Tuple(a UInt32, b UInt32); +SELECT filenames FROM system.parts_columns WHERE table = 'test' AND database = currentDatabase(); +DROP TABLE test; + +CREATE TABLE test +( + v Variant(Tuple(a UInt32, b UInt32)) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS + min_rows_for_wide_part = 0, + min_bytes_for_wide_part = 0, + escape_variant_subcolumn_filenames = 0, + replace_long_file_name_to_hash = 0; +INSERT INTO test SELECT tuple(1, 2)::Tuple(a UInt32, b UInt32); +SELECT filenames FROM system.parts_columns WHERE table = 'test' AND database = currentDatabase(); +DROP TABLE test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03913_tuple_inside_nullable_subcolumns.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03913_tuple_inside_nullable_subcolumns.sql new file mode 100644 index 000000000000..d99b9ba08c01 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03913_tuple_inside_nullable_subcolumns.sql @@ -0,0 +1,53 @@ +-- Tuple-related queries from tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.sql. 
+ +SET enable_analyzer = 1; + +SET allow_experimental_nullable_tuple_type = 0; + +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); + +SET allow_experimental_nullable_tuple_type = 1; + +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); + +DROP TABLE IF EXISTS test_variant; +CREATE TABLE test_variant (v Variant(Tuple(UInt64, String), UInt64)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_variant VALUES (CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)')); +INSERT INTO test_variant VALUES (CAST(toUInt64(5), 'Variant(Tuple(UInt64, String), UInt64)')); + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String)'); +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), 
getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)'); +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); +DROP TABLE test_variant; + +DROP TABLE IF EXISTS test_dynamic; +CREATE TABLE test_dynamic (d Dynamic(max_types=1)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_dynamic VALUES (CAST(toUInt64(10), 'Dynamic(max_types=1)')); +INSERT INTO test_dynamic VALUES (CAST(tuple(toUInt64(1), 'x'), 'Dynamic(max_types=1)')); + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); +DROP TABLE test_dynamic; + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT 
CAST('x', 'LowCardinality(String)')::Dynamic AS d); + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Dynamic AS d); diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03915_tuple_inside_nullable_variant_dynamic_element.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03915_tuple_inside_nullable_variant_dynamic_element.sql new file mode 100644 index 000000000000..b0e616b4b38c --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03915_tuple_inside_nullable_variant_dynamic_element.sql @@ -0,0 +1,23 @@ +-- Tuple-related queries from tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.sql. 
+ +SET allow_experimental_nullable_tuple_type = 0; + +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Dynamic') AS d); + +SET allow_experimental_nullable_tuple_type = 1; + +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Dynamic') AS d); diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03916_tuple_inside_nullable_json_subcolumns.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03916_tuple_inside_nullable_json_subcolumns.sql new file mode 100644 index 000000000000..8c0b3452429b --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03916_tuple_inside_nullable_json_subcolumns.sql @@ -0,0 +1,43 @@ +-- Tuple-related queries from 
tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.sql. + +SET enable_json_type = 1; + +SET allow_experimental_nullable_tuple_type = 0; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d Tuple(e UInt32, f Nullable(UInt32)) + )) +) ENGINE = Memory; + +INSERT INTO test +SELECT NULL +FROM numbers(4); + +SELECT json.d AS path, toTypeName(path) FROM test; + +SET allow_experimental_nullable_tuple_type = 1; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d Nullable(Tuple(e UInt32, f Nullable(UInt32))) + )) +) ENGINE = Memory; + +INSERT INTO test +SELECT NULL +FROM numbers(4); + +SELECT json.d AS path, toTypeName(path) FROM test; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/03917_tuple_inside_nullable_tuple_subcolumns.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/03917_tuple_inside_nullable_tuple_subcolumns.sql new file mode 100644 index 000000000000..b020c8227a09 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/03917_tuple_inside_nullable_tuple_subcolumns.sql @@ -0,0 +1,26 @@ +-- Tuple-related queries from tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.sql. 
+ +SET allow_experimental_nullable_tuple_type = 1; + +DROP TABLE IF EXISTS x; +CREATE TABLE x +( + t Nullable(Tuple(a Tuple(x UInt32, y String), b String)) +) ENGINE = Memory; + +INSERT INTO x VALUES (((1, 'aa'), 'B')), (NULL); + +SELECT + toTypeName(t.a), t.a, + toTypeName(t.a.x), t.a.x, + toTypeName(t.a.y), t.a.y +FROM x; + +SELECT + toTypeName(tupleElement(t, 'a')), + tupleElement(t, 'a'), + toTypeName(tupleElement(tupleElement(t, 'a'), 'x')), + tupleElement(tupleElement(t, 'a'), 'x'), + toTypeName(tupleElement(tupleElement(t, 'a'), 'y')), + tupleElement(tupleElement(t, 'a'), 'y') +FROM x; diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_off_only.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_off_only.sql new file mode 100644 index 000000000000..aea60956fd81 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_off_only.sql @@ -0,0 +1,12 @@ +-- These tuple .null subcolumns should fail when +-- allow_nullable_tuple_in_extracted_subcolumns = 0. 
+ +SET enable_analyzer = 1; + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM (SELECT 42::Dynamic AS d); -- { serverError ILLEGAL_COLUMN } + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM (SELECT 42::Dynamic AS d); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_on_only.sql b/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_on_only.sql new file mode 100644 index 000000000000..e8b56059bd38 --- /dev/null +++ b/tests/integration/test_nullable_tuple_subcolumns/queries/special/03913_tuple_inside_nullable_subcolumns_on_only.sql @@ -0,0 +1,12 @@ +-- These tuple .null subcolumns should succeed when +-- allow_nullable_tuple_in_extracted_subcolumns = 1. 
+
+SET enable_analyzer = 1;
+
+SET allow_experimental_nullable_tuple_type = 0;
+SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v);
+SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM (SELECT 42::Dynamic AS d);
+
+SET allow_experimental_nullable_tuple_type = 1;
+SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v);
+SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM (SELECT 42::Dynamic AS d);
diff --git a/tests/integration/test_nullable_tuple_subcolumns/test.py b/tests/integration/test_nullable_tuple_subcolumns/test.py
new file mode 100644
index 000000000000..e38f065667c7
--- /dev/null
+++ b/tests/integration/test_nullable_tuple_subcolumns/test.py
@@ -0,0 +1,164 @@
+import difflib
+from pathlib import Path
+
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+
+BASE_DIR = Path(__file__).resolve().parent
+QUERIES_DIR = BASE_DIR / "queries"
+SPECIAL_QUERIES_DIR = QUERIES_DIR / "special"
+EXPECTED_DIR = BASE_DIR / "expected"
+SPECIAL_EXPECTED_DIR = EXPECTED_DIR / "special"
+
+# Contents of this file are from the tests/queries/0_stateless/ directory, with only tuple-related queries kept.
+BASE_CASES = [ + "02731_analyzer_join_resolve_nested", + "02940_variant_text_deserialization", + "02941_variant_type_1", + "03036_dynamic_read_subcolumns_small", + "03036_dynamic_read_shared_subcolumns_small", + "03040_dynamic_type_alters_1_compact_merge_tree", + "03040_dynamic_type_alters_1_memory", + "03040_dynamic_type_alters_1_wide_merge_tree", + "03040_dynamic_type_alters_2_compact_merge_tree", + "03040_dynamic_type_alters_2_wide_merge_tree", + "03041_dynamic_type_check_table", + "03162_dynamic_type_nested", + "03290_nullable_json", + "03369_variant_escape_filename_merge_tree", + "03913_tuple_inside_nullable_subcolumns", + "03915_tuple_inside_nullable_variant_dynamic_element", + "03916_tuple_inside_nullable_json_subcolumns", + "03917_tuple_inside_nullable_tuple_subcolumns", +] + +# When the setting is disabled, some queries throw errors; when it is enabled, they do not. +# So we keep them separate so we can check expected errors using trailing `serverError` in the .sql file. +SPECIAL_OFF_ONLY_CASES = [ + "03913_tuple_inside_nullable_subcolumns_off_only", +] +SPECIAL_ON_ONLY_CASES = [ + "03913_tuple_inside_nullable_subcolumns_on_only", +] + +cluster = ClickHouseCluster(__file__) +node_off = cluster.add_instance( + "node_off", + user_configs=["configs/allow_nullable_tuple_subcolumns_off.xml"], +) +node_on = cluster.add_instance( + "node_on", + user_configs=["configs/allow_nullable_tuple_subcolumns_on.xml"], +) + + +def _assert_reference(reference_path: Path, actual: str) -> None: + expected = reference_path.read_text(encoding="utf-8") + if actual == expected: + return + + diff_lines = list( + difflib.unified_diff( + expected.splitlines(), + actual.splitlines(), + fromfile=f"{reference_path} (expected)", + tofile="actual", + lineterm="", + ) + ) + max_diff_lines = 200 + if len(diff_lines) > max_diff_lines: + diff_lines = diff_lines[:max_diff_lines] + ["... 
(diff truncated)"] + + raise AssertionError( + f"Reference mismatch for {reference_path}.\n" + "\n".join(diff_lines) + ) + + +def _run_case(node, mode: str, case: str, special: bool = False) -> None: + sql_dir = SPECIAL_QUERIES_DIR if special else QUERIES_DIR + expected_dir = SPECIAL_EXPECTED_DIR if special else EXPECTED_DIR + sql_file = sql_dir / f"{case}.sql" + container_sql_file = f"/tmp/{case}.sql" + container_out_file = f"/tmp/{case}.out" + container_err_file = f"/tmp/{case}.err" + container_rc_file = f"/tmp/{case}.rc" + + node.copy_file_to_container(str(sql_file), container_sql_file) + node.exec_in_container( + [ + "bash", + "-lc", + ( + "/usr/bin/clickhouse client " + f"--queries-file {container_sql_file} " + f"> {container_out_file} 2> {container_err_file}; " + f"echo -n $? > {container_rc_file}" + ), + ], + nothrow=True, + ) + client_rc = node.exec_in_container(["bash", "-lc", f"cat {container_rc_file}"], nothrow=True).strip() + + if client_rc != "0": + stderr = node.exec_in_container(["bash", "-lc", f"cat {container_err_file}"], nothrow=True) + raise AssertionError( + f"Case '{case}' failed in mode '{mode}' with exit code {client_rc}.\n{stderr}" + ) + + actual = node.exec_in_container(["bash", "-lc", f"cat {container_out_file}"]) + + reference_path = expected_dir / f"{case}.{mode}.reference" + _assert_reference(reference_path, actual) + + +def _run_mode(node, mode: str) -> None: + for case in BASE_CASES: + _run_case(node, mode, case) + + special_cases = SPECIAL_OFF_ONLY_CASES if mode == "off" else SPECIAL_ON_ONLY_CASES + for case in special_cases: + _run_case(node, mode, case, special=True) + + +def _assert_references_exist() -> None: + expected_files = [] + for case in BASE_CASES: + expected_files.append(EXPECTED_DIR / f"{case}.off.reference") + expected_files.append(EXPECTED_DIR / f"{case}.on.reference") + for case in SPECIAL_OFF_ONLY_CASES: + expected_files.append(SPECIAL_EXPECTED_DIR / f"{case}.off.reference") + for case in SPECIAL_ON_ONLY_CASES: + 
expected_files.append(SPECIAL_EXPECTED_DIR / f"{case}.on.reference") + + missing = [str(path) for path in expected_files if not path.exists()] + assert not missing, ( + "Missing reference files:\n" + + "\n".join(missing) + + "\nAdd the missing .reference files." + ) + + +@pytest.fixture(scope="module", autouse=True) +def check_references(): + _assert_references_exist() + yield + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield + finally: + cluster.shutdown() + + +def test_queries_for_off_server_mode(started_cluster): + _run_mode(node_off, "off") + + +def test_queries_for_on_server_mode(started_cluster): + _run_mode(node_on, "on") diff --git a/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.reference b/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.reference index ae99f04442c8..1a7e90a589dc 100644 --- a/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.reference +++ b/tests/queries/0_stateless/02731_analyzer_join_resolve_nested.reference @@ -227,15 +227,15 @@ 1 Nullable(UInt32) 2 Nullable(UInt32) \N Nullable(UInt32) -(((1,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) -(((2,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) -(((3,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t Int64, s String), s String), s String)) Nullable(String) -((1,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -((2,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -\N \N Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -(1,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -(2,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -\N \N Nullable(Tuple(t UInt32, s String)) Nullable(String) +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) 
Nullable(String) +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) Nullable(String) +(((3,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t Int64,\n s String),\n s String),\n s String) Nullable(String) +((1,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((2,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((0,''),'') \N Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +(1,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(2,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(0,'') \N Tuple(\n t UInt32,\n s String) Nullable(String) 1 s Nullable(UInt32) Nullable(String) 2 s Nullable(UInt32) Nullable(String) \N \N Nullable(UInt32) Nullable(String) @@ -248,15 +248,15 @@ 1 Nullable(UInt32) 2 Nullable(UInt32) \N Nullable(UInt32) -(((1,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -(((2,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -\N \N Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -((1,'s'),'s') s Nullable(Tuple(t Tuple(t Int64, s String), s String)) Nullable(String) -((2,'s'),'s') s Nullable(Tuple(t Tuple(t Int64, s String), s String)) Nullable(String) -((3,'s'),'s') s Nullable(Tuple(t Tuple(t Int64, s String), s String)) Nullable(String) -(1,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -(2,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -\N \N Nullable(Tuple(t UInt32, s String)) Nullable(String) +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s String) Nullable(String) +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s String) Nullable(String) +(((0,''),''),'') \N Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s 
String) Nullable(String) +((1,'s'),'s') s Tuple(\n t Tuple(\n t Int64,\n s String),\n s String) Nullable(String) +((2,'s'),'s') s Tuple(\n t Tuple(\n t Int64,\n s String),\n s String) Nullable(String) +((3,'s'),'s') s Tuple(\n t Tuple(\n t Int64,\n s String),\n s String) Nullable(String) +(1,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(2,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(0,'') \N Tuple(\n t UInt32,\n s String) Nullable(String) 1 s Nullable(UInt32) Nullable(String) 2 s Nullable(UInt32) Nullable(String) \N \N Nullable(UInt32) Nullable(String) @@ -278,15 +278,15 @@ 1 Nullable(Int64) 2 Nullable(Int64) 3 Nullable(Int64) -(((1,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -(((2,'s'),'s'),'s') s Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -\N \N Nullable(Tuple(t Tuple(t Tuple(t UInt32, s String), s String), s String)) Nullable(String) -((1,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -((2,'s'),'s') s Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -\N \N Nullable(Tuple(t Tuple(t UInt32, s String), s String)) Nullable(String) -(1,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -(2,'s') s Nullable(Tuple(t UInt32, s String)) Nullable(String) -\N \N Nullable(Tuple(t UInt32, s String)) Nullable(String) +(((1,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s String) Nullable(String) +(((2,'s'),'s'),'s') s Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s String) Nullable(String) +(((0,''),''),'') \N Tuple(\n t Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String),\n s String) Nullable(String) +((1,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((2,'s'),'s') s Tuple(\n t Tuple(\n t UInt32,\n s String),\n s String) Nullable(String) +((0,''),'') \N Tuple(\n t Tuple(\n t 
UInt32,\n s String),\n s String) Nullable(String) +(1,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(2,'s') s Tuple(\n t UInt32,\n s String) Nullable(String) +(0,'') \N Tuple(\n t UInt32,\n s String) Nullable(String) 1 s Nullable(UInt32) Nullable(String) 2 s Nullable(UInt32) Nullable(String) \N \N Nullable(UInt32) Nullable(String) diff --git a/tests/queries/0_stateless/02940_variant_text_deserialization.reference b/tests/queries/0_stateless/02940_variant_text_deserialization.reference index 38873f40a159..7b5f9f4f7518 100644 --- a/tests/queries/0_stateless/02940_variant_text_deserialization.reference +++ b/tests/queries/0_stateless/02940_variant_text_deserialization.reference @@ -125,14 +125,14 @@ Map {"v":{"a":42,"b":43,"c":0},"variantElement(v, 'Map(String, UInt64)')":{"a":42,"b":43,"c":0}} {"v":"{\"c\" : 44, \"d\" : [1,2,3]}","variantElement(v, 'Map(String, UInt64)')":{}} Tuple -{"v":null,"variantElement(v, 'Tuple(a UInt64, b UInt64)')":null} -{"v":"string","variantElement(v, 'Tuple(a UInt64, b UInt64)')":null} +{"v":null,"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":0,"b":0}} +{"v":"string","variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":0,"b":0}} {"v":{"a":42,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":42,"b":0}} {"v":{"a":44,"b":0},"variantElement(v, 'Tuple(a UInt64, b UInt64)')":{"a":44,"b":0}} -\N \N -string \N +\N (0,0) +string (0,0) (42,0) (42,0) -{"a" : 44, "d" : 32} \N +{"a" : 44, "d" : 32} (0,0) Array {"v":null,"variantElement(v, 'Array(UInt64)')":[]} {"v":"string","variantElement(v, 'Array(UInt64)')":[]} diff --git a/tests/queries/0_stateless/02941_variant_type_1.reference b/tests/queries/0_stateless/02941_variant_type_1.reference index 8c2a78d6c475..53e5a5568219 100644 --- a/tests/queries/0_stateless/02941_variant_type_1.reference +++ b/tests/queries/0_stateless/02941_variant_type_1.reference @@ -73,60 +73,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) 
+(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) (1,2) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 1 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 2 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -238,60 +238,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) -\N +(0,0) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -\N 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 -\N +0 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -403,60 +403,60 @@ lc_str_9 lc_str_15 \N \N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) (4,5) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (10,11) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (16,17) -\N -\N -\N -\N -\N +(0,0) +0 +0 +0 +0 4 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 10 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 16 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 5 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 11 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 17 -\N +0 [] [] [] @@ -569,60 +569,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) (1,2) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 1 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 2 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -733,60 +733,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) (1,2) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 1 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 2 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -898,60 +898,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) -\N +(0,0) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -\N 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 -\N +0 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -1062,60 +1062,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) -\N +(0,0) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -\N 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 -\N +0 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -1227,60 +1227,60 @@ lc_str_9 lc_str_15 \N \N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) (4,5) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (10,11) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (16,17) -\N -\N -\N -\N -\N +(0,0) +0 +0 +0 +0 4 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 10 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 16 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 5 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 11 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 17 -\N +0 [] [] [] @@ -1357,94 +1357,94 @@ str_13 \N \N \N -2 -\N -\N -\N -\N -\N -8 -\N -\N -\N -\N -\N -14 -\N -\N -\N -\N -\N -\N -lc_str_3 -\N -\N -\N -\N -\N -lc_str_9 -\N -\N -\N -\N -\N -lc_str_15 -\N -\N -\N -\N -\N -\N -(4,5) -\N -\N -\N -\N -\N -(10,11) -\N -\N -\N -\N -\N -(16,17) -\N -\N +2 \N \N \N -4 \N \N +8 \N \N \N -10 \N \N +14 \N \N \N -16 \N \N \N +lc_str_3 \N \N -5 \N \N \N +lc_str_9 \N \N -11 \N \N \N 
+lc_str_15 \N \N +(0,0) +(0,0) +(0,0) +(0,0) +(4,5) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(10,11) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(16,17) +(0,0) +0 +0 +0 +0 +4 +0 +0 +0 +0 +0 +10 +0 +0 +0 +0 +0 +16 +0 +0 +0 +0 +0 +5 +0 +0 +0 +0 +0 +11 +0 +0 +0 +0 +0 17 -\N +0 [] [] [] @@ -1557,60 +1557,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) (1,2) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 1 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 2 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -1721,60 +1721,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) (1,2) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 1 2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 2 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -1886,60 +1886,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) -\N +(0,0) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -\N -2 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N 1 -\N +0 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -2050,60 +2050,60 @@ lc_str_2 \N \N \N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (0,1) -\N +(0,0) (2,3) -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -\N 2 -\N -\N -\N -\N 
-\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 1 -\N +0 3 -\N -\N -\N +0 +0 +0 [] [] [] @@ -2215,60 +2215,60 @@ lc_str_9 lc_str_15 \N \N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) (4,5) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (10,11) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (16,17) -\N -\N -\N -\N -\N +(0,0) +0 +0 +0 +0 4 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 10 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 16 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 5 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 11 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 17 -\N +0 [] [] [] @@ -2379,60 +2379,60 @@ lc_str_9 lc_str_15 \N \N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) (4,5) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (10,11) -\N -\N -\N -\N -\N +(0,0) +(0,0) +(0,0) +(0,0) +(0,0) (16,17) -\N -\N -\N -\N -\N +(0,0) +0 +0 +0 +0 4 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 10 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 16 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 5 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 11 -\N -\N -\N -\N -\N +0 +0 +0 +0 +0 17 -\N +0 [] [] [] diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 index 71a1ef656c7f..de12c6b8737e 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 @@ -519,106 +519,106 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -str_10 \N [] [] -10 \N [] [] -10 \N [] [] -[[0,1]] \N [] [] -str_11 \N [] [] -str_11 \N [] [] -str_12 \N [] [] -12 \N [] [] -12 \N [] [] -str_13 \N [] [] -13 \N [] [] -13 \N [] [] -str_14 \N [] [] -14 \N [] [] -14 \N [] [] -str_15 \N [] [] -15 \N [] [] -15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] 
-str_16 \N [] [] -str_17 \N [] [] -17 \N [] [] -17 \N [] [] -str_18 \N [] [] -18 \N [] [] -18 \N [] [] -str_19 \N [] [] -19 \N [] [] -19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] [] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] -69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 
0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 [] @@ -1340,106 +1340,106 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -str_10 \N [] [] -10 \N [] [] -10 \N [] [] 
-[[0,1]] \N [] [] -str_11 \N [] [] -str_11 \N [] [] -str_12 \N [] [] -12 \N [] [] -12 \N [] [] -str_13 \N [] [] -13 \N [] [] -13 \N [] [] -str_14 \N [] [] -14 \N [] [] -14 \N [] [] -str_15 \N [] [] -15 \N [] [] -15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] -str_16 \N [] [] -str_17 \N [] [] -17 \N [] [] -17 \N [] [] -str_18 \N [] [] -18 \N [] [] -18 \N [] [] -str_19 \N [] [] -19 \N [] [] -19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] [] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] -69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] 
[] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 
[] @@ -2161,106 +2161,106 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -str_10 \N [] [] -10 \N [] [] -10 \N [] [] -[[0,1]] \N [] [] -str_11 \N [] [] -str_11 \N [] [] -str_12 \N [] [] -12 \N [] [] -12 \N [] [] -str_13 \N [] [] -13 \N [] [] -13 \N [] [] -str_14 \N [] [] -14 \N [] [] -14 \N [] [] -str_15 \N [] [] -15 \N [] [] -15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] -str_16 \N [] [] -str_17 \N [] [] -17 \N [] [] -17 \N [] [] -str_18 \N [] [] -18 \N [] [] -18 \N [] [] -str_19 \N [] [] -19 \N [] [] -19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] [] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] 
-69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 
[] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 [] diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 index dbdba8754286..d6add681f515 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.reference.j2 @@ -465,96 +465,96 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -[[0]] \N [] [] -str_10 \N [] [] -[[0,1]] \N [] [] -str_11 \N [] [] -[[0,1,2]] \N [] [] -str_12 \N [] [] -[[0,1,2,3]] \N [] [] -str_13 \N [] [] -[[0,1,2,3,4]] \N [] [] -str_14 \N [] [] -[[0,1,2,3,4,5]] \N [] [] -str_15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] -[[0,1,2,3,4,5,6,7]] \N [] [] -str_17 \N [] [] -[[0,1,2,3,4,5,6,7,8]] \N [] [] -str_18 \N [] [] -[[0,1,2,3,4,5,6,7,8,9]] \N [] [] -str_19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] 
[] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] -69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] 
[44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 [] @@ -1202,96 +1202,96 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -[[0]] \N [] [] -str_10 \N [] [] -[[0,1]] \N [] [] -str_11 \N [] [] -[[0,1,2]] \N [] [] -str_12 \N [] [] -[[0,1,2,3]] \N [] [] -str_13 \N [] [] -[[0,1,2,3,4]] \N [] [] -str_14 \N [] [] -[[0,1,2,3,4,5]] \N [] [] -str_15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] -[[0,1,2,3,4,5,6,7]] \N [] [] -str_17 \N [] [] -[[0,1,2,3,4,5,6,7,8]] \N [] [] -str_18 \N [] [] -[[0,1,2,3,4,5,6,7,8,9]] \N [] [] -str_19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N 
[] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] [] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] -69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 
0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 [] @@ -1939,96 +1939,96 @@ str_79 \N \N [] 0 [] 77 \N [] 0 [] [] \N \N [] 0 [] [] \N \N [] 0 [] [] -0 \N [] [] -1 \N [] [] -2 \N [] [] -3 \N [] [] -4 \N [] [] -5 \N [] [] -6 \N [] [] -7 \N [] [] -8 \N [] [] -9 \N [] [] -[[0]] \N [] [] -str_10 \N [] [] -[[0,1]] \N [] [] -str_11 \N [] [] -[[0,1,2]] \N [] [] -str_12 \N [] [] -[[0,1,2,3]] \N [] [] -str_13 \N [] [] -[[0,1,2,3,4]] \N [] [] -str_14 \N [] [] -[[0,1,2,3,4,5]] \N [] [] -str_15 \N [] [] -[[0,1,2,3,4,5,6]] \N [] [] -str_16 \N [] [] -[[0,1,2,3,4,5,6,7]] \N [] [] -str_17 \N [] [] -[[0,1,2,3,4,5,6,7,8]] \N [] [] -str_18 \N [] [] -[[0,1,2,3,4,5,6,7,8,9]] \N [] [] -str_19 \N [] [] -[20] \N [] [20] -['str_21','str_21'] \N [] [NULL,NULL] -[22,22,22] \N [] [22,22,22] -[23,23,23,23] \N [] [23,23,23,23] -[24,24,24,24,24] \N [] [24,24,24,24,24] -[25,25,25,25,25,25] \N [] [25,25,25,25,25,25] -[26,26,26,26,26,26,26] \N [] [26,26,26,26,26,26,26] -[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N [] 
[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -[28,28,28,28,28,28,28,28,28] \N [] [28,28,28,28,28,28,28,28,28] -[29,29,29,29,29,29,29,29,29,29] \N [] [29,29,29,29,29,29,29,29,29,29] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -\N \N [] [] -[40] \N [] [40] -41 \N [] [] -\N \N [] [] -str_43 \N [] [] -[44,44,44,44,44] \N [] [44,44,44,44,44] -45 \N [] [] -\N \N [] [] -str_47 \N [] [] -['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] -49 \N [] [] -\N \N [] [] -str_51 \N [] [] -[52,52,52] \N [] [52,52,52] -53 \N [] [] -\N \N [] [] -str_55 \N [] [] -[56,56,56,56,56,56,56] \N [] [56,56,56,56,56,56,56] -57 \N [] [] -\N \N [] [] -str_59 \N [] [] -[60] \N [] [60] -61 \N [] [] -\N \N [] [] -str_63 \N [] [] -[64,64,64,64,64] \N [] [64,64,64,64,64] -65 \N [] [] -\N \N [] [] -str_67 \N [] [] -[68,68,68,68,68,68,68,68,68] \N [] [68,68,68,68,68,68,68,68,68] -69 \N [] [] -\N \N [] [] -str_71 \N [] [] -[NULL,NULL,NULL] \N [] [NULL,NULL,NULL] -73 \N [] [] -\N \N [] [] -str_75 \N [] [] -[76,76,76,76,76,76,76] \N [] [76,76,76,76,76,76,76] -77 \N [] [] -\N \N [] [] -str_79 \N [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +[[0]] 0 [] [] +str_10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +[[0,1,2]] 0 [] [] +str_12 0 [] [] +[[0,1,2,3]] 0 [] [] +str_13 0 [] [] +[[0,1,2,3,4]] 0 [] [] +str_14 0 [] [] +[[0,1,2,3,4,5]] 0 [] [] +str_15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +[[0,1,2,3,4,5,6,7]] 0 [] [] +str_17 0 [] [] +[[0,1,2,3,4,5,6,7,8]] 0 [] [] +str_18 0 [] [] +[[0,1,2,3,4,5,6,7,8,9]] 0 [] [] +str_19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] 
+[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] [] 0 [] [] 0 [] [] 0 [] diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference index c1f59e9cbc6d..9386548c74d1 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference @@ -1,95 +1,95 @@ initial insert alter add column 1 3 None false -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 1 4 String false 4 UInt64 false 7 None false -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 
\N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 alter modify column 1 4 String true 4 UInt64 true 7 None false -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 insert after alter modify column 1 1 Date true 5 String true 5 UInt64 true 8 None false -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 
+11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 alter modify column 2 1 Date true 5 String true 5 UInt64 true 8 None false -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 insert after alter modify column 2 1 String false 1 UInt64 false @@ -97,29 +97,29 @@ insert after alter modify column 2 5 String true 5 UInt64 true 9 None false -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N -19 19 \N \N \N \N \N -20 20 20 \N 20 \N \N -21 21 str_21 str_21 \N \N \N -22 22 1970-01-23 \N \N 1970-01-23 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 
str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 str_21 \N \N 0 +22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 1 String false 1 UInt64 false @@ -127,29 +127,29 @@ alter modify column 3 5 String true 5 UInt64 true 9 None false -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N \N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N \N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 1 String false 1 UInt64 false @@ -157,29 +157,29 @@ insert after alter modify column 3 5 String true 5 UInt64 true 12 None false -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N 
\N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N \N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N -23 \N \N \N \N \N \N \N \N -24 24 24 \N \N \N \N \N \N -25 str_25 \N str_25 \N \N \N \N \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference index 5e0ccd65ba1e..d7123288280e 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference @@ -1,179 +1,179 @@ initial insert alter add column 1 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 
3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 alter modify column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 insert after alter modify column 1 1 Date 5 String 5 UInt64 8 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N 
\N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 alter modify column 2 1 Date 5 String 5 UInt64 8 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 insert after alter modify column 2 2 Date 6 String 6 UInt64 9 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N -19 19 \N \N \N \N \N -20 20 20 \N 20 \N \N -21 21 str_21 str_21 \N \N \N -22 22 1970-01-23 \N \N 1970-01-23 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 
12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 str_21 \N \N 0 +22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 2 Date 6 String 6 UInt64 9 None -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N \N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N \N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 2 Date 6 String 6 UInt64 12 None -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N \N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 
13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N \N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N -23 \N \N \N \N \N \N \N \N -24 24 24 \N \N \N \N \N \N -25 str_25 \N str_25 \N \N \N \N \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference index 5e0ccd65ba1e..d7123288280e 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference @@ -1,179 +1,179 @@ initial insert alter add column 1 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 
\N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 alter modify column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 insert after alter modify column 1 1 Date 5 String 5 UInt64 8 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 alter modify column 2 1 Date 5 String 5 
UInt64 8 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 insert after alter modify column 2 2 Date 6 String 6 UInt64 9 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N -15 15 \N \N \N \N \N -16 16 16 \N 16 \N \N -17 17 str_17 str_17 \N \N \N -18 18 1970-01-19 \N \N 1970-01-19 \N -19 19 \N \N \N \N \N -20 20 20 \N 20 \N \N -21 21 str_21 str_21 \N \N \N -22 22 1970-01-23 \N \N 1970-01-23 \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +19 19 \N \N \N \N 0 +20 20 20 \N 20 \N 0 +21 21 str_21 str_21 \N \N 0 +22 
22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 2 Date 6 String 6 UInt64 9 None -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N \N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N \N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 2 Date 6 String 6 UInt64 12 None -0 0 0 \N \N \N \N \N \N -1 1 1 \N \N \N \N \N \N -2 2 2 \N \N \N \N \N \N -3 3 3 \N \N \N 3 \N \N -4 4 4 \N \N \N 4 \N \N -5 5 5 \N \N \N 5 \N \N -6 6 6 \N \N str_6 \N \N \N -7 7 7 \N \N str_7 \N \N \N -8 8 8 \N \N str_8 \N \N \N -9 9 9 \N \N \N \N \N \N -10 10 10 \N \N \N \N \N \N -11 11 11 \N \N \N \N \N \N -12 12 12 \N \N \N 12 \N \N -13 13 13 \N \N str_13 \N \N \N -14 14 14 \N \N \N \N \N \N -15 15 15 \N \N \N \N \N \N -16 16 16 \N \N \N 16 \N \N -17 17 17 \N \N str_17 \N \N \N -18 18 18 \N \N \N \N 1970-01-19 \N -19 19 19 \N \N \N \N \N \N -20 20 20 \N \N \N 20 \N \N -21 21 21 \N 
\N str_21 \N \N \N -22 22 22 \N \N \N \N 1970-01-23 \N -23 \N \N \N \N \N \N \N \N -24 24 24 \N \N \N \N \N \N -25 str_25 \N str_25 \N \N \N \N \N +0 0 0 \N 0 \N \N \N 0 +1 1 1 \N 0 \N \N \N 0 +2 2 2 \N 0 \N \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 +6 6 6 \N 0 str_6 \N \N 0 +7 7 7 \N 0 str_7 \N \N 0 +8 8 8 \N 0 str_8 \N \N 0 +9 9 9 \N 0 \N \N \N 0 +10 10 10 \N 0 \N \N \N 0 +11 11 11 \N 0 \N \N \N 0 +12 12 12 \N 0 \N 12 \N 0 +13 13 13 \N 0 str_13 \N \N 0 +14 14 14 \N 0 \N \N \N 0 +15 15 15 \N 0 \N \N \N 0 +16 16 16 \N 0 \N 16 \N 0 +17 17 17 \N 0 str_17 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 +19 19 19 \N 0 \N \N \N 0 +20 20 20 \N 0 \N 20 \N 0 +21 21 21 \N 0 str_21 \N \N 0 +22 22 22 \N 0 \N \N 1970-01-23 0 +23 \N \N \N 0 \N \N \N 0 +24 24 24 \N 0 \N \N \N 0 +25 str_25 \N str_25 0 \N \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference index dbdc85e170f4..a2f2a19805db 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_compact_merge_tree.reference @@ -1,90 +1,90 @@ initial insert alter add column 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 
+11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 alter rename column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 insert nested dynamic 3 Array(Dynamic) 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N [] [] [] -1 1 \N \N \N \N \N [] [] [] -2 2 \N \N \N \N \N [] [] [] -3 3 3 \N 3 \N \N [] [] [] -4 4 4 \N 4 \N \N [] [] [] -5 5 5 \N 5 \N \N [] [] [] -6 6 str_6 str_6 \N \N \N [] [] [] -7 7 str_7 str_7 \N \N \N [] [] [] -8 8 str_8 str_8 \N \N \N [] [] [] -9 9 \N \N \N \N \N [] [] [] -10 10 \N \N \N \N \N [] [] [] -11 11 \N \N \N \N \N [] [] [] -12 12 12 \N 12 \N \N [] [] [] -13 13 str_13 str_13 \N \N \N [] [] [] -14 14 \N \N \N \N \N [] [] [] -15 15 [15] \N \N \N \N [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N \N [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N \N [17] [NULL] [NULL] +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] 
+17 17 [17] \N \N \N 0 [17] [NULL] [NULL] alter rename column 2 3 Array(Dynamic) 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N [] [] [] -1 1 \N \N \N \N \N [] [] [] -2 2 \N \N \N \N \N [] [] [] -3 3 3 \N 3 \N \N [] [] [] -4 4 4 \N 4 \N \N [] [] [] -5 5 5 \N 5 \N \N [] [] [] -6 6 str_6 str_6 \N \N \N [] [] [] -7 7 str_7 str_7 \N \N \N [] [] [] -8 8 str_8 str_8 \N \N \N [] [] [] -9 9 \N \N \N \N \N [] [] [] -10 10 \N \N \N \N \N [] [] [] -11 11 \N \N \N \N \N [] [] [] -12 12 12 \N 12 \N \N [] [] [] -13 13 str_13 str_13 \N \N \N [] [] [] -14 14 \N \N \N \N \N [] [] [] -15 15 [15] \N \N \N \N [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N \N [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N \N [17] [NULL] [NULL] +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference index dbdc85e170f4..a2f2a19805db 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_2_wide_merge_tree.reference @@ -1,90 +1,90 @@ initial insert alter add column 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 
5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 alter rename column 1 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 insert nested dynamic 3 Array(Dynamic) 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N [] [] [] -1 1 \N \N \N \N \N [] [] [] -2 2 \N \N \N \N \N [] [] [] -3 3 3 \N 3 \N \N [] [] [] -4 4 4 \N 4 \N \N [] [] [] -5 5 5 \N 5 \N \N [] [] [] -6 6 str_6 str_6 \N \N \N [] [] [] -7 7 str_7 str_7 \N \N \N [] [] [] -8 8 str_8 str_8 \N \N \N [] [] [] -9 9 \N \N \N \N \N [] [] [] -10 10 \N \N \N \N \N [] [] [] -11 11 \N \N \N \N \N [] [] [] -12 12 12 \N 12 \N \N [] [] [] -13 13 str_13 str_13 \N \N \N [] [] [] -14 14 \N \N \N \N \N [] [] [] -15 15 [15] \N \N \N \N [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N \N [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N \N [17] [NULL] [NULL] +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 
\N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] alter rename column 2 3 Array(Dynamic) 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N [] [] [] -1 1 \N \N \N \N \N [] [] [] -2 2 \N \N \N \N \N [] [] [] -3 3 3 \N 3 \N \N [] [] [] -4 4 4 \N 4 \N \N [] [] [] -5 5 5 \N 5 \N \N [] [] [] -6 6 str_6 str_6 \N \N \N [] [] [] -7 7 str_7 str_7 \N \N \N [] [] [] -8 8 str_8 str_8 \N \N \N [] [] [] -9 9 \N \N \N \N \N [] [] [] -10 10 \N \N \N \N \N [] [] [] -11 11 \N \N \N \N \N [] [] [] -12 12 12 \N 12 \N \N [] [] [] -13 13 str_13 str_13 \N \N \N [] [] [] -14 14 \N \N \N \N \N [] [] [] -15 15 [15] \N \N \N \N [15] [NULL] [NULL] -16 16 ['str_16'] \N \N \N \N [NULL] ['str_16'] [NULL] -17 17 [17] \N \N \N \N [17] [NULL] [NULL] +0 0 \N \N \N \N 0 [] [] [] +1 1 \N \N \N \N 0 [] [] [] +2 2 \N \N \N \N 0 [] [] [] +3 3 3 \N 3 \N 0 [] [] [] +4 4 4 \N 4 \N 0 [] [] [] +5 5 5 \N 5 \N 0 [] [] [] +6 6 str_6 str_6 \N \N 0 [] [] [] +7 7 str_7 str_7 \N \N 0 [] [] [] +8 8 str_8 str_8 \N \N 0 [] [] [] +9 9 \N \N \N \N 0 [] [] [] +10 10 \N \N \N \N 0 [] [] [] +11 11 \N \N \N \N 0 [] [] [] +12 12 12 \N 12 \N 0 [] [] [] +13 13 str_13 str_13 \N \N 0 [] [] [] +14 14 \N \N \N \N 0 [] [] [] +15 15 [15] \N \N \N 0 [15] [NULL] [NULL] +16 16 ['str_16'] \N \N \N 0 [NULL] ['str_16'] [NULL] +17 17 [17] \N \N \N 0 [17] [NULL] [NULL] diff --git a/tests/queries/0_stateless/03041_dynamic_type_check_table.reference b/tests/queries/0_stateless/03041_dynamic_type_check_table.reference index b1ea186a9171..0dab4ea0d207 100644 --- 
a/tests/queries/0_stateless/03041_dynamic_type_check_table.reference +++ b/tests/queries/0_stateless/03041_dynamic_type_check_table.reference @@ -2,55 +2,55 @@ MergeTree compact initial insert alter add column 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 check table 1 MergeTree wide initial insert alter add column 3 None -0 0 \N \N \N \N -1 1 \N \N \N \N -2 2 \N \N \N \N +0 0 \N \N \N 0 +1 1 \N \N \N 0 +2 2 \N \N \N 0 insert after alter add column 4 String 4 UInt64 7 None -0 0 \N \N \N \N \N -1 1 \N \N \N \N \N -2 2 \N \N \N \N \N -3 3 3 \N 3 \N \N -4 4 4 \N 4 \N \N -5 5 5 \N 5 \N \N -6 6 str_6 str_6 \N \N \N -7 7 str_7 str_7 \N \N \N -8 8 str_8 str_8 \N \N \N -9 9 \N \N \N \N \N -10 10 \N \N \N \N \N -11 11 \N \N \N \N \N -12 12 12 \N 12 \N \N -13 13 str_13 str_13 \N \N \N -14 14 \N \N \N \N \N +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 check table 1 diff --git a/tests/queries/0_stateless/03162_dynamic_type_nested.reference 
b/tests/queries/0_stateless/03162_dynamic_type_nested.reference index 327b065ea9b0..45861bf4d8d8 100644 --- a/tests/queries/0_stateless/03162_dynamic_type_nested.reference +++ b/tests/queries/0_stateless/03162_dynamic_type_nested.reference @@ -1,4 +1,4 @@ ┌─dynamicType(d)──────────────┬─d─────────────────────────────────────────┬─d.Nested(x U⋯ Dynamic).x─┬─d.Nested(x UInt32, y Dynamic).y───┬─dynamicType(a⋯namic).y, 1))─┬─d.Nested(x U⋯c).y.String─┬─d.Nested(x UInt3⋯, Array(String))─┐ -1. │ Nested(x UInt32, y Dynamic) │ [(1,'aa'),(2,'bb')] │ [1,2] │ ['aa','bb'] │ String │ ['aa','bb'] │ [NULL,NULL] │ +1. │ Nested(x UInt32, y Dynamic) │ [(1,'aa'),(2,'bb')] │ [1,2] │ ['aa','bb'] │ String │ ['aa','bb'] │ [(0,[]),(0,[])] │ 2. │ Nested(x UInt32, y Dynamic) │ [(1,(2,['aa','bb'])),(5,(6,['ee','ff']))] │ [1,5] │ [(2,['aa','bb']),(6,['ee','ff'])] │ Tuple(Int64, Array(String)) │ [NULL,NULL] │ [(2,['aa','bb']),(6,['ee','ff'])] │ └─────────────────────────────┴───────────────────────────────────────────┴──────────────────────────┴───────────────────────────────────┴─────────────────────────────┴──────────────────────────┴───────────────────────────────────┘ diff --git a/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.reference b/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.reference index eaf7b1254f98..df54be3d2123 100644 --- a/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.reference +++ b/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.reference @@ -1 +1,10 @@ +-- { echo } + +set enable_json_type=1; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +set allow_experimental_nullable_tuple_type=0; +select materialize('{"a" : [[1, {}], null]}')::JSON as json, getSubcolumn(json, 'a'), dynamicType(getSubcolumn(json, 'a')); +{"a":[[1,{}],[null,{}]]} [(1,'{}'),(NULL,'{}')] Array(Tuple(Nullable(Int64), JSON(max_dynamic_types=16, max_dynamic_paths=256))) +set 
allow_experimental_nullable_tuple_type=1; +select materialize('{"a" : [[1, {}], null]}')::JSON as json, getSubcolumn(json, 'a'), dynamicType(getSubcolumn(json, 'a')); {"a":[[1,{}],null]} [(1,'{}'),NULL] Array(Nullable(Tuple(Int64, JSON(max_dynamic_types=16, max_dynamic_paths=256)))) diff --git a/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.sql b/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.sql index c34df1f7d30c..6ed47739d3bc 100644 --- a/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.sql +++ b/tests/queries/0_stateless/03229_json_null_as_default_for_tuple.sql @@ -1,4 +1,12 @@ +-- { echo } + set enable_json_type=1; set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +set allow_experimental_nullable_tuple_type=0; + +select materialize('{"a" : [[1, {}], null]}')::JSON as json, getSubcolumn(json, 'a'), dynamicType(getSubcolumn(json, 'a')); + +set allow_experimental_nullable_tuple_type=1; + select materialize('{"a" : [[1, {}], null]}')::JSON as json, getSubcolumn(json, 'a'), dynamicType(getSubcolumn(json, 'a')); diff --git a/tests/queries/0_stateless/03290_nullable_json.reference b/tests/queries/0_stateless/03290_nullable_json.reference index 4471f65780fb..086bfe789593 100644 --- a/tests/queries/0_stateless/03290_nullable_json.reference +++ b/tests/queries/0_stateless/03290_nullable_json.reference @@ -11,10 +11,10 @@ \N Nullable(UInt32) \N Nullable(UInt32) \N Nullable(UInt32) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) 1 Nullable(UInt32) \N Nullable(UInt32) 1 Nullable(UInt32) @@ -52,10 +52,10 @@ \N Nullable(UInt32) \N 
Nullable(UInt32) \N Nullable(UInt32) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) 1 Nullable(UInt32) \N Nullable(UInt32) 1 Nullable(UInt32) @@ -93,10 +93,10 @@ \N Nullable(UInt32) \N Nullable(UInt32) \N Nullable(UInt32) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) -(1,NULL) Nullable(Tuple(e UInt32, f Nullable(UInt32))) -\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(1,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) 1 Nullable(UInt32) \N Nullable(UInt32) 1 Nullable(UInt32) diff --git a/tests/queries/0_stateless/03369_variant_escape_filename_merge_tree.reference b/tests/queries/0_stateless/03369_variant_escape_filename_merge_tree.reference index 3e7ba3ec7ff3..7bb1758fd6be 100644 --- a/tests/queries/0_stateless/03369_variant_escape_filename_merge_tree.reference +++ b/tests/queries/0_stateless/03369_variant_escape_filename_merge_tree.reference @@ -1,2 +1,2 @@ -['v.variant_discr','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Ea','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Eb',''] -['v.variant_discr','v.Tuple(a UInt32, b UInt32)%2Ea','v.Tuple(a UInt32, b UInt32)%2Eb',''] +['v.variant_discr','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Ea','v.Tuple%28a%20UInt32%2C%20b%20UInt32%29%2Eb'] +['v.variant_discr','v.Tuple(a UInt32, b UInt32)%2Ea','v.Tuple(a UInt32, b UInt32)%2Eb'] diff --git a/tests/queries/0_stateless/03710_tuple_inside_nullable_function_tuple_element.reference 
b/tests/queries/0_stateless/03710_tuple_inside_nullable_function_tuple_element.reference index 49865383de75..0ff0972e26a1 100644 --- a/tests/queries/0_stateless/03710_tuple_inside_nullable_function_tuple_element.reference +++ b/tests/queries/0_stateless/03710_tuple_inside_nullable_function_tuple_element.reference @@ -28,10 +28,10 @@ Default value with NULL tuple Nullable(Int32) Nested Nullable(Tuple) - outer nullable (1,'inner') -Nullable(Tuple(Int32, String)) +Tuple(Int32, String) Nested Nullable(Tuple) - NULL outer -\N -Nullable(Tuple(Int32, String)) +(0,'') +Tuple(Int32, String) Regular Tuple with Nullable element \N Nullable(String) diff --git a/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.reference b/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.reference new file mode 100644 index 000000000000..3f168fc303dd --- /dev/null +++ b/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.reference @@ -0,0 +1,76 @@ +-- Regardless of the setting allow_experimental_nullable_tuple_type, the output should be same. +-- The behavior is controlled by `allow_nullable_tuple_in_extracted_subcolumns` from global context. 
+ +-- { echo } + +SET enable_analyzer = 1; +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); +Tuple(UInt64, String) (1,'x') +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); +Tuple(UInt64, String) (1,'x') +DROP TABLE IF EXISTS test_variant; +CREATE TABLE test_variant (v Variant(Tuple(UInt64, String), UInt64)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_variant VALUES (CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)')); +INSERT INTO test_variant VALUES (CAST(toUInt64(5), 'Variant(Tuple(UInt64, String), UInt64)')); +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String)'); +Tuple(UInt64, String) (0,'') 
+Tuple(UInt64, String) (1,'x') +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); +Nullable(UInt64) 5 +Nullable(UInt64) \N +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)'); +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (1,'x') +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); +Nullable(UInt64) 5 +Nullable(UInt64) \N +DROP TABLE test_variant; +DROP TABLE IF EXISTS test_dynamic; +CREATE TABLE test_dynamic (d Dynamic(max_types=1)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_dynamic VALUES (CAST(toUInt64(10), 'Dynamic(max_types=1)')); +INSERT INTO test_dynamic VALUES (CAST(tuple(toUInt64(1), 'x'), 'Dynamic(max_types=1)')); +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +Tuple(Nullable(UInt64), Nullable(String)) (1,'x') +Tuple(Nullable(UInt64), Nullable(String)) (NULL,NULL) +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), 
Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); +Nullable(UInt64) 10 +Nullable(UInt64) \N +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +Tuple(Nullable(UInt64), Nullable(String)) (1,'x') +Tuple(Nullable(UInt64), Nullable(String)) (NULL,NULL) +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); +Nullable(UInt64) 10 +Nullable(UInt64) \N +DROP TABLE test_dynamic; +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +LowCardinality(Nullable(String)) x +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Dynamic AS d); +LowCardinality(Nullable(String)) x +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), 
getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +LowCardinality(Nullable(String)) x +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Dynamic AS d); +LowCardinality(Nullable(String)) x diff --git a/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.sql b/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.sql new file mode 100644 index 000000000000..c0d3b508555b --- /dev/null +++ b/tests/queries/0_stateless/03913_tuple_inside_nullable_subcolumns.sql @@ -0,0 +1,60 @@ +-- Regardless of the setting allow_experimental_nullable_tuple_type, the output should be same. +-- The behavior is controlled by `allow_nullable_tuple_in_extracted_subcolumns` from global context. + +-- { echo } + +SET enable_analyzer = 1; + +SET allow_experimental_nullable_tuple_type = 0; + +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); + +SET allow_experimental_nullable_tuple_type = 1; + +SELECT toTypeName(v.`Tuple(UInt64, String)`), v.`Tuple(UInt64, String)` FROM (SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` FROM (SELECT 42::Dynamic AS d); +SELECT toTypeName(j.c.:`Tuple(UInt64, String)`), j.c.:`Tuple(UInt64, String)` FROM (SELECT CAST('{"a":1}', 'JSON(a UInt64)') AS j); +SELECT toTypeName(d.`Tuple(UInt64, String)`), d.`Tuple(UInt64, String)` 
FROM (SELECT (1, 'x')::Tuple(UInt64, String)::Dynamic AS d); + +DROP TABLE IF EXISTS test_variant; +CREATE TABLE test_variant (v Variant(Tuple(UInt64, String), UInt64)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_variant VALUES (CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)')); +INSERT INTO test_variant VALUES (CAST(toUInt64(5), 'Variant(Tuple(UInt64, String), UInt64)')); + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String)'); +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'Tuple(UInt64, String)')), getSubcolumn(v, 'Tuple(UInt64, String)'); +SELECT toTypeName(getSubcolumn(v, 'Tuple(UInt64, String).null')), getSubcolumn(v, 'Tuple(UInt64, String).null') FROM test_variant ORDER BY getSubcolumn(v, 'Tuple(UInt64, String).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64') FROM test_variant ORDER BY isNull(getSubcolumn(v, 'UInt64')), getSubcolumn(v, 'UInt64'); +DROP TABLE test_variant; + +DROP TABLE IF EXISTS test_dynamic; +CREATE TABLE test_dynamic (d Dynamic(max_types=1)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_dynamic VALUES (CAST(toUInt64(10), 'Dynamic(max_types=1)')); +INSERT INTO test_dynamic VALUES (CAST(tuple(toUInt64(1), 'x'), 'Dynamic(max_types=1)')); + +SET 
allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); + +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String))'); +SELECT toTypeName(getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null')), getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null') FROM test_dynamic ORDER BY getSubcolumn(d, 'Tuple(Nullable(UInt64), Nullable(String)).null'); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64') FROM test_dynamic ORDER BY isNull(getSubcolumn(d, 'UInt64')), getSubcolumn(d, 'UInt64'); +DROP TABLE test_dynamic; + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Dynamic AS d); + +SET 
allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(getSubcolumn(v, 'LowCardinality(String)')), getSubcolumn(v, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Variant(LowCardinality(String), UInt64) AS v); +SELECT toTypeName(getSubcolumn(d, 'LowCardinality(String)')), getSubcolumn(d, 'LowCardinality(String)') FROM (SELECT CAST('x', 'LowCardinality(String)')::Dynamic AS d); diff --git a/tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.reference b/tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.sql b/tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.sql new file mode 100644 index 000000000000..e82093b4e5b6 --- /dev/null +++ b/tests/queries/0_stateless/03914_dynamic_illegal_subcolumn.sql @@ -0,0 +1,14 @@ +SELECT + toTypeName(getSubcolumn(d, 'Tuple(UInt64, String).null')), + getSubcolumn(d, 'Tuple(UInt64, String).null') +FROM (SELECT 42::Dynamic AS d); -- { serverError ILLEGAL_COLUMN } + +SELECT + toTypeName(getSubcolumn(d, 'Array(UInt64).null')), + getSubcolumn(d, 'Array(UInt64).null') +FROM (SELECT 42::Dynamic AS d); -- { serverError ILLEGAL_COLUMN } + +SELECT + toTypeName(getSubcolumn(d, 'Map(String, UInt64).null')), + getSubcolumn(d, 'Map(String, UInt64).null') +FROM (SELECT 42::Dynamic AS d); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.reference b/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.reference new file mode 100644 index 000000000000..0f705c2a22f9 --- /dev/null +++ b/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.reference @@ -0,0 +1,31 @@ +-- Regardless of the setting allow_experimental_nullable_tuple_type, the output should be same. 
+-- The behavior is controlled by `allow_nullable_tuple_in_extracted_subcolumns` from global context. + +-- { echo } + +SET allow_experimental_nullable_tuple_type = 0; +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +Tuple(UInt64, String) (1,'x') +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Dynamic') AS d); +Tuple(UInt64, String) (1,'x') +SET allow_experimental_nullable_tuple_type = 1; +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +Tuple(UInt64, String) (1,'x') +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +Tuple(UInt64, String) (0,'') +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Dynamic') AS d); +Tuple(UInt64, String) (1,'x') diff --git a/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.sql 
b/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.sql new file mode 100644 index 000000000000..d50fe5c84334 --- /dev/null +++ b/tests/queries/0_stateless/03915_tuple_inside_nullable_variant_dynamic_element.sql @@ -0,0 +1,26 @@ +-- Regardless of the setting allow_experimental_nullable_tuple_type, the output should be same. +-- The behavior is controlled by `allow_nullable_tuple_in_extracted_subcolumns` from global context. + +-- { echo } + +SET allow_experimental_nullable_tuple_type = 0; + +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Dynamic') AS d); + +SET allow_experimental_nullable_tuple_type = 1; + +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(variantElement(v, 'Tuple(UInt64, String)')), variantElement(v, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 'Variant(Tuple(UInt64, String), UInt64)') AS v); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(toUInt64(42), 'Dynamic') AS d); +SELECT toTypeName(dynamicElement(d, 'Tuple(UInt64, String)')), dynamicElement(d, 'Tuple(UInt64, String)') +FROM (SELECT CAST(tuple(toUInt64(1), 'x'), 
'Dynamic') AS d); diff --git a/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.reference b/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.reference new file mode 100644 index 000000000000..4ff0043f29e3 --- /dev/null +++ b/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.reference @@ -0,0 +1,40 @@ +-- { echo } + +SET allow_experimental_nullable_tuple_type = 0; +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d Tuple(e UInt32, f Nullable(UInt32)) + )) +) ENGINE = Memory; +INSERT INTO test +SELECT NULL +FROM numbers(4); +SELECT json.d AS path, toTypeName(path) FROM test; +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +(0,NULL) Tuple(\n e UInt32,\n f Nullable(UInt32)) +SET allow_experimental_nullable_tuple_type = 1; +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d Nullable(Tuple(e UInt32, f Nullable(UInt32))) + )) +) ENGINE = Memory; +INSERT INTO test +SELECT NULL +FROM numbers(4); +SELECT json.d AS path, toTypeName(path) FROM test; +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) +\N Nullable(Tuple(e UInt32, f Nullable(UInt32))) diff --git a/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.sql b/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.sql new file mode 100644 index 000000000000..ad89a0a2d83d --- /dev/null +++ b/tests/queries/0_stateless/03916_tuple_inside_nullable_json_subcolumns.sql @@ -0,0 +1,42 @@ +-- { echo } + +SET allow_experimental_nullable_tuple_type = 0; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d 
Tuple(e UInt32, f Nullable(UInt32)) + )) +) ENGINE = Memory; + +INSERT INTO test +SELECT NULL +FROM numbers(4); + +SELECT json.d AS path, toTypeName(path) FROM test; + +SET allow_experimental_nullable_tuple_type = 1; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + json Nullable(JSON( + a UInt32, + b Array(UInt32), + c Nullable(UInt32), + d Nullable(Tuple(e UInt32, f Nullable(UInt32))) + )) +) ENGINE = Memory; + +INSERT INTO test +SELECT NULL +FROM numbers(4); + +SELECT json.d AS path, toTypeName(path) FROM test; + diff --git a/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.reference b/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.reference new file mode 100644 index 000000000000..67af267f1621 --- /dev/null +++ b/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.reference @@ -0,0 +1,4 @@ +Tuple(\n x UInt32,\n y String) (1,'aa') Nullable(UInt32) 1 Nullable(String) aa +Tuple(\n x UInt32,\n y String) (0,'') Nullable(UInt32) \N Nullable(String) \N +Tuple(\n x UInt32,\n y String) (1,'aa') UInt32 1 String aa +Tuple(\n x UInt32,\n y String) (0,'') UInt32 0 String diff --git a/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.sql b/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.sql new file mode 100644 index 000000000000..321ccaacc5f1 --- /dev/null +++ b/tests/queries/0_stateless/03917_tuple_inside_nullable_tuple_subcolumns.sql @@ -0,0 +1,24 @@ +SET allow_experimental_nullable_tuple_type = 1; + +DROP TABLE IF EXISTS x; +CREATE TABLE x +( + t Nullable(Tuple(a Tuple(x UInt32, y String), b String)) +) ENGINE = Memory; + +INSERT INTO x VALUES (((1, 'aa'), 'B')), (NULL); + +SELECT + toTypeName(t.a), t.a, + toTypeName(t.a.x), t.a.x, + toTypeName(t.a.y), t.a.y +FROM x; + +SELECT + toTypeName(tupleElement(t, 'a')), + tupleElement(t, 'a'), + toTypeName(tupleElement(tupleElement(t, 'a'), 'x')), + tupleElement(tupleElement(t, 'a'), 'x'), + 
toTypeName(tupleElement(tupleElement(t, 'a'), 'y')), + tupleElement(tupleElement(t, 'a'), 'y') +FROM x; diff --git a/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.reference b/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.reference new file mode 100644 index 000000000000..a5c7dda95af0 --- /dev/null +++ b/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.reference @@ -0,0 +1,2 @@ +Tuple(UInt64, String) (0,'') +Tuple(UInt64, String) (0,'') diff --git a/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.sql b/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.sql new file mode 100644 index 000000000000..8ae1a0510320 --- /dev/null +++ b/tests/queries/0_stateless/03918_allow_nullable_tuple_in_extracted_subcolumns_not_changeable.sql @@ -0,0 +1,22 @@ +-- The setting `allow_nullable_tuple_in_extracted_subcolumns` can only be changed via server restart + +SET enable_analyzer = 1; +SET allow_nullable_tuple_in_extracted_subcolumns = 1; + +SELECT + toTypeName(v.`Tuple(UInt64, String)`), + v.`Tuple(UInt64, String)` +FROM +( + SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v +); + +SET allow_nullable_tuple_in_extracted_subcolumns = 0; + +SELECT + toTypeName(v.`Tuple(UInt64, String)`), + v.`Tuple(UInt64, String)` +FROM +( + SELECT 42::Variant(Tuple(UInt64, String), UInt64) AS v +); From ad23ffca444585925c7e188b084e90299bd965e5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 6 Mar 2026 13:36:15 +0000 Subject: [PATCH 25/53] Backport #98514 to 26.1: Fix unexpected result with read_in_order_use_virtual_row --- .../Optimizations/optimizeReadInOrder.cpp | 2 +- ...ow_conversions_join_column_names.reference | 16 +++++++++++ ...tual_row_conversions_join_column_names.sql | 27 +++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 
tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.reference create mode 100644 tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.sql diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index b5f497845de1..06052cfa7784 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -428,7 +428,7 @@ const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::N args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name})); } - return &dag.addFunction(node->function_base, std::move(args), {}); + return &dag.addFunction(node->function_base, std::move(args), node->result_name); } struct SortingInputOrder diff --git a/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.reference b/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.reference new file mode 100644 index 000000000000..3d454cd4efb8 --- /dev/null +++ b/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.reference @@ -0,0 +1,16 @@ +-10 +-1 +0 +0 +1 +10 +- +0 1 +0 1 +0 2 +0 2 +- +0 1 +0 1 +0 2 +0 2 diff --git a/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.sql b/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.sql new file mode 100644 index 000000000000..30e1af99e362 --- /dev/null +++ b/tests/queries/0_stateless/04001_virtual_row_conversions_join_column_names.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +SET allow_suspicious_low_cardinality_types = 1; +SET enable_analyzer = 1; +CREATE TABLE t0 (c0 LowCardinality(Int)) ENGINE = MergeTree() ORDER BY (c0); +CREATE TABLE t1 (c0 Nullable(Int)) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO TABLE t0 (c0) VALUES (0), (1); +INSERT INTO TABLE t0 (c0) VALUES 
(-10), (10); +INSERT INTO TABLE t0 (c0) VALUES (0), (-1); +INSERT INTO TABLE t1 (c0) VALUES (1), (2); + +SET read_in_order_use_virtual_row = 1; + + +SELECT CAST(c0, 'Int32') a FROM t0 ORDER BY a; + +SELECT '-'; +SELECT * FROM t0 JOIN t1 ON t1.c0.null = t0.c0 +ORDER BY t0.c0, t1.c0; + +SELECT '-'; + +SELECT * FROM t0 JOIN t1 ON t1.c0.null = t0.c0 +ORDER BY t0.c0, t1.c0 +SETTINGS join_algorithm = 'full_sorting_merge'; From 64fb4d1115658e28bb9773196ac03c49844874af Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 9 Mar 2026 11:26:20 +0000 Subject: [PATCH 26/53] Backport #98482 to 26.1: Allow `CAST(... AS Nullable(T))` monotonicity to enable read-in-order optimization and primary key pruning --- src/Functions/FunctionsConversion.cpp | 19 +++- .../cast_nullable_read_in_order.xml | 13 +++ ...4002_cast_nullable_read_in_order.reference | 66 ++++++++++++++ .../04002_cast_nullable_read_in_order.sql | 60 ++++++++++++ ...t_nullable_read_in_order_explain.reference | 83 +++++++++++++++++ ...03_cast_nullable_read_in_order_explain.sql | 91 +++++++++++++++++++ ...lable_monotonicity_key_condition.reference | 16 ++++ ...st_nullable_monotonicity_key_condition.sql | 15 +++ 8 files changed, 361 insertions(+), 2 deletions(-) create mode 100644 tests/performance/cast_nullable_read_in_order.xml create mode 100644 tests/queries/0_stateless/04002_cast_nullable_read_in_order.reference create mode 100644 tests/queries/0_stateless/04002_cast_nullable_read_in_order.sql create mode 100644 tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.reference create mode 100644 tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.sql create mode 100644 tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.reference create mode 100644 tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.sql diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index b1dbb07b99fc..f7c47054ff76 100644 --- 
a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -61,8 +61,23 @@ FunctionBasePtr createFunctionBaseCast( detail::FunctionCast::MonotonicityForRange monotonicity; + auto monotonicity_result_type = recursiveRemoveLowCardinality(return_type); + + /// Monotonicity for CAST is determined by conversion to the nested target type. + /// Nullable only wraps the conversion result and does not change the order of successfully converted values. + /// We remove Nullable here so CAST(..., Nullable(T)) can reuse T monotonicity metadata in optimizeReadInOrder + /// and KeyCondition function-chain analysis. This is not a problem because both of them still validate monotonicity + /// on actual argument types/ranges using getMonotonicityForRange. + /// We do not do this for accurateCastOrNull because failed conversions can produce NULL from non-NULL input values. + /// For example, ordered Float64 values (1.0, 1.1, 1.2, 1.25, 1.3, 1.5) become + /// (1.0, NULL, NULL, 1.25, NULL, 1.5) with accurateCastOrNull(..., 'Float32'). + /// This can violate monotonicity assumptions used by optimizeReadInOrder/KeyCondition + /// and can produce incorrect ORDER BY results. 
+ if (cast_type != CastType::accurateOrNull) + monotonicity_result_type = removeNullable(monotonicity_result_type); + if (isEnum(arguments.front().type) - && castTypeToEither(return_type.get(), [&](auto & type) + && castTypeToEither(monotonicity_result_type.get(), [&](auto & type) { monotonicity = detail::FunctionTo>::Type::Monotonic::get; return true; @@ -74,7 +89,7 @@ FunctionBasePtr createFunctionBaseCast( DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64, DataTypeInt128, DataTypeInt256, DataTypeFloat32, DataTypeFloat64, DataTypeDate, DataTypeDate32, DataTypeDateTime, DataTypeDateTime64, DataTypeTime, DataTypeTime64, - DataTypeString>(recursiveRemoveLowCardinality(return_type).get(), [&](auto & type) + DataTypeString>(monotonicity_result_type.get(), [&](auto & type) { monotonicity = detail::FunctionTo>::Type::Monotonic::get; return true; diff --git a/tests/performance/cast_nullable_read_in_order.xml b/tests/performance/cast_nullable_read_in_order.xml new file mode 100644 index 000000000000..f0386174dd0a --- /dev/null +++ b/tests/performance/cast_nullable_read_in_order.xml @@ -0,0 +1,13 @@ + + + 1 + + + CREATE TABLE IF NOT EXISTS test_nullable_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x + INSERT INTO test_nullable_order_by SELECT number, number FROM numbers(30000000) + + SELECT y FROM test_nullable_order_by ORDER BY x::UInt64 LIMIT 100000 FORMAT Null + SELECT y FROM test_nullable_order_by ORDER BY x::Nullable(UInt64) LIMIT 100000 FORMAT Null + + DROP TABLE IF EXISTS test_nullable_order_by + diff --git a/tests/queries/0_stateless/04002_cast_nullable_read_in_order.reference b/tests/queries/0_stateless/04002_cast_nullable_read_in_order.reference new file mode 100644 index 000000000000..50c9407f3ac3 --- /dev/null +++ b/tests/queries/0_stateless/04002_cast_nullable_read_in_order.reference @@ -0,0 +1,66 @@ +-- { echo } + +SET optimize_read_in_order = 1; +DROP TABLE IF EXISTS test_nullable_order_by; +CREATE TABLE test_nullable_order_by (x UInt32, y 
UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_nullable_order_by SELECT number, number FROM numbers(1e6); +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_order_by ORDER BY x::UInt64 LIMIT 1000000000 +); +499999500000 +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_order_by ORDER BY x::Nullable(UInt64) LIMIT 1000000000 +); +499999500000 +DROP TABLE IF EXISTS test_accurate_cast_or_null; +CREATE TABLE test_accurate_cast_or_null (x Date32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null SELECT toDate32('1969-12-29') + number, number FROM numbers(6); +SELECT x, accurateCastOrNull(x, 'Date') AS d +FROM test_accurate_cast_or_null +ORDER BY accurateCastOrNull(x, 'Date'), x +LIMIT 10; +1970-01-01 1970-01-01 +1970-01-02 1970-01-02 +1970-01-03 1970-01-03 +1969-12-29 2149-06-04 +1969-12-30 2149-06-05 +1969-12-31 2149-06-06 +SELECT x, accurateCastOrNull(toInt64(x), 'Date') AS d, toTypeName(d) +FROM test_accurate_cast_or_null +ORDER BY accurateCastOrNull(toInt64(x), 'Date'), x +LIMIT 10; +1970-01-01 1970-01-01 Nullable(Date) +1970-01-02 1970-01-02 Nullable(Date) +1970-01-03 1970-01-03 Nullable(Date) +1969-12-29 \N Nullable(Date) +1969-12-30 \N Nullable(Date) +1969-12-31 \N Nullable(Date) +DROP TABLE IF EXISTS test_accurate_cast_or_null_float; +CREATE TABLE test_accurate_cast_or_null_float (x Float64, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null_float VALUES + (1.0, 1), (1.1, 2), (1.2, 3), (1.25, 4), (1.3, 5), (1.5, 6); +SELECT x, accurateCastOrNull(x, 'Float32') AS d +FROM test_accurate_cast_or_null_float +ORDER BY accurateCastOrNull(x, 'Float32'), x +LIMIT 10; +1 1 +1.25 1.25 +1.5 1.5 +1.1 \N +1.2 \N +1.3 \N +DROP TABLE IF EXISTS test_nullable_sorting_key_order_by; +CREATE TABLE test_nullable_sorting_key_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x::Nullable(UInt32) SETTINGS allow_nullable_key = 1; +INSERT INTO test_nullable_sorting_key_order_by SELECT number, number FROM 
numbers(1e6); +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::UInt32 LIMIT 1000000000 +); +499999500000 +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::Nullable(UInt32) LIMIT 1000000000 +); +499999500000 diff --git a/tests/queries/0_stateless/04002_cast_nullable_read_in_order.sql b/tests/queries/0_stateless/04002_cast_nullable_read_in_order.sql new file mode 100644 index 000000000000..cd9ccec1b284 --- /dev/null +++ b/tests/queries/0_stateless/04002_cast_nullable_read_in_order.sql @@ -0,0 +1,60 @@ +-- { echo } + +SET optimize_read_in_order = 1; + +DROP TABLE IF EXISTS test_nullable_order_by; +CREATE TABLE test_nullable_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_nullable_order_by SELECT number, number FROM numbers(1e6); + +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_order_by ORDER BY x::UInt64 LIMIT 1000000000 +); + + +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_order_by ORDER BY x::Nullable(UInt64) LIMIT 1000000000 +); + + +DROP TABLE IF EXISTS test_accurate_cast_or_null; +CREATE TABLE test_accurate_cast_or_null (x Date32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null SELECT toDate32('1969-12-29') + number, number FROM numbers(6); + +SELECT x, accurateCastOrNull(x, 'Date') AS d +FROM test_accurate_cast_or_null +ORDER BY accurateCastOrNull(x, 'Date'), x +LIMIT 10; + +SELECT x, accurateCastOrNull(toInt64(x), 'Date') AS d, toTypeName(d) +FROM test_accurate_cast_or_null +ORDER BY accurateCastOrNull(toInt64(x), 'Date'), x +LIMIT 10; + + +DROP TABLE IF EXISTS test_accurate_cast_or_null_float; +CREATE TABLE test_accurate_cast_or_null_float (x Float64, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null_float VALUES + (1.0, 1), (1.1, 2), (1.2, 3), (1.25, 4), (1.3, 5), (1.5, 6); + +SELECT x, accurateCastOrNull(x, 'Float32') AS d +FROM test_accurate_cast_or_null_float +ORDER BY 
accurateCastOrNull(x, 'Float32'), x +LIMIT 10; + + +DROP TABLE IF EXISTS test_nullable_sorting_key_order_by; +CREATE TABLE test_nullable_sorting_key_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x::Nullable(UInt32) SETTINGS allow_nullable_key = 1; +INSERT INTO test_nullable_sorting_key_order_by SELECT number, number FROM numbers(1e6); + +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::UInt32 LIMIT 1000000000 +); + + +SELECT sum(y) FROM +( + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::Nullable(UInt32) LIMIT 1000000000 +); diff --git a/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.reference b/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.reference new file mode 100644 index 000000000000..dfbb67e70c76 --- /dev/null +++ b/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.reference @@ -0,0 +1,83 @@ +-- { echo } + +SET optimize_read_in_order = 1; +DROP TABLE IF EXISTS test_nullable_order_by; +CREATE TABLE test_nullable_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_nullable_order_by SELECT number, number FROM numbers(1e6); +SELECT ltrim(explain) +FROM +( + EXPLAIN actions=1 + SELECT y FROM test_nullable_order_by ORDER BY x::UInt64 LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; +ReadType: InOrder +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT y FROM test_nullable_order_by ORDER BY x::Nullable(UInt64) LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; +ReadType: InOrder +DROP TABLE IF EXISTS test_accurate_cast_or_null; +CREATE TABLE test_accurate_cast_or_null (x Date32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null SELECT toDate32('1969-12-29') + number, number FROM numbers(6); +-- Should not use read in order optimization because of `accurateCastOrNull` +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(x, 'Date') AS d + FROM 
test_accurate_cast_or_null + ORDER BY accurateCastOrNull(x, 'Date') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; +ReadType: Default +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(toInt64(x), 'Date') AS d, toTypeName(d) + FROM test_accurate_cast_or_null + ORDER BY accurateCastOrNull(toInt64(x), 'Date') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; +ReadType: Default +DROP TABLE IF EXISTS test_accurate_cast_or_null_float; +CREATE TABLE test_accurate_cast_or_null_float (x Float64, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null_float VALUES + (1.0, 1), (1.1, 2), (1.2, 3), (1.25, 4), (1.3, 5), (1.5, 6); +-- Should not use read in order optimization because of `accurateCastOrNull` +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(x, 'Float32') AS d + FROM test_accurate_cast_or_null_float + ORDER BY accurateCastOrNull(x, 'Float32') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; +ReadType: Default +DROP TABLE IF EXISTS test_nullable_sorting_key_order_by; +CREATE TABLE test_nullable_sorting_key_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x::Nullable(UInt32) SETTINGS allow_nullable_key = 1; +INSERT INTO test_nullable_sorting_key_order_by SELECT number, number FROM numbers(1e6); +SELECT ltrim(explain) +FROM +( + EXPLAIN actions=1 + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::UInt32 LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; +ReadType: Default +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::Nullable(UInt32) LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; +ReadType: InOrder diff --git a/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.sql b/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.sql new file mode 100644 index 000000000000..26adb47f12f4 --- /dev/null +++ 
b/tests/queries/0_stateless/04003_cast_nullable_read_in_order_explain.sql @@ -0,0 +1,91 @@ +-- { echo } + +SET optimize_read_in_order = 1; + +DROP TABLE IF EXISTS test_nullable_order_by; +CREATE TABLE test_nullable_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_nullable_order_by SELECT number, number FROM numbers(1e6); + +SELECT ltrim(explain) +FROM +( + EXPLAIN actions=1 + SELECT y FROM test_nullable_order_by ORDER BY x::UInt64 LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; + + +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT y FROM test_nullable_order_by ORDER BY x::Nullable(UInt64) LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; + +DROP TABLE IF EXISTS test_accurate_cast_or_null; +CREATE TABLE test_accurate_cast_or_null (x Date32, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null SELECT toDate32('1969-12-29') + number, number FROM numbers(6); + +-- Should not use read in order optimization because of `accurateCastOrNull` +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(x, 'Date') AS d + FROM test_accurate_cast_or_null + ORDER BY accurateCastOrNull(x, 'Date') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; + +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(toInt64(x), 'Date') AS d, toTypeName(d) + FROM test_accurate_cast_or_null + ORDER BY accurateCastOrNull(toInt64(x), 'Date') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; + + +DROP TABLE IF EXISTS test_accurate_cast_or_null_float; +CREATE TABLE test_accurate_cast_or_null_float (x Float64, y UInt32) ENGINE=MergeTree ORDER BY x; +INSERT INTO test_accurate_cast_or_null_float VALUES + (1.0, 1), (1.1, 2), (1.2, 3), (1.25, 4), (1.3, 5), (1.5, 6); + +-- Should not use read in order optimization because of `accurateCastOrNull` +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT x, accurateCastOrNull(x, 'Float32') AS d + FROM 
test_accurate_cast_or_null_float + ORDER BY accurateCastOrNull(x, 'Float32') + LIMIT 10 +) +WHERE explain LIKE '%ReadType%'; + + +DROP TABLE IF EXISTS test_nullable_sorting_key_order_by; +CREATE TABLE test_nullable_sorting_key_order_by (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x::Nullable(UInt32) SETTINGS allow_nullable_key = 1; +INSERT INTO test_nullable_sorting_key_order_by SELECT number, number FROM numbers(1e6); + +SELECT ltrim(explain) +FROM +( + EXPLAIN actions=1 + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::UInt32 LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; + + +SELECT ltrim(explain) +FROM +( + EXPLAIN actions = 1 + SELECT y FROM test_nullable_sorting_key_order_by ORDER BY x::Nullable(UInt32) LIMIT 1000000000 +) +WHERE explain LIKE '%ReadType%'; diff --git a/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.reference b/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.reference new file mode 100644 index 000000000000..0568323e10d8 --- /dev/null +++ b/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.reference @@ -0,0 +1,16 @@ +499 +Expression ((Project names + Projection)) + AggregatingProjection + Expression (Before GROUP BY) + Filter ((WHERE + Change column names to column identifiers)) + ReadFromMergeTree (default.test_nullable_filter) + Indexes: + PrimaryKey + Keys: + x + Condition: (CAST(x, \'Nullable(UInt64)\') in [501, +Inf)) + Parts: 1/1 + Granules: 500/1000 + Search Algorithm: binary search + Ranges: 1 + ReadFromPreparedSource (_exact_count_projection) diff --git a/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.sql b/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.sql new file mode 100644 index 000000000000..10ce39c4c6dc --- /dev/null +++ b/tests/queries/0_stateless/04004_cast_nullable_monotonicity_key_condition.sql @@ -0,0 +1,15 @@ +-- Tags: no-replicated-database, no-parallel-replicas, 
no-random-merge-tree-settings +-- EXPLAIN output may differ + +DROP TABLE IF EXISTS test_nullable_filter; +CREATE TABLE test_nullable_filter (x UInt32, y UInt32) ENGINE=MergeTree ORDER BY x SETTINGS index_granularity = 1; +INSERT INTO test_nullable_filter SELECT number, number FROM numbers(1000); + +SELECT count() +FROM test_nullable_filter +WHERE x::Nullable(UInt64) > 500; + +EXPLAIN indexes = 1 +SELECT count() +FROM test_nullable_filter +WHERE x::Nullable(UInt64) > 500; From 37c830bf5a21d187d9101c8452236d2437914955 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 9 Mar 2026 15:33:10 +0000 Subject: [PATCH 27/53] Backport #98755 to 26.1: Fix tuple subcolumn access by name for external tables --- src/Server/TCPHandler.cpp | 2 +- src/Storages/buildQueryTreeForShard.cpp | 2 +- ...4_external_table_tuple_subcolumn.reference | 6 ++++ .../04024_external_table_tuple_subcolumn.sh | 29 +++++++++++++++++++ 4 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/04024_external_table_tuple_subcolumn.reference create mode 100755 tests/queries/0_stateless/04024_external_table_tuple_subcolumn.sh diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 05fdfdd997b7..1b4519e8f38b 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -2472,7 +2472,7 @@ bool TCPHandler::processData(QueryState & state, bool scalar) else { NamesAndTypesList columns = block.getNamesAndTypesList(); - auto temporary_table = TemporaryTableHolder(state.query_context, ColumnsDescription(columns, /*with_subcolumns=*/false), {}); + auto temporary_table = TemporaryTableHolder(state.query_context, ColumnsDescription(columns), {}); storage = temporary_table.getTable(); state.query_context->addExternalTable(temporary_id.table_name, std::move(temporary_table)); } diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index 939dcfdfaa1a..273858fc3982 100644 --- 
a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -441,7 +441,7 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, auto external_storage_holder = TemporaryTableHolder( mutable_context, - ColumnsDescription(columns, false), + ColumnsDescription(columns), ConstraintsDescription{}, nullptr /*query*/, true /*create_for_global_subquery*/); diff --git a/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.reference b/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.reference new file mode 100644 index 000000000000..8769e079703f --- /dev/null +++ b/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.reference @@ -0,0 +1,6 @@ +Access by name +2e5d8c78-4e4e-488f-84c5-31222482eaa6 2 +Access by index +2e5d8c78-4e4e-488f-84c5-31222482eaa6 2 +Nullable subcolumn +0 diff --git a/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.sh b/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.sh new file mode 100755 index 000000000000..6cf50a3a1128 --- /dev/null +++ b/tests/queries/0_stateless/04024_external_table_tuple_subcolumn.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +echo "Access by name" +echo "('2e5d8c78-4e4e-488f-84c5-31222482eaa6',2)" | ${CLICKHOUSE_CLIENT} \ + --query "SELECT x.a, x.b FROM _data" \ + --external \ + --file=- \ + --name=_data \ + --structure='x Tuple(a UUID, b Int32)' + +echo "Access by index" +echo "('2e5d8c78-4e4e-488f-84c5-31222482eaa6',2)" | ${CLICKHOUSE_CLIENT} \ + --query "SELECT x.1, x.2 FROM _data" \ + --external \ + --file=- \ + --name=_data \ + --structure='x Tuple(a UUID, b Int32)' + +echo "Nullable subcolumn" +echo "1" | ${CLICKHOUSE_CLIENT} \ + --query "SELECT x.null FROM _data" \ + --external \ + --file=- \ + --name=_data \ + --structure='x Nullable(UInt32)' From 4da2ce8bfd9531ff3ec3c677c0d753c02e81749d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 10 Mar 2026 12:25:45 +0000 Subject: [PATCH 28/53] Backport #99107 to 26.1: Fix rebuild of text indexes on merges with TTL --- src/Storages/MergeTree/TextIndexUtils.cpp | 3 ++ ...t_index_empty_block_during_merge.reference | 1 + ...33_text_index_empty_block_during_merge.sql | 29 +++++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 tests/queries/0_stateless/04033_text_index_empty_block_during_merge.reference create mode 100644 tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql diff --git a/src/Storages/MergeTree/TextIndexUtils.cpp b/src/Storages/MergeTree/TextIndexUtils.cpp index 302ce16a8c72..e7a99005c56c 100644 --- a/src/Storages/MergeTree/TextIndexUtils.cpp +++ b/src/Storages/MergeTree/TextIndexUtils.cpp @@ -132,6 +132,9 @@ IProcessor::Status BuildTextIndexTransform::prepare() void BuildTextIndexTransform::aggregate(const Block & block) { + if (block.rows() == 0) + return; + /// Threshold for the number of processed tokens to flush the segment. /// Calculating used RAM or number of processed unique tokens adds significant overhead, /// so we use a simple trade-off threshold, which is reasonable in normal scenarios. 
diff --git a/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.reference b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.reference new file mode 100644 index 000000000000..e87f3b8e91ca --- /dev/null +++ b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.reference @@ -0,0 +1 @@ +25000 diff --git a/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql new file mode 100644 index 000000000000..cfe406152b2f --- /dev/null +++ b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + key UInt32, + text String, + dt DateTime, +) +ENGINE = MergeTree +ORDER BY id +TTL dt + INTERVAL 1 MONTH DELETE WHERE key < 5 +SETTINGS merge_max_block_size = 1024; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab (id, key, text, dt) +SELECT number, number / 5000, 'hello world', toDateTime('2000-01-01 00:00:00') +FROM numbers(50000); + +ALTER TABLE tab ADD INDEX idx_text(text) TYPE text(tokenizer = 'splitByNonAlpha'); + +SYSTEM START MERGES tab; + +OPTIMIZE TABLE tab FINAL; + +SELECT count() FROM tab WHERE hasAllTokens(text, 'hello'); + +DROP TABLE tab; From e914a9053f46a29d4a7a4b0b9e99c274f9ef71d5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 10 Mar 2026 14:29:27 +0000 Subject: [PATCH 29/53] Backport #99084 to 26.1: Try not to re-caclculate indexes in requestReadingInOrder --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 827dd04f3b68..53957d29b185 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -2617,10 +2617,14 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, 
updateSortDescription(); - /// Re-calculate analysis result to have correct read_type + /// Set correct read_type /// For some reason for projection it breaks aggregation in order, so skip it if (analyzed_result_ptr && !analyzed_result_ptr->readFromProjection()) - selectRangesToRead(); + { + analyzed_result_ptr->read_type = (query_info.input_order_info->direction > 0) + ? ReadType::InOrder + : ReadType::InReverseOrder; + } return true; } From 678cb7bb3e84679c48476405c0bdccf74e3a64fa Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 10 Mar 2026 16:34:41 +0000 Subject: [PATCH 30/53] Backport #99112 to 26.1: Return unknown filter result on non-built ColumnSet --- .../QueryPlan/Optimizations/Utils.cpp | 20 +++++++++ ...t_join_in_subquery_default_value.reference | 6 +++ ...ny_left_join_in_subquery_default_value.sql | 43 +++++++++++++++++++ 3 files changed, 69 insertions(+) create mode 100644 tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.reference create mode 100644 tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.sql diff --git a/src/Processors/QueryPlan/Optimizations/Utils.cpp b/src/Processors/QueryPlan/Optimizations/Utils.cpp index 2338e0e9b7ba..e23e8a76f702 100644 --- a/src/Processors/QueryPlan/Optimizations/Utils.cpp +++ b/src/Processors/QueryPlan/Optimizations/Utils.cpp @@ -1,6 +1,8 @@ #include +#include #include +#include #include #include @@ -70,6 +72,24 @@ FilterResult filterResultForNotMatchedRows( bool allow_unknown_function_arguments ) { + /// If the filter DAG contains IN subquery sets that are not yet built - we cannot evaluate the filter result + for (const auto & node : filter_dag.getNodes()) + { + if (node.type == ActionsDAG::ActionType::COLUMN && node.column) + { + const ColumnSet * column_set = checkAndGetColumnConstData(node.column.get()); + if (!column_set) + column_set = checkAndGetColumn(node.column.get()); + + if (column_set) + { + auto future_set = column_set->getData(); + if (!future_set 
|| !future_set->get()) + return FilterResult::UNKNOWN; + } + } + } + ActionsDAG::IntermediateExecutionResult filter_input; /// Create constant columns with default values for inputs of the filter DAG diff --git a/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.reference b/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.reference new file mode 100644 index 000000000000..1a76c31c91c8 --- /dev/null +++ b/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.reference @@ -0,0 +1,6 @@ +1 1 found +2 0 +3 0 +a a found +b +c diff --git a/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.sql b/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.sql new file mode 100644 index 000000000000..a7d44980a561 --- /dev/null +++ b/tests/queries/0_stateless/04034_any_left_join_in_subquery_default_value.sql @@ -0,0 +1,43 @@ +-- ANY LEFT JOIN should not be incorrectly converted to SEMI JOIN +-- when the IN set contains the default value of the right-side column type + +DROP TABLE IF EXISTS t_left; +DROP TABLE IF EXISTS t_right; +DROP TABLE IF EXISTS t_filter; + +CREATE TABLE t_left (id UInt32, value UInt32) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t_right (id UInt32, data String) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t_filter (id UInt32) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_left VALUES (1, 10), (2, 20), (3, 30); +INSERT INTO t_right VALUES (1, 'found'); +INSERT INTO t_filter VALUES (0), (1); + +SELECT t_left.id, t_right.id, t_right.data +FROM t_left ANY LEFT JOIN t_right ON t_left.id = t_right.id +WHERE t_right.id IN (SELECT id FROM t_filter) +ORDER BY t_left.id; + +DROP TABLE IF EXISTS t_left_s; +DROP TABLE IF EXISTS t_right_s; +DROP TABLE IF EXISTS t_filter_s; + +CREATE TABLE t_left_s (id String, value UInt32) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t_right_s (id String, data String) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t_filter_s (id 
String) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_left_s VALUES ('a', 1), ('b', 2), ('c', 3); +INSERT INTO t_right_s VALUES ('a', 'found'); +INSERT INTO t_filter_s VALUES (''), ('a'); + +SELECT t_left_s.id, t_right_s.id, t_right_s.data +FROM t_left_s ANY LEFT JOIN t_right_s ON t_left_s.id = t_right_s.id +WHERE t_right_s.id IN (SELECT id FROM t_filter_s) +ORDER BY t_left_s.id; + +DROP TABLE t_left; +DROP TABLE t_right; +DROP TABLE t_filter; +DROP TABLE t_left_s; +DROP TABLE t_right_s; +DROP TABLE t_filter_s; From ba86a2de269fbee7695790595f6c74bed355276a Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 11 Mar 2026 00:54:30 +0100 Subject: [PATCH 31/53] Update 04033_text_index_empty_block_during_merge.sql --- .../0_stateless/04033_text_index_empty_block_during_merge.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql index cfe406152b2f..f675f580545e 100644 --- a/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql +++ b/tests/queries/0_stateless/04033_text_index_empty_block_during_merge.sql @@ -1,5 +1,7 @@ DROP TABLE IF EXISTS tab; +SET enable_full_text_index = 1; + CREATE TABLE tab ( id UInt32, From 84f3972c60d9f2ffbf1e53f846740cb198c549e1 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 11 Mar 2026 12:26:09 +0000 Subject: [PATCH 32/53] Backport #99210 to 26.1: Do not check grants for every query tree node during `InverseDictionaryLookupPass` --- src/Analyzer/Passes/InverseDictionaryLookupPass.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Analyzer/Passes/InverseDictionaryLookupPass.cpp b/src/Analyzer/Passes/InverseDictionaryLookupPass.cpp index bf31d710cc7c..6434fa6b5f78 100644 --- a/src/Analyzer/Passes/InverseDictionaryLookupPass.cpp +++ b/src/Analyzer/Passes/InverseDictionaryLookupPass.cpp @@ -142,12 +142,6 @@ class 
InverseDictionaryLookupVisitor : public InDepthQueryTreeVisitorWithContext if (getSettings()[Setting::rewrite_in_to_join]) return; - /// This rewrite turns `dictGet(...)` predicates into `IN (SELECT ... FROM dictionary(...))`. - /// The `dictionary()` table function requires `CREATE TEMPORARY TABLE`; if that grant is missing, - /// skip the optimization to avoid `ACCESS_DENIED`. - if (!getContext()->getAccess()->isGranted(AccessType::CREATE_TEMPORARY_TABLE)) - return; - auto * node_function = node->as(); if (!node_function) @@ -301,6 +295,12 @@ class InverseDictionaryLookupVisitor : public InDepthQueryTreeVisitorWithContext void InverseDictionaryLookupPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context) { + /// This rewrite turns `dictGet(...)` predicates into `IN (SELECT ... FROM dictionary(...))`. + /// The `dictionary()` table function requires `CREATE TEMPORARY TABLE`; if that grant is missing, + /// skip the optimization to avoid `ACCESS_DENIED`. + if (!context->getAccess()->isGranted(AccessType::CREATE_TEMPORARY_TABLE)) + return; + InverseDictionaryLookupVisitor visitor(std::move(context)); visitor.visit(query_tree_node); } From 51321def98a18156e26bec9b35710d4b4c8bc414 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 11 Mar 2026 22:18:03 +0000 Subject: [PATCH 33/53] Backport #99205 to 26.1: Fix max_execution_time not being applied for backup/restore --- src/Backups/BackupsWorker.cpp | 17 +++-- src/Common/FailPoint.cpp | 2 + src/Interpreters/InterpreterSetQuery.cpp | 24 ++++++ .../configs/max_execution_time.xml | 7 ++ .../test_backup_restore_new/test.py | 73 +++++++++++++++++++ 5 files changed, 117 insertions(+), 6 deletions(-) create mode 100644 tests/integration/test_backup_restore_new/configs/max_execution_time.xml diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 19e1043d9abe..35647c98c4f5 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -19,6 +19,7 @@ #if 
CLICKHOUSE_CLOUD #include #endif +#include #include #include #include @@ -70,6 +71,12 @@ namespace ServerSetting extern const ServerSettingsBool shutdown_wait_backups_and_restores; } +namespace FailPoints +{ + extern const char backup_pause_on_start[]; + extern const char restore_pause_on_start[]; +} + namespace ErrorCodes { extern const int ACCESS_DENIED; @@ -384,10 +391,7 @@ struct BackupsWorker::BackupStarter , backup_context(Context::createCopy(query_context)) { backup_settings = BackupSettings::fromBackupQuery(*backup_query); - backup_context->makeQueryContext(); - backup_context->checkSettingsConstraints(backup_settings.core_settings, SettingSource::QUERY); - backup_context->applySettingsChanges(backup_settings.core_settings); backup_info = BackupInfo::fromAST(*backup_query->backup_name); backup_name_for_logging = backup_info.toStringForLogging(); @@ -463,6 +467,8 @@ struct BackupsWorker::BackupStarter void doBackup() { + FailPointInjection::pauseFailPoint(FailPoints::backup_pause_on_start); + chassert(!backup_coordination); if (on_cluster && !is_internal_backup) { @@ -858,10 +864,7 @@ struct BackupsWorker::RestoreStarter , restore_context(Context::createCopy(query_context)) { restore_settings = RestoreSettings::fromRestoreQuery(*restore_query); - restore_context->makeQueryContext(); - restore_context->checkSettingsConstraints(restore_settings.core_settings, SettingSource::QUERY); - restore_context->applySettingsChanges(restore_settings.core_settings); backup_info = BackupInfo::fromAST(*restore_query->backup_name); backup_name_for_logging = backup_info.toStringForLogging(); @@ -917,6 +920,8 @@ struct BackupsWorker::RestoreStarter void doRestore() { + FailPointInjection::pauseFailPoint(FailPoints::restore_pause_on_start); + chassert(!restore_coordination); if (on_cluster && !is_internal_restore) { diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index cc86a875a39a..f47c1303cba4 100644 --- a/src/Common/FailPoint.cpp +++ 
b/src/Common/FailPoint.cpp @@ -120,6 +120,8 @@ static struct InitFiu REGULAR(slowdown_parallel_replicas_local_plan_read) \ ONCE(iceberg_writes_cleanup) \ ONCE(backup_add_empty_memory_table) \ + PAUSEABLE_ONCE(backup_pause_on_start) \ + PAUSEABLE_ONCE(restore_pause_on_start) \ PAUSEABLE(sc_state_application_pause) \ PAUSEABLE(sc_state_application_pause_after_fetch) \ REGULAR(sc_intentions_commit_fail) \ diff --git a/src/Interpreters/InterpreterSetQuery.cpp b/src/Interpreters/InterpreterSetQuery.cpp index 22ad3bf32871..7bc5e854b1cd 100644 --- a/src/Interpreters/InterpreterSetQuery.cpp +++ b/src/Interpreters/InterpreterSetQuery.cpp @@ -1,7 +1,10 @@ +#include +#include #include #include #include #include +#include #include #include #include @@ -146,6 +149,27 @@ void InterpreterSetQuery::applySettingsFromQuery(const ASTPtr & ast, ContextMuta if (insert_query->settings_ast) InterpreterSetQuery(insert_query->settings_ast, context_).executeForCurrentContext(/* ignore_setting_constraints= */ false); } + else if (const auto * backup_query = ast->as()) + { + /// BACKUP/RESTORE queries store their settings in ASTBackupQuery::settings (not settings_ast), + /// so they are not handled by the settings_ast branch above. + /// We apply only the core query settings here, before ProcessList::insert, + /// so that the ProcessListElement and CancellationChecker see the correct + /// limits from the start. BACKUP/RESTORE-specific settings (async, password, etc.) + /// are filtered out by BackupSettings/RestoreSettings and are not applied to the context. + if (backup_query->settings) + { + SettingsChanges core_settings = (backup_query->kind == ASTBackupQuery::Kind::BACKUP) + ? 
BackupSettings::fromBackupQuery(*backup_query).core_settings + : RestoreSettings::fromRestoreQuery(*backup_query).core_settings; + + if (!core_settings.empty()) + { + context_->checkSettingsConstraints(core_settings, SettingSource::QUERY); + context_->applySettingsChanges(core_settings); + } + } + } } void registerInterpreterSetQuery(InterpreterFactory & factory) diff --git a/tests/integration/test_backup_restore_new/configs/max_execution_time.xml b/tests/integration/test_backup_restore_new/configs/max_execution_time.xml new file mode 100644 index 000000000000..c6f6986d824f --- /dev/null +++ b/tests/integration/test_backup_restore_new/configs/max_execution_time.xml @@ -0,0 +1,7 @@ + + + + 0.5 + + + diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index e3262a0614db..13775adf6d1a 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -24,6 +24,12 @@ user_configs=["configs/zookeeper_retries.xml"], external_dirs=["/backups/"], ) +instance_with_short_timeout = cluster.add_instance( + "instance_with_short_timeout", + main_configs=["configs/backups_disk.xml"], + user_configs=["configs/max_execution_time.xml"], + external_dirs=["/backups/"], +) def create_and_fill_table(engine="MergeTree", n=100): @@ -2222,3 +2228,70 @@ def test_incremental_backup_with_checksum_data_file_name(): f"RESTORE TABLE test.table AS test.table2 FROM {incremental_backup_name}" ) assert instance.query("SELECT count(), sum(x) FROM test.table2") == "102\t5081\n" + + +def test_async_backup_restore_with_max_execution_time_zero(): + """ + Regression test: async BACKUP/RESTORE with max_execution_time = 0 in query SETTINGS + was incorrectly cancelled by the max_execution_time from the user's profile, even when + the query explicitly set max_execution_time = 0 to disable the timeout. 
+ + Root cause: QueryStatus cached limits.max_execution_time at construction time from the + original query settings (profile value). BackupsWorker called applySettingsChanges() + to apply BACKUP/RESTORE SETTINGS, updating the context — but not the cached + ProcessListElement, so the old profile-level timeout still fired via checkTimeLimit(). + CancellationChecker was also registered with the old timeout at insert time. + + The test uses a PAUSEABLE_ONCE failpoint to stall the background task long enough + for the profile-level timeout (500ms) to fire, which reliably triggers the bug. + The instance_with_short_timeout node has max_execution_time = 0.5 in its default profile. + """ + import time + + inst = instance_with_short_timeout + backup_name = new_backup_name() + inst.query("CREATE DATABASE IF NOT EXISTS test") + inst.query("CREATE TABLE test.table(x UInt32, y String) ENGINE=MergeTree ORDER BY y PARTITION BY x%10") + inst.query("INSERT INTO test.table SELECT number, toString(number) FROM numbers(100)") + + try: + # Pause backup before it starts so the 500ms profile-level timeout fires. + inst.query("SYSTEM ENABLE FAILPOINT backup_pause_on_start") + [backup_id, _] = inst.query( + f"BACKUP TABLE test.table TO {backup_name}" + " SETTINGS async = 1, max_execution_time = 0", + ).split("\t") + + inst.query("SYSTEM WAIT FAILPOINT backup_pause_on_start PAUSE") + time.sleep(0.7) # exceed the 500ms profile-level timeout + inst.query("SYSTEM NOTIFY FAILPOINT backup_pause_on_start") + + assert_eq_with_retry( + inst, + f"SELECT status, error FROM system.backups WHERE id='{backup_id}'", + TSV([["BACKUP_CREATED", ""]]), + ) + + # Same for RESTORE. 
+ inst.query("DROP TABLE test.table") + inst.query("SYSTEM ENABLE FAILPOINT restore_pause_on_start") + [restore_id, _] = inst.query( + f"RESTORE TABLE test.table FROM {backup_name}" + " SETTINGS async = 1, max_execution_time = 0", + ).split("\t") + + inst.query("SYSTEM WAIT FAILPOINT restore_pause_on_start PAUSE") + time.sleep(0.7) + inst.query("SYSTEM NOTIFY FAILPOINT restore_pause_on_start") + + assert_eq_with_retry( + inst, + f"SELECT status, error FROM system.backups WHERE id='{restore_id}'", + TSV([["RESTORED", ""]]), + ) + + assert inst.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + finally: + inst.query("SYSTEM DISABLE FAILPOINT backup_pause_on_start") + inst.query("SYSTEM DISABLE FAILPOINT restore_pause_on_start") + inst.query("DROP DATABASE IF EXISTS test") From dd4c4b2f4c332215166e2c1e91829fb2022f75aa Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 11 Mar 2026 23:17:28 +0000 Subject: [PATCH 34/53] Backport #99086 to 26.1: Fix crash on memory limit happened while applying patches --- .../MergeTree/PatchParts/PatchJoinCache.cpp | 83 ++++++++++++------- .../MergeTree/PatchParts/PatchJoinCache.h | 1 + 2 files changed, 53 insertions(+), 31 deletions(-) diff --git a/src/Storages/MergeTree/PatchParts/PatchJoinCache.cpp b/src/Storages/MergeTree/PatchParts/PatchJoinCache.cpp index c78a82d26a21..92e002f6be61 100644 --- a/src/Storages/MergeTree/PatchParts/PatchJoinCache.cpp +++ b/src/Storages/MergeTree/PatchParts/PatchJoinCache.cpp @@ -169,6 +169,7 @@ std::vector> PatchJoinCache::Entry::addRangesAsync(cons /// Firstly lookup with read lock, because all needed ranges are likely read already. std::shared_lock lock(mutex); + for (const auto & range : ranges) { auto it = ranges_futures.find(range); @@ -190,6 +191,11 @@ std::vector> PatchJoinCache::Entry::addRangesAsync(cons futures.clear(); std::lock_guard lock(mutex); + /// A previous call to addBlock on this entry may have failed, leaving + /// hash_map in an inconsistent state. 
Rethrow the original error. + if (error) + std::rethrow_exception(error); + for (const auto & range : ranges) { auto it = ranges_futures.find(range); @@ -253,6 +259,11 @@ void PatchJoinCache::Entry::addBlock(Block read_block) std::lock_guard lock(mutex); + /// A previous call to addBlock on this entry may have failed, leaving + /// hash_map in an inconsistent state. Rethrow the original error. + if (error) + std::rethrow_exception(error); + #ifdef DEBUG_OR_SANITIZER_BUILD for (const auto & block : blocks) assertCompatibleHeader(*block_with_data, *block, "patch join cache"); @@ -277,46 +288,56 @@ void PatchJoinCache::Entry::addBlock(Block read_block) /// to optimize the insertion. It gives average complexity of O(1) instead of O(log n). PatchOffsetsMap::const_iterator last_inserted_it; - for (size_t i = 0; i < num_read_rows; ++i) + try { - UInt64 block_number = block_number_column[i]; - UInt64 block_offset = block_offset_column[i]; - - if (block_number != prev_block_number) + for (size_t i = 0; i < num_read_rows; ++i) { - prev_block_number = block_number; - current_offsets = &hash_map[block_number]; - - min_block = std::min(min_block, block_number); - max_block = std::max(max_block, block_number); - last_inserted_it = current_offsets->end(); - } + UInt64 block_number = block_number_column[i]; + UInt64 block_offset = block_offset_column[i]; - /// try_emplace overload with hint doesn't return 'inserted' flag, - /// so we need to check size before and after emplace. 
- size_t old_size = current_offsets->size(); - auto it = current_offsets->try_emplace(last_inserted_it, block_offset); - last_inserted_it = it; - bool inserted = current_offsets->size() > old_size; + if (block_number != prev_block_number) + { + prev_block_number = block_number; + current_offsets = &hash_map[block_number]; - if (inserted) - { - it->second = std::make_pair(static_cast(new_block_idx), static_cast(i)); - } - else - { - const auto & [patch_block_index, patch_row_index] = it->second; - const auto & existing_version_column = blocks[patch_block_index]->getByPosition(version_column_position).column; + min_block = std::min(min_block, block_number); + max_block = std::max(max_block, block_number); + last_inserted_it = current_offsets->end(); + } - UInt64 current_version = data_version_column[i]; - UInt64 existing_version = assert_cast(*existing_version_column).getData()[patch_row_index]; - chassert(current_version != existing_version); + /// try_emplace overload with hint doesn't return 'inserted' flag, + /// so we need to check size before and after emplace. + size_t old_size = current_offsets->size(); + auto it = current_offsets->try_emplace(last_inserted_it, block_offset); + last_inserted_it = it; + bool inserted = current_offsets->size() > old_size; - /// Keep only the row with the highest version. - if (current_version > existing_version) + if (inserted) + { it->second = std::make_pair(static_cast(new_block_idx), static_cast(i)); + } + else + { + const auto & [patch_block_index, patch_row_index] = it->second; + const auto & existing_version_column = blocks[patch_block_index]->getByPosition(version_column_position).column; + + UInt64 current_version = data_version_column[i]; + UInt64 existing_version = assert_cast(*existing_version_column).getData()[patch_row_index]; + chassert(current_version != existing_version); + + /// Keep only the row with the highest version. 
+ if (current_version > existing_version) + it->second = std::make_pair(static_cast(new_block_idx), static_cast(i)); + } } } + catch (...) + { + /// Mark the entry as poisoned so that concurrent and future callers + /// do not touch the potentially corrupted hash_map. + error = std::current_exception(); + throw; + } } } diff --git a/src/Storages/MergeTree/PatchParts/PatchJoinCache.h b/src/Storages/MergeTree/PatchParts/PatchJoinCache.h index f2d9d63e8e8e..c69f46d33b3a 100644 --- a/src/Storages/MergeTree/PatchParts/PatchJoinCache.h +++ b/src/Storages/MergeTree/PatchParts/PatchJoinCache.h @@ -77,6 +77,7 @@ struct PatchJoinCache UInt64 min_block = std::numeric_limits::max(); UInt64 max_block = 0; + std::exception_ptr error; mutable SharedMutex mutex; void addBlock(Block read_block); From 85bdb6e64ee494ecf50fd8fde9ba1fd4aa9c7cbe Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 13 Mar 2026 15:30:15 +0000 Subject: [PATCH 35/53] Backport #98770 to 26.1: Fix reverseUTF8 exception on invalid UTF-8 input --- src/Functions/reverseUTF8.cpp | 29 +++++++++++-------- .../04027_reverseUTF8_invalid_utf8.reference | 3 ++ .../04027_reverseUTF8_invalid_utf8.sql | 15 ++++++++++ 3 files changed, 35 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.reference create mode 100644 tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.sql diff --git a/src/Functions/reverseUTF8.cpp b/src/Functions/reverseUTF8.cpp index 6d9608565393..1399cde637eb 100644 --- a/src/Functions/reverseUTF8.cpp +++ b/src/Functions/reverseUTF8.cpp @@ -46,26 +46,31 @@ struct ReverseUTF8Impl ColumnString::Offset j = prev_offset; while (j < offsets[i]) { + size_t remaining = offsets[i] - j; + + unsigned int char_len; if (data[j] < 0xC0) - { - res_data[offsets[i] + prev_offset - 1 - j] = data[j]; - j += 1; - } + char_len = 1; else if (data[j] < 0xE0) - { - memcpy(&res_data[offsets[i] + prev_offset - 1 - j - 1], &data[j], 2); - j += 2; - } + char_len = 2; 
else if (data[j] < 0xF0) + char_len = 3; + else + char_len = 4; + + /// If not enough bytes remaining, treat as single byte (invalid UTF-8). + if (char_len > remaining) + char_len = 1; + + if (char_len == 1) { - memcpy(&res_data[offsets[i] + prev_offset - 1 - j - 2], &data[j], 3); - j += 3; + res_data[offsets[i] + prev_offset - 1 - j] = data[j]; } else { - memcpy(&res_data[offsets[i] + prev_offset - 1 - j - 3], &data[j], 4); - j += 4; + memcpy(&res_data[offsets[i] + prev_offset - j - char_len], &data[j], char_len); } + j += char_len; } prev_offset = offsets[i]; diff --git a/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.reference b/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.reference new file mode 100644 index 000000000000..1f0eb51aa4f8 --- /dev/null +++ b/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.reference @@ -0,0 +1,3 @@ +esuoHkcilC +тевирП +はちにんこ diff --git a/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.sql b/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.sql new file mode 100644 index 000000000000..0267c46640d4 --- /dev/null +++ b/tests/queries/0_stateless/04027_reverseUTF8_invalid_utf8.sql @@ -0,0 +1,15 @@ +-- Test that reverseUTF8 does not crash on invalid UTF-8 (truncated multi-byte sequences) +SELECT reverseUTF8(unhex('C0')) FORMAT Null; +SELECT reverseUTF8(unhex('E0')) FORMAT Null; +SELECT reverseUTF8(unhex('F0')) FORMAT Null; +SELECT reverseUTF8(unhex('E0A0')) FORMAT Null; +SELECT reverseUTF8(unhex('F09F')) FORMAT Null; +SELECT reverseUTF8(unhex('F09F98')) FORMAT Null; + +-- The original crash query from the AST fuzzer +SELECT DISTINCT reverseUTF8(maxMergeDistinct(x) IGNORE NULLS), toNullable(1) FROM (SELECT DISTINCT dictHas(tuple(toUInt16(NULL)), 13, toUInt32(6), NULL), CAST(concat(unhex('00001000'), randomString(intDiv(1048576, toNullable(1))), toLowCardinality(toFixedString('\0', 1))), 'AggregateFunction(max, String)') AS x) WITH TOTALS FORMAT Null; + +-- Verify correct behavior on valid 
UTF-8 +SELECT reverseUTF8('ClickHouse'); +SELECT reverseUTF8('Привет'); +SELECT reverseUTF8('こんにちは'); From 3cc0d1ac98515cace070507b63681deae42651a5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 13 Mar 2026 16:24:36 +0000 Subject: [PATCH 36/53] Backport #99081 to 26.1: Fix segfault in recursive CTE with `remote()` + `view()` --- src/Analyzer/Utils.cpp | 2 +- ...028_recursive_cte_remote_view_segfault.reference | 2 ++ .../04028_recursive_cte_remote_view_segfault.sql | 13 +++++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.reference create mode 100644 tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.sql diff --git a/src/Analyzer/Utils.cpp b/src/Analyzer/Utils.cpp index 08d7d186aa2f..f20baabc9c26 100644 --- a/src/Analyzer/Utils.cpp +++ b/src/Analyzer/Utils.cpp @@ -104,7 +104,7 @@ bool isStorageUsedInTree(const StoragePtr & storage, const IQueryTreeNode * root if (table_node || table_function_node) { const auto & table_storage = table_node ? 
table_node->getStorage() : table_function_node->getStorage(); - if (table_storage->getStorageID() == storage->getStorageID()) + if (table_storage && table_storage->getStorageID() == storage->getStorageID()) return true; } diff --git a/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.reference b/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.reference new file mode 100644 index 000000000000..6ed281c757a9 --- /dev/null +++ b/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.sql b/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.sql new file mode 100644 index 000000000000..f397bff986a7 --- /dev/null +++ b/tests/queries/0_stateless/04028_recursive_cte_remote_view_segfault.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest +-- Regression test: recursive CTE with remote() + view() used to segfault +-- because isStorageUsedInTree tried to call getStorageID() on an unresolved +-- view() TableFunctionNode whose storage was null. + +SET enable_analyzer=1; + +WITH RECURSIVE x AS ( + (SELECT 1 FROM remote('127.0.0.1', view(SELECT 1))) + UNION ALL + (SELECT 1) +) +SELECT 1 FROM x; From 387b1e759b303e5b644aeaba015f71857c8b91ff Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 16 Mar 2026 09:31:09 +0100 Subject: [PATCH 37/53] Accept `COMMENT` before `AS SELECT` in view parser for forward compatibility Newer ClickHouse versions (26.2+) may format views as: CREATE VIEW ... COMMENT 'text' AS SELECT ... This makes the parser accept `COMMENT` both before and after `AS SELECT`, so views created on newer versions can be loaded by this release. 
Ref #97843 Co-Authored-By: Claude Opus 4.6 (1M context) --- src/Parsers/ParserCreateQuery.cpp | 7 ++++++- ...04035_forward_compat_comment_before_as_select.reference | 2 ++ .../04035_forward_compat_comment_before_as_select.sql | 5 +++++ 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference create mode 100644 tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.sql diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index c5e15beca4e1..a21470a9da45 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -1610,6 +1610,10 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!sql_security) sql_security_p.parse(pos, sql_security, expected); + /// Accept COMMENT before AS SELECT for forward compatibility with newer versions + /// that may format views as: CREATE VIEW ... COMMENT 'text' AS SELECT ... + auto comment = parseComment(pos, expected); + /// AS SELECT ... 
if (!s_as.ignore(pos, expected)) return false; @@ -1617,7 +1621,8 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec if (!select_p.parse(pos, select, expected)) return false; - auto comment = parseComment(pos, expected); + if (!comment) + comment = parseComment(pos, expected); auto query = make_intrusive(); node = query; diff --git a/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference new file mode 100644 index 000000000000..11e3c439fa04 --- /dev/null +++ b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference @@ -0,0 +1,2 @@ +CREATE VIEW v\nAS (SELECT 1)\nCOMMENT 'test' +CREATE MATERIALIZED VIEW v\nENGINE = MergeTree\nORDER BY c\nAS (SELECT 1 AS c)\nCOMMENT 'test' diff --git a/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.sql b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.sql new file mode 100644 index 000000000000..0ab1146cb143 --- /dev/null +++ b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.sql @@ -0,0 +1,5 @@ +-- Forward compatibility: accept COMMENT before AS SELECT (syntax produced by 26.2+). +-- This ensures views created on newer versions can be loaded by this version. 
+ +SELECT formatQuery('CREATE VIEW v COMMENT \'test\' AS SELECT 1'); +SELECT formatQuery('CREATE MATERIALIZED VIEW v ENGINE = MergeTree ORDER BY c COMMENT \'test\' AS SELECT 1 AS c'); From 967095930866205891f0cd8e3ae19d00cef71c6b Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 16 Mar 2026 16:36:11 +0000 Subject: [PATCH 38/53] Backport #98980 to 26.1: Fix query tree pass order for `GROUP BY` rewrites --- src/Analyzer/QueryTreePassManager.cpp | 19 ++++-- ...timize_inverse_dictionary_lookup_basic.sql | 1 + .../04032_query_tree_pass_order.reference | 54 +++++++++++++++++ .../04032_query_tree_pass_order.sql | 60 +++++++++++++++++++ 4 files changed, 128 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/04032_query_tree_pass_order.reference create mode 100644 tests/queries/0_stateless/04032_query_tree_pass_order.sql diff --git a/src/Analyzer/QueryTreePassManager.cpp b/src/Analyzer/QueryTreePassManager.cpp index 46e7b63b0423..8e08b931fa49 100644 --- a/src/Analyzer/QueryTreePassManager.cpp +++ b/src/Analyzer/QueryTreePassManager.cpp @@ -291,6 +291,19 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze) // toString function. manager.addPass(std::make_unique()); + /// These passes can rewrite a predicate so that it depends on base columns instead of + /// the derived expression that appears in `GROUP BY`. They must run before + /// `OptimizeGroupByFunctionKeysPass` and `OptimizeGroupByInjectiveFunctionsPass`, + /// because those passes remove grouping keys based on the current expression tree. + /// For example, `SELECT toYear(d), toYear(d) = 2024, count() FROM ... GROUP BY ALL`: + /// `OptimizeGroupByFunctionKeysPass` can keep only `toYear(d)` as the grouping key, + /// and if `OptimizeDateOrDateTimeConverterWithPreimagePass` runs later it rewrites + /// `toYear(d) = 2024` to a range on raw `d`. 
After aggregation only `toYear(d)` and + /// `count()` remain, so the final projection would need `d` that is no longer present. + manager.addPass(std::make_unique()); + manager.addPass(std::make_unique()); + manager.addPass(std::make_unique()); + manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); @@ -301,8 +314,6 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze) manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); @@ -318,12 +329,8 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze) manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); - manager.addPass(std::make_unique()); } diff --git a/tests/queries/0_stateless/03701_optimize_inverse_dictionary_lookup_basic.sql b/tests/queries/0_stateless/03701_optimize_inverse_dictionary_lookup_basic.sql index 218b8e27d84a..191756168d6f 100644 --- a/tests/queries/0_stateless/03701_optimize_inverse_dictionary_lookup_basic.sql +++ b/tests/queries/0_stateless/03701_optimize_inverse_dictionary_lookup_basic.sql @@ -4,6 +4,7 @@ SET enable_analyzer = 1; SET optimize_inverse_dictionary_lookup = 1; SET optimize_or_like_chain = 0; +SET optimize_rewrite_like_perfect_affix = 0; DROP DICTIONARY IF EXISTS colors; DROP TABLE IF EXISTS ref_colors; diff --git a/tests/queries/0_stateless/04032_query_tree_pass_order.reference b/tests/queries/0_stateless/04032_query_tree_pass_order.reference new file mode 100644 index 000000000000..c85fa376b7ca --- /dev/null +++ b/tests/queries/0_stateless/04032_query_tree_pass_order.reference @@ -0,0 +1,54 @@ +-- { echo } + +DROP DICTIONARY IF EXISTS dict; +CREATE DICTIONARY dict +( + id Int64, + f Int64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(QUERY 'SELECT 1 id, 2 f')) 
+LAYOUT(flat()) +lifetime(0); +SELECT + dictGet(dict, 'f', dummy), + (dictGet(dict, 'f', dummy) = 1), + count() +FROM system.one +GROUP BY all +SETTINGS optimize_inverse_dictionary_lookup=0; +0 0 1 +SELECT + dictGet(dict, 'f', dummy), + (dictGet(dict, 'f', dummy) = 1), + count() +FROM system.one +GROUP BY all +SETTINGS optimize_inverse_dictionary_lookup=1; +0 0 1 +SELECT + toYear(d), + (toYear(d) = 2024), + count() +FROM values('d Date', ('2024-01-02')) +GROUP BY ALL +SETTINGS optimize_time_filter_with_preimage = 0; +2024 1 1 +SELECT + toYear(d), + (toYear(d) = 2024), + count() +FROM values('d Date', ('2024-01-02')) +GROUP BY ALL +SETTINGS optimize_time_filter_with_preimage = 1; +2024 1 1 +SELECT tuple(dummy), (tuple(dummy) = tuple(1)), count() +FROM system.one +GROUP BY ALL +SETTINGS optimize_injective_functions_in_group_by=0; +(0) 0 1 +SELECT tuple(dummy), (tuple(dummy) = tuple(1)), count() +FROM system.one +GROUP BY ALL +SETTINGS optimize_injective_functions_in_group_by=1; +(0) 0 1 diff --git a/tests/queries/0_stateless/04032_query_tree_pass_order.sql b/tests/queries/0_stateless/04032_query_tree_pass_order.sql new file mode 100644 index 000000000000..c608554b3d3a --- /dev/null +++ b/tests/queries/0_stateless/04032_query_tree_pass_order.sql @@ -0,0 +1,60 @@ +-- Tags: no-replicated-database, no-parallel-replicas +-- no-parallel, no-parallel-replicas: Dictionary is not created in parallel replicas. 
+ +-- { echo } + +DROP DICTIONARY IF EXISTS dict; + +CREATE DICTIONARY dict +( + id Int64, + f Int64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(QUERY 'SELECT 1 id, 2 f')) +LAYOUT(flat()) +lifetime(0); + +SELECT + dictGet(dict, 'f', dummy), + (dictGet(dict, 'f', dummy) = 1), + count() +FROM system.one +GROUP BY all +SETTINGS optimize_inverse_dictionary_lookup=0; + +SELECT + dictGet(dict, 'f', dummy), + (dictGet(dict, 'f', dummy) = 1), + count() +FROM system.one +GROUP BY all +SETTINGS optimize_inverse_dictionary_lookup=1; + + +SELECT + toYear(d), + (toYear(d) = 2024), + count() +FROM values('d Date', ('2024-01-02')) +GROUP BY ALL +SETTINGS optimize_time_filter_with_preimage = 0; + +SELECT + toYear(d), + (toYear(d) = 2024), + count() +FROM values('d Date', ('2024-01-02')) +GROUP BY ALL +SETTINGS optimize_time_filter_with_preimage = 1; + + +SELECT tuple(dummy), (tuple(dummy) = tuple(1)), count() +FROM system.one +GROUP BY ALL +SETTINGS optimize_injective_functions_in_group_by=0; + +SELECT tuple(dummy), (tuple(dummy) = tuple(1)), count() +FROM system.one +GROUP BY ALL +SETTINGS optimize_injective_functions_in_group_by=1; From 82a1a0986b97459dca1b4244dd26be4b79bf49d9 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 17 Mar 2026 01:46:20 +0000 Subject: [PATCH 39/53] Backport #99505 to 26.1: Fix incorrect results for `hasAllTokens` with OR across multiple text indexes --- .../MergeTree/MergeTreeReaderTextIndex.cpp | 2 +- ...ndex_direct_read_or_multi_column.reference | 6 ++ ...text_index_direct_read_or_multi_column.sql | 58 +++++++++++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.reference create mode 100644 tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql diff --git a/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp b/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp index 8ca135430e3d..aa7a2ec33af4 100644 --- 
a/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderTextIndex.cpp @@ -598,7 +598,7 @@ void MergeTreeReaderTextIndex::fillColumn(IColumn & column, const String & colum if (postings.empty() || search_query->tokens.empty()) return; - if (search_query->search_mode == TextSearchMode::Any || postings.size() == 1) + if (search_query->search_mode == TextSearchMode::Any) applyPostingsAny(column, postings, indices_buffer, search_query->tokens, old_size, row_offset, num_rows); else if (search_query->search_mode == TextSearchMode::All) applyPostingsAll(column, postings, indices_buffer, search_query->tokens, old_size, row_offset, num_rows); diff --git a/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.reference b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.reference new file mode 100644 index 000000000000..b1ae38fe6b26 --- /dev/null +++ b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.reference @@ -0,0 +1,6 @@ +Test 1 OR neither matches: 0 +Test 2 OR left matches: 1 +Test 3 OR right matches: 1 +Test 4 OR both match: 1 +Test 5 partial token match: 0 +Test 6 hasAnyTokens OR: 1 diff --git a/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql new file mode 100644 index 000000000000..33fbb2229461 --- /dev/null +++ b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql @@ -0,0 +1,58 @@ +-- Regression test for a bug where OR of hasAllTokens on different columns +-- with separate text indexes returned incorrect results. +-- The bug was in MergeTreeReaderTextIndex::fillColumn: when postings.size() == 1 +-- it incorrectly used applyPostingsAny instead of applyPostingsAll, ignoring +-- missing tokens in the postings map. 
+ +SET use_skip_indexes = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; + +DROP TABLE IF EXISTS texttmp2; + +CREATE TABLE texttmp2 +( + `id` UInt32, + `title` String, + `content` String, + INDEX content_idx content TYPE text(tokenizer = splitByNonAlpha) GRANULARITY 100000000, + INDEX title_idx title TYPE text(tokenizer = splitByNonAlpha) GRANULARITY 100000000 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 8192; + +INSERT INTO texttmp2 VALUES(1, 'qqa rerer', 'qqa rerer'); + +-- Test 1: OR of hasAllTokens on different columns, neither side matches. +-- title does not contain 'abc' and 'def'; content does not contain both 'xyz' and 'qqa'. +SELECT 'Test 1 OR neither matches:', count() +FROM texttmp2 +WHERE hasAllTokens(title, ['abc', 'def']) OR hasAllTokens(content, ['xyz', 'qqa']); + +-- Test 2: Only the left side of OR matches. +SELECT 'Test 2 OR left matches:', count() +FROM texttmp2 +WHERE hasAllTokens(title, ['qqa', 'rerer']) OR hasAllTokens(content, ['xyz', 'abc']); + +-- Test 3: Only the right side of OR matches. +SELECT 'Test 3 OR right matches:', count() +FROM texttmp2 +WHERE hasAllTokens(title, ['abc', 'def']) OR hasAllTokens(content, ['qqa', 'rerer']); + +-- Test 4: Both sides of OR match. +SELECT 'Test 4 OR both match:', count() +FROM texttmp2 +WHERE hasAllTokens(title, ['qqa', 'rerer']) OR hasAllTokens(content, ['qqa', 'rerer']); + +-- Test 5: One token present, one missing in hasAllTokens (the exact bug scenario). +SELECT 'Test 5 partial token match:', count() +FROM texttmp2 +WHERE hasAllTokens(content, ['xyz', 'qqa']); + +-- Test 6: hasAnyTokens with OR (should work correctly). 
+SELECT 'Test 6 hasAnyTokens OR:', count() +FROM texttmp2 +WHERE hasAnyTokens(title, ['abc', 'def']) OR hasAnyTokens(content, ['xyz', 'qqa']); + +DROP TABLE texttmp2; From 2e6f5761eef2d2e87f5c8342e7f603042e52b1da Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 17 Mar 2026 04:34:00 +0100 Subject: [PATCH 40/53] Fix test reference: `formatQuery` on 26.1 escapes single quotes The `formatQuery` function on the 26.1 branch outputs escaped single quotes (`\'test\'`) unlike master which outputs unescaped (`'test'`). Update the reference file to match the actual output. https://github.com/ClickHouse/ClickHouse/pull/99561 Co-Authored-By: Claude Opus 4.6 (1M context) --- .../04035_forward_compat_comment_before_as_select.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference index 11e3c439fa04..6353405096a4 100644 --- a/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference +++ b/tests/queries/0_stateless/04035_forward_compat_comment_before_as_select.reference @@ -1,2 +1,2 @@ -CREATE VIEW v\nAS (SELECT 1)\nCOMMENT 'test' -CREATE MATERIALIZED VIEW v\nENGINE = MergeTree\nORDER BY c\nAS (SELECT 1 AS c)\nCOMMENT 'test' +CREATE VIEW v\nAS (SELECT 1)\nCOMMENT \'test\' +CREATE MATERIALIZED VIEW v\nENGINE = MergeTree\nORDER BY c\nAS (SELECT 1 AS c)\nCOMMENT \'test\' From f0d3232a3aa2372ce3ae9b41307af7d423f5b830 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 17 Mar 2026 11:29:57 +0000 Subject: [PATCH 41/53] Backport #99351 to 26.1: Fix CHECK TABLE with sparse serialization inside Tuple with Dynamic --- src/DataTypes/Serializations/SerializationSparse.cpp | 7 +++++-- .../04038_check_table_sparse_tuple_dynamic.reference | 1 + .../04038_check_table_sparse_tuple_dynamic.sql | 12 ++++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) create mode 
100644 tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.reference create mode 100644 tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.sql diff --git a/src/DataTypes/Serializations/SerializationSparse.cpp b/src/DataTypes/Serializations/SerializationSparse.cpp index 9aa2322c52ae..a2c664d02527 100644 --- a/src/DataTypes/Serializations/SerializationSparse.cpp +++ b/src/DataTypes/Serializations/SerializationSparse.cpp @@ -83,7 +83,10 @@ size_t deserializeOffsets( skipped_values_rows = 0; size_t max_rows_to_read = offset + limit; - if (max_rows_to_read && state.num_trailing_defaults >= max_rows_to_read) + if (max_rows_to_read == 0) + return 0; + + if (state.num_trailing_defaults >= max_rows_to_read) { state.num_trailing_defaults -= max_rows_to_read; return limit; @@ -126,7 +129,7 @@ size_t deserializeOffsets( size_t next_total_rows = total_rows + group_size; group_size += state.num_trailing_defaults; - if (max_rows_to_read && next_total_rows >= max_rows_to_read) + if (next_total_rows >= max_rows_to_read) { /// If it was not last group in granule, /// we have to add current non-default value at further reads. 
diff --git a/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.reference b/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.reference new file mode 100644 index 000000000000..2027ea099a8b --- /dev/null +++ b/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.reference @@ -0,0 +1 @@ +all_1_1_0 1 diff --git a/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.sql b/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.sql new file mode 100644 index 000000000000..c406eda00b2e --- /dev/null +++ b/tests/queries/0_stateless/04038_check_table_sparse_tuple_dynamic.sql @@ -0,0 +1,12 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/96588 +-- CHECK TABLE on a Tuple with a Dynamic element and a sparse-serialized element +-- used to fail with "Unexpected size of tuple element" because deserializeOffsets +-- in SerializationSparse treated limit=0 as "read everything" instead of "read nothing". + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(c1 Dynamic, c2 Tuple(c3 Int))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 1, ratio_of_defaults_for_sparse_serialization = 0.9; +INSERT INTO TABLE t0 (c0) SELECT (1, (number, ), ) FROM numbers(1); +CHECK TABLE t0; + +DROP TABLE t0; From 56fd1dde1ede1368d969203261a305806507c0fc Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 17 Mar 2026 13:21:40 +0100 Subject: [PATCH 42/53] Update 04041_text_index_direct_read_or_multi_column.sql --- .../0_stateless/04041_text_index_direct_read_or_multi_column.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql index 33fbb2229461..9f91d98baf81 100644 --- a/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql +++ b/tests/queries/0_stateless/04041_text_index_direct_read_or_multi_column.sql @@ -7,6 +7,7 @@ SET 
use_skip_indexes = 1; SET use_skip_indexes_on_data_read = 1; SET query_plan_direct_read_from_text_index = 1; +SET enable_full_text_index = 1; DROP TABLE IF EXISTS texttmp2; From ecf4cb15e299e7ba888cc4b217bb71fed8db473c Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 17 Mar 2026 15:39:41 +0000 Subject: [PATCH 43/53] Backport #99036 to 26.1: Fix server crash when dropping a patch part after schema change --- .../MergeTree/PatchParts/PatchPartsUtils.cpp | 8 +++ ...4023_issue_98484_drop_patch_part.reference | 1 + .../04023_issue_98484_drop_patch_part.sh | 65 +++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 tests/queries/0_stateless/04023_issue_98484_drop_patch_part.reference create mode 100755 tests/queries/0_stateless/04023_issue_98484_drop_patch_part.sh diff --git a/src/Storages/MergeTree/PatchParts/PatchPartsUtils.cpp b/src/Storages/MergeTree/PatchParts/PatchPartsUtils.cpp index 92c1a768faf9..ec1f69fdc118 100644 --- a/src/Storages/MergeTree/PatchParts/PatchPartsUtils.cpp +++ b/src/Storages/MergeTree/PatchParts/PatchPartsUtils.cpp @@ -68,6 +68,14 @@ StorageMetadataPtr getPatchPartMetadata(ColumnsDescription patch_part_desc, Cont { StorageInMemoryMetadata part_metadata; + /// Ensure patch part system columns are present. + /// They may be missing when creating empty coverage parts + /// (e.g. DROP PART for a patch part), because createEmptyPart + /// only includes data columns from table metadata. + for (const auto & col : getPatchPartSystemColumns()) + if (!patch_part_desc.has(col.name)) + patch_part_desc.add(ColumnDescription(col.name, col.type)); + /// Use hash of column names to put patch parts with different structure to different partitions. 
auto part_identifier = make_intrusive("_part"); auto columns_hash = getColumnsHash(patch_part_desc.getNamesOfPhysical()); diff --git a/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.reference b/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.reference new file mode 100644 index 000000000000..d86bac9de59a --- /dev/null +++ b/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.reference @@ -0,0 +1 @@ +OK diff --git a/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.sh b/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.sh new file mode 100755 index 000000000000..c416c1405b33 --- /dev/null +++ b/tests/queries/0_stateless/04023_issue_98484_drop_patch_part.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-replicated-database +# Tag no-fasttest: requires lightweight_delete_mode setting + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# Test for issue #98484: DROP PART on patch part should not crash server. +# The bug was that getPatchPartMetadata() built a partition key expression +# referencing _part column, but the ColumnsDescription passed from +# createEmptyPart() only contained data columns, causing UNKNOWN_IDENTIFIER +# inside a NOEXCEPT_SCOPE which triggered std::terminate(). 
+ +${CLICKHOUSE_CLIENT} --query " + CREATE TABLE t_98484 (c0 Int32, c1 String, c2 Int8) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS enable_block_offset_column = 1, enable_block_number_column = 1 +" + +${CLICKHOUSE_CLIENT} --query "INSERT INTO t_98484 VALUES (1, 'hello', 10)" +${CLICKHOUSE_CLIENT} --query "INSERT INTO t_98484 VALUES (2, 'world', 20)" +${CLICKHOUSE_CLIENT} --query "INSERT INTO t_98484 VALUES (3, 'test', 30)" + +# Create patch parts via lightweight delete +${CLICKHOUSE_CLIENT} --query "SET lightweight_delete_mode = 'lightweight_update_force'; DELETE FROM t_98484 WHERE c0 = 1" + +# Wait for mutations to complete +for _ in $(seq 1 30); do + result=$(${CLICKHOUSE_CLIENT} --query "SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 't_98484' AND name LIKE 'patch-%' AND active = 1") + if [ "$result" -ge 1 ]; then + break + fi + sleep 0.5 +done + +# Add column to change columns description (original trigger condition) +${CLICKHOUSE_CLIENT} --query "ALTER TABLE t_98484 ADD COLUMN c9 Nullable(Bool)" + +# Get the first active patch part name +PATCH_PART=$(${CLICKHOUSE_CLIENT} --query " + SELECT name FROM system.parts + WHERE database = currentDatabase() AND table = 't_98484' + AND name LIKE 'patch-%' AND active = 1 + ORDER BY name LIMIT 1 +") + +if [ -z "$PATCH_PART" ]; then + echo "FAIL: No patch parts found" + exit 1 +fi + +# DROP PART on the patch part - this should not crash the server +${CLICKHOUSE_CLIENT} --query "ALTER TABLE t_98484 DROP PART '$PATCH_PART'" 2>&1 + +# Verify server is still alive +${CLICKHOUSE_CLIENT} --query "SELECT 1" > /dev/null 2>&1 +if [ $? 
-ne 0 ]; then + echo "FAIL: Server crashed" + exit 1 +fi + +echo "OK" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE t_98484" From ee8cdc023504d0549dc046d3bfd279a091f678ac Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 17 Mar 2026 16:38:22 +0000 Subject: [PATCH 44/53] Backport #99232 to 26.1: Change default stderr_reaction to log_last for executable UDFs --- docs/en/sql-reference/functions/udf.md | 2 + ...alUserDefinedExecutableFunctionsLoader.cpp | 2 +- src/Processors/Sources/ShellCommandSource.cpp | 83 +++++++++++++------ .../functions/test_function_config.xml | 28 ++++++- .../test.py | 51 +++++++++++- 5 files changed, 137 insertions(+), 29 deletions(-) diff --git a/docs/en/sql-reference/functions/udf.md b/docs/en/sql-reference/functions/udf.md index 98804e856927..a564ebdcdf81 100644 --- a/docs/en/sql-reference/functions/udf.md +++ b/docs/en/sql-reference/functions/udf.md @@ -44,6 +44,8 @@ A function configuration contains the following settings: | `execute_direct` | If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder specified by [user_scripts_path](../../operations/server-configuration-parameters/settings.md#user_scripts_path). Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c` | Optional | `1` | | `lifetime` | The reload interval of a function in seconds. If it is set to `0` then the function is not reloaded | Optional | `0` | | `deterministic` | If the function is deterministic (returns the same result for the same input) | Optional | `false` | +| `stderr_reaction` | How to handle the command's stderr output. Values: `none` (ignore), `log` (log all stderr immediately), `log_first` (log first 4 KiB after exit), `log_last` (log last 4 KiB after exit), `throw` (throw exception immediately on any stderr output). 
When using `log_first` or `log_last` with a non-zero exit code, the stderr content is included in the exception message | Optional | `log_last` | +| `check_exit_code` | If true, ClickHouse will check the exit code of the command. A non-zero exit code causes an exception | Optional | `true` | The command must read arguments from `STDIN` and must output the result to `STDOUT`. The command must process arguments iteratively. That is after processing a chunk of arguments it must wait for the next chunk. diff --git a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp index 90fb3c22d5be..1d719d1577a6 100644 --- a/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp +++ b/src/Functions/UserDefined/ExternalUserDefinedExecutableFunctionsLoader.cpp @@ -181,7 +181,7 @@ ExternalLoader::LoadableMutablePtr ExternalUserDefinedExecutableFunctionsLoader: size_t command_read_timeout_milliseconds = config.getUInt64(key_in_config + ".command_read_timeout", 10000); size_t command_write_timeout_milliseconds = config.getUInt64(key_in_config + ".command_write_timeout", 10000); ExternalCommandStderrReaction stderr_reaction - = parseExternalCommandStderrReaction(config.getString(key_in_config + ".stderr_reaction", "throw")); + = parseExternalCommandStderrReaction(config.getString(key_in_config + ".stderr_reaction", "log_last")); bool check_exit_code = config.getBool(key_in_config + ".check_exit_code", true); size_t pool_size = 0; diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 8c1d4d7876d8..bde146f55fb7 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -214,43 +214,50 @@ class TimeoutReadBufferFromFileDescriptor : public BufferWithOwnMemorysize() : 0; - while (current_size < MAX_STDERR_SIZE) + while (true) { pfds[1].revents = 0; int 
stderr_events = pollWithTimeout(&pfds[1], 1, STDERR_DRAIN_TIMEOUT_MS); if (stderr_events <= 0) break; - if (pfds[1].revents > 0) + if (pfds[1].revents <= 0) + break; + + if (stderr_read_buf == nullptr) + stderr_read_buf.reset(new char[BUFFER_SIZE]); + ssize_t stderr_res = ::read(stderr_fd, stderr_read_buf.get(), BUFFER_SIZE); + if (stderr_res <= 0) + break; + + std::string_view str(stderr_read_buf.get(), stderr_res); + if (stderr_reaction == ExternalCommandStderrReaction::THROW) { - if (stderr_read_buf == nullptr) - stderr_read_buf.reset(new char[BUFFER_SIZE]); - ssize_t stderr_res = ::read(stderr_fd, stderr_read_buf.get(), BUFFER_SIZE); - if (stderr_res <= 0) + size_t current_size = stderr_full_output ? stderr_full_output->size() : 0; + if (current_size >= MAX_STDERR_SIZE) break; - if (!stderr_full_output) stderr_full_output.emplace(); - std::string_view str(stderr_read_buf.get(), stderr_res); size_t bytes_to_append = std::min(static_cast(stderr_res), MAX_STDERR_SIZE - current_size); stderr_full_output->append(str.begin(), str.begin() + bytes_to_append); - current_size = stderr_full_output->size(); } - else + else if (stderr_reaction == ExternalCommandStderrReaction::LOG_FIRST) { - break; + ssize_t to_insert = std::min(ssize_t(stderr_result_buf.reserve()), stderr_res); + if (to_insert > 0) + stderr_result_buf.insert(stderr_result_buf.end(), str.begin(), str.begin() + to_insert); + } + else if (stderr_reaction == ExternalCommandStderrReaction::LOG_LAST) + { + stderr_result_buf.insert(stderr_result_buf.end(), str.begin(), str.begin() + stderr_res); } } - - /// Don't throw here - let prepare() handle stderr exception after all reads complete - /// This ensures we capture complete stderr and throw at the right time } break; } @@ -299,9 +306,22 @@ class TimeoutReadBufferFromFileDescriptor : public BufferWithOwnMemory= configuration.number_of_rows_to_read; + if (process_pool) + { + bool valid_command + = configuration.read_fixed_number_of_rows && current_read_rows >= 
configuration.number_of_rows_to_read; - // We can only wait for pooled commands when they are invalid. - if (!valid_command) + // We can only wait for pooled commands when they are invalid. + if (!valid_command) + command->wait(); + } + else command->wait(); } - else - command->wait(); + catch (Exception & e) + { + /// Enrich exit code exception with buffered stderr content (LOG_FIRST/LOG_LAST modes) + String stderr_content = timeout_command_out.consumeBufferedStderr(); + if (!stderr_content.empty()) + e.addMessage("Stderr: {}", stderr_content); + throw; + } } rethrowExceptionDuringSendDataIfNeeded(); diff --git a/tests/integration/test_executable_user_defined_function/functions/test_function_config.xml b/tests/integration/test_executable_user_defined_function/functions/test_function_config.xml index 2db02325c434..b836119e98ea 100644 --- a/tests/integration/test_executable_user_defined_function/functions/test_function_config.xml +++ b/tests/integration/test_executable_user_defined_function/functions/test_function_config.xml @@ -406,7 +406,33 @@ TabSeparated input_python_exception.py - + throw + + + + + executable + test_function_python_exception_log_last + String + + UInt64 + + TabSeparated + input_python_exception.py + log_last + + + + + executable + test_function_stderr_default_reaction + String + + String + + TabSeparated + input_always_error.py + diff --git a/tests/integration/test_executable_user_defined_function/test.py b/tests/integration/test_executable_user_defined_function/test.py index 65b1d1b6a02d..79b43f28061a 100644 --- a/tests/integration/test_executable_user_defined_function/test.py +++ b/tests/integration/test_executable_user_defined_function/test.py @@ -395,7 +395,7 @@ def test_executable_function_query_cache(started_cluster): node.query("SYSTEM DROP QUERY CACHE"); def test_executable_function_python_exception_in_query_log(started_cluster): - '''Test that Python exceptions with tracebacks appear in query_log when stderr_reaction defaults to 
throw''' + '''Test that Python exceptions with tracebacks appear in query_log when stderr_reaction is configured as throw''' skip_test_msan(node) # Clear query log @@ -440,3 +440,52 @@ def test_executable_function_python_exception_in_query_log(started_cluster): for component in required_components: assert component in exception_text, f"Missing required component: {component}" + + +def test_executable_function_default_stderr_reaction(started_cluster): + '''Test that UDFs writing to stderr succeed under default stderr_reaction (log_last) when exit code is 0''' + skip_test_msan(node) + + # input_always_error.py writes "Fake error" to stderr but exits 0 + # With default stderr_reaction (log_last), this should NOT throw + assert node.query("SELECT test_function_stderr_default_reaction('abc')") == "Key abc\n" + + +def test_executable_function_python_exception_log_last_in_query_log(started_cluster): + '''Test that stderr content appears in exception when exit code != 0 under log_last mode''' + skip_test_msan(node) + + node.query("SYSTEM FLUSH LOGS") + + query_id = uuid.uuid4().hex + + try: + node.query("SELECT test_function_python_exception_log_last(1)", query_id=query_id) + assert False, "Exception should have been thrown" + except Exception as ex: + assert "DB::Exception" in str(ex) + # Under log_last mode, exit code exception is enriched with stderr + assert "Child process was exited with return code 1" in str(ex) + + node.query("SYSTEM FLUSH LOGS") + + result = node.query(f""" + SELECT exception + FROM system.query_log + WHERE query_id = '{query_id}' + AND type IN ('ExceptionBeforeStart', 'ExceptionWhileProcessing') + FORMAT TabSeparated + """) + + exception_text = TSV(result).lines[0] + + # Verify stderr content is included in the exit code exception + required_components = [ + "Stderr:", + "in process_data", + "result = int(value) / 0", + "ZeroDivisionError: division by zero", + ] + + for component in required_components: + assert component in exception_text, 
f"Missing required component: {component}" From dcbea01abc6c45c3b8e1298d744e19fe9e82ca0e Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 18 Mar 2026 13:49:40 +0000 Subject: [PATCH 45/53] Backport #99661 to 26.1: Fix reading of text index with lightweight deletes and row policies --- .../MergeTree/MergeTreeRangeReader.cpp | 26 +++++----- src/Storages/MergeTree/MergeTreeRangeReader.h | 8 +-- src/Storages/MergeTree/MergeTreeReadTask.cpp | 26 ++++++---- .../MergeTree/MergeTreeReaderIndex.cpp | 4 +- src/Storages/MergeTree/MergeTreeReaderIndex.h | 7 ++- .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- .../PatchParts/MergeTreePatchReader.cpp | 2 +- ...42_text_index_lightweight_delete.reference | 1 + .../04042_text_index_lightweight_delete.sql | 50 +++++++++++++++++++ 9 files changed, 93 insertions(+), 33 deletions(-) create mode 100644 tests/queries/0_stateless/04042_text_index_lightweight_delete.reference create mode 100644 tests/queries/0_stateless/04042_text_index_lightweight_delete.sql diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 9da2e2286122..885a8467f415 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -560,7 +560,7 @@ void MergeTreeRangeReader::ReadResult::applyFilter(const FilterWithCachedCount & LOG_TEST(log, "ReadResult::applyFilter() num_rows after: {}", num_rows); } -void MergeTreeRangeReader::ReadResult::optimize(const FilterWithCachedCount & current_filter, bool can_read_incomplete_granules, bool must_apply_filter) +void MergeTreeRangeReader::ReadResult::optimize(const FilterWithCachedCount & current_filter, bool can_read_incomplete_granules_, bool must_apply_filter) { checkInternalConsistency(); @@ -587,7 +587,7 @@ void MergeTreeRangeReader::ReadResult::optimize(const FilterWithCachedCount & cu return; NumRows zero_tails; - auto total_zero_rows_in_tails = countZeroTails(filter.getData(), zero_tails, 
can_read_incomplete_granules); + auto total_zero_rows_in_tails = countZeroTails(filter.getData(), zero_tails, can_read_incomplete_granules_); LOG_TEST(log, "ReadResult::optimize() before: {}", dumpInfo()); @@ -699,7 +699,7 @@ void MergeTreeRangeReader::ReadResult::optimize(const FilterWithCachedCount & cu } } -size_t MergeTreeRangeReader::ReadResult::countZeroTails(const IColumn::Filter & filter_vec, NumRows & zero_tails, bool can_read_incomplete_granules) const +size_t MergeTreeRangeReader::ReadResult::countZeroTails(const IColumn::Filter & filter_vec, NumRows & zero_tails, bool can_read_incomplete_granules_) const { zero_tails.resize(0); zero_tails.reserve(rows_per_granule.size()); @@ -712,7 +712,7 @@ size_t MergeTreeRangeReader::ReadResult::countZeroTails(const IColumn::Filter & { /// Count the number of zeros at the end of filter for rows were read from current granule. size_t zero_tail = numZerosInTail(filter_data, filter_data + rows_to_read); - if (!can_read_incomplete_granules && zero_tail != rows_to_read) + if (!can_read_incomplete_granules_ && zero_tail != rows_to_read) zero_tail = 0; zero_tails.push_back(zero_tail); total_zero_rows_in_tails += zero_tails.back(); @@ -886,12 +886,14 @@ MergeTreeRangeReader::MergeTreeRangeReader( Block prev_reader_header_, const PrewhereExprStep * prewhere_info_, ReadStepPerformanceCountersPtr performance_counters_, - bool main_reader_) + bool main_reader_, + bool can_read_incomplete_granules_) : merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part_info_for_read->getIndexGranularity())) , prewhere_info(prewhere_info_) , performance_counters(std::move(performance_counters_)) , main_reader(main_reader_) + , can_read_incomplete_granules(can_read_incomplete_granules_) { result_sample_block = std::move(prev_reader_header_); @@ -1056,10 +1058,10 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::startReadingChain(size_t size_t current_space = space_left; - /// If reader can't read part of 
granule, we have to increase number of reading rows - /// to read complete granules and exceed max_rows a bit. - /// When using query condition cache, you need to ensure that the read Mark is complete. - if (use_query_condition_cache || !merge_tree_reader->canReadIncompleteGranules()) + /// If any reader in chain can't read part of granule, or query condition cache + /// requires complete marks, we have to increase number of reading rows to read + /// complete granules and exceed max_rows a bit. + if (use_query_condition_cache || !can_read_incomplete_granules) current_space = stream.ceilRowsToCompleteGranules(space_left); auto rows_to_read = std::min(current_space, stream.numPendingRowsInCurrentGranule()); @@ -1099,7 +1101,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::startReadingChain(size_t { auto current_filter = FilterWithCachedCount(result.columns.front()); result.columns.clear(); - result.optimize(current_filter, merge_tree_reader->canReadIncompleteGranules(), merge_tree_reader->mustApplyFilter()); + result.optimize(current_filter, can_read_incomplete_granules, merge_tree_reader->mustApplyFilter()); } else { @@ -1508,7 +1510,7 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r /// to only output those rows from this reader to the next Sorting step. 
bool is_vector_search = merge_tree_reader->data_part_info_for_read->getReadHints().vector_search_results.has_value(); if (is_vector_search && (part_offsets_filter_for_vector_search.size() == result.num_rows)) - result.optimize(part_offsets_filter_for_vector_search, merge_tree_reader->canReadIncompleteGranules(), false); + result.optimize(part_offsets_filter_for_vector_search, can_read_incomplete_granules, false); if (!prewhere_info || prewhere_info->type == PrewhereExprStep::None) return; @@ -1588,7 +1590,7 @@ void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & r result.columns.erase(result.columns.begin() + filter_column_pos); FilterWithCachedCount current_filter(current_step_filter); - result.optimize(current_filter, merge_tree_reader->canReadIncompleteGranules(), false); + result.optimize(current_filter, can_read_incomplete_granules, false); if (prewhere_info->need_filter && !result.filterWasApplied()) { diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 8c8460adf7e1..cd50b9ad1eca 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -169,7 +169,8 @@ class MergeTreeRangeReader Block prev_reader_header_, const PrewhereExprStep * prewhere_info_, ReadStepPerformanceCountersPtr performance_counters_, - bool main_reader_); + bool main_reader_, + bool can_read_incomplete_granules_); MergeTreeRangeReader() = default; @@ -330,7 +331,7 @@ class MergeTreeRangeReader /// Add current step filter to the result and then for each granule calculate the number of filtered rows at the end. /// Remove them and update filter. 
/// Apply the filter to the columns and update num_rows if required - void optimize(const FilterWithCachedCount & current_filter, bool can_read_incomplete_granules, bool must_apply_filter); + void optimize(const FilterWithCachedCount & current_filter, bool can_read_incomplete_granules_, bool must_apply_filter); /// Remove all rows from granules. void clear(); @@ -410,7 +411,7 @@ class MergeTreeRangeReader /// Builds updated filter by cutting zeros in granules tails void collapseZeroTails(const IColumn::Filter & filter, const NumRows & rows_per_granule_previous, IColumn::Filter & new_filter) const; - size_t countZeroTails(const IColumn::Filter & filter, NumRows & zero_tails, bool can_read_incomplete_granules) const; + size_t countZeroTails(const IColumn::Filter & filter, NumRows & zero_tails, bool can_read_incomplete_granules_) const; static size_t numZerosInTail(const UInt8 * begin, const UInt8 * end); LoggerPtr log; @@ -453,6 +454,7 @@ class MergeTreeRangeReader ReadStepPerformanceCountersPtr performance_counters; bool main_reader = false; /// Whether it is the main reader or one of the readers for prewhere steps + bool can_read_incomplete_granules = false; /// Combined flag: true only if ALL readers in the chain support incomplete granules LoggerPtr log = getLogger("MergeTreeRangeReader"); }; diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 3128556d662b..e3f71c03f294 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -228,6 +228,15 @@ MergeTreeReadersChain MergeTreeReadTask::createReadersChain( size_t num_readers = prewhere_actions.steps.size() + task_readers.prewhere.size() + 1; range_readers.reserve(num_readers); + /// Compute a combined flag: true only if ALL readers in the chain support incomplete granules. 
+ /// This ensures that the first reader in the chain (which decides batch boundaries) does not + /// create mid-mark boundaries when a later reader cannot handle them. + bool can_read_incomplete_granules = task_readers.main->canReadIncompleteGranules() + && std::ranges::all_of(task_readers.prewhere, [](const auto & reader) + { + return reader->canReadIncompleteGranules(); + }); + if (task_readers.prepared_index) { range_readers.emplace_back( @@ -235,7 +244,8 @@ MergeTreeReadersChain MergeTreeReadTask::createReadersChain( Block{}, /*prewhere_info_=*/ nullptr, read_steps_performance_counters.getCounterForIndexStep(), - /*main_reader_=*/ false); + /*main_reader_=*/ false, + can_read_incomplete_granules); } size_t counter_idx = 0; @@ -246,7 +256,8 @@ MergeTreeReadersChain MergeTreeReadTask::createReadersChain( range_readers.empty() ? Block{} : range_readers.back().getSampleBlock(), prewhere_actions.steps[i].get(), read_steps_performance_counters.getCountersForStep(counter_idx++), - /*main_reader_=*/ false); + /*main_reader_=*/ false, + can_read_incomplete_granules); } if (!task_readers.main->getColumns().empty()) @@ -256,7 +267,8 @@ MergeTreeReadersChain MergeTreeReadTask::createReadersChain( range_readers.empty() ? 
Block{} : range_readers.back().getSampleBlock(), /*prewhere_info_=*/ nullptr, read_steps_performance_counters.getCountersForStep(counter_idx), - /*main_reader_=*/ true); + /*main_reader_=*/ true, + can_read_incomplete_granules); } return MergeTreeReadersChain{std::move(range_readers), task_readers.patches}; @@ -303,13 +315,7 @@ void MergeTreeReadTask::initializeIndexReader(const MergeTreeIndexBuildContextPt if (index_read_result || lazy_materializing_rows) { - bool can_read_incomplete_granules = readers.main->canReadIncompleteGranules() - && std::ranges::all_of(readers.prewhere, [](const auto & reader) - { - return reader->canReadIncompleteGranules(); - }); - - readers.prepared_index = std::make_unique(readers.main.get(), std::move(index_read_result), part_rows, can_read_incomplete_granules); + readers.prepared_index = std::make_unique(readers.main.get(), std::move(index_read_result), part_rows); } } diff --git a/src/Storages/MergeTree/MergeTreeReaderIndex.cpp b/src/Storages/MergeTree/MergeTreeReaderIndex.cpp index 623d5468b4a3..63b648b8edf2 100644 --- a/src/Storages/MergeTree/MergeTreeReaderIndex.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderIndex.cpp @@ -10,7 +10,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_, bool can_read_incomplete_granules_) +MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_) : IMergeTreeReader( main_reader_->data_part_info_for_read, {}, @@ -23,7 +23,7 @@ MergeTreeReaderIndex::MergeTreeReaderIndex(const IMergeTreeReader * main_reader_ main_reader_->settings) , index_read_result(std::move(index_read_result_)) , lazy_materializing_rows(lazy_materializing_rows_) - , can_read_incomplete_granules(can_read_incomplete_granules_) + , 
main_reader(main_reader_) { chassert(lazy_materializing_rows || index_read_result); chassert(lazy_materializing_rows || index_read_result->skip_index_read_result || index_read_result->projection_index_read_result); diff --git a/src/Storages/MergeTree/MergeTreeReaderIndex.h b/src/Storages/MergeTree/MergeTreeReaderIndex.h index 476b7aacad51..68f37585c80e 100644 --- a/src/Storages/MergeTree/MergeTreeReaderIndex.h +++ b/src/Storages/MergeTree/MergeTreeReaderIndex.h @@ -20,7 +20,7 @@ class MergeTreeReaderIndex : public IMergeTreeReader public: using MatchingMarks = std::vector; - MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_, bool can_read_incomplete_granules_); + MergeTreeReaderIndex(const IMergeTreeReader * main_reader_, MergeTreeIndexReadResultPtr index_read_result_, const PaddedPODArray * lazy_materializing_rows_); size_t readRows( size_t from_mark, @@ -30,7 +30,7 @@ class MergeTreeReaderIndex : public IMergeTreeReader size_t offset, Columns & res_columns) override; - bool canReadIncompleteGranules() const override { return can_read_incomplete_granules; } + bool canReadIncompleteGranules() const override { return main_reader->canReadIncompleteGranules(); } bool canSkipMark(size_t mark, size_t current_task_last_mark) override; @@ -45,8 +45,7 @@ class MergeTreeReaderIndex : public IMergeTreeReader const PaddedPODArray * lazy_materializing_rows = nullptr; - /// Determines if reading incomplete index granules is supported. - bool can_read_incomplete_granules; + const IMergeTreeReader * main_reader; /// Current row position used when continuing reads across multiple calls. 
size_t current_row = 0; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index b2b0862ce87e..ec217cab81c8 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -189,7 +189,7 @@ MergeTreeSequentialSource::MergeTreeSequentialSource( auto counters = std::make_shared(); - MergeTreeRangeReader range_reader(readers.main.get(), {}, nullptr, counters, true); + MergeTreeRangeReader range_reader(readers.main.get(), {}, nullptr, counters, true, readers.main->canReadIncompleteGranules()); readers_chain = MergeTreeReadersChain{{std::move(range_reader)}, readers.patches}; updateRowsToRead(0); diff --git a/src/Storages/MergeTree/PatchParts/MergeTreePatchReader.cpp b/src/Storages/MergeTree/PatchParts/MergeTreePatchReader.cpp index a292619c1f9a..77bf8c166da0 100644 --- a/src/Storages/MergeTree/PatchParts/MergeTreePatchReader.cpp +++ b/src/Storages/MergeTree/PatchParts/MergeTreePatchReader.cpp @@ -33,7 +33,7 @@ namespace ErrorCodes MergeTreePatchReader::MergeTreePatchReader(PatchPartInfoForReader patch_part_, MergeTreeReaderPtr reader_) : patch_part(std::move(patch_part_)) , reader(std::move(reader_)) - , range_reader(reader.get(), {}, nullptr, std::make_shared(), false) + , range_reader(reader.get(), {}, nullptr, std::make_shared(), false, reader->canReadIncompleteGranules()) { } diff --git a/tests/queries/0_stateless/04042_text_index_lightweight_delete.reference b/tests/queries/0_stateless/04042_text_index_lightweight_delete.reference new file mode 100644 index 000000000000..8bd1af11bf28 --- /dev/null +++ b/tests/queries/0_stateless/04042_text_index_lightweight_delete.reference @@ -0,0 +1 @@ +2000 diff --git a/tests/queries/0_stateless/04042_text_index_lightweight_delete.sql b/tests/queries/0_stateless/04042_text_index_lightweight_delete.sql new file mode 100644 index 000000000000..90ecf374c324 --- /dev/null +++ 
b/tests/queries/0_stateless/04042_text_index_lightweight_delete.sql @@ -0,0 +1,50 @@ +-- Reproduces a bug where MergeTreeReaderTextIndex::readRows overruns past the +-- final mark when the text index reader is non-first in the reader chain. + +SET enable_full_text_index = 1; + +DROP TABLE IF EXISTS t_text_index_lwd_bug; + +CREATE TABLE t_text_index_lwd_bug +( + id UInt64, + body String, + created_at DateTime, + INDEX fts_body body TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 8192, + index_granularity_bytes = '10M'; + +INSERT INTO t_text_index_lwd_bug +SELECT + number, + concat('document ', toString(number), if(number % 100 = 0, ' vector', '')), + toDateTime('2024-01-01 00:00:00') - toIntervalSecond(number) +FROM numbers(200000); + +OPTIMIZE TABLE t_text_index_lwd_bug FINAL; + +-- Lightweight delete: creates _row_exists column, makes text index reader non-first. +DELETE FROM t_text_index_lwd_bug WHERE id % 1000 = 999; + +-- Full scan that reaches the end of the part. +-- The _row_exists reader is first (canReadIncompleteGranules=true), +-- text index reader is second (canReadIncompleteGranules=false). +-- max_block_size=65505 is not a multiple of index_granularity=8192, +-- so batch boundaries fall mid-mark, triggering the drift. 
+SELECT count() +FROM t_text_index_lwd_bug +WHERE hasToken(body, 'vector') AND created_at >= (toDateTime('2024-01-01 00:00:00') - toIntervalMonth(1)) +SETTINGS + enable_full_text_index = 1, + use_skip_indexes = 1, + use_query_condition_cache = 0, + query_plan_direct_read_from_text_index = 1, + use_skip_indexes_on_data_read = 1, + max_threads = 1, + max_block_size = 65505; + +DROP TABLE t_text_index_lwd_bug; From e679eeadcea50011121e64e8a5a2eec018753699 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 18 Mar 2026 17:36:39 +0000 Subject: [PATCH 46/53] Backport #99678 to 26.1: Fix incorrect seek in AsynchronousReadBufferFromFileDescriptor with O_DIRECT --- ...ynchronousReadBufferFromFileDescriptor.cpp | 9 +++-- ...AsynchronousReadBufferFromFileDescriptor.h | 2 +- ...4041_variant_read_with_direct_io.reference | 3 ++ .../04041_variant_read_with_direct_io.sh | 37 +++++++++++++++++++ 4 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/04041_variant_read_with_direct_io.reference create mode 100755 tests/queries/0_stateless/04041_variant_read_with_direct_io.sh diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp index 67c84d028ff8..699b9e629ba4 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp @@ -203,7 +203,7 @@ off_t AsynchronousReadBufferFromFileDescriptor::seek(off_t offset, int whence) } else if (whence == SEEK_CUR) { - new_pos = file_offset_of_buffer_end - (working_buffer.end() - pos) + offset; + new_pos = static_cast(getPosition()) + offset; } else { @@ -211,13 +211,15 @@ off_t AsynchronousReadBufferFromFileDescriptor::seek(off_t offset, int whence) } /// Position is unchanged. 
- if (new_pos + (working_buffer.end() - pos) == file_offset_of_buffer_end) + if (new_pos == static_cast(getPosition())) return new_pos; bool read_from_prefetch = false; while (true) { - if (file_offset_of_buffer_end - working_buffer.size() <= new_pos && new_pos <= file_offset_of_buffer_end) + if (bytes_to_ignore == 0 + && file_offset_of_buffer_end - working_buffer.size() <= new_pos + && new_pos <= file_offset_of_buffer_end) { /// Position is still inside the buffer. /// Probably it is at the end of the buffer - then we will load data on the following 'next' call. @@ -283,6 +285,7 @@ void AsynchronousReadBufferFromFileDescriptor::rewind() working_buffer.resize(0); pos = working_buffer.begin(); file_offset_of_buffer_end = 0; + bytes_to_ignore = 0; } std::optional AsynchronousReadBufferFromFileDescriptor::tryGetFileSize() diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.h b/src/IO/AsynchronousReadBufferFromFileDescriptor.h index e15a41474256..fe4fa55d886f 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.h +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.h @@ -62,7 +62,7 @@ class AsynchronousReadBufferFromFileDescriptor : public ReadBufferFromFileBase off_t getPosition() override { - return file_offset_of_buffer_end - (working_buffer.end() - pos); + return file_offset_of_buffer_end - (working_buffer.end() - pos) + bytes_to_ignore; } /// If 'offset' is small enough to stay in buffer after seek, then true seek in file does not happen. 
diff --git a/tests/queries/0_stateless/04041_variant_read_with_direct_io.reference b/tests/queries/0_stateless/04041_variant_read_with_direct_io.reference new file mode 100644 index 000000000000..3cd40e317c88 --- /dev/null +++ b/tests/queries/0_stateless/04041_variant_read_with_direct_io.reference @@ -0,0 +1,3 @@ +500000 +100000 +100000 diff --git a/tests/queries/0_stateless/04041_variant_read_with_direct_io.sh b/tests/queries/0_stateless/04041_variant_read_with_direct_io.sh new file mode 100755 index 000000000000..d50fa6d943b2 --- /dev/null +++ b/tests/queries/0_stateless/04041_variant_read_with_direct_io.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Tags: long + +# Regression test for incorrect seek in AsynchronousReadBufferFromFileDescriptor +# with O_DIRECT (min_bytes_to_use_direct_io=1). The bug was that getPosition() +# and seek NOOP/in-buffer checks did not account for bytes_to_ignore set by +# O_DIRECT alignment, causing corrupted reads of Variant subcolumns. + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_suspicious_variant_types=1 --max_threads 2 --min_bytes_to_use_direct_io 1" + +$CH_CLIENT -q "drop table if exists test_variant_direct_io;" + +$CH_CLIENT -q "create table test_variant_direct_io (id UInt64, v Variant(String, UInt64, LowCardinality(String), Tuple(a UInt32, b UInt32), Array(UInt64))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity_bytes=10485760, index_granularity=8192;" + +$CH_CLIENT -mq "insert into test_variant_direct_io select number, NULL from numbers(100000); +insert into test_variant_direct_io select number + 100000, number from numbers(100000); +insert into test_variant_direct_io select number + 200000, ('str_' || toString(number))::Variant(String) from numbers(100000); +insert into test_variant_direct_io select number + 300000, ('lc_str_' || toString(number))::LowCardinality(String) from numbers(100000); +insert into test_variant_direct_io select number + 400000, tuple(number, number + 1)::Tuple(a UInt32, b UInt32) from numbers(100000); +insert into test_variant_direct_io select number + 500000, range(number % 20 + 1)::Array(UInt64) from numbers(100000);" + +$CH_CLIENT -q "optimize table test_variant_direct_io final settings mutations_sync=1;" + +# Without the fix, reading v.String here would fail with: +# "Size of deserialized variant column less than the limit" +$CH_CLIENT -q "select v.String from test_variant_direct_io format Null;" + +# Also check that subcolumn reads return the correct count +$CH_CLIENT -q "select count() from test_variant_direct_io where v is not null;" +$CH_CLIENT -q "select count() from test_variant_direct_io where v.String is not null;" +$CH_CLIENT -q "select count() from test_variant_direct_io where v.UInt64 is not null;" + +$CH_CLIENT -q "drop table test_variant_direct_io;" From 0fde68115f10c3be12459e9b1960ffff2cc8b487 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 18 Mar 
2026 18:34:44 +0000 Subject: [PATCH 47/53] Backport #99587 to 26.1: Prune unused columns from ARRAY JOIN --- .../Passes/PruneArrayJoinColumnsPass.cpp | 441 ++++++++++++++++++ .../Passes/PruneArrayJoinColumnsPass.h | 34 ++ src/Analyzer/QueryTreePassManager.cpp | 2 + ...03285_analyzer_array_join_nested.reference | 26 +- .../04039_prune_array_join_columns.reference | 242 ++++++++++ .../04039_prune_array_join_columns.sql | 52 +++ 6 files changed, 783 insertions(+), 14 deletions(-) create mode 100644 src/Analyzer/Passes/PruneArrayJoinColumnsPass.cpp create mode 100644 src/Analyzer/Passes/PruneArrayJoinColumnsPass.h create mode 100644 tests/queries/0_stateless/04039_prune_array_join_columns.reference create mode 100644 tests/queries/0_stateless/04039_prune_array_join_columns.sql diff --git a/src/Analyzer/Passes/PruneArrayJoinColumnsPass.cpp b/src/Analyzer/Passes/PruneArrayJoinColumnsPass.cpp new file mode 100644 index 000000000000..610384caa5b2 --- /dev/null +++ b/src/Analyzer/Passes/PruneArrayJoinColumnsPass.cpp @@ -0,0 +1,441 @@ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include + +namespace DB +{ + +namespace Setting +{ + +extern const SettingsBool enable_unaligned_array_join; + +} + +namespace +{ + +/// Per-expression usage state inside a single ARRAY JOIN node. +struct ExpressionUsage +{ + /// True when the expression is referenced directly (not via tupleElement), + /// meaning all subcolumns are needed, or it has no nested() inner function. + bool fully_used = false; + + /// Used subcolumn names (only meaningful for nested() expressions). + std::unordered_set used_subcolumns; + + /// Subcolumn names from the nested() first argument. Empty if not a nested() expression. 
+ std::vector nested_subcolumn_names; + + bool hasNested() const { return !nested_subcolumn_names.empty(); } + + bool isUsed() const { return fully_used || !used_subcolumns.empty(); } +}; + +/// Key: (ArrayJoinNode raw ptr, column name) → ExpressionUsage. +using ArrayJoinUsageMap = std::unordered_map>; + +/// Set of ArrayJoinNode raw pointers that we are tracking. +using ArrayJoinNodeSet = std::unordered_set; + +/// Map: (ArrayJoinNode raw ptr, column name) → post-pruning column DataType. +using UpdatedTypeMap = std::unordered_map>; + +/// Visitor that marks which ARRAY JOIN expressions and nested subcolumns are used. +class MarkUsedArrayJoinColumnsVisitor : public InDepthQueryTreeVisitorWithContext +{ +public: + using Base = InDepthQueryTreeVisitorWithContext; + + MarkUsedArrayJoinColumnsVisitor(ContextPtr context_, ArrayJoinUsageMap & usage_map_, const ArrayJoinNodeSet & tracked_nodes_) + : Base(std::move(context_)) + , usage_map(usage_map_) + , tracked_nodes(tracked_nodes_) + { + } + + bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child) + { + /// Skip visiting the join expressions list of a tracked ARRAY JOIN node — + /// those are the expression definitions, not references. + if (tracked_nodes.contains(parent.get())) + { + auto * array_join_node = parent->as(); + if (array_join_node && child.get() == array_join_node->getJoinExpressionsNode().get()) + return false; + } + + /// If the parent is tupleElement whose first argument is a column from a tracked + /// ARRAY JOIN, skip visiting children — enterImpl already handles the tupleElement + /// by marking only the specific subcolumn. 
+ auto * function_node = parent->as(); + if (function_node && function_node->getFunctionName() == "tupleElement") + { + const auto & arguments = function_node->getArguments().getNodes(); + if (arguments.size() >= 2) + { + if (auto * column_node = arguments[0]->as()) + { + auto source = column_node->getColumnSourceOrNull(); + if (source && tracked_nodes.contains(source.get())) + return false; + } + } + } + + return true; + } + + void enterImpl(const QueryTreeNodePtr & node) + { + /// Case 1: tupleElement(array_join_col, 'subcolumn_name') — mark specific subcolumn. + if (auto * function_node = node->as()) + { + if (function_node->getFunctionName() != "tupleElement") + return; + + const auto & arguments = function_node->getArguments().getNodes(); + if (arguments.size() < 2) + return; + + auto * column_node = arguments[0]->as(); + auto * constant_node = arguments[1]->as(); + if (!column_node || !constant_node) + return; + + auto source = column_node->getColumnSourceOrNull(); + if (!source || !tracked_nodes.contains(source.get())) + return; + + auto map_it = usage_map.find(source.get()); + if (map_it == usage_map.end()) + return; + + auto expr_it = map_it->second.find(column_node->getColumnName()); + if (expr_it == map_it->second.end() || expr_it->second.fully_used) + return; + + if (expr_it->second.hasNested()) + { + const auto & value = constant_node->getValue(); + if (value.getType() == Field::Types::String) + { + expr_it->second.used_subcolumns.insert(value.safeGet()); + } + else if (value.getType() == Field::Types::UInt64) + { + /// tupleElement uses 1-based indexing. 
+ UInt64 index = value.safeGet(); + if (index >= 1 && index <= expr_it->second.nested_subcolumn_names.size()) + expr_it->second.used_subcolumns.insert(expr_it->second.nested_subcolumn_names[index - 1]); + else + expr_it->second.fully_used = true; + } + else + { + expr_it->second.fully_used = true; + } + } + else + expr_it->second.fully_used = true; + + return; + } + + /// Case 2: direct reference to an ARRAY JOIN column — mark fully used. + auto * column_node = node->as(); + if (!column_node) + return; + + auto source = column_node->getColumnSourceOrNull(); + if (!source || !tracked_nodes.contains(source.get())) + return; + + auto map_it = usage_map.find(source.get()); + if (map_it == usage_map.end()) + return; + + auto expr_it = map_it->second.find(column_node->getColumnName()); + if (expr_it != map_it->second.end()) + expr_it->second.fully_used = true; + } + +private: + ArrayJoinUsageMap & usage_map; + const ArrayJoinNodeSet & tracked_nodes; +}; + +void pruneNestedFunctionArguments( + ColumnNode & column_node, + FunctionNode & function_node, + const ExpressionUsage & expr_usage, + const ContextPtr & context) +{ + auto & nested_args = function_node.getArguments().getNodes(); + const auto & subcolumn_names = expr_usage.nested_subcolumn_names; + size_t num_subcolumns = subcolumn_names.size(); + + /// Find which indices to keep. + std::vector indices_to_keep; + for (size_t i = 0; i < num_subcolumns; ++i) + { + if (expr_usage.used_subcolumns.contains(subcolumn_names[i])) + indices_to_keep.push_back(i); + } + + /// Nothing to prune. + if (indices_to_keep.size() == num_subcolumns) + return; + + /// Keep at least one subcolumn so the expression remains valid. + if (indices_to_keep.empty()) + indices_to_keep.push_back(0); + + /// Build pruned names array and arguments. 
+ Array pruned_names_array; + QueryTreeNodes pruned_args; + pruned_names_array.reserve(indices_to_keep.size()); + pruned_args.reserve(indices_to_keep.size() + 1); + + for (size_t idx : indices_to_keep) + pruned_names_array.push_back(subcolumn_names[idx]); + + auto pruned_names_type = std::make_shared(std::make_shared()); + pruned_args.push_back(std::make_shared(std::move(pruned_names_array), std::move(pruned_names_type))); + + for (size_t idx : indices_to_keep) + pruned_args.push_back(nested_args[idx + 1]); /// +1: first arg is the names array. + + nested_args = std::move(pruned_args); + + /// Re-resolve the function to update its return type. + auto nested_function = FunctionFactory::instance().get("nested", context); + function_node.resolveAsFunction(nested_function->build(function_node.getArgumentColumns())); + + /// Update the ARRAY JOIN column node's type to match the new result. + auto new_result_type = function_node.getResultType(); + auto new_column_type = assert_cast(*new_result_type).getNestedType(); + column_node.setColumnType(std::move(new_column_type)); +} + +/// Visitor that updates reference ColumnNode types and re-resolves tupleElement functions +/// after nested() arguments have been pruned. 
+class UpdateArrayJoinReferenceTypesVisitor : public InDepthQueryTreeVisitorWithContext +{ +public: + using Base = InDepthQueryTreeVisitorWithContext; + + UpdateArrayJoinReferenceTypesVisitor( + ContextPtr context_, + const UpdatedTypeMap & updated_types_, + const ArrayJoinNodeSet & tracked_nodes_) + : Base(std::move(context_)) + , updated_types(updated_types_) + , tracked_nodes(tracked_nodes_) + { + } + + void enterImpl(const QueryTreeNodePtr & node) + { + auto * function_node = node->as(); + if (!function_node || function_node->getFunctionName() != "tupleElement") + return; + + const auto & arguments = function_node->getArguments().getNodes(); + if (arguments.size() < 2) + return; + + auto * column_node = arguments[0]->as(); + if (!column_node) + return; + + auto source = column_node->getColumnSourceOrNull(); + if (!source || !tracked_nodes.contains(source.get())) + return; + + auto node_it = updated_types.find(source.get()); + if (node_it == updated_types.end()) + return; + + auto type_it = node_it->second.find(column_node->getColumnName()); + if (type_it == node_it->second.end()) + return; + + const auto & new_type = type_it->second; + if (column_node->getColumnType()->equals(*new_type)) + return; + + column_node->setColumnType(new_type); + + auto tuple_element_function = FunctionFactory::instance().get("tupleElement", getContext()); + function_node->resolveAsFunction(tuple_element_function->build(function_node->getArgumentColumns())); + } + +private: + const UpdatedTypeMap & updated_types; + const ArrayJoinNodeSet & tracked_nodes; +}; + +} + +void PruneArrayJoinColumnsPass::run(QueryTreeNodePtr & query_tree_node, ContextPtr context) +{ + auto * top_query_node = query_tree_node->as(); + if (!top_query_node) + return; + + const auto & settings = context->getSettingsRef(); + if (settings[Setting::enable_unaligned_array_join]) + return; + + /// Step 1: Find all ARRAY JOIN nodes and build the usage map. 
+ ArrayJoinUsageMap usage_map; + ArrayJoinNodeSet tracked_nodes; + + auto table_expressions = extractTableExpressions(top_query_node->getJoinTree(), /*add_array_join=*/ true); + for (const auto & table_expr : table_expressions) + { + auto * array_join_node = table_expr->as(); + if (!array_join_node) + continue; + + auto & expressions_usage = usage_map[table_expr.get()]; + + for (const auto & join_expr : array_join_node->getJoinExpressions().getNodes()) + { + auto * column_node = join_expr->as(); + if (!column_node || !column_node->hasExpression()) + continue; + + ExpressionUsage expr_usage; + + auto * function_node = column_node->getExpression()->as(); + if (function_node && function_node->getFunctionName() == "nested") + { + const auto & args = function_node->getArguments().getNodes(); + if (args.size() >= 2) + { + if (auto * names_constant = args[0]->as()) + { + const auto & names_array = names_constant->getValue().safeGet(); + for (const auto & name : names_array) + expr_usage.nested_subcolumn_names.push_back(name.safeGet()); + } + } + } + + expressions_usage[column_node->getColumnName()] = std::move(expr_usage); + } + + if (!expressions_usage.empty()) + tracked_nodes.insert(table_expr.get()); + } + + if (tracked_nodes.empty()) + return; + + /// Step 2: Mark used expressions and subcolumns. + MarkUsedArrayJoinColumnsVisitor visitor(context, usage_map, tracked_nodes); + visitor.visit(query_tree_node); + + /// Step 3: Prune. + UpdatedTypeMap updated_types; + + for (auto & [node_ptr, expressions_usage] : usage_map) + { + /// Find the ArrayJoinNode among our table_expressions. + ArrayJoinNode * array_join_node = nullptr; + for (const auto & te : table_expressions) + { + if (te.get() == node_ptr) + { + array_join_node = te->as(); + break; + } + } + if (!array_join_node) + continue; + + auto & join_expressions = array_join_node->getJoinExpressions().getNodes(); + + /// 3a: Remove entire unused ARRAY JOIN expressions. 
+ { + QueryTreeNodes kept; + kept.reserve(join_expressions.size()); + + for (auto & join_expr : join_expressions) + { + auto * column_node = join_expr->as(); + if (!column_node) + { + kept.push_back(std::move(join_expr)); + continue; + } + + auto expr_it = expressions_usage.find(column_node->getColumnName()); + if (expr_it == expressions_usage.end() || expr_it->second.isUsed()) + kept.push_back(std::move(join_expr)); + } + + /// Keep at least one expression to preserve row multiplication. + if (kept.empty() && !join_expressions.empty()) + kept.push_back(std::move(join_expressions[0])); + + join_expressions = std::move(kept); + } + + /// 3b: Prune unused nested() subcolumn arguments. + for (auto & join_expr : join_expressions) + { + auto * column_node = join_expr->as(); + if (!column_node || !column_node->hasExpression()) + continue; + + auto expr_it = expressions_usage.find(column_node->getColumnName()); + if (expr_it == expressions_usage.end()) + continue; + + auto & expr_usage = expr_it->second; + if (expr_usage.fully_used || !expr_usage.hasNested()) + continue; + + auto * function_node = column_node->getExpression()->as(); + if (!function_node) + continue; + + pruneNestedFunctionArguments(*column_node, *function_node, expr_usage, context); + } + + /// Collect post-pruning types for step 3c. + auto & type_map = updated_types[node_ptr]; + for (const auto & join_expr : join_expressions) + { + auto * col_node = join_expr->as(); + if (!col_node) + continue; + type_map[col_node->getColumnName()] = col_node->getColumnType(); + } + } + + /// 3c: Update types of reference ColumnNodes and re-resolve tupleElement functions. 
+ UpdateArrayJoinReferenceTypesVisitor type_updater(context, updated_types, tracked_nodes); + type_updater.visit(query_tree_node); +} + +} diff --git a/src/Analyzer/Passes/PruneArrayJoinColumnsPass.h b/src/Analyzer/Passes/PruneArrayJoinColumnsPass.h new file mode 100644 index 000000000000..f265f2743346 --- /dev/null +++ b/src/Analyzer/Passes/PruneArrayJoinColumnsPass.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +namespace DB +{ + +/** Prune unused ARRAY JOIN expressions and unused subcolumns from `nested()` functions. + * + * 1. If ARRAY JOIN has multiple expressions and some are not referenced + * anywhere in the query, remove the unused expressions. + * + * 2. When a Nested column is used in ARRAY JOIN, the analyzer creates a `nested()` + * function with ALL subcolumns as arguments. This pass removes arguments that + * are not referenced, so that only the needed subcolumns are read from storage. + * + * Example 1: SELECT b FROM t ARRAY JOIN a, b => ARRAY JOIN b + * + * Example 2: Table has n.a, n.b, n.c. 
+ * SELECT n.a FROM t ARRAY JOIN n + * Before: ARRAY JOIN nested(['a','b','c'], n.a, n.b, n.c) AS n + * After: ARRAY JOIN nested(['a'], n.a) AS n + */ +class PruneArrayJoinColumnsPass final : public IQueryTreePass +{ +public: + String getName() override { return "PruneArrayJoinColumns"; } + + String getDescription() override { return "Prune unused ARRAY JOIN expressions and nested() subcolumns"; } + + void run(QueryTreeNodePtr & query_tree_node, ContextPtr context) override; +}; + +} diff --git a/src/Analyzer/QueryTreePassManager.cpp b/src/Analyzer/QueryTreePassManager.cpp index 8e08b931fa49..bc988b740182 100644 --- a/src/Analyzer/QueryTreePassManager.cpp +++ b/src/Analyzer/QueryTreePassManager.cpp @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -266,6 +267,7 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze) /// This pass should be run for the secondary queries /// to ensure that the only required columns are read from VIEWs on the shards. 
manager.addPass(std::make_unique()); + manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); manager.addPass(std::make_unique()); diff --git a/tests/queries/0_stateless/03285_analyzer_array_join_nested.reference b/tests/queries/0_stateless/03285_analyzer_array_join_nested.reference index bd66e2daf4d5..787a1d8c2fa2 100644 --- a/tests/queries/0_stateless/03285_analyzer_array_join_nested.reference +++ b/tests/queries/0_stateless/03285_analyzer_array_join_nested.reference @@ -12,7 +12,7 @@ QUERY id: 0 FUNCTION id: 2, function_name: tupleElement, function_type: ordinary, result_type: String ARGUMENTS LIST id: 3, nodes: 2 - COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(names String, values Int64), source_id: 5 + COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(names String), source_id: 5 CONSTANT id: 6, constant_value: \'names\', constant_value_type: String JOIN TREE ARRAY_JOIN id: 5, is_left: 0 @@ -20,18 +20,17 @@ QUERY id: 0 TABLE id: 7, alias: __table2, table_name: default.hourly JOIN EXPRESSIONS LIST id: 8, nodes: 1 - COLUMN id: 9, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(names String, values Int64), source_id: 5 + COLUMN id: 9, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(names String), source_id: 5 EXPRESSION - FUNCTION id: 10, function_name: nested, function_type: ordinary, result_type: Array(Tuple(names String, values Int64)) + FUNCTION id: 10, function_name: nested, function_type: ordinary, result_type: Array(Tuple(names String)) ARGUMENTS - LIST id: 11, nodes: 3 - CONSTANT id: 12, constant_value: Array_[\'names\', \'values\'], constant_value_type: Array(String) + LIST id: 11, nodes: 2 + CONSTANT id: 12, constant_value: Array_[\'names\'], constant_value_type: Array(String) COLUMN id: 13, column_name: metric.names, result_type: Array(String), source_id: 7 - COLUMN id: 14, column_name: metric.values, result_type: Array(Int64), source_id: 
7 SELECT tupleElement(__array_join_exp_1, \'names\') AS `metric.names` FROM default.hourly AS __table2 -ARRAY JOIN nested(_CAST([\'names\', \'values\'], \'Array(String)\'), __table2.`metric.names`, __table2.`metric.values`) AS __array_join_exp_1 +ARRAY JOIN nested(_CAST([\'names\'], \'Array(String)\'), __table2.`metric.names`) AS __array_join_exp_1 explain query tree dump_ast = 1 SELECT metric.names @@ -44,7 +43,7 @@ QUERY id: 0 FUNCTION id: 2, function_name: tupleElement, function_type: ordinary, result_type: String ARGUMENTS LIST id: 3, nodes: 2 - COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(names String, values Int64), source_id: 5 + COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(names String), source_id: 5 CONSTANT id: 6, constant_value: \'names\', constant_value_type: String JOIN TREE ARRAY_JOIN id: 5, is_left: 0 @@ -52,18 +51,17 @@ QUERY id: 0 TABLE id: 7, alias: __table2, table_name: default.hourly JOIN EXPRESSIONS LIST id: 8, nodes: 1 - COLUMN id: 9, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(names String, values Int64), source_id: 5 + COLUMN id: 9, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(names String), source_id: 5 EXPRESSION - FUNCTION id: 10, function_name: nested, function_type: ordinary, result_type: Array(Tuple(names String, values Int64)) + FUNCTION id: 10, function_name: nested, function_type: ordinary, result_type: Array(Tuple(names String)) ARGUMENTS - LIST id: 11, nodes: 3 - CONSTANT id: 12, constant_value: Array_[\'names\', \'values\'], constant_value_type: Array(String) + LIST id: 11, nodes: 2 + CONSTANT id: 12, constant_value: Array_[\'names\'], constant_value_type: Array(String) COLUMN id: 13, column_name: metric.names, result_type: Array(String), source_id: 7 - COLUMN id: 14, column_name: metric.values, result_type: Array(Int64), source_id: 7 SELECT tupleElement(__array_join_exp_1, \'names\') AS `metric.names` FROM 
default.hourly AS __table2 -ARRAY JOIN nested(_CAST([\'names\', \'values\'], \'Array(String)\'), __table2.`metric.names`, __table2.`metric.values`) AS __array_join_exp_1 +ARRAY JOIN nested(_CAST([\'names\'], \'Array(String)\'), __table2.`metric.names`) AS __array_join_exp_1 -- { echoOn } SELECT nested(['click', 'house'], x.b.first, x.b.second) AS n, toTypeName(n) FROM tab; diff --git a/tests/queries/0_stateless/04039_prune_array_join_columns.reference b/tests/queries/0_stateless/04039_prune_array_join_columns.reference new file mode 100644 index 000000000000..66b8a189b808 --- /dev/null +++ b/tests/queries/0_stateless/04039_prune_array_join_columns.reference @@ -0,0 +1,242 @@ +1 +2 +QUERY id: 0 + PROJECTION COLUMNS + n.a Int64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(a Int64), source_id: 5 + CONSTANT id: 6, constant_value: \'a\', constant_value_type: String + JOIN TREE + ARRAY_JOIN id: 5, is_left: 0 + TABLE EXPRESSION + TABLE id: 7, alias: __table2, table_name: default.t_nested + JOIN EXPRESSIONS + LIST id: 8, nodes: 1 + COLUMN id: 9, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(a Int64), source_id: 5 + EXPRESSION + FUNCTION id: 10, function_name: nested, function_type: ordinary, result_type: Array(Tuple(a Int64)) + ARGUMENTS + LIST id: 11, nodes: 2 + CONSTANT id: 12, constant_value: Array_[\'a\'], constant_value_type: Array(String) + COLUMN id: 13, column_name: n.a, result_type: Array(Int64), source_id: 7 + ORDER BY + LIST id: 14, nodes: 1 + SORT id: 15, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION n.a + FUNCTION id: 16, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 17, nodes: 2 + COLUMN id: 18, column_name: __array_join_exp_1, result_type: Tuple(a Int64), source_id: 5 + CONSTANT id: 19, 
constant_value: \'a\', constant_value_type: String +Expression (Project names) +Header: n.a Int64 + Sorting (Sorting for ORDER BY) + Header: tupleElement(__array_join_exp_1, \'a\'_String) Int64 + Expression ((Before ORDER BY + Projection)) + Header: tupleElement(__array_join_exp_1, \'a\'_String) Int64 + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_1 Tuple(a Int64) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_1 Array(Tuple(a Int64)) + ReadFromMergeTree (default.t_nested) + Header: n.a Array(Int64) +1 3 +2 4 +QUERY id: 0 + PROJECTION COLUMNS + n.a Int64 + n.b Int64 + PROJECTION + LIST id: 1, nodes: 2 + FUNCTION id: 2, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64), source_id: 5 + CONSTANT id: 6, constant_value: \'a\', constant_value_type: String + FUNCTION id: 7, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 9, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64), source_id: 5 + CONSTANT id: 10, constant_value: \'b\', constant_value_type: String + JOIN TREE + ARRAY_JOIN id: 5, is_left: 0 + TABLE EXPRESSION + TABLE id: 11, alias: __table2, table_name: default.t_nested + JOIN EXPRESSIONS + LIST id: 12, nodes: 1 + COLUMN id: 13, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64), source_id: 5 + EXPRESSION + FUNCTION id: 14, function_name: nested, function_type: ordinary, result_type: Array(Tuple(a Int64, b Int64)) + ARGUMENTS + LIST id: 15, nodes: 3 + CONSTANT id: 16, constant_value: Array_[\'a\', \'b\'], constant_value_type: Array(String) + COLUMN id: 17, column_name: n.a, result_type: Array(Int64), source_id: 11 + COLUMN id: 18, column_name: n.b, result_type: Array(Int64), 
source_id: 11 + ORDER BY + LIST id: 19, nodes: 1 + SORT id: 20, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION n.a + FUNCTION id: 21, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 22, nodes: 2 + COLUMN id: 23, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64), source_id: 5 + CONSTANT id: 24, constant_value: \'a\', constant_value_type: String +Expression ((Project names + (Before ORDER BY + Projection) [lifted up part])) +Header: n.a Int64 + n.b Int64 + Sorting (Sorting for ORDER BY) + Header: __array_join_exp_1 Tuple(a Int64, b Int64) + tupleElement(__array_join_exp_1, \'a\'_String) Int64 + Expression ((Before ORDER BY + Projection)) + Header: __array_join_exp_1 Tuple(a Int64, b Int64) + tupleElement(__array_join_exp_1, \'a\'_String) Int64 + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_1 Tuple(a Int64, b Int64) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_1 Array(Tuple(a Int64, b Int64)) + ReadFromMergeTree (default.t_nested) + Header: n.a Array(Int64) + n.b Array(Int64) +(1,3,5) +(2,4,6) +QUERY id: 0 + PROJECTION COLUMNS + n Tuple(a Int64, b Int64, c Int64) + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64, c Int64), source_id: 3 + JOIN TREE + ARRAY_JOIN id: 3, is_left: 0 + TABLE EXPRESSION + TABLE id: 4, alias: __table2, table_name: default.t_nested + JOIN EXPRESSIONS + LIST id: 5, nodes: 1 + COLUMN id: 6, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64, c Int64), source_id: 3 + EXPRESSION + FUNCTION id: 7, function_name: nested, function_type: ordinary, result_type: Array(Tuple(a Int64, b Int64, c Int64)) + ARGUMENTS + LIST id: 8, nodes: 4 + CONSTANT id: 9, constant_value: Array_[\'a\', \'b\', \'c\'], constant_value_type: Array(String) + COLUMN id: 10, 
column_name: n.a, result_type: Array(Int64), source_id: 4 + COLUMN id: 11, column_name: n.b, result_type: Array(Int64), source_id: 4 + COLUMN id: 12, column_name: n.c, result_type: Array(Int64), source_id: 4 + ORDER BY + LIST id: 13, nodes: 1 + SORT id: 14, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION n.a + FUNCTION id: 15, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 16, nodes: 2 + COLUMN id: 17, column_name: __array_join_exp_1, result_type: Tuple(a Int64, b Int64, c Int64), source_id: 3 + CONSTANT id: 18, constant_value: \'a\', constant_value_type: String +Expression (Project names) +Header: n Tuple(a Int64, b Int64, c Int64) + Sorting (Sorting for ORDER BY) + Header: tupleElement(__array_join_exp_1, \'a\'_String) Int64 + __array_join_exp_1 Tuple(a Int64, b Int64, c Int64) + Expression ((Before ORDER BY + Projection)) + Header: tupleElement(__array_join_exp_1, \'a\'_String) Int64 + __array_join_exp_1 Tuple(a Int64, b Int64, c Int64) + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_1 Tuple(a Int64, b Int64, c Int64) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_1 Array(Tuple(a Int64, b Int64, c Int64)) + ReadFromMergeTree (default.t_nested) + Header: n.a Array(Int64) + n.b Array(Int64) + n.c Array(Int64) +1 +1 +QUERY id: 0 + PROJECTION COLUMNS + 1 UInt8 + PROJECTION + LIST id: 1, nodes: 1 + CONSTANT id: 2, constant_value: UInt64_1, constant_value_type: UInt8 + JOIN TREE + ARRAY_JOIN id: 3, is_left: 0 + TABLE EXPRESSION + TABLE id: 4, alias: __table2, table_name: default.t_nested + JOIN EXPRESSIONS + LIST id: 5, nodes: 1 + COLUMN id: 6, alias: __array_join_exp_1, column_name: __array_join_exp_1, result_type: Tuple(a Int64), source_id: 3 + EXPRESSION + FUNCTION id: 7, function_name: nested, function_type: ordinary, result_type: Array(Tuple(a Int64)) + ARGUMENTS + LIST id: 8, nodes: 2 + CONSTANT id: 9, 
constant_value: Array_[\'a\'], constant_value_type: Array(String) + COLUMN id: 10, column_name: n.a, result_type: Array(Int64), source_id: 4 + WHERE + FUNCTION id: 11, function_name: greater, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + FUNCTION id: 13, function_name: tupleElement, function_type: ordinary, result_type: Int64 + ARGUMENTS + LIST id: 14, nodes: 2 + COLUMN id: 15, column_name: __array_join_exp_1, result_type: Tuple(a Int64), source_id: 3 + CONSTANT id: 16, constant_value: \'a\', constant_value_type: String + CONSTANT id: 17, constant_value: UInt64_0, constant_value_type: UInt8 +Expression ((Project names + Projection)) +Header: 1 UInt8 + Filter (WHERE) + Header: + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_1 Tuple(a Int64) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_1 Array(Tuple(a Int64)) + ReadFromMergeTree (default.t_nested) + Header: n.a Array(Int64) +1 +2 +Expression ((Project names + (Before ORDER BY + Projection) [lifted up part])) +Header: tupleElement(n, 1) Int64 + Sorting (Sorting for ORDER BY) + Header: __array_join_exp_1 Tuple(a Int64) + tupleElement(__array_join_exp_1, \'a\'_String) Int64 + Expression ((Before ORDER BY + Projection)) + Header: __array_join_exp_1 Tuple(a Int64) + tupleElement(__array_join_exp_1, \'a\'_String) Int64 + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_1 Tuple(a Int64) + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_1 Array(Tuple(a Int64)) + ReadFromMergeTree (default.t_nested) + Header: n.a Array(Int64) +3 +4 +QUERY id: 0 + PROJECTION COLUMNS + b Int64 + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: __array_join_exp_2, result_type: Int64, source_id: 3 + JOIN TREE + ARRAY_JOIN id: 3, is_left: 0 + TABLE EXPRESSION + TABLE id: 4, alias: __table2, 
table_name: default.t_two_arrays + JOIN EXPRESSIONS + LIST id: 5, nodes: 1 + COLUMN id: 6, alias: __array_join_exp_2, column_name: __array_join_exp_2, result_type: Int64, source_id: 3 + EXPRESSION + COLUMN id: 7, column_name: b, result_type: Array(Int64), source_id: 4 + ORDER BY + LIST id: 8, nodes: 1 + SORT id: 9, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION b + COLUMN id: 10, column_name: __array_join_exp_2, result_type: Int64, source_id: 3 +Expression (Project names) +Header: b Int64 + Sorting (Sorting for ORDER BY) + Header: __array_join_exp_2 Int64 + Expression ((Before ORDER BY + Projection)) + Header: __array_join_exp_2 Int64 + ArrayJoin (ARRAY JOIN) + Header: __array_join_exp_2 Int64 + Expression ((DROP unused columns before ARRAY JOIN + (ARRAY JOIN actions + Change column names to column identifiers))) + Header: __array_join_exp_2 Array(Int64) + ReadFromMergeTree (default.t_two_arrays) + Header: b Array(Int64) diff --git a/tests/queries/0_stateless/04039_prune_array_join_columns.sql b/tests/queries/0_stateless/04039_prune_array_join_columns.sql new file mode 100644 index 000000000000..399479d6f41b --- /dev/null +++ b/tests/queries/0_stateless/04039_prune_array_join_columns.sql @@ -0,0 +1,52 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; + +DROP TABLE IF EXISTS t_nested; +CREATE TABLE t_nested (`n.a` Array(Int64), `n.b` Array(Int64), `n.c` Array(Int64)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t_nested VALUES ([1, 2], [3, 4], [5, 6]); + +-- Only n.a is used — n.b and n.c should not be read. +SELECT n.a FROM t_nested ARRAY JOIN n ORDER BY n.a; + +-- Verify nested() is pruned to only a. +EXPLAIN QUERY TREE SELECT n.a FROM t_nested ARRAY JOIN n ORDER BY n.a; +EXPLAIN header = 1 SELECT n.a FROM t_nested ARRAY JOIN n ORDER BY n.a; + +-- Both n.a and n.b used — n.c should not be read. 
+SELECT n.a, n.b FROM t_nested ARRAY JOIN n ORDER BY n.a; + +EXPLAIN QUERY TREE SELECT n.a, n.b FROM t_nested ARRAY JOIN n ORDER BY n.a; +EXPLAIN header = 1 SELECT n.a, n.b FROM t_nested ARRAY JOIN n ORDER BY n.a; + +-- Direct reference to n — all subcolumns needed. +SELECT n FROM t_nested ARRAY JOIN n ORDER BY n.a; + +EXPLAIN QUERY TREE SELECT n FROM t_nested ARRAY JOIN n ORDER BY n.a; +EXPLAIN header = 1 SELECT n FROM t_nested ARRAY JOIN n ORDER BY n.a; + +-- n used only in WHERE — should still be pruned to only n.a. +SELECT 1 FROM t_nested ARRAY JOIN n WHERE n.a > 0; + +EXPLAIN QUERY TREE SELECT 1 FROM t_nested ARRAY JOIN n WHERE n.a > 0; +EXPLAIN header = 1 SELECT 1 FROM t_nested ARRAY JOIN n WHERE n.a > 0; + +-- Numeric tupleElement index — should prune the same as string access. +SELECT tupleElement(n, 1) FROM t_nested ARRAY JOIN n ORDER BY n.a; +EXPLAIN header = 1 SELECT tupleElement(n, 1) FROM t_nested ARRAY JOIN n ORDER BY n.a; + +DROP TABLE t_nested; + +-- General case: ARRAY JOIN with two independent arrays, only one used. +DROP TABLE IF EXISTS t_two_arrays; +CREATE TABLE t_two_arrays (a Array(Int64), b Array(Int64)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t_two_arrays VALUES ([1, 2], [3, 4]); + +SELECT b FROM t_two_arrays ARRAY JOIN a, b ORDER BY b; + +-- Verify: column a should be pruned from ARRAY JOIN, only b remains. +EXPLAIN QUERY TREE SELECT b FROM t_two_arrays ARRAY JOIN a, b ORDER BY b; + +-- Verify with EXPLAIN header=1 that only b is read from storage. 
+EXPLAIN header = 1 SELECT b FROM t_two_arrays ARRAY JOIN a, b ORDER BY b; + +DROP TABLE t_two_arrays; From 71f5d33cdd1a6e485c2cb81d90d7822ba5902bec Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 18 Mar 2026 23:20:49 +0000 Subject: [PATCH 48/53] Backport #99164 to 26.1: Fix LOGICAL_ERROR due to patch parts column order mismatch --- src/Common/FailPoint.cpp | 1 + .../MergeTree/MergeTreeBlockReadUtils.cpp | 17 +++++++ .../MergeTree/PatchParts/applyPatches.cpp | 8 ++- ...atch_parts_column_order_mismatch.reference | 6 +++ ...4034_patch_parts_column_order_mismatch.sql | 50 +++++++++++++++++++ 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.reference create mode 100644 tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.sql diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index f47c1303cba4..1131c5247111 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -140,6 +140,7 @@ static struct InitFiu REGULAR(rmt_delay_execute_drop_range) \ REGULAR(rmt_delay_commit_part) \ ONCE(local_object_storage_network_error_during_remove) \ + REGULAR(patch_parts_reverse_column_order) ONCE(parallel_replicas_check_read_mode_always) namespace FailPoints diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 69239c1f3c69..9a141e54b25c 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,11 @@ namespace ErrorCodes extern const int NO_SUCH_COLUMN_IN_TABLE; } +namespace FailPoints +{ + extern const char patch_parts_reverse_column_order[]; +} + namespace { @@ -345,6 +351,17 @@ void addPatchPartsColumns( required_virtuals.insert(patch_system_columns.begin(), patch_system_columns.end()); Names 
patch_columns_to_read_names(patch_columns_to_read_set.begin(), patch_columns_to_read_set.end()); + + fiu_do_on(FailPoints::patch_parts_reverse_column_order, + { + /// Simulate non-deterministic NameSet iteration producing different column + /// orderings for different patches. This reproduces the bug fixed in + /// getUpdatedHeader (applyPatches.cpp) where sortColumns() normalizes order + /// before the positional assertCompatibleHeader comparison. + if (i % 2 == 1) + std::reverse(patch_columns_to_read_names.begin(), patch_columns_to_read_names.end()); + }); + result.patch_columns[i] = storage_snapshot->getColumnsByNames(options, patch_columns_to_read_names); } diff --git a/src/Storages/MergeTree/PatchParts/applyPatches.cpp b/src/Storages/MergeTree/PatchParts/applyPatches.cpp index 6b8c2fcdcc70..ce43998a5458 100644 --- a/src/Storages/MergeTree/PatchParts/applyPatches.cpp +++ b/src/Storages/MergeTree/PatchParts/applyPatches.cpp @@ -275,7 +275,13 @@ Block getUpdatedHeader(const PatchesToApply & patches, const NameSet & updated_c header.erase(column.name); } - headers.push_back(std::move(header)); + /// Sort columns by name so that assertCompatibleHeader below compares + /// matching columns at the same positions. Patch blocks may arrive with + /// different column orderings because addPatchPartsColumns collects names + /// from a NameSet (unordered_set) whose iteration order is non-deterministic. + /// Downstream consumers use name-based lookups, so order does not matter + /// for correctness — only for this positional compatibility check. 
+ headers.push_back(header.sortColumns()); } for (size_t i = 1; i < headers.size(); ++i) diff --git a/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.reference b/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.reference new file mode 100644 index 000000000000..4287567495fe --- /dev/null +++ b/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.reference @@ -0,0 +1,6 @@ +1 updated1 99 9.9 999 upd1 +2 updated1 99 9.9 999 upd1 +1 updated2 88 8.8 888 upd2 +2 updated2 88 8.8 888 upd2 +1 updated2 88 8.8 888 upd2 +2 updated2 88 8.8 888 upd2 diff --git a/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.sql b/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.sql new file mode 100644 index 000000000000..3c1d29ede8d4 --- /dev/null +++ b/tests/queries/0_stateless/04034_patch_parts_column_order_mismatch.sql @@ -0,0 +1,50 @@ +-- Regression test for https://github.com/ClickHouse/clickhouse-core-incidents/issues/1021 +-- When multiple patch parts (Merge + Join mode) update the same columns, +-- the column ordering in patch blocks must be deterministic to avoid +-- LOGICAL_ERROR "Block structure mismatch in patch parts stream". +-- +-- The failpoint reverses column order for odd-indexed patches to expose any +-- code relying on positional column matching. Without the sort in +-- getUpdatedHeader, this triggers the bug. + +SET enable_lightweight_update = 1; + +SYSTEM ENABLE FAILPOINT patch_parts_reverse_column_order; + +DROP TABLE IF EXISTS t_patch_order; + +CREATE TABLE t_patch_order (id UInt64, a_col String, b_col UInt64, c_col Float64, d_col UInt32, e_col String) +ENGINE = MergeTree ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 0; + +-- Insert two separate blocks to create two base parts. 
+INSERT INTO t_patch_order VALUES (1, 'hello', 10, 1.5, 100, 'world'); +INSERT INTO t_patch_order VALUES (2, 'foo', 20, 2.5, 200, 'bar'); + +-- First UPDATE: creates Merge-mode patch parts for both base parts. +UPDATE t_patch_order SET a_col = 'updated1', b_col = 99, c_col = 9.9, d_col = 999, e_col = 'upd1' WHERE 1; + +-- Verify patch application works in Merge mode. +SELECT * FROM t_patch_order ORDER BY id; + +-- Merge base parts; patches become Join-mode (apply_patches_on_merge = 0). +OPTIMIZE TABLE t_patch_order FINAL; + +-- Second UPDATE: creates new Merge-mode patch parts for the merged base part. +UPDATE t_patch_order SET a_col = 'updated2', b_col = 88, c_col = 8.8, d_col = 888, e_col = 'upd2' WHERE 1; + +-- This SELECT must apply both Join-mode and Merge-mode patches simultaneously. +-- The failpoint reverses column order for odd-indexed patches. Without the fix, +-- getUpdatedHeader throws LOGICAL_ERROR because it compares patch headers positionally. +SELECT * FROM t_patch_order ORDER BY id; + +-- Materialize patches and verify final state. +ALTER TABLE t_patch_order APPLY PATCHES SETTINGS mutations_sync = 2; +SELECT * FROM t_patch_order ORDER BY id SETTINGS apply_patch_parts = 0; + +SYSTEM DISABLE FAILPOINT patch_parts_reverse_column_order; + +DROP TABLE t_patch_order; From e566bf163fcc4710b2c0b1e9ff1c5d41e0e59267 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 18 Mar 2026 23:44:19 +0000 Subject: [PATCH 49/53] Fix missing backslash in `APPLY_FOR_FAILPOINTS` macro continuation The cherry-picked `REGULAR(patch_parts_reverse_column_order)` line was missing a trailing backslash to continue the multi-line macro, causing a build failure. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- src/Common/FailPoint.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 1131c5247111..4abd260e47b3 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -140,7 +140,7 @@ static struct InitFiu REGULAR(rmt_delay_execute_drop_range) \ REGULAR(rmt_delay_commit_part) \ ONCE(local_object_storage_network_error_during_remove) \ - REGULAR(patch_parts_reverse_column_order) + REGULAR(patch_parts_reverse_column_order) \ ONCE(parallel_replicas_check_read_mode_always) namespace FailPoints From 3358526df337b08e9f48b2e0113d9f9fad53177d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 19 Mar 2026 11:31:21 +0000 Subject: [PATCH 50/53] Update autogenerated version to 26.1.5.41 and contributors --- cmake/autogenerated_versions.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 35b7873e140e..b23fc7bd021f 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54510) +SET(VERSION_REVISION 54511) SET(VERSION_MAJOR 26) SET(VERSION_MINOR 1) -SET(VERSION_PATCH 5) -SET(VERSION_GITHASH 94d63f06ae951bac9413c31cb91ebd5ea02b5066) -SET(VERSION_DESCRIBE v26.1.5.1-stable) -SET(VERSION_STRING 26.1.5.1) +SET(VERSION_PATCH 6) +SET(VERSION_GITHASH d1072851f03c9dcae0bf59f48f5ec9b00405675d) +SET(VERSION_DESCRIBE v26.1.6.1-stable) +SET(VERSION_STRING 26.1.6.1) # end of autochange From fa5f6a7c41eef2126d0a4f7e0cf48fdb6569bc36 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 19 Mar 2026 16:32:28 +0000 Subject: [PATCH 51/53] Backport #99392 to 26.1: Fix implicit index compatibility when upgrading replicated tables from 25.10 --- .../ReplicatedMergeTreeTableMetadata.cpp | 66 ++++- .../ReplicatedMergeTreeTableMetadata.h | 13 +- .../MergeTree/registerStorageMergeTree.cpp | 16 +- src/Storages/StorageReplicatedMergeTree.cpp | 21 +- .../test_implicit_index_upgrade/__init__.py | 0 .../test_implicit_index_upgrade/test.py | 239 ++++++++++++++++++ 6 files changed, 338 insertions(+), 17 deletions(-) create mode 100644 tests/integration/test_implicit_index_upgrade/__init__.py create mode 100644 tests/integration/test_implicit_index_upgrade/test.py diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp index 4bdebb4f5011..b9a9333741e3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.cpp @@ -1,6 +1,8 @@ #include #include #include +#include +#include #include #include #include @@ -244,7 +246,7 @@ void ReplicatedMergeTreeTableMetadata::read(ReadBuffer & in) } } -ReplicatedMergeTreeTableMetadata ReplicatedMergeTreeTableMetadata::parse(const String & s) +ReplicatedMergeTreeTableMetadata ReplicatedMergeTreeTableMetadata::parseRaw(const String & s) { ReplicatedMergeTreeTableMetadata metadata; ReadBufferFromString buf(s); @@ -252,6 +254,54 @@ 
ReplicatedMergeTreeTableMetadata ReplicatedMergeTreeTableMetadata::parse(const S return metadata; } +ReplicatedMergeTreeTableMetadata ReplicatedMergeTreeTableMetadata::parseAndNormalize( + const String & s, + const ColumnsDescription & columns, + bool add_minmax_index_for_numeric_columns, + bool add_minmax_index_for_string_columns, + ContextPtr context) +{ + auto result = parseRaw(s); + + /// Backward compatibility: older replicas (before 25.12) stored implicit indices in Keeper + /// metadata. Newer replicas only store explicit indices. Strip implicit indices from the + /// parsed metadata so that all downstream comparisons work against the new format. + if (result.skip_indices.empty() + || (!add_minmax_index_for_numeric_columns && !add_minmax_index_for_string_columns)) + return result; + + constexpr bool escape_index_filenames = true; /// Does not matter here, we re-serialize the parsed result + auto parsed = IndicesDescription::parse(result.skip_indices, columns, escape_index_filenames, context); + + bool has_implicit = false; + for (auto & index : parsed) + { + if (!index.name.starts_with(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)) + continue; + + String column_name = index.name.substr(strlen(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)); + if (!columns.has(column_name)) + continue; + + const auto & col_type = columns.get(column_name).type; + + /// Only `add_minmax_index_for_numeric_columns` and `add_minmax_index_for_string_columns` + /// need to be checked here. The temporal setting (`add_minmax_index_for_temporal_columns`) + /// was introduced in 26.2 and never stored implicit indices in Keeper metadata. 
+ if ((add_minmax_index_for_numeric_columns && isNumber(col_type)) + || (add_minmax_index_for_string_columns && isString(col_type))) + { + index.is_implicitly_created = true; + has_implicit = true; + } + } + + if (has_implicit) + result.skip_indices = parsed.explicitToString(); + + return result; +} + static void handleTableMetadataMismatch( const std::string & table_name_for_error_message, std::string_view differs_in, @@ -371,18 +421,14 @@ bool ReplicatedMergeTreeTableMetadata::checkEquals( is_equal = false; } + /// Implicit indices are stripped from Keeper metadata during `parseAndNormalize`, + /// so at this point `from_zk.skip_indices` only contains explicit indices. constexpr bool escape_index_filenames = true; /// It doesn't matter here, as we compare parsed strings - String parsed_zk_skip_indices = IndicesDescription::parse(from_zk.skip_indices, columns, escape_index_filenames, context).explicitToString(); + String parsed_zk_skip_indices = IndicesDescription::parse(from_zk.skip_indices, columns, escape_index_filenames, context).allToString(); if (skip_indices != parsed_zk_skip_indices) { - String all_parsed_zk_skip_indices = IndicesDescription::parse(from_zk.skip_indices, columns, escape_index_filenames, context).allToString(); - // Backward compatibility: older replicas included implicit indices in metadata, - // while newer ones exclude them. This check allows comparison between both formats. 
- if (skip_indices != all_parsed_zk_skip_indices) - { - handleTableMetadataMismatch(table_name_for_error_message, "skip indexes", from_zk.skip_indices, parsed_zk_skip_indices, skip_indices, strict_check, logger); - is_equal = false; - } + handleTableMetadataMismatch(table_name_for_error_message, "skip indexes", from_zk.skip_indices, parsed_zk_skip_indices, skip_indices, strict_check, logger); + is_equal = false; } String parsed_zk_projections = ProjectionsDescription::parse(from_zk.projections, columns, context).toString(); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h index bba65b5616b6..ffeaba9cd5ab 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeTableMetadata.h @@ -45,7 +45,18 @@ struct ReplicatedMergeTreeTableMetadata explicit ReplicatedMergeTreeTableMetadata(const MergeTreeData & data, const StorageMetadataPtr & metadata_snapshot); void read(ReadBuffer & in); - static ReplicatedMergeTreeTableMetadata parse(const String & s); + /// Pure deserialization without any backward-compatibility normalization. + static ReplicatedMergeTreeTableMetadata parseRaw(const String & s); + /// Parse and normalize: removes implicit indices from `skip_indices` for backward + /// compatibility with older replicas (before 25.12) that stored them in Keeper. + /// `columns` must match the column set described by the same metadata source as `s` + /// (e.g. entry.columns_str for ALTER entries), not necessarily the current table columns. 
+ static ReplicatedMergeTreeTableMetadata parseAndNormalize( + const String & s, + const ColumnsDescription & columns, + bool add_minmax_index_for_numeric_columns, + bool add_minmax_index_for_string_columns, + ContextPtr context); void write(WriteBuffer & out) const; String toString() const; diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 21feb6719a8d..3486a7a48c21 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -734,7 +734,21 @@ static StoragePtr create(const StorageFactory::Arguments & args) && (metadata.add_minmax_index_for_numeric_columns || metadata.add_minmax_index_for_string_columns) && index_name.starts_with(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)) { - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot create table because index {} uses a reserved index name", index_name); + if (args.mode <= LoadingStrictnessLevel::CREATE) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot create table because index {} uses a reserved index name", index_name); + + /// Backward compatibility: older versions (before 25.12) stored implicit indices + /// on disk as regular indices. Re-mark them so `explicitToString` excludes them. 
+ auto & added = metadata.secondary_indices.back(); + String col_name = index_name.substr(strlen(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)); + if (columns.has(col_name)) + { + const auto & col_type = columns.get(col_name).type; + if ((metadata.add_minmax_index_for_numeric_columns && isNumber(col_type)) + || (metadata.add_minmax_index_for_string_columns && isString(col_type)) + || (metadata.add_minmax_index_for_temporal_columns && isDateOrDate32OrTimeOrTime64OrDateTimeOrDateTime64(col_type))) + added.is_implicitly_created = true; + } } } } diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 6af964c450aa..4e39cb0dd906 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -673,7 +673,7 @@ bool StorageReplicatedMergeTree::checkFixedGranularityInZookeeper(const ZooKeepe { auto zookeeper = getZooKeeper(); String metadata_str = zookeeper->get(zookeeper_path + "/metadata"); - auto metadata_from_zk = ReplicatedMergeTreeTableMetadata::parse(metadata_str); + auto metadata_from_zk = ReplicatedMergeTreeTableMetadata::parseRaw(metadata_str); fixed_granularity = (metadata_from_zk.index_granularity_bytes == 0); }; @@ -1680,7 +1680,11 @@ bool StorageReplicatedMergeTree::checkTableStructureAttempt( Coordination::Stat metadata_stat; String metadata_str = zookeeper->get(fs::path(zookeeper_prefix) / "metadata", &metadata_stat); - auto metadata_from_zk = ReplicatedMergeTreeTableMetadata::parse(metadata_str); + auto metadata_from_zk = ReplicatedMergeTreeTableMetadata::parseAndNormalize( + metadata_str, metadata_snapshot->getColumns(), + metadata_snapshot->add_minmax_index_for_numeric_columns, + metadata_snapshot->add_minmax_index_for_string_columns, + getContext()); bool is_metadata_equal = old_metadata.checkEquals(metadata_from_zk, metadata_snapshot->getColumns(), getStorageID().getNameForLogs(), getContext(), /*check_index_granularity*/ true, strict_check, log.load()); if 
(metadata_version) @@ -6357,7 +6361,11 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer auto zookeeper = getZooKeeper(); auto columns_from_entry = ColumnsDescription::parse(entry.columns_str); - auto metadata_from_entry = ReplicatedMergeTreeTableMetadata::parse(entry.metadata_str); + auto metadata_from_entry = ReplicatedMergeTreeTableMetadata::parseAndNormalize( + entry.metadata_str, columns_from_entry, + current_metadata->add_minmax_index_for_numeric_columns, + current_metadata->add_minmax_index_for_string_columns, + getContext()); MergeTreeData::DataParts parts; @@ -11185,9 +11193,12 @@ void StorageReplicatedMergeTree::applyMetadataChangesToCreateQueryForBackup(cons /// Try to adjust the create query using values from ZooKeeper. auto zookeeper = getZooKeeper(); auto columns_from_entry = ColumnsDescription::parse(zookeeper->get(fs::path(zookeeper_path) / "columns")); - auto metadata_from_entry = ReplicatedMergeTreeTableMetadata::parse(zookeeper->get(fs::path(zookeeper_path) / "metadata")); - auto current_metadata = getInMemoryMetadataPtr(); + auto metadata_from_entry = ReplicatedMergeTreeTableMetadata::parseAndNormalize( + zookeeper->get(fs::path(zookeeper_path) / "metadata"), columns_from_entry, + current_metadata->add_minmax_index_for_numeric_columns, + current_metadata->add_minmax_index_for_string_columns, + getContext()); const auto table_metadata = ReplicatedMergeTreeTableMetadata(*this, current_metadata); auto metadata_diff = table_metadata.checkAndFindDiff(metadata_from_entry, current_metadata->getColumns(), getStorageID().getNameForLogs(), getContext()); auto adjusted_metadata = metadata_diff.getNewMetadata(columns_from_entry, getContext(), *current_metadata); diff --git a/tests/integration/test_implicit_index_upgrade/__init__.py b/tests/integration/test_implicit_index_upgrade/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/integration/test_implicit_index_upgrade/test.py 
b/tests/integration/test_implicit_index_upgrade/test.py new file mode 100644 index 000000000000..3d047811dba7 --- /dev/null +++ b/tests/integration/test_implicit_index_upgrade/test.py @@ -0,0 +1,239 @@ +import pytest +import time + +from helpers.cluster import ClickHouseCluster + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster = ClickHouseCluster(__file__) + # 25.10 stored implicit indices in ZooKeeper metadata. + # Newer versions only store explicit indices, so upgrading + # requires backward-compatible metadata comparison. + cluster.add_instance( + "node", + with_zookeeper=True, + image="clickhouse/clickhouse-server", + tag="25.10", + with_installed_binary=True, + stay_alive=True, + ) + cluster.add_instance( + "node2", + with_zookeeper=True, + stay_alive=True, + ) + cluster.start() + + yield cluster + finally: + cluster.shutdown() + + +def wait_for_active_replica(node, table, timeout=30): + for _ in range(timeout): + is_readonly = node.query( + f"SELECT is_readonly FROM system.replicas WHERE table = '{table}';" + ).strip() + if is_readonly == "0": + return + time.sleep(1) + assert False, f"Replica for {table} is still in readonly mode after {timeout}s" + + +def test_implicit_index_upgrade_numeric(started_cluster): + node = started_cluster.instances["node"] + + node.query("DROP TABLE IF EXISTS test_numeric SYNC;") + node.query( + """ + CREATE TABLE test_numeric ( + key UInt64, + value1 Int32, + value2 Float64, + label String + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_numeric', 'r1') + ORDER BY key + SETTINGS add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=0; + """ + ) + + node.query( + "INSERT INTO test_numeric SELECT number, number % 100, number / 3.14, toString(number) FROM numbers(10000);" + ) + + old_indices = node.query( + "SELECT name FROM system.data_skipping_indices WHERE table = 'test_numeric' ORDER BY name;" + ).strip() + assert "auto_minmax_index_key" in old_indices + assert 
"auto_minmax_index_value1" in old_indices + assert "auto_minmax_index_value2" in old_indices + # String column should not have an implicit index with this setting + assert "auto_minmax_index_label" not in old_indices + + node.restart_with_latest_version() + + assert node.query("SELECT count() FROM test_numeric;").strip() == "10000" + wait_for_active_replica(node, "test_numeric") + + node.query("INSERT INTO test_numeric VALUES (99999, 1, 1.0, 'x');") + assert node.query("SELECT count() FROM test_numeric;").strip() == "10001" + + node.query("DROP TABLE test_numeric SYNC;") + node.restart_with_original_version() + + +def test_implicit_index_upgrade_string(started_cluster): + node = started_cluster.instances["node"] + + node.query("DROP TABLE IF EXISTS test_string SYNC;") + node.query( + """ + CREATE TABLE test_string ( + key UInt64, + label String, + tag String + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_string', 'r1') + ORDER BY key + SETTINGS add_minmax_index_for_numeric_columns=0, add_minmax_index_for_string_columns=1; + """ + ) + + node.query( + "INSERT INTO test_string SELECT number, toString(number), 'tag' FROM numbers(10000);" + ) + + old_indices = node.query( + "SELECT name FROM system.data_skipping_indices WHERE table = 'test_string' ORDER BY name;" + ).strip() + assert "auto_minmax_index_label" in old_indices + assert "auto_minmax_index_tag" in old_indices + # Numeric column should not have an implicit index with this setting + assert "auto_minmax_index_key" not in old_indices + + node.restart_with_latest_version() + + assert node.query("SELECT count() FROM test_string;").strip() == "10000" + wait_for_active_replica(node, "test_string") + + node.query("INSERT INTO test_string VALUES (99999, 'x', 'y');") + assert node.query("SELECT count() FROM test_string;").strip() == "10001" + + node.query("DROP TABLE test_string SYNC;") + node.restart_with_original_version() + + +def test_implicit_index_upgrade_mixed(started_cluster): + node = 
started_cluster.instances["node"] + + node.query("DROP TABLE IF EXISTS test_mixed SYNC;") + node.query( + """ + CREATE TABLE test_mixed ( + key UInt64, + value Int32, + label String, + tag String + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_mixed', 'r1') + ORDER BY key + SETTINGS add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=1; + """ + ) + + node.query( + "INSERT INTO test_mixed SELECT number, number % 100, toString(number), 'tag' FROM numbers(10000);" + ) + + old_indices = node.query( + "SELECT name FROM system.data_skipping_indices WHERE table = 'test_mixed' ORDER BY name;" + ).strip() + assert "auto_minmax_index_key" in old_indices + assert "auto_minmax_index_value" in old_indices + assert "auto_minmax_index_label" in old_indices + assert "auto_minmax_index_tag" in old_indices + + node.restart_with_latest_version() + + assert node.query("SELECT count() FROM test_mixed;").strip() == "10000" + wait_for_active_replica(node, "test_mixed") + + node.query("INSERT INTO test_mixed VALUES (99999, 1, 'x', 'y');") + assert node.query("SELECT count() FROM test_mixed;").strip() == "10001" + + node.query("DROP TABLE test_mixed SYNC;") + node.restart_with_original_version() + + +def test_implicit_index_upgrade_alter_replay(started_cluster): + """Exercise `executeMetadataAlter`: node (25.10) creates a table with implicit + indices, then ALTERs it (adding a column). node2 (latest) joins as a second + replica and must replay the ALTER_METADATA entry whose metadata string was + written by 25.10 and contains implicit indices.""" + node = started_cluster.instances["node"] + node2 = started_cluster.instances["node2"] + + node.query("DROP TABLE IF EXISTS test_alter_replay SYNC;") + node2.query("DROP TABLE IF EXISTS test_alter_replay SYNC;") + + # Create on 25.10 with implicit numeric indices. 
+ node.query( + """ + CREATE TABLE test_alter_replay ( + key UInt64, + value Int32, + label String + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_alter_replay', 'r1') + ORDER BY key + SETTINGS add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=0; + """ + ) + + node.query( + "INSERT INTO test_alter_replay SELECT number, number % 100, toString(number) FROM numbers(10000);" + ) + + # ALTER on 25.10: adds a column. The ALTER_METADATA log entry written to Keeper + # contains skip_indices in old format (implicit indices included). + node.query("ALTER TABLE test_alter_replay ADD COLUMN extra Float64 DEFAULT 0;") + node.query("INSERT INTO test_alter_replay (key, value, label, extra) VALUES (99999, 1, 'x', 3.14);") + + # Upgrade node to latest so both replicas run the new code. + node.restart_with_latest_version() + wait_for_active_replica(node, "test_alter_replay") + + # node2 (latest) joins as second replica — it replays the full replication log + # including the ALTER_METADATA entry written by 25.10. + # Schema must match the post-ALTER state (includes `extra` column). + node2.query( + """ + CREATE TABLE test_alter_replay ( + key UInt64, + value Int32, + label String, + extra Float64 DEFAULT 0 + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_alter_replay', 'r2') + ORDER BY key + SETTINGS add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=0; + """ + ) + + wait_for_active_replica(node2, "test_alter_replay") + + # Verify node2 has the ALTER-added column and all data. 
+ assert node2.query("SELECT count() FROM test_alter_replay;").strip() == "10001" + assert ( + node2.query( + "SELECT extra FROM test_alter_replay WHERE key = 99999;" + ).strip() + == "3.14" + ) + + node.query("DROP TABLE test_alter_replay SYNC;") + node2.query("DROP TABLE test_alter_replay SYNC;") + node.restart_with_original_version() From 9bdc1dec29494e0024adabc1f27a64754878fc24 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 19 Mar 2026 17:34:10 +0000 Subject: [PATCH 52/53] Backport #99976 to 26.1: Fix assertion failure in CRoaring on self-merge of NumericIndexedVector --- ...FunctionGroupNumericIndexedVectorDataBSI.h | 23 +++++++++++++++++++ ...umeric_indexed_vector_self_merge.reference | 2 ++ ...tion_numeric_indexed_vector_self_merge.sql | 11 +++++++++ 3 files changed, 36 insertions(+) create mode 100644 tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.reference create mode 100644 tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.sql diff --git a/src/AggregateFunctions/AggregateFunctionGroupNumericIndexedVectorDataBSI.h b/src/AggregateFunctions/AggregateFunctionGroupNumericIndexedVectorDataBSI.h index a60334afdf3d..6291a9238d33 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupNumericIndexedVectorDataBSI.h +++ b/src/AggregateFunctions/AggregateFunctionGroupNumericIndexedVectorDataBSI.h @@ -463,6 +463,19 @@ class BSINumericIndexedVector */ void pointwiseAddInplace(const BSINumericIndexedVector & rhs) { + /// Self-addition requires a deep copy because the full adder logic below + /// performs in-place XOR on shared bitmaps (`sum->rb_xor(*addend)` where + /// `sum` and `addend` alias the same Roaring bitmap via `shallowCopyFrom`), + /// which triggers an assertion in CRoaring (`assert(x1 != x2)`) and would + /// produce incorrect results (A XOR A = 0) in release builds. 
+ if (this == &rhs) + { + BSINumericIndexedVector copy; + copy.deepCopyFrom(rhs); + pointwiseAddInplace(copy); + return; + } + if (isEmpty()) { deepCopyFrom(rhs); @@ -539,6 +552,16 @@ class BSINumericIndexedVector */ void pointwiseSubtractInplace(const BSINumericIndexedVector & rhs) { + /// Self-subtraction requires a deep copy for the same reason as + /// `pointwiseAddInplace`: in-place XOR on aliased bitmaps is undefined. + if (this == &rhs) + { + BSINumericIndexedVector copy; + copy.deepCopyFrom(rhs); + pointwiseSubtractInplace(copy); + return; + } + auto total_indexes = getAllIndex(); total_indexes->rb_or(*rhs.getAllIndex()); diff --git a/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.reference b/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.reference new file mode 100644 index 000000000000..156baf3abc90 --- /dev/null +++ b/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.reference @@ -0,0 +1,2 @@ +{100:2} +{100:4} diff --git a/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.sql b/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.sql new file mode 100644 index 000000000000..15eb95fe70bd --- /dev/null +++ b/tests/queries/0_stateless/04049_aggregate_function_numeric_indexed_vector_self_merge.sql @@ -0,0 +1,11 @@ +-- Test that self-merge of NumericIndexedVector aggregate states does not trigger +-- assertion failure in CRoaring (x1 != x2 in `roaring_bitmap_xor_inplace`). +-- https://github.com/ClickHouse/ClickHouse/issues/99704 + +-- `multiply` triggers self-merge via exponentiation by squaring (even branch). +SELECT arrayJoin([numericIndexedVectorToMap( + multiply(2, groupNumericIndexedVectorState(100, 1)))]); + +-- Power of 2 forces multiple self-merge iterations. 
+SELECT arrayJoin([numericIndexedVectorToMap( + multiply(4, groupNumericIndexedVectorState(100, 1)))]); From 3e30ea9696cce190f7a008daf672e7394c433b3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 19 Mar 2026 19:33:13 +0100 Subject: [PATCH 53/53] Fix backport build: adapt to 26.1 APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove `add_minmax_index_for_temporal_columns` check from `registerStorageMergeTree` — this field and `isDateOrDate32OrTimeOrTime64OrDateTimeOrDateTime64` don't exist on 26.1 (introduced in master by cea15754af4 for 26.2). The temporal check is irrelevant here since temporal implicit indices were never stored in Keeper metadata by any version that 26.1 would upgrade from. Also fix the cherry-pick merge issue where the outer `if` duplicated the `args.mode <= LoadingStrictnessLevel::CREATE` check, making the backward-compat re-marking code unreachable. https://github.com/ClickHouse/ClickHouse/pull/100073 Co-Authored-By: Claude Opus 4.6 (1M context) --- src/Storages/MergeTree/registerStorageMergeTree.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 3486a7a48c21..90990e60e5f2 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -730,9 +730,8 @@ static StoragePtr create(const StorageFactory::Arguments & args) metadata.secondary_indices.push_back(IndexDescription::getIndexFromAST(index, columns, /* is_implicitly_created */ false, metadata.escape_index_filenames, context)); auto index_name = index->as()->name; - if (args.mode <= LoadingStrictnessLevel::CREATE - && (metadata.add_minmax_index_for_numeric_columns || metadata.add_minmax_index_for_string_columns) - && index_name.starts_with(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)) + auto using_auto_minmax_index = 
metadata.add_minmax_index_for_numeric_columns || metadata.add_minmax_index_for_string_columns; + if (using_auto_minmax_index && index_name.starts_with(IMPLICITLY_ADDED_MINMAX_INDEX_PREFIX)) { if (args.mode <= LoadingStrictnessLevel::CREATE) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot create table because index {} uses a reserved index name", index_name); @@ -745,8 +744,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) { const auto & col_type = columns.get(col_name).type; if ((metadata.add_minmax_index_for_numeric_columns && isNumber(col_type)) - || (metadata.add_minmax_index_for_string_columns && isString(col_type)) - || (metadata.add_minmax_index_for_temporal_columns && isDateOrDate32OrTimeOrTime64OrDateTimeOrDateTime64(col_type))) + || (metadata.add_minmax_index_for_string_columns && isString(col_type))) added.is_implicitly_created = true; } }