diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp index db2e4c558335..26b76784c6ca 100644 --- a/src/Analyzer/FunctionNode.cpp +++ b/src/Analyzer/FunctionNode.cpp @@ -12,6 +12,7 @@ #include #include +#include #include @@ -164,6 +165,13 @@ void FunctionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state buffer << '\n' << std::string(indent + 2, ' ') << "WINDOW\n"; getWindowNode()->dumpTreeImpl(buffer, format_state, indent + 4); } + + if (!settings_changes.empty()) + { + buffer << '\n' << std::string(indent + 2, ' ') << "SETTINGS"; + for (const auto & change : settings_changes) + buffer << fmt::format(" {}={}", change.name, fieldToString(change.value)); + } } bool FunctionNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions compare_options) const @@ -171,7 +179,7 @@ bool FunctionNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions compar const auto & rhs_typed = assert_cast(rhs); if (function_name != rhs_typed.function_name || isAggregateFunction() != rhs_typed.isAggregateFunction() || isOrdinaryFunction() != rhs_typed.isOrdinaryFunction() || isWindowFunction() != rhs_typed.isWindowFunction() - || nulls_action != rhs_typed.nulls_action) + || nulls_action != rhs_typed.nulls_action || settings_changes != rhs_typed.settings_changes) return false; /// is_operator is ignored here because it affects only AST formatting @@ -206,6 +214,17 @@ void FunctionNode::updateTreeHashImpl(HashState & hash_state, CompareOptions com hash_state.update(isWindowFunction()); hash_state.update(nulls_action); + hash_state.update(settings_changes.size()); + for (const auto & change : settings_changes) + { + hash_state.update(change.name.size()); + hash_state.update(change.name); + + const auto & value_dump = change.value.dump(); + hash_state.update(value_dump.size()); + hash_state.update(value_dump); + } + /// is_operator is ignored here because it affects only AST formatting if (!compare_options.compare_types) @@ -230,6 +249,7 @@ 
QueryTreeNodePtr FunctionNode::cloneImpl() const result_function->nulls_action = nulls_action; result_function->wrap_with_nullable = wrap_with_nullable; result_function->is_operator = is_operator; + result_function->settings_changes = settings_changes; return result_function; } @@ -292,6 +312,14 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const function_ast->window_definition = window_node->toAST(new_options); } + if (!settings_changes.empty()) + { + auto settings_ast = make_intrusive(); + settings_ast->changes = settings_changes; + settings_ast->is_standalone = false; + function_ast->arguments->children.push_back(settings_ast); + } + return function_ast; } diff --git a/src/Analyzer/FunctionNode.h b/src/Analyzer/FunctionNode.h index c0005016def6..0ec99c9ab40c 100644 --- a/src/Analyzer/FunctionNode.h +++ b/src/Analyzer/FunctionNode.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB { @@ -204,6 +205,18 @@ class FunctionNode final : public IQueryTreeNode wrap_with_nullable = true; } + /// Get settings changes passed to table function + const SettingsChanges & getSettingsChanges() const + { + return settings_changes; + } + + /// Set settings changes passed as last argument to table function + void setSettingsChanges(SettingsChanges settings_changes_) + { + settings_changes = std::move(settings_changes_); + } + void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override; protected: @@ -228,6 +241,8 @@ class FunctionNode final : public IQueryTreeNode static constexpr size_t arguments_child_index = 1; static constexpr size_t window_child_index = 2; static constexpr size_t children_size = window_child_index + 1; + + SettingsChanges settings_changes; }; } diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index 80caa1c2ba86..284accbb1687 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -699,7 +699,12 @@ QueryTreeNodePtr 
QueryTreeBuilder::buildExpression(const ASTPtr & expression, co { const auto & function_arguments_list = function->arguments->as()->children; for (const auto & argument : function_arguments_list) - function_node->getArguments().getNodes().push_back(buildExpression(argument, context)); + { + if (const auto * ast_set = argument->as()) + function_node->setSettingsChanges(ast_set->changes); + else + function_node->getArguments().getNodes().push_back(buildExpression(argument, context)); + } } if (function->is_window_function) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index f221978682ee..ecffbeaab0ba 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -3861,6 +3861,7 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node, { auto table_function_node_to_resolve_typed = std::make_shared(table_function_argument_function_name); table_function_node_to_resolve_typed->getArgumentsNode() = table_function_argument_function->getArgumentsNode(); + table_function_node_to_resolve_typed->setSettingsChanges(table_function_argument_function->getSettingsChanges()); QueryTreeNodePtr table_function_node_to_resolve = std::move(table_function_node_to_resolve_typed); if (table_function_argument_function_name == "view") diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index fda56c37291d..925d2ccf8319 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -7658,6 +7658,9 @@ Rewrite expressions like 'x IN subquery' to JOIN. This might be useful for optim )", EXPERIMENTAL) \ DECLARE(Bool, object_storage_remote_initiator, false, R"( Execute request to object storage as remote on one of object_storage_cluster nodes. +)", EXPERIMENTAL) \ + DECLARE(String, object_storage_remote_initiator_cluster, "", R"( +Cluster to choose remote initiator, when `object_storage_remote_initiator` is true. When empty, `object_storage_cluster` is used. 
)", EXPERIMENTAL) \ DECLARE(Bool, allow_experimental_iceberg_read_optimization, true, R"( Allow Iceberg read optimization based on Iceberg metadata. diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 55ba5928add7..3e1bb4759792 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -42,9 +42,9 @@ const VersionToSettingsChangesMap & getSettingsChangesHistory() addSettingsChanges(settings_changes_history, "26.1.3.20001.altinityantalya", { {"iceberg_partition_timezone", "", "", "New setting."}, - // {"object_storage_max_nodes", 0, 0, "Antalya: New setting"}, {"s3_propagate_credentials_to_other_storages", false, false, "New setting"}, {"export_merge_tree_part_filename_pattern", "", "{part_name}_{checksum}", "New setting"}, + {"object_storage_remote_initiator_cluster", "", "", "New setting."}, {"iceberg_metadata_staleness_ms", 0, 0, "New setting allowing using cached metadata version at READ operations to prevent fetching from remote catalog"}, }); addSettingsChanges(settings_changes_history, "26.1", @@ -247,26 +247,19 @@ const VersionToSettingsChangesMap & getSettingsChangesHistory() {"object_storage_cluster_join_mode", "allow", "allow", "New setting"}, {"lock_object_storage_task_distribution_ms", 500, 500, "New setting."}, {"allow_retries_in_cluster_requests", false, false, "New setting"}, - // {"object_storage_remote_initiator", false, false, "New setting."}, {"allow_experimental_export_merge_tree_part", false, true, "Turned ON by default for Antalya."}, {"export_merge_tree_part_overwrite_file_if_exists", false, false, "New setting."}, {"export_merge_tree_partition_force_export", false, false, "New setting."}, {"export_merge_tree_partition_max_retries", 3, 3, "New setting."}, {"export_merge_tree_partition_manifest_ttl", 180, 180, "New setting."}, {"export_merge_tree_part_file_already_exists_policy", "skip", "skip", "New setting."}, - // {"iceberg_timezone_for_timestamptz", "UTC", "UTC", 
"New setting."}, {"hybrid_table_auto_cast_columns", true, true, "New setting to automatically cast Hybrid table columns when segments disagree on types. Default enabled."}, {"allow_experimental_hybrid_table", false, false, "Added new setting to allow the Hybrid table engine."}, {"enable_alias_marker", true, true, "New setting."}, - // {"input_format_parquet_use_native_reader_v3", false, true, "Seems stable"}, - // {"input_format_parquet_verify_checksums", true, true, "New setting."}, - // {"output_format_parquet_write_checksums", false, true, "New setting."}, {"export_merge_tree_part_max_bytes_per_file", 0, 0, "New setting."}, {"export_merge_tree_part_max_rows_per_file", 0, 0, "New setting."}, {"export_merge_tree_partition_lock_inside_the_task", false, false, "New setting."}, {"export_merge_tree_partition_system_table_prefer_remote_information", true, true, "New setting."}, - // {"cluster_table_function_split_granularity", "file", "file", "New setting."}, - // {"cluster_table_function_buckets_batch_size", 0, 0, "New setting."}, {"export_merge_tree_part_throw_on_pending_mutations", true, true, "New setting."}, {"export_merge_tree_part_throw_on_pending_patch_parts", true, true, "New setting."}, {"object_storage_cluster", "", "", "Antalya: New setting"}, diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index 3f09a0a595bf..2f1caaa6b974 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -49,11 +49,10 @@ namespace Setting extern const SettingsBool async_query_sending_for_remote; extern const SettingsBool async_socket_for_remote; extern const SettingsBool skip_unavailable_shards; - extern const SettingsBool parallel_replicas_local_plan; - extern const SettingsString cluster_for_parallel_replicas; extern const SettingsNonZeroUInt64 max_parallel_replicas; extern const SettingsUInt64 object_storage_max_nodes; extern const SettingsBool object_storage_remote_initiator; + extern const SettingsString 
object_storage_remote_initiator_cluster; extern const SettingsObjectStorageClusterJoinMode object_storage_cluster_join_mode; } @@ -330,8 +329,6 @@ void IStorageCluster::read( const auto & settings = context->getSettingsRef(); - auto cluster = getClusterImpl(context, cluster_name_from_settings, isObjectStorage() ? settings[Setting::object_storage_max_nodes] : 0); - /// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*) SharedHeader sample_block; @@ -352,9 +349,21 @@ void IStorageCluster::read( updateQueryToSendIfNeeded(query_to_send, storage_snapshot, context); + /// In case the current node is not supposed to initiate the clustered query + /// Sends this query to a remote initiator using the `remote` table function if (settings[Setting::object_storage_remote_initiator]) { - auto storage_and_context = convertToRemote(cluster, context, cluster_name_from_settings, query_to_send); + /// Re-writes queries in the form of: + /// Input: SELECT * FROM iceberg(...) SETTINGS object_storage_cluster='swarm', object_storage_remote_initiator=1 + /// Output: SELECT * FROM remote('remote_host', icebergCluster('swarm', ...) 
+ /// Where `remote_host` is a random host from the cluster which will execute the query + /// This means the initiator node belongs to the same cluster that will execute the query + /// In case remote_initiator_cluster_name is set, the initiator might be set to a different cluster + auto remote_initiator_cluster_name = settings[Setting::object_storage_remote_initiator_cluster].value; + if (remote_initiator_cluster_name.empty()) + remote_initiator_cluster_name = cluster_name_from_settings; + auto remote_initiator_cluster = getClusterImpl(context, remote_initiator_cluster_name); + auto storage_and_context = convertToRemote(remote_initiator_cluster, context, remote_initiator_cluster_name, query_to_send); auto src_distributed = std::dynamic_pointer_cast(storage_and_context.storage); auto modified_query_info = query_info; modified_query_info.cluster = src_distributed->getCluster(); @@ -363,6 +372,8 @@ void IStorageCluster::read( return; } + auto cluster = getClusterImpl(context, cluster_name_from_settings, isObjectStorage() ? 
settings[Setting::object_storage_max_nodes] : 0); + + RestoreQualifiedNamesVisitor::Data data; data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query_to_send->as(), 0)); data.remote_table.database = context->getCurrentDatabase(); @@ -396,6 +407,10 @@ IStorageCluster::RemoteCallVariables IStorageCluster::convertToRemote( const std::string & cluster_name_from_settings, ASTPtr query_to_send) { + /// TODO: Allow to use secret for remote queries + if (!cluster->getSecret().empty()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Can't convert query to remote when cluster uses secret"); + auto host_addresses = cluster->getShardsAddresses(); if (host_addresses.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty cluster {}", cluster_name_from_settings); @@ -417,6 +432,7 @@ IStorageCluster::RemoteCallVariables IStorageCluster::convertToRemote( /// Clean object_storage_remote_initiator setting to avoid infinite remote call auto new_context = Context::createCopy(context); new_context->setSetting("object_storage_remote_initiator", false); + new_context->setSetting("object_storage_remote_initiator_cluster", String("")); auto * select_query = query_to_send->as(); if (!select_query) @@ -436,7 +452,20 @@ IStorageCluster::RemoteCallVariables IStorageCluster::convertToRemote( if (!table_expression) throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't find table expression"); - auto remote_query = makeASTFunction(remote_function_name, make_intrusive(host_name), table_expression->table_function); + boost::intrusive_ptr remote_query; + + if (shard_addresses[0].user_specified) + { // with user/password for cluster access the remote query is executed as this user, add them to the query parameters + remote_query = makeASTFunction(remote_function_name, + make_intrusive(host_name), + table_expression->table_function, + make_intrusive(shard_addresses[0].user), + make_intrusive(shard_addresses[0].password)); + } + else + { // without specified user/password remote query is
executed from default user + remote_query = makeASTFunction(remote_function_name, make_intrusive(host_name), table_expression->table_function); + } table_expression->table_function = remote_query; diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 0c0ad60c3e85..5d8c06418467 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -1057,6 +1057,7 @@ QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info, auto table_function_node = std::make_shared(remote_table_function_node.getFunctionName()); table_function_node->getArgumentsNode() = remote_table_function_node.getArgumentsNode(); + table_function_node->setSettingsChanges(remote_table_function_node.getSettingsChanges()); if (table_expression_modifiers) table_function_node->setTableExpressionModifiers(*table_expression_modifiers); diff --git a/src/TableFunctions/ITableFunctionCluster.h b/src/TableFunctions/ITableFunctionCluster.h index 975322d054b3..920f271f0535 100644 --- a/src/TableFunctions/ITableFunctionCluster.h +++ b/src/TableFunctions/ITableFunctionCluster.h @@ -75,9 +75,11 @@ class ITableFunctionCluster : public Base /// Cluster name is always the first cluster_name = checkAndGetLiteralArgument(args[0], "cluster_name"); - - if (!context->tryGetCluster(cluster_name)) - throw Exception(ErrorCodes::CLUSTER_DOESNT_EXIST, "Requested cluster '{}' not found", cluster_name); + /// The cluster existence check is intentionally omitted here + /// In a query like + /// remote('remote_host', xxxCluster('remote_cluster', ...)) + /// 'remote_cluster' can be defined only on 'remote_host' + /// If the cluster does not exist, the query fails later /// Just cut the first arg (cluster_name) and try to parse other table function arguments as is args.erase(args.begin()); diff --git a/tests/integration/test_s3_cluster/configs/cluster.xml b/tests/integration/test_s3_cluster/configs/cluster.xml index 0452a383a709..7f3dab539985 100644 ---
a/tests/integration/test_s3_cluster/configs/cluster.xml +++ b/tests/integration/test_s3_cluster/configs/cluster.xml @@ -76,6 +76,39 @@ + + + + s0_0_1 + 9000 + foo + bar + + + s0_1_0 + 9000 + foo + bar + + + + + + baz + + + s0_0_1 + 9000 + foo + + + s0_1_0 + 9000 + foo + + + + diff --git a/tests/integration/test_s3_cluster/configs/hidden_clusters.xml b/tests/integration/test_s3_cluster/configs/hidden_clusters.xml new file mode 100644 index 000000000000..8816cca1c79b --- /dev/null +++ b/tests/integration/test_s3_cluster/configs/hidden_clusters.xml @@ -0,0 +1,20 @@ + + + + + + s0_0_1 + 9000 + foo + bar + + + s0_1_0 + 9000 + foo + bar + + + + + diff --git a/tests/integration/test_s3_cluster/configs/users.xml b/tests/integration/test_s3_cluster/configs/users.xml index 4b6ba057ecb1..95d2d329cac0 100644 --- a/tests/integration/test_s3_cluster/configs/users.xml +++ b/tests/integration/test_s3_cluster/configs/users.xml @@ -5,5 +5,9 @@ default 1 + + bar + default + diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py index 77626af827ee..43fd2d48c81f 100644 --- a/tests/integration/test_s3_cluster/test.py +++ b/tests/integration/test_s3_cluster/test.py @@ -116,7 +116,7 @@ def started_cluster(): ) cluster.add_instance( "c2.s0_0_0", - main_configs=["configs/cluster.xml", "configs/named_collections.xml"], + main_configs=["configs/cluster.xml", "configs/named_collections.xml", "configs/hidden_clusters.xml"], user_configs=["configs/users.xml"], macros={"replica": "replica1", "shard": "shard1"}, with_zookeeper=True, @@ -124,7 +124,7 @@ def started_cluster(): ) cluster.add_instance( "c2.s0_0_1", - main_configs=["configs/cluster.xml", "configs/named_collections.xml"], + main_configs=["configs/cluster.xml", "configs/named_collections.xml", "configs/hidden_clusters.xml"], user_configs=["configs/users.xml"], macros={"replica": "replica2", "shard": "shard1"}, with_zookeeper=True, @@ -1218,6 +1218,7 @@ def test_joins(started_cluster): def 
test_object_storage_remote_initiator(started_cluster): node = started_cluster.instances["s0_0_0"] + # Simple cluster query_id = uuid.uuid4().hex result = node.query( f""" @@ -1245,6 +1246,7 @@ def test_object_storage_remote_initiator(started_cluster): # initial node + describe table + remote initiator + 2 subqueries on replicas assert queries == ["5"] + # Cluster with dots in the host names query_id = uuid.uuid4().hex result = node.query( f""" @@ -1271,3 +1273,122 @@ def test_object_storage_remote_initiator(started_cluster): # initial node + describe table + remote initiator + 2 subqueries on replicas assert queries == ["5"] + + users = node.query( + f""" + SELECT DISTINCT hostname, user + FROM clusterAllReplicas('cluster_all', system.query_log) + WHERE type='QueryFinish' AND initial_query_id='{query_id}' + ORDER BY ALL + FORMAT TSV + """ + ).splitlines() + + assert users == ["c2.s0_0_0\tdefault", + "c2.s0_0_1\tdefault", + "s0_0_0\tdefault"] + + # Cluster with user and password + query_id = uuid.uuid4().hex + result = node.query( + f""" + SELECT * from s3Cluster( + 'cluster_with_username_and_password', + 'http://minio1:9001/root/data/{{clickhouse,database}}/*', 'minio', '{minio_secret_key}', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon) + SETTINGS object_storage_remote_initiator=1 + """, + query_id = query_id, + ) + + assert result is not None + + node.query("SYSTEM FLUSH LOGS ON CLUSTER 'cluster_all'") + queries = node.query( + f""" + SELECT count() + FROM clusterAllReplicas('cluster_all', system.query_log) + WHERE type='QueryFinish' AND initial_query_id='{query_id}' + FORMAT TSV + """ + ).splitlines() + + # initial node + describe table + remote initiator + 2 subqueries on replicas + assert queries == ["5"] + + users = node.query( + f""" + SELECT DISTINCT hostname, user + FROM clusterAllReplicas('cluster_all', system.query_log) + WHERE type='QueryFinish' AND initial_query_id='{query_id}' + 
ORDER BY ALL + FORMAT TSV + """ + ).splitlines() + + assert users == ["s0_0_0\tdefault", + "s0_0_1\tfoo", + "s0_1_0\tfoo"] + + # Cluster with secret + query_id = uuid.uuid4().hex + result = node.query_and_get_error( + f""" + SELECT * from s3Cluster( + 'cluster_with_secret', + 'http://minio1:9001/root/data/{{clickhouse,database}}/*', 'minio', '{minio_secret_key}', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon) + SETTINGS object_storage_remote_initiator=1 + """, + query_id = query_id, + ) + + assert "Can't convert query to remote when cluster uses secret" in result + + # Different cluster for remote initiator and query execution + # with `hidden_cluster_with_username_and_password` existed only in `cluster_with_dots` nodes + query_id = uuid.uuid4().hex + + result = node.query( + f""" + SELECT * from s3( + 'http://minio1:9001/root/data/{{clickhouse,database}}/*', 'minio', '{minio_secret_key}', 'CSV', + 'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))') ORDER BY (name, value, polygon) + SETTINGS + object_storage_remote_initiator=1, + object_storage_cluster='hidden_cluster_with_username_and_password', + object_storage_remote_initiator_cluster='cluster_with_dots' + """, + query_id = query_id, + ) + + assert result is not None + + node.query("SYSTEM FLUSH LOGS ON CLUSTER 'cluster_all'") + queries = node.query( + f""" + SELECT count() + FROM clusterAllReplicas('cluster_all', system.query_log) + WHERE type='QueryFinish' AND initial_query_id='{query_id}' + FORMAT TSV + """ + ).splitlines() + + # initial node + describe table + remote initiator + 2 subqueries on replicas + assert queries == ["5"] + + users = node.query( + f""" + SELECT DISTINCT hostname, user + FROM clusterAllReplicas('cluster_all', system.query_log) + WHERE type='QueryFinish' AND initial_query_id='{query_id}' + ORDER BY ALL + FORMAT TSV + """ + ).splitlines() + + # Random host from 'cluster_with_dots' for remote 
query + assert users[0] in ["c2.s0_0_0\tdefault", "c2.s0_0_1\tdefault"] + assert users[1:] == ["s0_0_0\tdefault", + "s0_0_1\tfoo", + "s0_1_0\tfoo"] diff --git a/tests/queries/0_stateless/01625_constraints_index_append.reference b/tests/queries/0_stateless/01625_constraints_index_append.reference index 4593d63d1cf7..b68b514ca8bd 100644 --- a/tests/queries/0_stateless/01625_constraints_index_append.reference +++ b/tests/queries/0_stateless/01625_constraints_index_append.reference @@ -13,7 +13,7 @@ Prewhere info Prewhere filter Prewhere filter column: less(multiply(2, b), 100) - Filter column: and(indexHint(greater(plus(i, 40), 0)), equals(a, 0)) (removed) + Filter column: and(equals(a, 0), indexHint(greater(plus(i, 40), 0))) (removed) Prewhere info Prewhere filter Prewhere filter column: equals(a, 0) @@ -24,7 +24,7 @@ Prewhere info Prewhere filter Prewhere filter column: greaterOrEquals(a, 0) - Filter column: and(indexHint(less(i, 100)), less(multiply(2, b), 100)) (removed) + Filter column: and(less(multiply(2, b), 100), indexHint(less(i, 100))) (removed) Prewhere info Prewhere filter Prewhere filter column: less(multiply(2, b), 100)