diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index f01021bb0..47d0efd59 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake @@ -6,6 +6,7 @@ set(ABSL_INTERNAL_DLL_FILES "algorithm/container.h" "base/attributes.h" "base/call_once.h" + "base/casts.cc" "base/casts.h" "base/config.h" "base/const_init.h" @@ -20,12 +21,11 @@ set(ABSL_INTERNAL_DLL_FILES "base/internal/endian.h" "base/internal/errno_saver.h" "base/internal/hide_ptr.h" - "base/internal/identity.h" "base/internal/iterator_traits.h" "base/internal/low_level_alloc.cc" "base/internal/low_level_alloc.h" "base/internal/low_level_scheduling.h" - "base/internal/nullability_deprecated.h" + "base/internal/nullability_traits.h" "base/internal/per_thread_tls.h" "base/internal/poison.cc" "base/internal/poison.h" @@ -69,6 +69,7 @@ set(ABSL_INTERNAL_DLL_FILES "cleanup/internal/cleanup.h" "container/btree_map.h" "container/btree_set.h" + "container/chunked_queue.h" "container/hash_container_defaults.h" "container/fixed_array.h" "container/flat_hash_map.h" @@ -76,6 +77,7 @@ set(ABSL_INTERNAL_DLL_FILES "container/inlined_vector.h" "container/internal/btree.h" "container/internal/btree_container.h" + "container/internal/chunked_queue.h" "container/internal/common.h" "container/internal/common_policy_traits.h" "container/internal/compressed_tuple.h" @@ -96,6 +98,8 @@ set(ABSL_INTERNAL_DLL_FILES "container/internal/raw_hash_set.h" "container/internal/raw_hash_set_resize_impl.h" "container/internal/tracked.h" + "container/linked_hash_map.h" + "container/linked_hash_set.h" "container/node_hash_map.h" "container/node_hash_set.h" "crc/crc32c.cc" @@ -129,6 +133,8 @@ set(ABSL_INTERNAL_DLL_FILES "debugging/internal/address_is_readable.h" "debugging/internal/addresses.h" "debugging/internal/bounded_utf8_length_sequence.h" + "debugging/internal/borrowed_fixup_buffer.h" + "debugging/internal/borrowed_fixup_buffer.cc" "debugging/internal/decode_rust_punycode.cc" "debugging/internal/decode_rust_punycode.h" 
"debugging/internal/demangle.cc" @@ -160,8 +166,6 @@ set(ABSL_INTERNAL_DLL_FILES "hash/internal/hash.h" "hash/internal/hash.cc" "hash/internal/spy_hash_state.h" - "hash/internal/low_level_hash.h" - "hash/internal/low_level_hash.cc" "hash/internal/weakly_mixed_integer.h" "log/absl_check.h" "log/absl_log.h" @@ -178,6 +182,7 @@ set(ABSL_INTERNAL_DLL_FILES "log/internal/conditions.cc" "log/internal/conditions.h" "log/internal/config.h" + "log/internal/container.h" "log/internal/fnmatch.h" "log/internal/fnmatch.cc" "log/internal/globals.cc" @@ -204,6 +209,7 @@ set(ABSL_INTERNAL_DLL_FILES "log/initialize.cc" "log/initialize.h" "log/log.h" + "log/log_entry.cc" "log/log_entry.h" "log/log_sink.cc" "log/log_sink.h" @@ -213,15 +219,20 @@ set(ABSL_INTERNAL_DLL_FILES "log/vlog_is_on.h" "memory/memory.h" "meta/type_traits.h" + "meta/internal/requires.h" "numeric/bits.h" "numeric/int128.cc" "numeric/int128.h" "numeric/internal/bits.h" "numeric/internal/representation.h" + "profiling/hashtable.cc" + "profiling/hashtable.h" "profiling/internal/exponential_biased.cc" "profiling/internal/exponential_biased.h" "profiling/internal/periodic_sampler.cc" "profiling/internal/periodic_sampler.h" + "profiling/internal/profile_builder.cc" + "profiling/internal/profile_builder.h" "profiling/internal/sample_recorder.h" "random/bernoulli_distribution.h" "random/beta_distribution.h" @@ -291,6 +302,7 @@ set(ABSL_INTERNAL_DLL_FILES "strings/cord_buffer.h" "strings/escaping.cc" "strings/escaping.h" + "strings/internal/append_and_overwrite.h" "strings/internal/charconv_bigint.cc" "strings/internal/charconv_bigint.h" "strings/internal/charconv_parse.cc" @@ -322,6 +334,9 @@ set(ABSL_INTERNAL_DLL_FILES "strings/internal/cordz_update_tracker.h" "strings/internal/damerau_levenshtein_distance.h" "strings/internal/damerau_levenshtein_distance.cc" + "strings/internal/generic_printer.cc" + "strings/internal/generic_printer.h" + "strings/internal/generic_printer_internal.h" "strings/internal/stl_type_traits.h" 
"strings/internal/string_constant.h" "strings/internal/stringify_sink.h" @@ -340,8 +355,6 @@ set(ABSL_INTERNAL_DLL_FILES "strings/str_replace.h" "strings/str_split.cc" "strings/str_split.h" - "strings/string_view.cc" - "strings/string_view.h" "strings/strip.h" "strings/substitute.cc" "strings/substitute.h" @@ -372,6 +385,7 @@ set(ABSL_INTERNAL_DLL_FILES "strings/internal/str_split_internal.h" "strings/internal/utf8.cc" "strings/internal/utf8.h" + "strings/resize_and_overwrite.h" "synchronization/barrier.cc" "synchronization/barrier.h" "synchronization/blocking_counter.cc" @@ -440,9 +454,15 @@ set(ABSL_INTERNAL_DLL_FILES "types/variant.h" "utility/utility.h" "debugging/leak_check.cc" + "strings/string_view.h" ) -if(NOT MSVC) +if(MSVC) + list(APPEND ABSL_INTERNAL_DLL_FILES + "time/internal/cctz/src/time_zone_name_win.cc" + "time/internal/cctz/src/time_zone_name_win.h" + ) +else() list(APPEND ABSL_INTERNAL_DLL_FILES "flags/commandlineflag.cc" "flags/commandlineflag.h" @@ -719,8 +739,10 @@ int main() { return 0; } if(ABSL_INTERNAL_AT_LEAST_CXX20) set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_20) -else() +elseif(ABSL_INTERNAL_AT_LEAST_CXX17) set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17) +else() + message(FATAL_ERROR "The compiler defaults to or is configured for C++ < 17. 
C++ >= 17 is required and Abseil and all libraries that use Abseil must use the same C++ language standard") endif() function(absl_internal_dll_contains) @@ -825,6 +847,9 @@ function(absl_make_dll) ${_dll_libs} ${ABSL_DEFAULT_LINKOPTS} $<$:-llog> + $<$:-ladvapi32> + $<$:-ldbghelp> + $<$:-lbcrypt> ) set_target_properties(${_dll} PROPERTIES LINKER_LANGUAGE "CXX" diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake index 624a3c7e9..61e1ae421 100644 --- a/CMake/AbseilHelpers.cmake +++ b/CMake/AbseilHelpers.cmake @@ -326,7 +326,12 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n") ) if (_build_type STREQUAL "dll") - set(ABSL_CC_LIB_DEPS abseil_dll) + if(${_in_dll}) + set(ABSL_CC_LIB_DEPS abseil_dll) + endif() + if(${_in_test_dll}) + set(ABSL_CC_LIB_DEPS abseil_test_dll) + endif() endif() target_link_libraries(${_NAME} diff --git a/CMakeLists.txt b/CMakeLists.txt index 8d3059d5c..26dc8e747 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -23,8 +23,8 @@ if (POLICY CMP0141) cmake_policy(SET CMP0141 NEW) endif (POLICY CMP0141) -project(absl LANGUAGES CXX VERSION 20250512) -set(ABSL_SOVERSION "2505.0.0") +project(absl LANGUAGES CXX VERSION 20260107) +set(ABSL_SOVERSION "2601.0.0") include(CTest) # Output directory is correct by default for most build setups. 
However, when diff --git a/MODULE.bazel b/MODULE.bazel index 48a65c795..7f542c9d2 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -16,7 +16,7 @@ module( name = "abseil-cpp", - version = "20250512.1", + version = "20260107.1", compatibility_level = 1, ) @@ -25,13 +25,13 @@ cc_configure = use_extension("@rules_cc//cc:extensions.bzl", dev_dependency = True) use_repo(cc_configure, "local_config_cc") -bazel_dep(name = "rules_cc", version = "0.1.1") -bazel_dep(name = "bazel_skylib", version = "1.7.1") -bazel_dep(name = "platforms", version = "0.0.11") +bazel_dep(name = "rules_cc", version = "0.2.9") +bazel_dep(name = "bazel_skylib", version = "1.8.1") +bazel_dep(name = "platforms", version = "1.0.0") bazel_dep( name = "google_benchmark", - version = "1.9.2", + version = "1.9.4", dev_dependency = True, ) diff --git a/Package.swift b/Package.swift index fe934d3cc..5e50632ce 100644 --- a/Package.swift +++ b/Package.swift @@ -48,6 +48,8 @@ let package = Package( // other files "absl/flags/flag_benchmark.lds", "absl/abseil.podspec.gen.py", + "absl/time/internal/cctz/src/time_zone_name_win.cc", + "absl/crc/internal/gen_crc32c_consts.py", ], sources: [ "absl/" diff --git a/absl/abseil.podspec.gen.py b/absl/abseil.podspec.gen.py index e1afa210b..e19f95119 100755 --- a/absl/abseil.podspec.gen.py +++ b/absl/abseil.podspec.gen.py @@ -42,6 +42,7 @@ 'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"', 'USE_HEADERMAP' => 'NO', 'ALWAYS_SEARCH_USER_PATHS' => 'NO', + 'CLANG_CXX_LANGUAGE_STANDARD' => 'c++17', } s.ios.deployment_target = '12.0' s.osx.deployment_target = '10.13' diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h index 6f9c1938f..c0b8a10ae 100644 --- a/absl/algorithm/container.h +++ b/absl/algorithm/container.h @@ -53,7 +53,6 @@ #include "absl/algorithm/algorithm.h" #include "absl/base/config.h" #include "absl/base/macros.h" -#include "absl/base/nullability.h" #include "absl/meta/type_traits.h" namespace absl { @@ -522,7 +521,8 @@ 
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 // Container-based version of the `std::copy()` function to copy a // container's elements into an iterator. template -OutputIterator c_copy(const InputSequence& input, OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_copy(const InputSequence& input, OutputIterator output) { return std::copy(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output); } @@ -532,7 +532,8 @@ OutputIterator c_copy(const InputSequence& input, OutputIterator output) { // Container-based version of the `std::copy_n()` function to copy a // container's first N elements into an iterator. template -OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_copy_n(const C& input, Size n, OutputIterator output) { return std::copy_n(container_algorithm_internal::c_begin(input), n, output); } @@ -541,8 +542,8 @@ OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { // Container-based version of the `std::copy_if()` function to copy // a container's elements satisfying some condition into an iterator. template -OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, - Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_copy_if(const InputSequence& input, OutputIterator output, Pred&& pred) { return std::copy_if(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(pred)); @@ -553,8 +554,8 @@ OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, // Container-based version of the `std::copy_backward()` function to // copy a container's elements in reverse order into an iterator. 
template -BidirectionalIterator c_copy_backward(const C& src, - BidirectionalIterator dest) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 BidirectionalIterator +c_copy_backward(const C& src, BidirectionalIterator dest) { return std::copy_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } @@ -564,7 +565,8 @@ BidirectionalIterator c_copy_backward(const C& src, // Container-based version of the `std::move()` function to move // a container's elements into an iterator. template -OutputIterator c_move(C&& src, OutputIterator dest) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_move(C&& src, + OutputIterator dest) { return std::move(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } @@ -574,7 +576,8 @@ OutputIterator c_move(C&& src, OutputIterator dest) { // Container-based version of the `std::move_backward()` function to // move a container's elements into an iterator in reverse order. template -BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 BidirectionalIterator +c_move_backward(C&& src, BidirectionalIterator dest) { return std::move_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } @@ -585,7 +588,9 @@ BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { // swap a container's elements with another container's elements. Swaps the // first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
template -container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_swap_ranges(C1& c1, C2& c2) { auto first1 = container_algorithm_internal::c_begin(c1); auto last1 = container_algorithm_internal::c_end(c1); auto first2 = container_algorithm_internal::c_begin(c2); @@ -605,8 +610,8 @@ container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { // result in an iterator pointing to the last transformed element in the output // range. template -OutputIterator c_transform(const InputSequence& input, OutputIterator output, - UnaryOp&& unary_op) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_transform( + const InputSequence& input, OutputIterator output, UnaryOp&& unary_op) { return std::transform(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(unary_op)); @@ -617,9 +622,9 @@ OutputIterator c_transform(const InputSequence& input, OutputIterator output, // where N = min(size(c1), size(c2)). template -OutputIterator c_transform(const InputSequence1& input1, - const InputSequence2& input2, OutputIterator output, - BinaryOp&& binary_op) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_transform(const InputSequence1& input1, const InputSequence2& input2, + OutputIterator output, BinaryOp&& binary_op) { auto first1 = container_algorithm_internal::c_begin(input1); auto last1 = container_algorithm_internal::c_end(input1); auto first2 = container_algorithm_internal::c_begin(input2); @@ -638,7 +643,9 @@ OutputIterator c_transform(const InputSequence1& input1, // replace a container's elements of some value with a new value. The container // is modified in place. 
template -void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_replace(Sequence& sequence, + const T& old_value, + const T& new_value) { std::replace(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), old_value, new_value); @@ -650,7 +657,8 @@ void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { // replace a container's elements of some value with a new value based on some // condition. The container is modified in place. template -void c_replace_if(C& c, Pred&& pred, T&& new_value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_replace_if(C& c, Pred&& pred, + T&& new_value) { std::replace_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred), std::forward(new_value)); @@ -662,8 +670,8 @@ void c_replace_if(C& c, Pred&& pred, T&& new_value) { // replace a container's elements of some value with a new value and return the // results within an iterator. template -OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, - T&& new_value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_replace_copy( + const C& c, OutputIterator result, T&& old_value, T&& new_value) { return std::replace_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(old_value), @@ -676,8 +684,8 @@ OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, // to replace a container's elements of some value with a new value based on // some condition, and return the results within an iterator. 
template -OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, - const T& new_value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_replace_copy_if( + const C& c, OutputIterator result, Pred&& pred, const T& new_value) { return std::replace_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred), new_value); @@ -688,7 +696,7 @@ OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, // Container-based version of the `std::fill()` function to fill a // container with some value. template -void c_fill(C& c, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_fill(C& c, const T& value) { std::fill(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), value); } @@ -698,7 +706,8 @@ void c_fill(C& c, const T& value) { // Container-based version of the `std::fill_n()` function to fill // the first N elements in a container with some value. template -void c_fill_n(C& c, Size n, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_fill_n(C& c, Size n, + const T& value) { std::fill_n(container_algorithm_internal::c_begin(c), n, value); } @@ -707,7 +716,7 @@ void c_fill_n(C& c, Size n, const T& value) { // Container-based version of the `std::generate()` function to // assign a container's elements to the values provided by the given generator. template -void c_generate(C& c, Generator&& gen) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_generate(C& c, Generator&& gen) { std::generate(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); @@ -719,8 +728,9 @@ void c_generate(C& c, Generator&& gen) { // assign a container's first N elements to the values provided by the given // generator. 
template -container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, - Generator&& gen) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_generate_n(C& c, Size n, Generator&& gen) { return std::generate_n(container_algorithm_internal::c_begin(c), n, std::forward(gen)); } @@ -736,8 +746,8 @@ container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, // copy a container's elements while removing any elements matching the given // `value`. template -OutputIterator c_remove_copy(const C& c, OutputIterator result, - const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_remove_copy(const C& c, OutputIterator result, const T& value) { return std::remove_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, value); @@ -749,8 +759,8 @@ OutputIterator c_remove_copy(const C& c, OutputIterator result, // to copy a container's elements while removing any elements matching the given // condition. template -OutputIterator c_remove_copy_if(const C& c, OutputIterator result, - Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_remove_copy_if(const C& c, OutputIterator result, Pred&& pred) { return std::remove_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); @@ -762,7 +772,8 @@ OutputIterator c_remove_copy_if(const C& c, OutputIterator result, // copy a container's elements while removing any elements containing duplicate // values. 
template -OutputIterator c_unique_copy(const C& c, OutputIterator result) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_unique_copy(const C& c, OutputIterator result) { return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result); } @@ -770,8 +781,8 @@ OutputIterator c_unique_copy(const C& c, OutputIterator result) { // Overload of c_unique_copy() for using a predicate evaluation other than // `==` for comparing uniqueness of the element values. template -OutputIterator c_unique_copy(const C& c, OutputIterator result, - BinaryPredicate&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_unique_copy(const C& c, OutputIterator result, BinaryPredicate&& pred) { return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); @@ -782,7 +793,7 @@ OutputIterator c_unique_copy(const C& c, OutputIterator result, // Container-based version of the `std::reverse()` function to // reverse a container's elements. template -void c_reverse(Sequence& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_reverse(Sequence& sequence) { std::reverse(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -792,7 +803,8 @@ void c_reverse(Sequence& sequence) { // Container-based version of the `std::reverse()` function to // reverse a container's elements and write them to an iterator range. template -OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_reverse_copy(const C& sequence, OutputIterator result) { return std::reverse_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), result); @@ -805,7 +817,8 @@ OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { // the first element in the container. 
template > -Iterator c_rotate(C& sequence, Iterator middle) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 Iterator c_rotate(C& sequence, + Iterator middle) { return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); } @@ -816,10 +829,10 @@ Iterator c_rotate(C& sequence, Iterator middle) { // shift a container's elements leftward such that the `middle` element becomes // the first element in a new iterator range. template -OutputIterator c_rotate_copy( - const C& sequence, - container_algorithm_internal::ContainerIter middle, - OutputIterator result) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_rotate_copy(const C& sequence, + container_algorithm_internal::ContainerIter middle, + OutputIterator result) { return std::rotate_copy(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), result); @@ -861,7 +874,8 @@ OutputIterator c_sample(const C& c, OutputIterator result, Distance n, // to test whether all elements in the container for which `pred` returns `true` // precede those for which `pred` is `false`. template -bool c_is_partitioned(const C& c, Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_partitioned(const C& c, + Pred&& pred) { return std::is_partitioned(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); @@ -874,7 +888,9 @@ bool c_is_partitioned(const C& c, Pred&& pred) { // which `pred` returns `true` precede all those for which it returns `false`, // returning an iterator to the first element of the second group. 
template -container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_partition(C& c, Pred&& pred) { return std::partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); @@ -903,9 +919,9 @@ container_algorithm_internal::ContainerIter c_stable_partition(C& c, template -std::pair c_partition_copy( - const C& c, OutputIterator1 out_true, OutputIterator2 out_false, - Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 std::pair +c_partition_copy(const C& c, OutputIterator1 out_true, + OutputIterator2 out_false, Pred&& pred) { return std::partition_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), out_true, out_false, std::forward(pred)); @@ -917,8 +933,9 @@ std::pair c_partition_copy( // to return the first element of an already partitioned container for which // the given `pred` is not `true`. template -container_algorithm_internal::ContainerIter c_partition_point(C& c, - Pred&& pred) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_partition_point(C& c, Pred&& pred) { return std::partition_point(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); @@ -933,7 +950,7 @@ container_algorithm_internal::ContainerIter c_partition_point(C& c, // Container-based version of the `std::sort()` function // to sort elements in ascending order of their values. template -void c_sort(C& c) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort(C& c) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } @@ -941,7 +958,7 @@ void c_sort(C& c) { // Overload of c_sort() for performing a `comp` comparison other than the // default `operator<`. 
template -void c_sort(C& c, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort(C& c, LessThan&& comp) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); @@ -972,7 +989,7 @@ void c_stable_sort(C& c, LessThan&& comp) { // Container-based version of the `std::is_sorted()` function // to evaluate whether the given container is sorted in ascending order. template -bool c_is_sorted(const C& c) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_sorted(const C& c) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } @@ -980,7 +997,8 @@ bool c_is_sorted(const C& c) { // c_is_sorted() overload for performing a `comp` comparison other than the // default `operator<`. template -bool c_is_sorted(const C& c, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_sorted(const C& c, + LessThan&& comp) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); @@ -992,7 +1010,7 @@ bool c_is_sorted(const C& c, LessThan&& comp) { // to rearrange elements within a container such that elements before `middle` // are sorted in ascending order. template -void c_partial_sort( +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle) { std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, @@ -1002,7 +1020,7 @@ void c_partial_sort( // Overload of c_partial_sort() for performing a `comp` comparison other than // the default `operator<`. 
template -void c_partial_sort( +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle, LessThan&& comp) { @@ -1019,8 +1037,9 @@ void c_partial_sort( // At most min(result.last - result.first, sequence.last - sequence.first) // elements from the sequence will be stored in the result. template -container_algorithm_internal::ContainerIter -c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), @@ -1030,9 +1049,10 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { // Overload of c_partial_sort_copy() for performing a `comp` comparison other // than the default `operator<`. template -container_algorithm_internal::ContainerIter -c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, - LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, + LessThan&& comp) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), @@ -1046,7 +1066,9 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, // to return the first element within a container that is not sorted in // ascending order as an iterator. 
template -container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_is_sorted_until(C& c) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } @@ -1054,8 +1076,9 @@ container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { // Overload of c_is_sorted_until() for performing a `comp` comparison other than // the default `operator<`. template -container_algorithm_internal::ContainerIter c_is_sorted_until( - C& c, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_is_sorted_until(C& c, LessThan&& comp) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); @@ -1069,7 +1092,7 @@ container_algorithm_internal::ContainerIter c_is_sorted_until( // any order, except that all preceding `nth` will be less than that element, // and all following `nth` will be greater than that element. template -void c_nth_element( +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth) { std::nth_element(container_algorithm_internal::c_begin(sequence), nth, @@ -1079,7 +1102,7 @@ void c_nth_element( // Overload of c_nth_element() for performing a `comp` comparison other than // the default `operator<`. template -void c_nth_element( +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth, LessThan&& comp) { @@ -1098,8 +1121,9 @@ void c_nth_element( // to return an iterator pointing to the first element in a sorted container // which does not compare less than `value`. 
template -container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_lower_bound(Sequence& sequence, const T& value) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value); } @@ -1107,8 +1131,9 @@ container_algorithm_internal::ContainerIter c_lower_bound( // Overload of c_lower_bound() for performing a `comp` comparison other than // the default `operator<`. template -container_algorithm_internal::ContainerIter c_lower_bound( - Sequence& sequence, const T& value, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_lower_bound(Sequence& sequence, const T& value, LessThan&& comp) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward(comp)); @@ -1120,8 +1145,9 @@ container_algorithm_internal::ContainerIter c_lower_bound( // to return an iterator pointing to the first element in a sorted container // which is greater than `value`. template -container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_upper_bound(Sequence& sequence, const T& value) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value); } @@ -1129,8 +1155,9 @@ container_algorithm_internal::ContainerIter c_upper_bound( // Overload of c_upper_bound() for performing a `comp` comparison other than // the default `operator<`. 
template -container_algorithm_internal::ContainerIter c_upper_bound( - Sequence& sequence, const T& value, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_upper_bound(Sequence& sequence, const T& value, LessThan&& comp) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward(comp)); @@ -1142,8 +1169,9 @@ container_algorithm_internal::ContainerIter c_upper_bound( // to return an iterator pair pointing to the first and last elements in a // sorted container which compare equal to `value`. template -container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIterPairType + c_equal_range(Sequence& sequence, const T& value) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value); } @@ -1151,8 +1179,9 @@ c_equal_range(Sequence& sequence, const T& value) { // Overload of c_equal_range() for performing a `comp` comparison other than // the default `operator<`. template -container_algorithm_internal::ContainerIterPairType -c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIterPairType + c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward(comp)); @@ -1164,7 +1193,8 @@ c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) { // to test if any element in the sorted container contains a value equivalent to // 'value'. 
template -bool c_binary_search(const Sequence& sequence, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_binary_search( + const Sequence& sequence, const T& value) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value); @@ -1173,8 +1203,8 @@ bool c_binary_search(const Sequence& sequence, const T& value) { // Overload of c_binary_search() for performing a `comp` comparison other than // the default `operator<`. template -bool c_binary_search(const Sequence& sequence, const T& value, - LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_binary_search( + const Sequence& sequence, const T& value, LessThan&& comp) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value, std::forward(comp)); @@ -1189,7 +1219,8 @@ bool c_binary_search(const Sequence& sequence, const T& value, // Container-based version of the `std::merge()` function // to merge two sorted containers into a single sorted iterator. template -OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_merge(const C1& c1, const C2& c2, OutputIterator result) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1199,8 +1230,8 @@ OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { // Overload of c_merge() for performing a `comp` comparison other than // the default `operator<`. 
template -OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, - LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_merge(const C1& c1, const C2& c2, OutputIterator result, LessThan&& comp) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1236,7 +1267,8 @@ void c_inplace_merge(C& c, // to test whether a sorted container `c1` entirely contains another sorted // container `c2`. template -bool c_includes(const C1& c1, const C2& c2) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_includes(const C1& c1, + const C2& c2) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1246,7 +1278,8 @@ bool c_includes(const C1& c1, const C2& c2) { // Overload of c_includes() for performing a merge using a `comp` other than // `operator<`. template -bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_includes(const C1& c1, const C2& c2, + LessThan&& comp) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1266,7 +1299,8 @@ template ::value, void>::type> -OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_set_union(const C1& c1, const C2& c2, OutputIterator output) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1282,8 +1316,8 @@ template ::value, void>::type> -OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, - LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_union( + const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return 
std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1302,13 +1336,13 @@ template ::value, void>::type> -OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) { // In debug builds, ensure that both containers are sorted with respect to the // default comparator. std::set_intersection requires the containers be sorted // using operator<. - assert(absl::c_is_sorted(c1)); - assert(absl::c_is_sorted(c2)); + ABSL_ASSERT(absl::c_is_sorted(c1)); + ABSL_ASSERT(absl::c_is_sorted(c2)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1324,13 +1358,13 @@ template ::value, void>::type> -OutputIterator c_set_intersection(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_intersection( + const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { // In debug builds, ensure that both containers are sorted with respect to the // default comparator. std::set_intersection requires the containers be sorted // using the same comparator. 
- assert(absl::c_is_sorted(c1, comp)); - assert(absl::c_is_sorted(c2, comp)); + ABSL_ASSERT(absl::c_is_sorted(c1, comp)); + ABSL_ASSERT(absl::c_is_sorted(c2, comp)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1350,8 +1384,8 @@ template ::value, void>::type> -OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_set_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1367,8 +1401,8 @@ template ::value, void>::type> -OutputIterator c_set_difference(const C1& c1, const C2& c2, - OutputIterator output, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_difference( + const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), @@ -1388,8 +1422,8 @@ template ::value, void>::type> -OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, - OutputIterator output) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator +c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_symmetric_difference( container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), @@ -1406,9 +1440,8 @@ template ::value, void>::type> -OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, - OutputIterator output, - LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_symmetric_difference( + const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return std::set_symmetric_difference( 
container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), @@ -1426,7 +1459,8 @@ OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, // Container-based version of the `std::push_heap()` function // to push a value onto a container heap. template -void c_push_heap(RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_push_heap( + RandomAccessContainer& sequence) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1434,7 +1468,8 @@ void c_push_heap(RandomAccessContainer& sequence) { // Overload of c_push_heap() for performing a push operation on a heap using a // `comp` other than `operator<`. template -void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_push_heap( + RandomAccessContainer& sequence, LessThan&& comp) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1445,7 +1480,8 @@ void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { // Container-based version of the `std::pop_heap()` function // to pop a value from a heap container. template -void c_pop_heap(RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_pop_heap( + RandomAccessContainer& sequence) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1453,7 +1489,8 @@ void c_pop_heap(RandomAccessContainer& sequence) { // Overload of c_pop_heap() for performing a pop operation on a heap using a // `comp` other than `operator<`. 
template -void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_pop_heap( + RandomAccessContainer& sequence, LessThan&& comp) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1464,7 +1501,8 @@ void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { // Container-based version of the `std::make_heap()` function // to make a container a heap. template -void c_make_heap(RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_make_heap( + RandomAccessContainer& sequence) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1472,7 +1510,8 @@ void c_make_heap(RandomAccessContainer& sequence) { // Overload of c_make_heap() for performing heap comparisons using a // `comp` other than `operator<` template -void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_make_heap( + RandomAccessContainer& sequence, LessThan&& comp) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1483,7 +1522,8 @@ void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { // Container-based version of the `std::sort_heap()` function // to sort a heap into ascending order (after which it is no longer a heap). 
template -void c_sort_heap(RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort_heap( + RandomAccessContainer& sequence) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1491,7 +1531,8 @@ void c_sort_heap(RandomAccessContainer& sequence) { // Overload of c_sort_heap() for performing heap comparisons using a // `comp` other than `operator<` template -void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort_heap( + RandomAccessContainer& sequence, LessThan&& comp) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1502,7 +1543,8 @@ void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { // Container-based version of the `std::is_heap()` function // to check whether the given container is a heap. template -bool c_is_heap(const RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_heap( + const RandomAccessContainer& sequence) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1510,7 +1552,8 @@ bool c_is_heap(const RandomAccessContainer& sequence) { // Overload of c_is_heap() for performing heap comparisons using a // `comp` other than `operator<` template -bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_heap( + const RandomAccessContainer& sequence, LessThan&& comp) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1521,8 +1564,9 @@ bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { // Container-based version of the `std::is_heap_until()` function // to find the first element in a given container which is not in heap order. 
template -container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_is_heap_until(RandomAccessContainer& sequence) { return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } @@ -1530,8 +1574,9 @@ c_is_heap_until(RandomAccessContainer& sequence) { // Overload of c_is_heap_until() for performing heap comparisons using a // `comp` other than `operator<` template -container_algorithm_internal::ContainerIter -c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 + container_algorithm_internal::ContainerIter + c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); @@ -1626,8 +1671,8 @@ ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 // that capital letters ("A-Z") have ASCII values less than lowercase letters // ("a-z"). template -bool c_lexicographical_compare(const Sequence1& sequence1, - const Sequence2& sequence2) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_lexicographical_compare( + const Sequence1& sequence1, const Sequence2& sequence2) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), @@ -1638,8 +1683,8 @@ bool c_lexicographical_compare(const Sequence1& sequence1, // Overload of c_lexicographical_compare() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. 
template -bool c_lexicographical_compare(const Sequence1& sequence1, - const Sequence2& sequence2, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_lexicographical_compare( + const Sequence1& sequence1, const Sequence2& sequence2, LessThan&& comp) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), @@ -1654,7 +1699,7 @@ bool c_lexicographical_compare(const Sequence1& sequence1, // to rearrange a container's elements into the next lexicographically greater // permutation. template -bool c_next_permutation(C& c) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_next_permutation(C& c) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } @@ -1662,7 +1707,8 @@ bool c_next_permutation(C& c) { // Overload of c_next_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. template -bool c_next_permutation(C& c, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_next_permutation(C& c, + LessThan&& comp) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); @@ -1674,7 +1720,7 @@ bool c_next_permutation(C& c, LessThan&& comp) { // to rearrange a container's elements into the next lexicographically lesser // permutation. template -bool c_prev_permutation(C& c) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_prev_permutation(C& c) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } @@ -1682,7 +1728,8 @@ bool c_prev_permutation(C& c) { // Overload of c_prev_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. 
template -bool c_prev_permutation(C& c, LessThan&& comp) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_prev_permutation(C& c, + LessThan&& comp) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); @@ -1698,7 +1745,8 @@ bool c_prev_permutation(C& c, LessThan&& comp) { // to compute successive values of `value`, as if incremented with `++value` // after each element is written, and write them to the container. template -void c_iota(Sequence& sequence, const T& value) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_iota(Sequence& sequence, + const T& value) { std::iota(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), value); } @@ -1713,7 +1761,8 @@ void c_iota(Sequence& sequence, const T& value) { // absl::decay_t. As a user of this function you can casually read // this as "returns T by value" and assume it does the right thing. template -decay_t c_accumulate(const Sequence& sequence, T&& init) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t c_accumulate( + const Sequence& sequence, T&& init) { return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init)); @@ -1722,8 +1771,8 @@ decay_t c_accumulate(const Sequence& sequence, T&& init) { // Overload of c_accumulate() for using a binary operations other than // addition for computing the accumulation. template -decay_t c_accumulate(const Sequence& sequence, T&& init, - BinaryOp&& binary_op) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t c_accumulate( + const Sequence& sequence, T&& init, BinaryOp&& binary_op) { return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init), @@ -1739,8 +1788,8 @@ decay_t c_accumulate(const Sequence& sequence, T&& init, // absl::decay_t. 
As a user of this function you can casually read // this as "returns T by value" and assume it does the right thing. template -decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, - T&& sum) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t c_inner_product( + const Sequence1& factors1, const Sequence2& factors2, T&& sum) { return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), @@ -1752,8 +1801,9 @@ decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, // the product between the two container's element pair). template -decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, - T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t c_inner_product( + const Sequence1& factors1, const Sequence2& factors2, T&& sum, + BinaryOp1&& op1, BinaryOp2&& op2) { return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), @@ -1767,8 +1817,8 @@ decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, // function to compute the difference between each element and the one preceding // it and write it to an iterator. template -OutputIt c_adjacent_difference(const InputSequence& input, - OutputIt output_first) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt +c_adjacent_difference(const InputSequence& input, OutputIt output_first) { return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); @@ -1777,8 +1827,8 @@ OutputIt c_adjacent_difference(const InputSequence& input, // Overload of c_adjacent_difference() for using a binary operation other than // subtraction to compute the adjacent difference. 
template -OutputIt c_adjacent_difference(const InputSequence& input, - OutputIt output_first, BinaryOp&& op) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt c_adjacent_difference( + const InputSequence& input, OutputIt output_first, BinaryOp&& op) { return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); @@ -1791,7 +1841,8 @@ OutputIt c_adjacent_difference(const InputSequence& input, // to an iterator. The partial sum is the sum of all element values so far in // the sequence. template -OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt +c_partial_sum(const InputSequence& input, OutputIt output_first) { return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); @@ -1800,8 +1851,8 @@ OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { // Overload of c_partial_sum() for using a binary operation other than addition // to compute the "partial sum". template -OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, - BinaryOp&& op) { +ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt c_partial_sum( + const InputSequence& input, OutputIt output_first, BinaryOp&& op) { return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); diff --git a/absl/base/attributes.h b/absl/base/attributes.h index d009f6d49..77482e5f4 100644 --- a/absl/base/attributes.h +++ b/absl/base/attributes.h @@ -553,7 +553,7 @@ // // Prevents the compiler from complaining about variables that appear unused. // -// Deprecated: Use the standard C++17 `[[maybe_unused]` instead. +// Deprecated: Use the standard C++17 `[[maybe_unused]]` instead. 
// // Due to differences in positioning requirements between the old, compiler // specific __attribute__ syntax and the now standard `[[maybe_unused]]`, this @@ -580,7 +580,11 @@ // Instructs the compiler not to use natural alignment for a tagged data // structure, but instead to reduce its alignment to 1. // -// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing +// Use of this attribute is HIGHLY DISCOURAGED. Taking the address of or +// binding a reference to any unaligned member is UB, and it is very easy to +// do so unintentionally when passing such members as function arguments. +// +// DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing // so can cause atomic variables to be mis-aligned and silently violate // atomicity on x86. // diff --git a/absl/base/casts.cc b/absl/base/casts.cc new file mode 100644 index 000000000..d864a8c55 --- /dev/null +++ b/absl/base/casts.cc @@ -0,0 +1,61 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/base/casts.h" + +#include +#include + +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" + +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE +#include +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN + +namespace base_internal { + +namespace { + +std::string DemangleCppString(const char* mangled) { + std::string out; + int status = 0; + char* demangled = nullptr; +#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE + demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status); +#endif + if (status == 0 && demangled != nullptr) { + out.append(demangled); + free(demangled); + } else { + out.append(mangled); + } + return out; +} + +} // namespace + +void BadDownCastCrash(const char* source_type, const char* target_type) { + ABSL_RAW_LOG(FATAL, "down cast from %s to %s failed", + DemangleCppString(source_type).c_str(), + DemangleCppString(target_type).c_str()); +} + +} // namespace base_internal + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/base/casts.h b/absl/base/casts.h index e0b11bbe4..480855ac3 100644 --- a/absl/base/casts.h +++ b/absl/base/casts.h @@ -33,8 +33,11 @@ #include // For std::bit_cast. #endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L -#include "absl/base/internal/identity.h" +#include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/macros.h" +#include "absl/base/optimization.h" +#include "absl/base/options.h" #include "absl/meta/type_traits.h" namespace absl { @@ -90,9 +93,26 @@ ABSL_NAMESPACE_BEGIN // // Such implicit cast chaining may be useful within template logic. 
template -constexpr To implicit_cast(typename absl::internal::type_identity_t to) { +constexpr std::enable_if_t< + !type_traits_internal::IsView, std::remove_cv_t>>::value, + To> +implicit_cast(absl::type_identity_t to) { return to; } +template +constexpr std::enable_if_t< + type_traits_internal::IsView, + std::remove_cv_t>>::value, + To> +implicit_cast(absl::type_identity_t to ABSL_ATTRIBUTE_LIFETIME_BOUND) { + return to; +} +template +constexpr std::enable_if_t, To> implicit_cast( + absl::type_identity_t to ABSL_ATTRIBUTE_LIFETIME_BOUND) { + return std::forward>(to); +} // bit_cast() // @@ -174,6 +194,112 @@ inline Dest bit_cast(const Source& source) { #endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L +namespace base_internal { + +[[noreturn]] ABSL_ATTRIBUTE_NOINLINE void BadDownCastCrash( + const char* source_type, const char* target_type); + +template +inline void ValidateDownCast(From* f ABSL_ATTRIBUTE_UNUSED) { + // Assert only if RTTI is enabled and in debug mode or hardened asserts are + // enabled. +#ifdef ABSL_INTERNAL_HAS_RTTI +#if !defined(NDEBUG) || (ABSL_OPTION_HARDENED == 1 || ABSL_OPTION_HARDENED == 2) + // Suppress erroneous nonnull comparison warning on older GCC. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnonnull-compare" +#endif + if (ABSL_PREDICT_FALSE(f != nullptr && dynamic_cast(f) == nullptr)) { +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif + absl::base_internal::BadDownCastCrash( + typeid(*f).name(), typeid(std::remove_pointer_t).name()); + } +#endif +#endif +} + +} // namespace base_internal + +// An "upcast", i.e. a conversion from a pointer to an object to a pointer to a +// base subobject, always succeeds if the base is unambiguous and accessible, +// and so it's fine to use implicit_cast. +// +// A "downcast", i.e. 
a conversion from a pointer to an object to a pointer +// to a more-derived object that may contain the original object as a base +// subobject, cannot safely be done using static_cast, because you do not +// generally know whether the source object is really the base subobject of +// a containing, more-derived object of the target type. Thus, when you +// downcast in a polymorphic type hierarchy, you should use the following +// function template. +// +// This function only returns null when the input is null. In debug mode, we +// use dynamic_cast to double-check whether the downcast is legal (we die if +// it's not). In normal mode, we do the efficient static_cast instead. Because +// the process will die in debug mode, it's important to test to make sure the +// cast is legal before calling this function! +// +// dynamic_cast should be avoided except as allowed by the style guide +// (https://google.github.io/styleguide/cppguide.html#Run-Time_Type_Information__RTTI_). + +template // use like this: down_cast(foo); +[[nodiscard]] +inline To down_cast(From* f) { // so we only accept pointers + static_assert(std::is_pointer::value, "target type not a pointer"); + // dynamic_cast allows casting to the same type or a more cv-qualified + // version of the same type without them being polymorphic. + if constexpr (!std::is_same>, + std::remove_cv_t>::value) { + static_assert(std::is_polymorphic::value, + "source type must be polymorphic"); + static_assert(std::is_polymorphic>::value, + "target type must be polymorphic"); + } + static_assert( + std::is_convertible>*, + std::remove_cv_t*>::value, + "target type not derived from source type"); + + absl::base_internal::ValidateDownCast(f); + + return static_cast(f); +} + +// Overload of down_cast for references. Use like this: +// absl::down_cast(foo). The code is slightly convoluted because we're still +// using the pointer form of dynamic cast. (The reference form throws an +// exception if it fails.) 
+// +// There's no need for a special const overload either for the pointer +// or the reference form. If you call down_cast with a const T&, the +// compiler will just bind From to const T. +template +[[nodiscard]] +inline To down_cast(From& f) { + static_assert(std::is_lvalue_reference::value, + "target type not a reference"); + // dynamic_cast allows casting to the same type or a more cv-qualified + // version of the same type without them being polymorphic. + if constexpr (!std::is_same>, + std::remove_cv_t>::value) { + static_assert(std::is_polymorphic::value, + "source type must be polymorphic"); + static_assert(std::is_polymorphic>::value, + "target type must be polymorphic"); + } + static_assert( + std::is_convertible>*, + std::remove_cv_t*>::value, + "target type not derived from source type"); + + absl::base_internal::ValidateDownCast*>( + std::addressof(f)); + + return static_cast(f); +} + ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/base/config.h b/absl/base/config.h index 1a9bc591d..4f155e205 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -117,7 +117,7 @@ // // LTS releases can be obtained from // https://github.com/abseil/abseil-cpp/releases. -#define ABSL_LTS_RELEASE_VERSION 20250512 +#define ABSL_LTS_RELEASE_VERSION 20260107 #define ABSL_LTS_RELEASE_PATCH_LEVEL 1 // Helper macro to convert a CPP variable to a string literal. 
@@ -237,6 +237,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error ABSL_HAVE_TLS cannot be directly set #elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) #define ABSL_HAVE_TLS 1 +#elif defined(__INTEL_LLVM_COMPILER) +#define ABSL_HAVE_TLS 1 #endif // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE @@ -358,10 +360,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // Darwin (macOS and iOS) __APPLE__ // Akaros (http://akaros.org) __ros__ // Windows _WIN32 -// NaCL __native_client__ // AsmJS __asmjs__ // WebAssembly (Emscripten) __EMSCRIPTEN__ // Fuchsia __Fuchsia__ +// WebAssembly (WASI) _WASI_EMULATED_MMAN (implies __wasi__) // // Note that since Android defines both __ANDROID__ and __linux__, one // may probe for either Linux or Android by simply testing for __linux__. @@ -372,12 +374,13 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || // POSIX.1-2001. #ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set -#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ - defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \ - defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) || \ - defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) || \ - defined(__VXWORKS__) || defined(__hexagon__) || defined(__XTENSA__) +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(_AIX) || defined(__ros__) || defined(__asmjs__) || \ + defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || defined(__sun) || \ + defined(__myriad2__) || defined(__HAIKU__) || defined(__OpenBSD__) || \ + defined(__NetBSD__) || defined(__QNX__) || defined(__VXWORKS__) || \ + defined(__hexagon__) || defined(__XTENSA__) || \ + defined(_WASI_EMULATED_MMAN) #define ABSL_HAVE_MMAP 1 #endif @@ -453,8 +456,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || 
// WASI doesn't support signals #elif defined(__Fuchsia__) // Signals don't exist on fuchsia. -#elif defined(__native_client__) -// Signals don't exist on hexagon/QuRT #elif defined(__hexagon__) #else // other standard libraries @@ -525,20 +526,11 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_USES_STD_ANY 1 #define ABSL_HAVE_STD_OPTIONAL 1 #define ABSL_USES_STD_OPTIONAL 1 +#define ABSL_HAVE_STD_STRING_VIEW 1 +#define ABSL_USES_STD_STRING_VIEW 1 #define ABSL_HAVE_STD_VARIANT 1 #define ABSL_USES_STD_VARIANT 1 -// ABSL_HAVE_STD_STRING_VIEW -// -// Deprecated: always defined to 1. -// std::string_view was added in C++17, which means all versions of C++ -// supported by Abseil have it. -#ifdef ABSL_HAVE_STD_STRING_VIEW -#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." -#else -#define ABSL_HAVE_STD_STRING_VIEW 1 -#endif - // ABSL_HAVE_STD_ORDERING // // Checks whether C++20 std::{partial,weak,strong}_ordering are available. @@ -555,20 +547,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #define ABSL_HAVE_STD_ORDERING 1 #endif -// ABSL_USES_STD_STRING_VIEW -// -// Indicates whether absl::string_view is an alias for std::string_view. -#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) -#error options.h is misconfigured. -#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 -#undef ABSL_USES_STD_STRING_VIEW -#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ - ABSL_OPTION_USE_STD_STRING_VIEW == 2 -#define ABSL_USES_STD_STRING_VIEW 1 -#else -#error options.h is misconfigured. 
-#endif - // ABSL_USES_STD_ORDERING // // Indicates whether absl::{partial,weak,strong}_ordering are aliases for the @@ -754,7 +732,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE #error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set #elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__)) -#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0 +#undef ABSL_INTERNAL_HAS_CXA_DEMANGLE #elif defined(__GNUC__) #define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1 #elif defined(__clang__) && !defined(_MSC_VER) diff --git a/absl/base/internal/dynamic_annotations.h b/absl/base/internal/dynamic_annotations.h index b23c5ec1c..537a2fe67 100644 --- a/absl/base/internal/dynamic_annotations.h +++ b/absl/base/internal/dynamic_annotations.h @@ -89,7 +89,7 @@ #endif // Memory annotations are also made available to LLVM's Memory Sanitizer -#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#if defined(ABSL_HAVE_MEMORY_SANITIZER) #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 #endif diff --git a/absl/base/internal/iterator_traits.h b/absl/base/internal/iterator_traits.h index 472c43688..5fa4df813 100644 --- a/absl/base/internal/iterator_traits.h +++ b/absl/base/internal/iterator_traits.h @@ -60,6 +60,10 @@ template using IsAtLeastIterator = std::is_convertible, IteratorTag>; +template +using IsAtLeastInputIterator = + IsAtLeastIterator; + template using IsAtLeastForwardIterator = IsAtLeastIterator; diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc index 158b60982..a5bd71da6 100644 --- a/absl/base/internal/low_level_alloc.cc +++ b/absl/base/internal/low_level_alloc.cc @@ -19,6 +19,9 @@ #include "absl/base/internal/low_level_alloc.h" +#include + +#include #include #include "absl/base/call_once.h" @@ -219,6 +222,32 @@ struct LowLevelAlloc::Arena { uint32_t random ABSL_GUARDED_BY(mu); }; +// --------------------------------------------------------------- +// An 
async-signal-safe arena for LowLevelAlloc +static std::atomic g_sig_safe_arena; + +base_internal::LowLevelAlloc::Arena *SigSafeArena() { + return g_sig_safe_arena.load(std::memory_order_acquire); +} + +void InitSigSafeArena() { + if (SigSafeArena() == nullptr) { + uint32_t flags = 0; +#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING + flags |= base_internal::LowLevelAlloc::kAsyncSignalSafe; +#endif + base_internal::LowLevelAlloc::Arena *new_arena = + base_internal::LowLevelAlloc::NewArena(flags); + base_internal::LowLevelAlloc::Arena *old_value = nullptr; + if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena, + std::memory_order_release, + std::memory_order_relaxed)) { + // We lost a race to allocate an arena; deallocate. + base_internal::LowLevelAlloc::DeleteArena(new_arena); + } + } +} + namespace { // Static storage space for the lazily-constructed, default global arena // instances. We require this space because the whole point of LowLevelAlloc @@ -289,11 +318,11 @@ class ABSL_SCOPED_LOCKABLE ArenaLock { mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; } #endif - arena_->mu.Lock(); + arena_->mu.lock(); } ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } void Leave() ABSL_UNLOCK_FUNCTION() { - arena_->mu.Unlock(); + arena_->mu.unlock(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (mask_valid_) { const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr); @@ -544,7 +573,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { } // we unlock before mmap() both because mmap() may call a callback hook, // and because it may be slow. 
- arena->mu.Unlock(); + arena->mu.unlock(); // mmap generous 64K chunks to decrease // the chances/impact of fragmentation: size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); @@ -583,7 +612,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { #endif #endif // __linux__ #endif // _WIN32 - arena->mu.Lock(); + arena->mu.lock(); s = reinterpret_cast(new_pages); s->header.size = new_pages_size; // Pretend the block is allocated; call AddToFreelist() to free it. diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h index c2f1f25d8..23218dd5a 100644 --- a/absl/base/internal/low_level_alloc.h +++ b/absl/base/internal/low_level_alloc.h @@ -120,6 +120,12 @@ class LowLevelAlloc { LowLevelAlloc(); // no instances }; +// Returns a global async-signal-safe arena for LowLevelAlloc. +LowLevelAlloc::Arena *SigSafeArena(); + +// Ensures the global async-signal-safe arena for LowLevelAlloc is initialized. +void InitSigSafeArena(); + } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/base/internal/nullability_traits.h b/absl/base/internal/nullability_traits.h new file mode 100644 index 000000000..790ec9096 --- /dev/null +++ b/absl/base/internal/nullability_traits.h @@ -0,0 +1,71 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_BASE_INTERNAL_NULLABILITY_TRAITS_H_ +#define ABSL_BASE_INTERNAL_NULLABILITY_TRAITS_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/nullability.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// `value` is true if the type `T` is compatible with nullability annotations +// (is a raw pointer, a smart pointer, or marked with +// ABSL_NULLABILITY_COMPATIBLE). Prefer to use the higher-level +// `AddNonnullIfCompatible` if that is sufficient. +// +// NOTE: This should not be used to detect if the compiler is Clang (since +// Clang is the only compiler that supports nullability annotations). +#if defined(__clang__) && !defined(__OBJC__) && \ + ABSL_HAVE_FEATURE(nullability_on_classes) +template +struct IsNullabilityCompatibleType { + constexpr static bool value = false; +}; + +template +struct IsNullabilityCompatibleType> { + constexpr static bool value = true; +}; +#else +// False when absl_nullable is a no-op (for non-Clang compilers or Objective-C.) +template +struct IsNullabilityCompatibleType { + constexpr static bool value = false; +}; +#endif + +// A trait to add `absl_nonnull` to a type if it is compatible with nullability +// annotations. 
+template ::value> +struct AddNonnullIfCompatible; + +template +struct AddNonnullIfCompatible { + using type = T; +}; +template +struct AddNonnullIfCompatible { + using type = absl_nonnull T; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_NULLABILITY_TRAITS_H_ diff --git a/absl/base/internal/poison.cc b/absl/base/internal/poison.cc index b33d4c2d3..c639c9666 100644 --- a/absl/base/internal/poison.cc +++ b/absl/base/internal/poison.cc @@ -57,19 +57,20 @@ size_t GetPageSize() { void* InitializePoisonedPointerInternal() { const size_t block_size = GetPageSize(); + void* data = nullptr; #if defined(ABSL_HAVE_ADDRESS_SANITIZER) - void* data = malloc(block_size); + data = malloc(block_size); ASAN_POISON_MEMORY_REGION(data, block_size); #elif defined(ABSL_HAVE_MEMORY_SANITIZER) - void* data = malloc(block_size); + data = malloc(block_size); __msan_poison(data, block_size); #elif defined(ABSL_HAVE_MMAP) - void* data = DirectMmap(nullptr, block_size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + data = DirectMmap(nullptr, block_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, + -1, 0); if (data == MAP_FAILED) return GetBadPointerInternal(); #elif defined(_WIN32) - void* data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT, - PAGE_NOACCESS); + data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT, + PAGE_NOACCESS); if (data == nullptr) return GetBadPointerInternal(); #else return GetBadPointerInternal(); diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc index 35a08f0ac..8537f3ec5 100644 --- a/absl/base/internal/raw_logging.cc +++ b/absl/base/internal/raw_logging.cc @@ -41,9 +41,8 @@ // // This preprocessor token is also defined in raw_io.cc. If you need to copy // this, consider moving both to config.h instead. 
-#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ - defined(__hexagon__) || defined(__Fuchsia__) || \ - defined(__native_client__) || defined(__OpenBSD__) || \ +#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__hexagon__) || defined(__Fuchsia__) || defined(__OpenBSD__) || \ defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include @@ -158,7 +157,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line, #endif #ifdef ABSL_MIN_LOG_LEVEL - if (severity < static_cast(ABSL_MIN_LOG_LEVEL) && + if (severity < static_cast(ABSL_MIN_LOG_LEVEL) && severity < absl::LogSeverity::kFatal) { enabled = false; } diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc index 430f775bd..41d2b482f 100644 --- a/absl/base/internal/spinlock.cc +++ b/absl/base/internal/spinlock.cc @@ -16,15 +16,18 @@ #include #include +#include #include #include "absl/base/attributes.h" +#include "absl/base/call_once.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/cycleclock.h" +#include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/spinlock_wait.h" #include "absl/base/internal/sysinfo.h" /* For NumCPUs() */ -#include "absl/base/call_once.h" +#include "absl/base/internal/tsan_mutex_interface.h" // Description of lock-word: // 31..00: [............................3][2][1][0] @@ -58,7 +61,7 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook submit_profile_data; @@ -67,25 +70,24 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, submit_profile_data.Store(fn); } -// Uncommon constructors. -SpinLock::SpinLock(base_internal::SchedulingMode mode) - : lockword_(IsCooperative(mode) ? 
kSpinLockCooperative : 0) { - ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); -} - // Monitor the lock to see if its value changes within some time period -// (adaptive_spin_count loop iterations). The last value read from the lock +// (adaptive_spin_count_ loop iterations). The last value read from the lock // is returned from the method. +ABSL_CONST_INIT std::atomic SpinLock::adaptive_spin_count_{0}; uint32_t SpinLock::SpinLoop() { // We are already in the slow path of SpinLock, initialize the // adaptive_spin_count here. - ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count; - ABSL_CONST_INIT static int adaptive_spin_count = 0; - base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() { - adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1; - }); - - int c = adaptive_spin_count; + if (adaptive_spin_count_.load(std::memory_order_relaxed) == 0) { + int current_spin_count = 0; + int new_spin_count = NumCPUs() > 1 ? 1000 : 1; + // If this fails, the value will remain unchanged. We may not spin for the + // intended duration, but that is still safe. We will try again on the next + // call to SpinLoop. 
+ adaptive_spin_count_.compare_exchange_weak( + current_spin_count, new_spin_count, std::memory_order_relaxed, + std::memory_order_relaxed); + } + int c = adaptive_spin_count_.load(std::memory_order_relaxed); uint32_t lock_value; do { lock_value = lockword_.load(std::memory_order_relaxed); @@ -100,11 +102,11 @@ void SpinLock::SlowLock() { return; } - base_internal::SchedulingMode scheduling_mode; + SchedulingMode scheduling_mode; if ((lock_value & kSpinLockCooperative) != 0) { - scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + scheduling_mode = SCHEDULE_COOPERATIVE_AND_KERNEL; } else { - scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; + scheduling_mode = SCHEDULE_KERNEL_ONLY; } // The lock was not obtained initially, so this thread needs to wait for @@ -134,7 +136,7 @@ void SpinLock::SlowLock() { // new lock state will be the number of cycles this thread waited if // this thread obtains the lock. lock_value = TryLockInternal(lock_value, wait_cycles); - continue; // Skip the delay at the end of the loop. + continue; // Skip the delay at the end of the loop. } else if ((lock_value & kWaitTimeMask) == 0) { // The lock is still held, without a waiter being marked, but something // else about the lock word changed, causing our CAS to fail. For @@ -150,8 +152,8 @@ void SpinLock::SlowLock() { // synchronization there to avoid false positives. ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); // Wait for an OS specific delay. - base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count, - scheduling_mode); + SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count, + scheduling_mode); ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); // Spin again after returning from the wait routine to give this thread // some chance of obtaining the lock. 
@@ -162,8 +164,8 @@ void SpinLock::SlowLock() { } void SpinLock::SlowUnlock(uint32_t lock_value) { - base_internal::SpinLockWake(&lockword_, - false); // wake waiter if necessary + SpinLockWake(&lockword_, + false); // wake waiter if necessary // If our acquisition was contended, collect contentionz profile info. We // reserve a unitary wait time to represent that a waiter exists without our diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h index 2a1089697..d535093ef 100644 --- a/absl/base/internal/spinlock.h +++ b/absl/base/internal/spinlock.h @@ -19,7 +19,7 @@ // - for use by Abseil internal code that Mutex itself depends on // - for async signal safety (see below) -// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async +// SpinLock with a SchedulingMode::SCHEDULE_KERNEL_ONLY is async // signal safe. If a spinlock is used within a signal handler, all code that // acquires the lock must ensure that the signal cannot arrive while they are // holding the lock. Typically, this is done by blocking the signal. 
@@ -31,20 +31,24 @@ #include #include +#include +#include #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/const_init.h" -#include "absl/base/dynamic_annotations.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/macros.h" #include "absl/base/thread_annotations.h" namespace tcmalloc { namespace tcmalloc_internal { class AllocationGuardSpinLockHolder; +class Static; } // namespace tcmalloc_internal } // namespace tcmalloc @@ -55,17 +59,31 @@ namespace base_internal { class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { public: - SpinLock() : lockword_(kSpinLockCooperative) { - ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); - } + constexpr SpinLock() : lockword_(kSpinLockCooperative) { RegisterWithTsan(); } // Constructors that allow non-cooperative spinlocks to be created for use // inside thread schedulers. Normal clients should not use these. - explicit SpinLock(base_internal::SchedulingMode mode); + constexpr explicit SpinLock(SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) { + RegisterWithTsan(); + } + +#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(_WIN32) + // Constructor to inline users of the default scheduling mode. + // + // This only needs to exists for inliner runs, but doesn't work correctly in + // clang+windows builds, likely due to mangling differences. + ABSL_DEPRECATE_AND_INLINE() + constexpr explicit SpinLock(SchedulingMode mode) + __attribute__((enable_if(mode == SCHEDULE_COOPERATIVE_AND_KERNEL, + "Cooperative use default constructor"))) + : SpinLock() {} +#endif // Constructor for global SpinLock instances. See absl/base/const_init.h. - constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) - : lockword_(IsCooperative(mode) ? 
kSpinLockCooperative : 0) {} + ABSL_DEPRECATE_AND_INLINE() + constexpr SpinLock(absl::ConstInitType, SchedulingMode mode) + : SpinLock(mode) {} // For global SpinLock instances prefer trivial destructor when possible. // Default but non-trivial destructor in some build configurations causes an @@ -77,7 +95,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { #endif // Acquire this SpinLock. - inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { + inline void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); if (!TryLockImpl()) { SlowLock(); @@ -85,11 +103,14 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); } + ABSL_DEPRECATE_AND_INLINE() + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { return lock(); } + // Try to acquire this SpinLock without blocking and return true if the // acquisition was successful. If the lock was not acquired, false is - // returned. If this SpinLock is free at the time of the call, TryLock - // will return true with high probability. - [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + // returned. If this SpinLock is free at the time of the call, try_lock will + // return true with high probability. + [[nodiscard]] inline bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); bool res = TryLockImpl(); ABSL_TSAN_MUTEX_POST_LOCK( @@ -98,15 +119,20 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { return res; } + ABSL_DEPRECATE_AND_INLINE() + [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return try_lock(); + } + // Release this SpinLock, which must be held by the calling thread. 
- inline void Unlock() ABSL_UNLOCK_FUNCTION() { + inline void unlock() ABSL_UNLOCK_FUNCTION() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); uint32_t lock_value = lockword_.load(std::memory_order_relaxed); lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, std::memory_order_release); if ((lock_value & kSpinLockDisabledScheduling) != 0) { - base_internal::SchedulingGuard::EnableRescheduling(true); + SchedulingGuard::EnableRescheduling(true); } if ((lock_value & kWaitTimeMask) != 0) { // Collect contentionz profile info, and speed the wakeup of any waiter. @@ -117,6 +143,9 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); } + ABSL_DEPRECATE_AND_INLINE() + inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); } + // Determine if the lock is held. When the lock is held by the invoking // thread, true will always be returned. Intended to be used as // CHECK(lock.IsHeld()). @@ -146,6 +175,16 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { // Provide access to protected method above. Use for testing only. friend struct SpinLockTest; friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder; + friend class tcmalloc::tcmalloc_internal::Static; + + static int GetAdaptiveSpinCount() { + return adaptive_spin_count_.load(std::memory_order_relaxed); + } + static void SetAdaptiveSpinCount(int count) { + adaptive_spin_count_.store(count, std::memory_order_relaxed); + } + + static std::atomic adaptive_spin_count_; private: // lockword_ is used to store the following: @@ -175,9 +214,16 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); // Returns true if the provided scheduling mode is cooperative. 
- static constexpr bool IsCooperative( - base_internal::SchedulingMode scheduling_mode) { - return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + static constexpr bool IsCooperative(SchedulingMode scheduling_mode) { + return scheduling_mode == SCHEDULE_COOPERATIVE_AND_KERNEL; + } + + constexpr void RegisterWithTsan() { +#if ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated) + if (!__builtin_is_constant_evaluated()) { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } +#endif } bool IsCooperative() const { @@ -202,19 +248,18 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { // Corresponding locker object that arranges to acquire a spinlock for // the duration of a C++ scope. -class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder { +class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder + : public std::lock_guard { public: + inline explicit SpinLockHolder( + SpinLock& l ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) + ABSL_EXCLUSIVE_LOCK_FUNCTION(l) + : std::lock_guard(l) {} + ABSL_DEPRECATE_AND_INLINE() inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) - : lock_(l) { - l->Lock(); - } - inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); } + : SpinLockHolder(*l) {} - SpinLockHolder(const SpinLockHolder&) = delete; - SpinLockHolder& operator=(const SpinLockHolder&) = delete; - - private: - SpinLock* lock_; + inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() = default; }; // Register a hook for profiling support. @@ -243,7 +288,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, if ((lock_value & kSpinLockCooperative) == 0) { // For non-cooperative locks we must make sure we mark ourselves as // non-reschedulable before we attempt to CompareAndSwap. 
- if (base_internal::SchedulingGuard::DisableRescheduling()) { + if (SchedulingGuard::DisableRescheduling()) { sched_disabled_bit = kSpinLockDisabledScheduling; } } @@ -252,7 +297,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, lock_value, kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, std::memory_order_acquire, std::memory_order_relaxed)) { - base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); + SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); } return lock_value; diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc index 1937db307..a62dd31c2 100644 --- a/absl/base/internal/sysinfo.cc +++ b/absl/base/internal/sysinfo.cc @@ -456,15 +456,6 @@ pid_t GetTID() { return getthrid(); } pid_t GetTID() { return static_cast(_lwp_self()); } -#elif defined(__native_client__) - -pid_t GetTID() { - auto* thread = pthread_self(); - static_assert(sizeof(pid_t) == sizeof(thread), - "In NaCL int expected to be the same size as a pointer"); - return reinterpret_cast(thread); -} - #elif defined(__Fuchsia__) pid_t GetTID() { diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc index 68f92730a..73e4145e5 100644 --- a/absl/base/internal/unscaledcycleclock.cc +++ b/absl/base/internal/unscaledcycleclock.cc @@ -62,7 +62,7 @@ double UnscaledCycleClock::Frequency() { int64_t UnscaledCycleClock::Now() { #ifdef __GLIBC__ - return __ppc_get_timebase(); + return static_cast(__ppc_get_timebase()); #else #ifdef __powerpc64__ int64_t tbr; @@ -85,6 +85,10 @@ int64_t UnscaledCycleClock::Now() { double UnscaledCycleClock::Frequency() { #ifdef __GLIBC__ return __ppc_get_timebase_freq(); +#elif defined(__linux__) + // Fallback for musl + ppc64le: use constant timebase frequency (512 MHz) + // Must come after __GLIBC__. + return static_cast(512000000); #elif defined(_AIX) // This is the same constant value as returned by // __ppc_get_timebase_freq(). 
diff --git a/absl/base/internal/unscaledcycleclock_config.h b/absl/base/internal/unscaledcycleclock_config.h index 43a3dabee..9a0841dfa 100644 --- a/absl/base/internal/unscaledcycleclock_config.h +++ b/absl/base/internal/unscaledcycleclock_config.h @@ -34,7 +34,7 @@ // CycleClock that runs at atleast 1 MHz. We've found some Android // ARM64 devices where this is not the case, so we disable it by // default on Android ARM64. -#if defined(__native_client__) || (defined(__APPLE__)) || \ +#if defined(__APPLE__) || \ (defined(__ANDROID__) && defined(__aarch64__)) #define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 #else diff --git a/absl/base/macros.h b/absl/base/macros.h index ff89944ae..446a4452a 100644 --- a/absl/base/macros.h +++ b/absl/base/macros.h @@ -169,42 +169,65 @@ ABSL_NAMESPACE_END #define ABSL_INTERNAL_RETHROW do {} while (false) #endif // ABSL_HAVE_EXCEPTIONS -// ABSL_DEPRECATE_AND_INLINE() +// ABSL_REFACTOR_INLINE +// +// Marks a function or type for automated refactoring by go/cpp-inliner. It can +// be used on inline function definitions or type aliases in header files and +// should be combined with the `[[deprecated]]` attribute. // -// Marks a function or type alias as deprecated and tags it to be picked up for -// automated refactoring by go/cpp-inliner. It can added to inline function -// definitions or type aliases. It should only be used within a header file. It -// differs from `ABSL_DEPRECATED` in the following ways: +// Using `ABSL_REFACTOR_INLINE` differs from using the `[[deprecated]]` alone in +// the following ways: // // 1. New uses of the function or type will be discouraged via Tricorder // warnings. // 2. If enabled via `METADATA`, automated changes will be sent out inlining the // functions's body or replacing the type where it is used. 
// -// For example: +// Examples: // -// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) { +// [[deprecated("Use NewFunc() instead")]] ABSL_REFACTOR_INLINE +// inline int OldFunc(int x) { // return NewFunc(x, 0); // } // -// will mark `OldFunc` as deprecated, and the go/cpp-inliner service will -// replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)`. Once all calls -// to `OldFunc` have been replaced, `OldFunc` can be deleted. +// using OldType [[deprecated("Use NewType instead")]] ABSL_REFACTOR_INLINE = +// NewType; +// +// will mark `OldFunc` and `OldType` as deprecated, and the go/cpp-inliner +// service will replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)` and +// `OldType` with `NewType`. Once all replacements have been completed, the old +// function or type can be deleted. // // See go/cpp-inliner for more information. // // Note: go/cpp-inliner is Google-internal service for automated refactoring. // While open-source users do not have access to this service, the macro is -// provided for compatibility, and so that users receive deprecation warnings. -#if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \ - ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) -#define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]] -#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated) -#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]] +// provided for compatibility. +#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate) +#define ABSL_REFACTOR_INLINE [[clang::annotate("inline-me")]] #else -#define ABSL_DEPRECATE_AND_INLINE() +#define ABSL_REFACTOR_INLINE #endif +// ABSL_DEPRECATE_AND_INLINE() +// +// This is the original macro used by go/cpp-inliner that combines +// [[deprecated]] and ABSL_REFACTOR_INLINE. 
+// +// Examples: +// +// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) { +// return NewFunc(x, 0); +// } +// +// using OldType ABSL_DEPRECATE_AND_INLINE() = NewType; +// +// The combination of `[[deprecated("Use X instead")]]` and +// `ABSL_REFACTOR_INLINE` is preferred because it provides a more informative +// deprecation message to developers, especially those that do not have access +// to the automated refactoring capabilities of go/cpp-inliner. +#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]] ABSL_REFACTOR_INLINE + // Requires the compiler to prove that the size of the given object is at least // the expected amount. #if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size) diff --git a/absl/base/nullability.h b/absl/base/nullability.h index 3a5d6e83e..facc6422e 100644 --- a/absl/base/nullability.h +++ b/absl/base/nullability.h @@ -81,7 +81,7 @@ // const Employee* absl_nonnull e; // // // A non-null pointer to a const nullable pointer to an `Employee`. -// Employee* absl_nullable const* absl_nonnull e = nullptr; +// Employee* absl_nullable const* absl_nonnull e; // // // A non-null function pointer. // void (*absl_nonnull func)(int, double); @@ -184,7 +184,6 @@ #define ABSL_BASE_NULLABILITY_H_ #include "absl/base/config.h" -#include "absl/base/internal/nullability_deprecated.h" // ABSL_POINTERS_DEFAULT_NONNULL // diff --git a/absl/base/optimization.h b/absl/base/optimization.h index 429ea9ce7..04678c493 100644 --- a/absl/base/optimization.h +++ b/absl/base/optimization.h @@ -53,9 +53,7 @@ // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); // return result; // } -#if defined(__pnacl__) -#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } -#elif defined(__clang__) +#if defined(__clang__) // Clang will not tail call given inline volatile assembly. 
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") #elif defined(__GNUC__) diff --git a/absl/base/options.h b/absl/base/options.h index f904f6446..9bd398139 100644 --- a/absl/base/options.h +++ b/absl/base/options.h @@ -73,32 +73,6 @@ // Type Compatibility Options // ----------------------------------------------------------------------------- -// ABSL_OPTION_USE_STD_STRING_VIEW -// -// This option controls whether absl::string_view is implemented as an alias to -// std::string_view, or as an independent implementation. -// -// A value of 0 means to use Abseil's implementation. This requires only C++11 -// support, and is expected to work on every toolchain we support. -// -// A value of 1 means to use an alias to std::string_view. This requires that -// all code using Abseil is built in C++17 mode or later. -// -// A value of 2 means to detect the C++ version being used to compile Abseil, -// and use an alias only if a working std::string_view is available. This -// option is useful when you are building your program from source. It should -// not be used otherwise -- for example, if you are distributing Abseil in a -// binary package manager -- since in mode 2, absl::string_view will name a -// different type, with a different mangled name and binary layout, depending on -// the compiler flags passed by the end user. For more info, see -// https://abseil.io/about/design/dropin-types. -// -// User code should not inspect this macro. To check in the preprocessor if -// absl::string_view is a typedef of std::string_view, use the feature macro -// ABSL_USES_STD_STRING_VIEW. - -#define ABSL_OPTION_USE_STD_STRING_VIEW 2 - // ABSL_OPTION_USE_STD_ORDERING // // This option controls whether absl::{partial,weak,strong}_ordering are @@ -149,7 +123,7 @@ // allowed. 
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1 -#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20250512 +#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20260107 // ABSL_OPTION_HARDENED // diff --git a/absl/cleanup/cleanup.h b/absl/cleanup/cleanup.h index 311e4828c..632ec6ef4 100644 --- a/absl/cleanup/cleanup.h +++ b/absl/cleanup/cleanup.h @@ -19,6 +19,10 @@ // `absl::Cleanup` implements the scope guard idiom, invoking the contained // callback's `operator()() &&` on scope exit. // +// This class doesn't allocate or take any locks, and is safe to use in a signal +// handler. Of course the callback with which it is constructed also must be +// signal safe in order for this to be useful. +// // Example: // // ``` diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h index 32a82ef06..0746f72bd 100644 --- a/absl/container/btree_map.h +++ b/absl/container/btree_map.h @@ -57,18 +57,44 @@ #ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ +#include +#include +#include +#include + #include "absl/base/attributes.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export +#include "absl/container/internal/common.h" +#include "absl/container/internal/container_memory.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { +template +struct map_params_impl; + +template +struct btree_map_defaults { + using Compare = std::less; + using Alloc = std::allocator>; + using TargetNodeSize = std::integral_constant; + using IsMulti = std::false_type; +}; + template -struct map_params; +using map_params = typename ApplyWithoutDefaultSuffix< + map_params_impl, + TypeList::Compare, + typename btree_map_defaults::Alloc, + typename btree_map_defaults::TargetNodeSize, + typename btree_map_defaults::IsMulti>, + TypeList, + std::integral_constant>>::type; } // namespace container_internal @@ -117,8 +143,8 @@ class ABSL_ATTRIBUTE_OWNER btree_map // // * Copy 
assignment operator // - // absl::btree_map map4; - // map4 = map3; + // absl::btree_map map4; + // map4 = map3; // // * Move constructor // @@ -555,8 +581,8 @@ class ABSL_ATTRIBUTE_OWNER btree_multimap // // * Copy assignment operator // - // absl::btree_multimap map4; - // map4 = map3; + // absl::btree_multimap map4; + // map4 = map3; // // * Move constructor // @@ -855,11 +881,20 @@ namespace container_internal { // A parameters structure for holding the type parameters for a btree_map. // Compare and Alloc should be nothrow copy-constructible. -template -struct map_params : common_params> { - using super_type = typename map_params::common_params; +template +struct map_params_impl + : common_params< + Key, + GetFromListOr::Compare, 0, + Params...>, + GetFromListOr::Alloc, 1, + Params...>, + GetFromListOr::TargetNodeSize, + 2, Params...>::value, + GetFromListOr::IsMulti, 3, + Params...>::value, + /*IsMap=*/true, map_slot_policy> { + using super_type = typename map_params_impl::common_params; using mapped_type = Data; // This type allows us to move keys when it is safe to do so. It is safe // for maps in which value_type and mutable_value_type are layout compatible. 
@@ -868,6 +903,21 @@ struct map_params : common_params::Compare, 0, + Params...>, + GetFromListOr::Alloc, 1, + Params...>, + GetFromListOr< + typename btree_map_defaults::TargetNodeSize, 2, + Params...>::value, + GetFromListOr::IsMulti, 3, + Params...>::value>, + map_params_impl>); + template static auto key(const V &value ABSL_ATTRIBUTE_LIFETIME_BOUND) -> decltype((value.first)) { diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h index 16181de57..991cb89c0 100644 --- a/absl/container/btree_set.h +++ b/absl/container/btree_set.h @@ -56,9 +56,15 @@ #ifndef ABSL_CONTAINER_BTREE_SET_H_ #define ABSL_CONTAINER_BTREE_SET_H_ +#include +#include +#include +#include + #include "absl/base/attributes.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export +#include "absl/container/internal/common.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -68,9 +74,27 @@ namespace container_internal { template struct set_slot_policy; +template +struct set_params_impl; + +template +struct btree_set_defaults { + using Compare = std::less; + using Alloc = std::allocator; + using TargetNodeSize = std::integral_constant; + using IsMulti = std::false_type; +}; + template -struct set_params; +using set_params = typename ApplyWithoutDefaultSuffix< + set_params_impl, + TypeList::Compare, + typename btree_set_defaults::Alloc, + typename btree_set_defaults::TargetNodeSize, + typename btree_set_defaults::IsMulti>, + TypeList, + std::integral_constant>>::type; } // namespace container_internal @@ -119,8 +143,8 @@ class ABSL_ATTRIBUTE_OWNER btree_set // // * Copy assignment operator // - // absl::btree_set set4; - // set4 = set3; + // absl::btree_set set4; + // set4 = set3; // // * Move constructor // @@ -475,8 +499,8 @@ class ABSL_ATTRIBUTE_OWNER btree_multiset // // * Copy assignment operator // - // absl::btree_multiset set4; - // set4 = set3; + // absl::btree_multiset set4; + // set4 = set3; // 
// * Move constructor // @@ -803,12 +827,34 @@ struct set_slot_policy { // A parameters structure for holding the type parameters for a btree_set. // Compare and Alloc should be nothrow copy-constructible. -template -struct set_params : common_params> { +template +struct set_params_impl + : common_params< + Key, + GetFromListOr::Compare, 0, + Params...>, + GetFromListOr::Alloc, 1, Params...>, + GetFromListOr::TargetNodeSize, 2, + Params...>::value, + GetFromListOr::IsMulti, 3, + Params...>::value, + /*IsMap=*/false, set_slot_policy> { using value_type = Key; - using slot_type = typename set_params::common_params::slot_type; + using slot_type = typename set_params_impl::common_params::slot_type; + + static_assert( + std::is_same_v< + set_params< + Key, + GetFromListOr::Compare, 0, + Params...>, + GetFromListOr::Alloc, 1, + Params...>, + GetFromListOr::TargetNodeSize, 2, + Params...>::value, + GetFromListOr::IsMulti, 3, + Params...>::value>, + set_params_impl>); template static const V &key(const V &value) { diff --git a/absl/container/chunked_queue.h b/absl/container/chunked_queue.h new file mode 100644 index 000000000..d5b1184a8 --- /dev/null +++ b/absl/container/chunked_queue.h @@ -0,0 +1,755 @@ +// Copyright 2025 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// ----------------------------------------------------------------------------- +// File: chunked_queue.h +// ----------------------------------------------------------------------------- +// +// `std::deque` provides random access and fast push/pop back/front. It is +// implemented as an array of fixed blocks. It provides no control of block size +// and implementations differ; libstdc++ tries to allocate blocks of ~512 bytes +// and libc++ tries for blocks of ~4k bytes. +// +// `absl::chunked_queue` provides the same minus random access. It is +// implemented as a double-linked list of fixed or variable sized blocks. +// +// `absl::chunked_queue` is useful when memory usage is paramount as it provides +// fine-grained and configurable block sizing. +// +// The interface supported by this class is limited to: +// +// empty() +// size() +// max_size() +// shrink_to_fit() +// resize() +// assign() +// push_back() +// emplace_back() +// pop_front() +// front() +// back() +// swap() +// clear() +// begin(), end() +// cbegin(), cend() +// +// === ADVANCED USAGE +// +// == clear() +// +// As an optimization clear() leaves the first block of the chunked_queue +// allocated (but empty). So clear will not delete all memory of the container. +// In order to do so, call shrink_to_fit() or swap the container with an empty +// one. +// +// absl::chunked_queue q = {1, 2, 3}; +// q.clear(); +// q.shrink_to_fit(); +// +// == block size customization +// +// chunked_queue allows customization of the block size for each block. By +// default the block size is set to 1 element and the size doubles for the next +// block until it reaches the default max block size, which is 128 elements. +// +// = fixed size +// +// When only the first block size parameter is specified, it sets a fixed block +// size for all blocks: +// +// chunked_queue: 32 elements per block +// +// The smaller the block size, the less the memory usage for small queues at the +// cost of performance.
Caveat: For large queues, a smaller block size will +// increase memory usage, and reduce performance. +// +// = variable size +// +// When both block size parameters are specified, they set the min and max block +// sizes for the blocks. Initially the queue starts with the min block size and +// as it grows, the size of each block grows until it reaches the max block +// size. +// New blocks are double the size of the tail block (so they at least +// double the size of the queue). +// +// chunked_queue: first block 4 elements, second block 8 elements, +// third block 16 elements, fourth block 32 elements, +// all other blocks 64 elements +// +// One can specify a min and max such that small queues will not waste memory +// and large queues will not have too many blocks. + +#ifndef ABSL_CONTAINER_CHUNKED_QUEUE_H_ +#define ABSL_CONTAINER_CHUNKED_QUEUE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/iterator_traits.h" +#include "absl/base/macros.h" +#include "absl/container/internal/chunked_queue.h" +#include "absl/container/internal/layout.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +template > +class chunked_queue { + public: + static constexpr size_t kBlockSizeMin = (BLo == 0 && BHi == 0) ? 1 : BLo; + static constexpr size_t kBlockSizeMax = (BLo == 0 && BHi == 0) ? 
128 : BHi; + + private: + static_assert(kBlockSizeMin > 0, "Min block size cannot be zero"); + static_assert(kBlockSizeMin <= kBlockSizeMax, "Invalid block size bounds"); + + using Block = container_internal::ChunkedQueueBlock; + using AllocatorTraits = std::allocator_traits; + + class iterator_common { + public: + friend bool operator==(const iterator_common& a, const iterator_common& b) { + return a.ptr == b.ptr; + } + + friend bool operator!=(const iterator_common& a, const iterator_common& b) { + return !(a == b); + } + + protected: + iterator_common() = default; + explicit iterator_common(Block* b) + : block(b), ptr(b->start()), limit(b->limit()) {} + + void Incr() { + // If we do not have a next block, make ptr point one past the end of this + // block. If we do have a next block, make ptr point to the first element + // of the next block. + ++ptr; + if (ptr == limit && block->next()) *this = iterator_common(block->next()); + } + + void IncrBy(size_t n) { + while (ptr + n > limit) { + n -= limit - ptr; + *this = iterator_common(block->next()); + } + ptr += n; + } + + Block* block = nullptr; + T* ptr = nullptr; + T* limit = nullptr; + }; + + // CT can be either T or const T. + template + class basic_iterator : public iterator_common { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = typename AllocatorTraits::value_type; + using difference_type = typename AllocatorTraits::difference_type; + using pointer = + typename std::conditional::value, + typename AllocatorTraits::const_pointer, + typename AllocatorTraits::pointer>::type; + using reference = CT&; + + basic_iterator() = default; + + // Copy ctor if CT is T. + // Otherwise it's a conversion of iterator to const_iterator. 
+ basic_iterator(const basic_iterator& it) // NOLINT(runtime/explicit) + : iterator_common(it) {} + + basic_iterator& operator=(const basic_iterator& other) = default; + + reference operator*() const { return *this->ptr; } + pointer operator->() const { return this->ptr; } + basic_iterator& operator++() { + this->Incr(); + return *this; + } + basic_iterator operator++(int) { + basic_iterator t = *this; + ++*this; + return t; + } + + private: + explicit basic_iterator(Block* b) : iterator_common(b) {} + + friend chunked_queue; + }; + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + using reference = value_type&; + using const_reference = const value_type&; + using iterator = basic_iterator; + using const_iterator = basic_iterator; + + // Constructs an empty queue. + chunked_queue() : chunked_queue(allocator_type()) {} + + // Constructs an empty queue with a custom allocator. + explicit chunked_queue(const allocator_type& alloc) + : alloc_and_size_(alloc) {} + + // Constructs a queue with `count` default-inserted elements. + explicit chunked_queue(size_type count, + const allocator_type& alloc = allocator_type()) + : alloc_and_size_(alloc) { + resize(count); + } + + // Constructs a queue with `count` copies of `value`. + chunked_queue(size_type count, const T& value, + const allocator_type& alloc = allocator_type()) + : alloc_and_size_(alloc) { + assign(count, value); + } + + // Constructs a queue with the contents of the range [first, last). + template ::value>> + chunked_queue(Iter first, Iter last, + const allocator_type& alloc = allocator_type()) + : alloc_and_size_(alloc) { + using Tag = typename std::iterator_traits::iterator_category; + RangeInit(first, last, Tag()); + } + + // Constructs a queue with the contents of the initializer list `list`. 
+ chunked_queue(std::initializer_list list, + const allocator_type& alloc = allocator_type()) + : chunked_queue(list.begin(), list.end(), alloc) {} + + ~chunked_queue(); + + // Copy constructor. + chunked_queue(const chunked_queue& other) + : chunked_queue(other, + AllocatorTraits::select_on_container_copy_construction( + other.alloc_and_size_.allocator())) {} + + // Copy constructor with specific allocator. + chunked_queue(const chunked_queue& other, const allocator_type& alloc) + : alloc_and_size_(alloc) { + for (const_reference item : other) { + push_back(item); + } + } + + // Move constructor. + chunked_queue(chunked_queue&& other) noexcept + : head_(other.head_), + tail_(other.tail_), + alloc_and_size_(std::move(other.alloc_and_size_)) { + other.head_ = {}; + other.tail_ = {}; + other.alloc_and_size_.size = 0; + } + + // Replaces contents with those from initializer list `il`. + chunked_queue& operator=(std::initializer_list il) { + assign(il.begin(), il.end()); + return *this; + } + + // Copy assignment operator. + chunked_queue& operator=(const chunked_queue& other) { + if (this == &other) { + return *this; + } + if (AllocatorTraits::propagate_on_container_copy_assignment::value && + (alloc_and_size_.allocator() != other.alloc_and_size_.allocator())) { + // Destroy all current elements and blocks with the current allocator, + // before switching this to use the allocator propagated from "other". + DestroyAndDeallocateAll(); + alloc_and_size_ = AllocatorAndSize(other.alloc_and_size_.allocator()); + } + assign(other.begin(), other.end()); + return *this; + } + + // Move assignment operator. + chunked_queue& operator=(chunked_queue&& other) noexcept; + + // Returns true if the queue contains no elements. + bool empty() const { return alloc_and_size_.size == 0; } + + // Returns the number of elements in the queue. + size_t size() const { return alloc_and_size_.size; } + + // Returns the maximum number of elements the queue is able to hold. 
+ size_type max_size() const noexcept { + return AllocatorTraits::max_size(alloc_and_size_.allocator()); + } + + // Resizes the container to contain `new_size` elements. + // If `new_size > size()`, additional default-inserted elements are appended. + // If `new_size < size()`, elements are removed from the end. + void resize(size_t new_size); + + // Resizes the container to contain `new_size` elements. + // If `new_size > size()`, additional copies of `value` are appended. + // If `new_size < size()`, elements are removed from the end. + void resize(size_type new_size, const T& value) { + if (new_size > size()) { + size_t to_add = new_size - size(); + for (size_t i = 0; i < to_add; ++i) { + push_back(value); + } + } else { + resize(new_size); + } + } + + // Requests the removal of unused capacity. + void shrink_to_fit() { + // As an optimization clear() leaves the first block of the chunked_queue + // allocated (but empty). When empty, shrink_to_fit() deallocates the first + // block by swapping it with a newly constructed container that has no first + // block. + if (empty()) { + chunked_queue(alloc_and_size_.allocator()).swap(*this); + } + } + + // Replaces the contents with copies of those in the range [first, last). + template ::value>> + void assign(Iter first, Iter last) { + auto out = begin(); + Block* prev_block = nullptr; + + // Overwrite existing elements. + for (; out != end() && first != last; ++first) { + // Track the previous block so we can correctly update tail_ if we stop + // exactly at a block boundary. + if (out.ptr + 1 == out.block->limit()) { + prev_block = out.block; + } + *out = *first; + ++out; + } + + // If we stopped exactly at the start of a block (meaning the previous block + // was full), we must ensure tail_ points to the end of the previous block, + // not the start of the current (now empty and to be deleted) block.
+ // This maintains the invariant required by back() which assumes tail_ + // never points to the start of a block (unless it's the only block). + if (!empty() && out.block != nullptr && out.ptr == out.block->start() && + prev_block != nullptr) { + // Delete the current block and all subsequent blocks. + // + // NOTE: Calling EraseAllFrom on an iterator that points to the limit of + // the previous block will not delete any element from the previous block. + iterator prev_block_end(prev_block); + prev_block_end.ptr = prev_block->limit(); + EraseAllFrom(prev_block_end); + + // Update tail_ to point to the end of the previous block. + tail_ = prev_block_end; + prev_block->set_next(nullptr); + } else { + // Standard erase from the current position to the end. + EraseAllFrom(out); + } + + // Append any remaining new elements. + for (; first != last; ++first) { + push_back(*first); + } + } + + // Replaces the contents with `count` copies of `value`. + void assign(size_type count, const T& value) { + clear(); + for (size_type i = 0; i < count; ++i) { + push_back(value); + } + } + + // Replaces the contents with the elements from the initializer list `il`. + void assign(std::initializer_list il) { assign(il.begin(), il.end()); } + + // Appends the given element value to the end of the container. + // Invalidates `end()` iterator. References to other elements remain valid. + void push_back(const T& val) { emplace_back(val); } + void push_back(T&& val) { emplace_back(std::move(val)); } + + // Appends a new element to the end of the container. + // The element is constructed in-place with `args`. + // Returns a reference to the new element. + // Invalidates `end()` iterator. References to other elements remain valid. + template + T& emplace_back(A&&... args) { + T* storage = AllocateBack(); + AllocatorTraits::construct(alloc_and_size_.allocator(), storage, + std::forward(args)...); + return *storage; + } + + // Removes the first element of the container. 
+ // Invalidates iterators to the removed element. + // REQUIRES: !empty() + void pop_front(); + + // Returns a reference to the first element in the container. + // REQUIRES: !empty() + T& front() { + ABSL_HARDENING_ASSERT(!empty()); + return *head_; + } + const T& front() const { + ABSL_HARDENING_ASSERT(!empty()); + return *head_; + } + + // Returns a reference to the last element in the container. + // REQUIRES: !empty() + T& back() { + ABSL_HARDENING_ASSERT(!empty()); + return *(&*tail_ - 1); + } + const T& back() const { + ABSL_HARDENING_ASSERT(!empty()); + return *(&*tail_ - 1); + } + + // Swaps the contents of this queue with `other`. + void swap(chunked_queue& other) noexcept { + using std::swap; + swap(head_, other.head_); + swap(tail_, other.tail_); + if (AllocatorTraits::propagate_on_container_swap::value) { + swap(alloc_and_size_, other.alloc_and_size_); + } else { + // Swap only the sizes; each object keeps its allocator. + // + // (It is undefined behavior to swap between two containers with unequal + // allocators if propagate_on_container_swap is false, so we don't have to + // handle that here like we do in the move-assignment operator.) + ABSL_HARDENING_ASSERT(get_allocator() == other.get_allocator()); + swap(alloc_and_size_.size, other.alloc_and_size_.size); + } + } + + // Erases all elements from the container. + // Note: Leaves one empty block allocated as an optimization. + // To free all memory, call shrink_to_fit() after calling clear(). + void clear(); + + iterator begin() { return head_; } + iterator end() { return tail_; } + + const_iterator begin() const { return head_; } + const_iterator end() const { return tail_; } + + const_iterator cbegin() const { return head_; } + const_iterator cend() const { return tail_; } + + // Returns the allocator associated with the container. 
+ allocator_type get_allocator() const { return alloc_and_size_.allocator(); } + + private: + // Empty base-class optimization: bundle storage for our allocator together + // with a field we had to store anyway (size), via inheriting from the + // allocator, so this allocator instance doesn't consume any storage + // when its type has no data members. + struct AllocatorAndSize : private allocator_type { + explicit AllocatorAndSize(const allocator_type& alloc) + : allocator_type(alloc) {} + const allocator_type& allocator() const { return *this; } + allocator_type& allocator() { return *this; } + size_t size = 0; + }; + + template + void RangeInit(Iter first, Iter last, std::input_iterator_tag) { + while (first != last) { + AddTailBlock(); + for (; first != last && tail_.ptr != tail_.limit; + ++alloc_and_size_.size, ++tail_.ptr, ++first) { + AllocatorTraits::construct(alloc_and_size_.allocator(), tail_.ptr, + *first); + } + } + } + + void Construct(T* start, T* limit) { + ABSL_ASSERT(start <= limit); + for (; start != limit; ++start) { + AllocatorTraits::construct(alloc_and_size_.allocator(), start); + } + } + + size_t Destroy(T* start, T* limit) { + ABSL_ASSERT(start <= limit); + const size_t n = limit - start; + for (; start != limit; ++start) { + AllocatorTraits::destroy(alloc_and_size_.allocator(), start); + } + return n; + } + + T* block_begin(Block* b) const { + return b == head_.block ? head_.ptr : b->start(); + } + T* block_end(Block* b) const { + // We have the choice of !b->next or b == tail_.block to determine if b is + // the tail or not. !b->next is usually faster because the caller of + // block_end() is most likely traversing the list of blocks so b->next is + // already fetched into some register. + return !b->next() ? tail_.ptr : b->limit(); + } + + void AddTailBlock(); + size_t NewBlockSize() { + // Double the last block size and bound to [kBlockSizeMin, kBlockSizeMax]. 
+ if (!tail_.block) return kBlockSizeMin; + return (std::min)(kBlockSizeMax, 2 * tail_.block->size()); + } + + T* AllocateBack(); + void EraseAllFrom(iterator i); + + // Destroys any contained elements and destroys all allocated storage. + // (Like clear(), except this doesn't leave any empty blocks behind.) + void DestroyAndDeallocateAll(); + + // The set of elements in the queue is the following: + // + // (1) When we have just one block: + // [head_.ptr .. tail_.ptr-1] + // (2) When we have multiple blocks: + // [head_.ptr .. head_.limit-1] + // ... concatenation of all elements from interior blocks ... + // [tail_.ptr .. tail_.limit-1] + // + // Rep invariants: + // When we have just one block: + // head_.limit == tail_.limit == &head_.block->element[kBlockSize] + // Always: + // head_.ptr <= head_.limit + // tail_.ptr <= tail_.limit + + iterator head_; + iterator tail_; + AllocatorAndSize alloc_and_size_; +}; + +template +constexpr size_t chunked_queue::kBlockSizeMin; + +template +constexpr size_t chunked_queue::kBlockSizeMax; + +template +inline void swap(chunked_queue& a, + chunked_queue& b) noexcept { + a.swap(b); +} + +template +chunked_queue& +chunked_queue::operator=( + chunked_queue&& other) noexcept { + if (this == &other) { + return *this; + } + DestroyAndDeallocateAll(); + + if constexpr (AllocatorTraits::propagate_on_container_move_assignment:: value) { + // Take over the storage of "other", along with its allocator. + head_ = other.head_; + tail_ = other.tail_; + alloc_and_size_ = std::move(other.alloc_and_size_); + other.head_ = {}; + other.tail_ = {}; + other.alloc_and_size_.size = 0; + } else if (get_allocator() == other.get_allocator()) { + // Take over the storage of "other", with which we share an allocator.
+ head_ = other.head_; + tail_ = other.tail_; + alloc_and_size_.size = other.alloc_and_size_.size; + other.head_ = {}; + other.tail_ = {}; + other.alloc_and_size_.size = 0; + } else { + // We cannot take over the storage from "other", since it has a different + // allocator; we're stuck move-assigning elements individually. + for (auto& elem : other) { + push_back(std::move(elem)); + } + } + return *this; +} + +template +inline chunked_queue::~chunked_queue() { + Block* b = head_.block; + while (b) { + Block* next = b->next(); + Destroy(block_begin(b), block_end(b)); + Block::Delete(b, &alloc_and_size_.allocator()); + b = next; + } +} + +template +void chunked_queue::resize(size_t new_size) { + while (new_size > size()) { + ptrdiff_t to_add = new_size - size(); + if (tail_.ptr == tail_.limit) { + AddTailBlock(); + } + T* start = tail_.ptr; + T* limit = (std::min)(tail_.limit, start + to_add); + Construct(start, limit); + tail_.ptr = limit; + alloc_and_size_.size += limit - start; + } + if (size() == new_size) { + return; + } + ABSL_ASSERT(new_size < size()); + auto new_end = begin(); + new_end.IncrBy(new_size); + ABSL_ASSERT(new_end != end()); + EraseAllFrom(new_end); +} + +template +inline void chunked_queue::AddTailBlock() { + ABSL_ASSERT(tail_.ptr == tail_.limit); + auto* b = Block::New(NewBlockSize(), &alloc_and_size_.allocator()); + if (!head_.block) { + ABSL_ASSERT(!tail_.block); + head_ = iterator(b); + } else { + ABSL_ASSERT(tail_.block); + tail_.block->set_next(b); + } + tail_ = iterator(b); +} + +template +inline T* chunked_queue::AllocateBack() { + if (tail_.ptr == tail_.limit) { + AddTailBlock(); + } + ++alloc_and_size_.size; + return tail_.ptr++; +} + +template +inline void chunked_queue::EraseAllFrom(iterator i) { + if (!i.block) { + return; + } + ABSL_ASSERT(i.ptr); + ABSL_ASSERT(i.limit); + alloc_and_size_.size -= Destroy(i.ptr, block_end(i.block)); + Block* b = i.block->next(); + while (b) { + Block* next = b->next(); + alloc_and_size_.size -=
Destroy(b->start(), block_end(b)); + Block::Delete(b, &alloc_and_size_.allocator()); + b = next; + } + tail_ = i; + tail_.block->set_next(nullptr); +} + +template +inline void chunked_queue::DestroyAndDeallocateAll() { + Block* b = head_.block; + while (b) { + Block* next = b->next(); + Destroy(block_begin(b), block_end(b)); + Block::Delete(b, &alloc_and_size_.allocator()); + b = next; + } + head_ = iterator(); + tail_ = iterator(); + alloc_and_size_.size = 0; +} + +template +inline void chunked_queue::pop_front() { + ABSL_HARDENING_ASSERT(!empty()); + ABSL_ASSERT(head_.block); + AllocatorTraits::destroy(alloc_and_size_.allocator(), head_.ptr); + ++head_.ptr; + --alloc_and_size_.size; + if (empty()) { + // Reset head and tail to the start of the (only) block. + ABSL_ASSERT(head_.block == tail_.block); + head_.ptr = tail_.ptr = head_.block->start(); + return; + } + if (head_.ptr == head_.limit) { + Block* n = head_.block->next(); + Block::Delete(head_.block, &alloc_and_size_.allocator()); + head_ = iterator(n); + } +} + +template +void chunked_queue::clear() { + // NOTE: As an optimization we leave one block allocated. 
+ Block* b = head_.block; + if (!b) { + ABSL_ASSERT(empty()); + return; + } + while (b) { + Block* next = b->next(); + Destroy(block_begin(b), block_end(b)); + if (head_.block != b) { + Block::Delete(b, &alloc_and_size_.allocator()); + } + b = next; + } + b = head_.block; + b->set_next(nullptr); + head_ = tail_ = iterator(b); + alloc_and_size_.size = 0; +} + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_CHUNKED_QUEUE_H_ diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h index 6c238fc38..d47b0e441 100644 --- a/absl/container/fixed_array.h +++ b/absl/container/fixed_array.h @@ -84,11 +84,9 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray { static constexpr size_t kInlineBytesDefault = 256; using AllocatorTraits = std::allocator_traits; - // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, - // but this seems to be mostly pedantic. template - using EnableIfForwardIterator = std::enable_if_t< - base_internal::IsAtLeastForwardIterator::value>; + using EnableIfInputIterator = + std::enable_if_t::value>; static constexpr bool NoexceptCopyable() { return std::is_nothrow_copy_constructible::value && absl::allocator_is_nothrow::value; @@ -161,8 +159,8 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray { // Creates an array initialized with the elements from the input // range. The array's size will always be `std::distance(first, last)`. - // REQUIRES: Iterator must be a forward_iterator or better. - template * = nullptr> + // REQUIRES: Iterator must be an input_iterator or better.
+ template * = nullptr> FixedArray(Iterator first, Iterator last, const allocator_type& a = allocator_type()) : storage_(std::distance(first, last), a) { @@ -392,8 +390,7 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray { template friend H AbslHashValue(H h, const FixedArray& v) { - return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), - hash_internal::WeaklyMixedInteger{v.size()}); + return H::combine_contiguous(std::move(h), v.data(), v.size()); } private: diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h index bc86ced99..7ce3353e5 100644 --- a/absl/container/flat_hash_map.h +++ b/absl/container/flat_hash_map.h @@ -115,25 +115,29 @@ struct FlatHashMapPolicy; // absl::flat_hash_map ducks = // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; // -// // Insert a new element into the flat hash map -// ducks.insert({"d", "donald"}); +// // Insert a new element into the flat hash map +// ducks.insert({"d", "donald"}); // -// // Force a rehash of the flat hash map -// ducks.rehash(0); +// // Force a rehash of the flat hash map +// ducks.rehash(0); // -// // Find the element with the key "b" -// std::string search_key = "b"; -// auto result = ducks.find(search_key); -// if (result != ducks.end()) { -// std::cout << "Result: " << result->second << std::endl; -// } -template , - class Eq = DefaultHashContainerEq, - class Allocator = std::allocator>> +// // Find the element with the key "b" +// std::string search_key = "b"; +// auto result = ducks.find(search_key); +// if (result != ducks.end()) { +// std::cout << "Result: " << result->second << std::endl; +// } +template < + class K, class V, + class Hash = + typename container_internal::FlatHashMapPolicy::DefaultHash, + class Eq = typename container_internal::FlatHashMapPolicy::DefaultEq, + class Allocator = + typename container_internal::FlatHashMapPolicy::DefaultAlloc> class ABSL_ATTRIBUTE_OWNER flat_hash_map - : public absl::container_internal::raw_hash_map< + : public 
absl::container_internal::InstantiateRawHashMap< absl::container_internal::FlatHashMapPolicy, Hash, Eq, - Allocator> { + Allocator>::type { using Base = typename flat_hash_map::raw_hash_map; public: @@ -158,9 +162,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_map // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_map map4; - // map4 = map3; + // // Hash functor and Comparator are copied as well + // absl::flat_hash_map map4; + // map4 = map3; // // * Move constructor // @@ -462,7 +466,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_map // // Sets the number of slots in the `flat_hash_map` to the number needed to // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. + // maximum load factor, and may rehash the container if needed. After this + // returns, it is guaranteed that `count - size()` elements can be inserted + // into the `flat_hash_map` without another rehash. using Base::reserve; // flat_hash_map::at() @@ -635,6 +641,10 @@ struct FlatHashMapPolicy { using mapped_type = V; using init_type = std::pair; + using DefaultHash = DefaultHashContainerHash; + using DefaultEq = DefaultHashContainerEq; + using DefaultAlloc = std::allocator>; + template static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { slot_policy::construct(alloc, slot, std::forward(args)...); @@ -660,10 +670,10 @@ struct FlatHashMapPolicy { std::forward(args)...); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return memory_internal::IsLayoutCompatible::value - ? &TypeErasedApplyToSlotFn + ? 
&TypeErasedApplyToSlotFn : nullptr; } diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h index bf63eb598..a469fa0af 100644 --- a/absl/container/flat_hash_set.h +++ b/absl/container/flat_hash_set.h @@ -114,22 +114,26 @@ struct FlatHashSetPolicy; // absl::flat_hash_set ducks = // {"huey", "dewey", "louie"}; // -// // Insert a new element into the flat hash set -// ducks.insert("donald"); +// // Insert a new element into the flat hash set +// ducks.insert("donald"); // -// // Force a rehash of the flat hash set -// ducks.rehash(0); +// // Force a rehash of the flat hash set +// ducks.rehash(0); // -// // See if "dewey" is present -// if (ducks.contains("dewey")) { -// std::cout << "We found dewey!" << std::endl; -// } -template , - class Eq = DefaultHashContainerEq, - class Allocator = std::allocator> +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" << std::endl; +// } +template < + class T, + class Hash = typename container_internal::FlatHashSetPolicy::DefaultHash, + class Eq = typename container_internal::FlatHashSetPolicy::DefaultEq, + class Allocator = + typename container_internal::FlatHashSetPolicy::DefaultAlloc> class ABSL_ATTRIBUTE_OWNER flat_hash_set - : public absl::container_internal::raw_hash_set< - absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { + : public absl::container_internal::InstantiateRawHashSet< + absl::container_internal::FlatHashSetPolicy, Hash, Eq, + Allocator>::type { using Base = typename flat_hash_set::raw_hash_set; public: @@ -154,9 +158,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_set // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_set set4; - // set4 = set3; + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; // // * Move constructor // @@ -396,7 +400,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_set // // Sets the number of slots 
in the `flat_hash_set` to the number needed to // accommodate at least `count` total elements without exceeding the current - // maximum load factor, and may rehash the container if needed. + // maximum load factor, and may rehash the container if needed. After this + // returns, it is guaranteed that `count - size()` elements can be inserted + // into the `flat_hash_set` without another rehash. using Base::reserve; // flat_hash_set::contains() @@ -533,6 +539,10 @@ struct FlatHashSetPolicy { using init_type = T; using constant_iterators = std::true_type; + using DefaultHash = DefaultHashContainerHash; + using DefaultEq = DefaultHashContainerEq; + using DefaultAlloc = std::allocator; + template static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { absl::allocator_traits::construct(*alloc, slot, @@ -558,9 +568,9 @@ struct FlatHashSetPolicy { static size_t space_used(const T*) { return 0; } - template + template static constexpr HashSlotFn get_hash_slot_fn() { - return &TypeErasedApplyToSlotFn; + return &TypeErasedApplyToSlotFn; } }; } // namespace container_internal diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h index f871b3491..6b05d92c5 100644 --- a/absl/container/inlined_vector.h +++ b/absl/container/inlined_vector.h @@ -815,13 +815,11 @@ class ABSL_ATTRIBUTE_WARN_UNUSED InlinedVector { // `InlinedVector::clear()` // // Destroys all elements in the inlined vector, setting the size to `0` and - // deallocating any held memory. + // preserving capacity. void clear() noexcept { inlined_vector_internal::DestroyAdapter::DestroyElements( storage_.GetAllocator(), data(), size()); - storage_.DeallocateIfAllocated(); - - storage_.SetInlinedSize(0); + storage_.SetSize(0); } // `InlinedVector::reserve(...)` @@ -1008,9 +1006,17 @@ bool operator>=(const absl::InlinedVector& a, // call this directly. 
template H AbslHashValue(H h, const absl::InlinedVector& a) { - auto size = a.size(); - return H::combine(H::combine_contiguous(std::move(h), a.data(), size), - hash_internal::WeaklyMixedInteger{size}); + return H::combine_contiguous(std::move(h), a.data(), a.size()); +} + +template +constexpr typename InlinedVector::size_type erase_if( + InlinedVector& v, Predicate pred) { + const auto it = std::remove_if(v.begin(), v.end(), std::move(pred)); + const auto removed = static_cast::size_type>( + std::distance(it, v.end())); + v.erase(it, v.end()); + return removed; } ABSL_NAMESPACE_END diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h index 21f00ae41..e1649e3aa 100644 --- a/absl/container/internal/btree_container.h +++ b/absl/container/internal/btree_container.h @@ -640,12 +640,12 @@ class btree_map_container : public btree_set_container { } template >()> mapped_type &operator[](key_arg &&k) ABSL_ATTRIBUTE_LIFETIME_BOUND { - return try_emplace(std::forward(k)).first->second; + return try_emplace(std::forward>(k)).first->second; } template > = 0> mapped_type &operator[](key_arg &&k ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY( this)) ABSL_ATTRIBUTE_LIFETIME_BOUND { - return this->template operator[](std::forward(k)); + return this->template operator[](std::forward>(k)); } template @@ -672,27 +672,36 @@ class btree_map_container : public btree_set_container { std::pair insert_or_assign_impl(K &&k, M &&obj) { const std::pair ret = this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); + if (!ret.second) { + // NOLINTNEXTLINE(bugprone-use-after-move) + ret.first->second = std::forward(obj); + } return ret; } template iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { const std::pair ret = this->tree_.insert_hint_unique( iterator(hint), k, std::forward(k), std::forward(obj)); - if (!ret.second) ret.first->second = std::forward(obj); + if 
(!ret.second) { + // NOLINTNEXTLINE(bugprone-use-after-move) + ret.first->second = std::forward(obj); + } return ret.first; } template std::pair try_emplace_impl(K &&k, Args &&... args) { return this->tree_.insert_unique( + // NOLINTNEXTLINE(bugprone-use-after-move) k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)); } template iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) { return this->tree_ - .insert_hint_unique(iterator(hint), k, std::piecewise_construct, + .insert_hint_unique(iterator(hint), + // NOLINTNEXTLINE(bugprone-use-after-move) + k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)) .first; diff --git a/absl/container/internal/chunked_queue.h b/absl/container/internal/chunked_queue.h new file mode 100644 index 000000000..c3718ac3b --- /dev/null +++ b/absl/container/internal/chunked_queue.h @@ -0,0 +1,173 @@ +// Copyright 2025 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_ +#define ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/macros.h" +#include "absl/container/internal/layout.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { + +// ChunkedQueueBlock defines a node in a forward list of uninitialized storage +// of size T's. The user is responsible for constructing and destroying T's in +// said storage. +// +// ChunkedQueueBlock::New(size_hint) returns said node, with at least size_hint +// T's of uninitialized storage. +template +class ChunkedQueueBlock { + private: + using ChunkedQueueBlockAllocator = typename std::allocator_traits< + Allocator>::template rebind_alloc; + using ByteAllocator = + typename std::allocator_traits::template rebind_alloc; + + public: + // NB, instances of this must not be created or destroyed directly, only via + // the New() and Delete() methods. (This notionally-private constructor is + // public only to allow access from allocator types used by New().) + explicit ChunkedQueueBlock(size_t size) + : next_(nullptr), limit_(start() + size) {} + + // Must be deleted by ChunkedQueueBlock::Delete. 
+ static ChunkedQueueBlock* New(size_t size_hint, Allocator* alloc) { // NOLINT + ABSL_ASSERT(size_hint >= size_t{1}); + size_t allocation_bytes = AllocSize(size_hint); + void* mem; + std::tie(mem, allocation_bytes) = Allocate(allocation_bytes, alloc); + const size_t element_count = + (allocation_bytes - start_offset()) / sizeof(T); + ChunkedQueueBlock* as_block = static_cast(mem); + ChunkedQueueBlockAllocator block_alloc(*alloc); + std::allocator_traits::construct( + block_alloc, as_block, element_count); + return as_block; + } + + static void Delete(ChunkedQueueBlock* ptr, Allocator* alloc) { + const size_t allocation_bytes = AllocSize(ptr->size()); + ChunkedQueueBlockAllocator block_alloc(*alloc); + std::allocator_traits::destroy(block_alloc, + ptr); + if constexpr (std::is_same_v>) { +#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__ + if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) { + ::operator delete(ptr +#ifdef __cpp_sized_deallocation + , + allocation_bytes +#endif + , + std::align_val_t(alignment())); + return; + } +#endif + ::operator delete(ptr); + } else { + void* mem = ptr; + ByteAllocator byte_alloc(*alloc); + std::allocator_traits::deallocate( + byte_alloc, static_cast(mem), allocation_bytes); + } + } + + ChunkedQueueBlock* next() const { return next_; } + void set_next(ChunkedQueueBlock* next) { next_ = next; } + T* start() { + return reinterpret_cast(reinterpret_cast(this) + + start_offset()); + } + T* limit() { return limit_; } + size_t size() { return limit() - start(); } + + static constexpr size_t block_size_from_bytes(size_t bytes) { + return bytes <= static_cast(start_offset()) + ? size_t{1} + : elements_in_bytes(bytes - start_offset()); + } + + private: + ChunkedQueueBlock(const ChunkedQueueBlock&) = delete; + ChunkedQueueBlock& operator=(const ChunkedQueueBlock&) = delete; + + // The byte size to allocate to ensure space for `min_element_count` elements. 
+ static constexpr size_t AllocSize(size_t min_element_count) { + return absl::container_internal::Layout( + 1, min_element_count) + .AllocSize(); + } + + static constexpr ptrdiff_t start_offset() { + return absl::container_internal::Layout(1, 1) + .template Offset<1>(); + } + + static constexpr size_t alignment() { + return absl::container_internal::Layout(1, 1) + .Alignment(); + } + + static constexpr size_t elements_in_bytes(size_t bytes) { + return (bytes + sizeof(T) - 1) / sizeof(T); + } + + static std::pair Allocate(size_t allocation_bytes, + Allocator* alloc) { + // If we're using the default allocator, then we can use new. + void* mem; + if constexpr (std::is_same_v>) { + // Older GCC versions have an unused variable warning on `alloc` inside + // this constexpr branch. + static_cast(alloc); +#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__ + if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) { + // Align the allocation to respect alignof(T). + mem = ::operator new(allocation_bytes, std::align_val_t(alignment())); + return {mem, allocation_bytes}; + } +#endif + mem = ::operator new(allocation_bytes); + } else { + ByteAllocator byte_alloc(*alloc); + mem = std::allocator_traits::allocate(byte_alloc, + allocation_bytes); + } + return {mem, allocation_bytes}; + } + + ChunkedQueueBlock* next_; + T* limit_; +}; + +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_ diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h index 5ef6c569a..3e263a31e 100644 --- a/absl/container/internal/common.h +++ b/absl/container/internal/common.h @@ -15,7 +15,10 @@ #ifndef ABSL_CONTAINER_INTERNAL_COMMON_H_ #define ABSL_CONTAINER_INTERNAL_COMMON_H_ +#include #include +#include +#include #include #include "absl/meta/type_traits.h" @@ -243,6 +246,54 @@ struct InsertReturnType { NodeType node; }; +// Utilities to strip redundant template parameters from the underlying +// implementation 
types. +// We use a variadic pack (ie Params...) to specify required prefix of types for +// non-default types, and then we use GetFromListOr to select the provided types +// or the default ones otherwise. +// +// These default types do not contribute information for debugging and just +// bloat the binary. +// Removing the redundant tail types reduces mangled names and stringified +// function names like __PRETTY_FUNCTION__. +// +// How to use: +// 1. Define a template with `typename ...Params` +// 2. Instantiate it via `ApplyWithoutDefaultSuffix<>` to only pass the minimal +// set of types. +// 3. Inside the template use `GetFromListOr` to map back from the existing +// `Params` list to the actual types, filling the gaps when types are +// missing. + +template +using GetFromListOr = std::tuple_element_t<(std::min)(N, sizeof...(Params)), + std::tuple>; + +template +struct TypeList { + template