diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index f01021bb02e..a0036492859 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -25,7 +25,6 @@ set(ABSL_INTERNAL_DLL_FILES
   "base/internal/low_level_alloc.cc"
   "base/internal/low_level_alloc.h"
   "base/internal/low_level_scheduling.h"
-  "base/internal/nullability_deprecated.h"
   "base/internal/per_thread_tls.h"
   "base/internal/poison.cc"
   "base/internal/poison.h"
@@ -160,8 +159,6 @@ set(ABSL_INTERNAL_DLL_FILES
   "hash/internal/hash.h"
   "hash/internal/hash.cc"
   "hash/internal/spy_hash_state.h"
-  "hash/internal/low_level_hash.h"
-  "hash/internal/low_level_hash.cc"
   "hash/internal/weakly_mixed_integer.h"
   "log/absl_check.h"
   "log/absl_log.h"
@@ -204,6 +201,7 @@ set(ABSL_INTERNAL_DLL_FILES
   "log/initialize.cc"
   "log/initialize.h"
   "log/log.h"
+  "log/log_entry.cc"
   "log/log_entry.h"
   "log/log_sink.cc"
   "log/log_sink.h"
@@ -218,10 +216,14 @@ set(ABSL_INTERNAL_DLL_FILES
   "numeric/int128.h"
   "numeric/internal/bits.h"
   "numeric/internal/representation.h"
+  "profiling/hashtable.cc"
+  "profiling/hashtable.h"
   "profiling/internal/exponential_biased.cc"
   "profiling/internal/exponential_biased.h"
   "profiling/internal/periodic_sampler.cc"
   "profiling/internal/periodic_sampler.h"
+  "profiling/internal/profile_builder.cc"
+  "profiling/internal/profile_builder.h"
   "profiling/internal/sample_recorder.h"
   "random/bernoulli_distribution.h"
   "random/beta_distribution.h"
@@ -719,8 +721,10 @@ int main() { return 0; }
 if(ABSL_INTERNAL_AT_LEAST_CXX20)
   set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_20)
-else()
+elseif(ABSL_INTERNAL_AT_LEAST_CXX17)
   set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17)
+else()
+  message(FATAL_ERROR "The compiler defaults to or is configured for C++ < 17. C++ >= 17 is required and Abseil and all libraries that use Abseil must use the same C++ language standard")
 endif()
 
 function(absl_internal_dll_contains)
@@ -825,6 +829,9 @@ function(absl_make_dll)
       ${_dll_libs}
       ${ABSL_DEFAULT_LINKOPTS}
       $<$<PLATFORM_ID:Android>:-llog>
+      $<$<PLATFORM_ID:Windows>:-ladvapi32>
+      $<$<PLATFORM_ID:Windows>:-ldbghelp>
+      $<$<PLATFORM_ID:Windows>:-lbcrypt>
   )
   set_target_properties(${_dll} PROPERTIES
     LINKER_LANGUAGE "CXX"
diff --git a/MODULE.bazel b/MODULE.bazel
index 29e624af495..bc5f777a4a2 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -25,13 +25,13 @@ cc_configure = use_extension("@rules_cc//cc:extensions.bzl",
     dev_dependency = True)
 use_repo(cc_configure, "local_config_cc")
 
-bazel_dep(name = "rules_cc", version = "0.1.1")
-bazel_dep(name = "bazel_skylib", version = "1.7.1")
-bazel_dep(name = "platforms", version = "0.0.11")
+bazel_dep(name = "rules_cc", version = "0.2.0")
+bazel_dep(name = "bazel_skylib", version = "1.8.1")
+bazel_dep(name = "platforms", version = "1.0.0")
 
 bazel_dep(
     name = "google_benchmark",
-    version = "1.9.2",
+    version = "1.9.4",
     dev_dependency = True,
 )
diff --git a/absl/abseil.podspec.gen.py b/absl/abseil.podspec.gen.py
index e1afa210bc9..e19f951193c 100755
--- a/absl/abseil.podspec.gen.py
+++ b/absl/abseil.podspec.gen.py
@@ -42,6 +42,7 @@
       'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"',
       'USE_HEADERMAP' => 'NO',
       'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+      'CLANG_CXX_LANGUAGE_STANDARD' => 'c++17',
     }
     s.ios.deployment_target = '12.0'
     s.osx.deployment_target = '10.13'
diff --git a/absl/algorithm/BUILD.bazel b/absl/algorithm/BUILD.bazel
index 0ec8b921e72..7d8350c73e3 100644
--- a/absl/algorithm/BUILD.bazel
+++ b/absl/algorithm/BUILD.bazel
@@ -14,6 +14,8 @@
 # limitations under the License.
 #
+load("@rules_cc//cc:cc_library.bzl", "cc_library")
+load("@rules_cc//cc:cc_test.bzl", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -67,7 +69,6 @@ cc_library(
         ":algorithm",
         "//absl/base:config",
        "//absl/base:core_headers",
-        "//absl/base:nullability",
         "//absl/meta:type_traits",
     ],
 )
diff --git a/absl/algorithm/CMakeLists.txt b/absl/algorithm/CMakeLists.txt
index f3dd138a2b4..cdd5d146096 100644
--- a/absl/algorithm/CMakeLists.txt
+++ b/absl/algorithm/CMakeLists.txt
@@ -51,7 +51,6 @@ absl_cc_library(
     absl::config
     absl::core_headers
     absl::meta
-    absl::nullability
   PUBLIC
 )
 
@@ -64,7 +63,6 @@ absl_cc_test(
     ${ABSL_TEST_COPTS}
   DEPS
     absl::algorithm_container
-    absl::base
     absl::config
     absl::core_headers
     absl::memory
diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h
index 913268ddad3..c0b8a10ae25 100644
--- a/absl/algorithm/container.h
+++ b/absl/algorithm/container.h
@@ -53,7 +53,6 @@
 #include "absl/algorithm/algorithm.h"
 #include "absl/base/config.h"
 #include "absl/base/macros.h"
-#include "absl/base/nullability.h"
 #include "absl/meta/type_traits.h"
 
 namespace absl {
@@ -75,8 +74,8 @@ using ContainerIter = decltype(begin(std::declval<C&>()));
 
 // An MSVC bug involving template parameter substitution requires us to use
 // decltype() here instead of just std::pair.
 template <typename C1, typename C2>
-using ContainerIterPairType =
-    decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+using ContainerIterPairType = decltype(std::make_pair(
+    std::declval<ContainerIter<C1>>(), std::declval<ContainerIter<C2>>()));
 
 template <typename C>
 using ContainerDifferenceType = decltype(std::distance(
@@ -522,7 +521,8 @@ ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
 // Container-based version of the `std::copy()` function to copy a
 // container's elements into an iterator.
 template <typename InputSequence, typename OutputIterator>
-OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_copy(const InputSequence& input, OutputIterator output) {
   return std::copy(container_algorithm_internal::c_begin(input),
                    container_algorithm_internal::c_end(input), output);
 }
@@ -532,7 +532,8 @@ OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
 // Container-based version of the `std::copy_n()` function to copy a
 // container's first N elements into an iterator.
 template <typename C, typename Size, typename OutputIterator>
-OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_copy_n(const C& input, Size n, OutputIterator output) {
   return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
 }
@@ -541,8 +542,8 @@ OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
 // Container-based version of the `std::copy_if()` function to copy
 // a container's elements satisfying some condition into an iterator.
 template <typename InputSequence, typename OutputIterator, typename Pred>
-OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
-                         Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_copy_if(const InputSequence& input, OutputIterator output, Pred&& pred) {
   return std::copy_if(container_algorithm_internal::c_begin(input),
                       container_algorithm_internal::c_end(input), output,
                       std::forward<Pred>(pred));
 }
@@ -553,8 +554,8 @@ OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
 // Container-based version of the `std::copy_backward()` function to
 // copy a container's elements in reverse order into an iterator.
 template <typename C, typename BidirectionalIterator>
-BidirectionalIterator c_copy_backward(const C& src,
-                                      BidirectionalIterator dest) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 BidirectionalIterator
+c_copy_backward(const C& src, BidirectionalIterator dest) {
   return std::copy_backward(container_algorithm_internal::c_begin(src),
                             container_algorithm_internal::c_end(src), dest);
 }
@@ -564,7 +565,8 @@ BidirectionalIterator c_copy_backward(const C& src,
 // Container-based version of the `std::move()` function to move
 // a container's elements into an iterator.
 template <typename C, typename OutputIterator>
-OutputIterator c_move(C&& src, OutputIterator dest) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_move(C&& src,
+                                                          OutputIterator dest) {
   return std::move(container_algorithm_internal::c_begin(src),
                    container_algorithm_internal::c_end(src), dest);
 }
@@ -574,7 +576,8 @@ OutputIterator c_move(C&& src, OutputIterator dest) {
 // Container-based version of the `std::move_backward()` function to
 // move a container's elements into an iterator in reverse order.
 template <typename C, typename BidirectionalIterator>
-BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 BidirectionalIterator
+c_move_backward(C&& src, BidirectionalIterator dest) {
   return std::move_backward(container_algorithm_internal::c_begin(src),
                             container_algorithm_internal::c_end(src), dest);
 }
@@ -585,7 +588,9 @@ BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
 // swap a container's elements with another container's elements. Swaps the
 // first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
 template <typename C1, typename C2>
-container_algorithm_internal::ContainerIter<C1> c_swap_ranges(C1& c1, C2& c2) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C1>
+    c_swap_ranges(C1& c1, C2& c2) {
   auto first1 = container_algorithm_internal::c_begin(c1);
   auto last1 = container_algorithm_internal::c_end(c1);
   auto first2 = container_algorithm_internal::c_begin(c2);
@@ -605,8 +610,8 @@ container_algorithm_internal::ContainerIter<C1> c_swap_ranges(C1& c1, C2& c2) {
 // result in an iterator pointing to the last transformed element in the output
 // range.
 template <typename InputSequence, typename OutputIterator, typename UnaryOp>
-OutputIterator c_transform(const InputSequence& input, OutputIterator output,
-                           UnaryOp&& unary_op) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_transform(
+    const InputSequence& input, OutputIterator output, UnaryOp&& unary_op) {
   return std::transform(container_algorithm_internal::c_begin(input),
                         container_algorithm_internal::c_end(input), output,
                         std::forward<UnaryOp>(unary_op));
@@ -617,9 +622,9 @@ OutputIterator c_transform(const InputSequence& input, OutputIterator output,
 // where N = min(size(c1), size(c2)).
 template <typename InputSequence1, typename InputSequence2,
           typename OutputIterator, typename BinaryOp>
-OutputIterator c_transform(const InputSequence1& input1,
-                           const InputSequence2& input2, OutputIterator output,
-                           BinaryOp&& binary_op) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_transform(const InputSequence1& input1, const InputSequence2& input2,
+            OutputIterator output, BinaryOp&& binary_op) {
   auto first1 = container_algorithm_internal::c_begin(input1);
   auto last1 = container_algorithm_internal::c_end(input1);
   auto first2 = container_algorithm_internal::c_begin(input2);
@@ -638,7 +643,9 @@ OutputIterator c_transform(const InputSequence1& input1,
 // replace a container's elements of some value with a new value. The container
 // is modified in place.
 template <typename Sequence, typename T>
-void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_replace(Sequence& sequence,
+                                                   const T& old_value,
+                                                   const T& new_value) {
   std::replace(container_algorithm_internal::c_begin(sequence),
                container_algorithm_internal::c_end(sequence), old_value,
                new_value);
@@ -650,7 +657,8 @@ void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
 // replace a container's elements of some value with a new value based on some
 // condition. The container is modified in place.
 template <typename C, typename Pred, typename T>
-void c_replace_if(C& c, Pred&& pred, T&& new_value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_replace_if(C& c, Pred&& pred,
+                                                      T&& new_value) {
   std::replace_if(container_algorithm_internal::c_begin(c),
                   container_algorithm_internal::c_end(c),
                   std::forward<Pred>(pred), std::forward<T>(new_value));
@@ -662,8 +670,8 @@ void c_replace_if(C& c, Pred&& pred, T&& new_value) {
 // replace a container's elements of some value with a new value and return the
 // results within an iterator.
 template <typename C, typename OutputIterator, typename T>
-OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
-                              T&& new_value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_replace_copy(
+    const C& c, OutputIterator result, T&& old_value, T&& new_value) {
   return std::replace_copy(container_algorithm_internal::c_begin(c),
                            container_algorithm_internal::c_end(c), result,
                            std::forward<T>(old_value),
@@ -676,8 +684,8 @@ OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
 // to replace a container's elements of some value with a new value based on
 // some condition, and return the results within an iterator.
 template <typename C, typename OutputIterator, typename Pred, typename T>
-OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
-                                 const T& new_value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_replace_copy_if(
+    const C& c, OutputIterator result, Pred&& pred, const T& new_value) {
   return std::replace_copy_if(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c), result,
                               std::forward<Pred>(pred), new_value);
@@ -688,7 +696,7 @@ OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
 // Container-based version of the `std::fill()` function to fill a
 // container with some value.
 template <typename C, typename T>
-void c_fill(C& c, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_fill(C& c, const T& value) {
   std::fill(container_algorithm_internal::c_begin(c),
             container_algorithm_internal::c_end(c), value);
 }
@@ -698,7 +706,8 @@ void c_fill(C& c, const T& value) {
 // Container-based version of the `std::fill_n()` function to fill
 // the first N elements in a container with some value.
 template <typename C, typename Size, typename T>
-void c_fill_n(C& c, Size n, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_fill_n(C& c, Size n,
+                                                  const T& value) {
   std::fill_n(container_algorithm_internal::c_begin(c), n, value);
 }
@@ -707,7 +716,7 @@ void c_fill_n(C& c, Size n, const T& value) {
 // Container-based version of the `std::generate()` function to
 // assign a container's elements to the values provided by the given generator.
 template <typename C, typename Generator>
-void c_generate(C& c, Generator&& gen) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_generate(C& c, Generator&& gen) {
   std::generate(container_algorithm_internal::c_begin(c),
                 container_algorithm_internal::c_end(c),
                 std::forward<Generator>(gen));
@@ -719,8 +728,9 @@ void c_generate(C& c, Generator&& gen) {
 // assign a container's first N elements to the values provided by the given
 // generator.
 template <typename C, typename Size, typename Generator>
-container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
-                                                            Generator&& gen) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C>
+    c_generate_n(C& c, Size n, Generator&& gen) {
   return std::generate_n(container_algorithm_internal::c_begin(c), n,
                          std::forward<Generator>(gen));
 }
@@ -736,8 +746,8 @@ container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
 // copy a container's elements while removing any elements matching the given
 // `value`.
 template <typename C, typename OutputIterator, typename T>
-OutputIterator c_remove_copy(const C& c, OutputIterator result,
-                             const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_remove_copy(const C& c, OutputIterator result, const T& value) {
   return std::remove_copy(container_algorithm_internal::c_begin(c),
                           container_algorithm_internal::c_end(c), result,
                           value);
@@ -749,8 +759,8 @@ OutputIterator c_remove_copy(const C& c, OutputIterator result,
 // to copy a container's elements while removing any elements matching the given
 // condition.
 template <typename C, typename OutputIterator, typename Pred>
-OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
-                                Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_remove_copy_if(const C& c, OutputIterator result, Pred&& pred) {
   return std::remove_copy_if(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c), result,
                              std::forward<Pred>(pred));
@@ -762,7 +772,8 @@ OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
 // copy a container's elements while removing any elements containing duplicate
 // values.
 template <typename C, typename OutputIterator>
-OutputIterator c_unique_copy(const C& c, OutputIterator result) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_unique_copy(const C& c, OutputIterator result) {
   return std::unique_copy(container_algorithm_internal::c_begin(c),
                           container_algorithm_internal::c_end(c), result);
 }
@@ -770,8 +781,8 @@ OutputIterator c_unique_copy(const C& c, OutputIterator result) {
 // Overload of c_unique_copy() for using a predicate evaluation other than
 // `==` for comparing uniqueness of the element values.
 template <typename C, typename OutputIterator, typename BinaryPredicate>
-OutputIterator c_unique_copy(const C& c, OutputIterator result,
-                             BinaryPredicate&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_unique_copy(const C& c, OutputIterator result, BinaryPredicate&& pred) {
   return std::unique_copy(container_algorithm_internal::c_begin(c),
                           container_algorithm_internal::c_end(c), result,
                           std::forward<BinaryPredicate>(pred));
@@ -782,7 +793,7 @@ OutputIterator c_unique_copy(const C& c, OutputIterator result,
 // Container-based version of the `std::reverse()` function to
 // reverse a container's elements.
 template <typename Sequence>
-void c_reverse(Sequence& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_reverse(Sequence& sequence) {
   std::reverse(container_algorithm_internal::c_begin(sequence),
                container_algorithm_internal::c_end(sequence));
 }
@@ -792,7 +803,8 @@ void c_reverse(Sequence& sequence) {
 // Container-based version of the `std::reverse()` function to
 // reverse a container's elements and write them to an iterator range.
 template <typename C, typename OutputIterator>
-OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_reverse_copy(const C& sequence, OutputIterator result) {
   return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
                            container_algorithm_internal::c_end(sequence),
                            result);
@@ -805,7 +817,8 @@ OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
 // the first element in the container.
 template <typename C,
           typename Iterator = container_algorithm_internal::ContainerIter<C>>
-Iterator c_rotate(C& sequence, Iterator middle) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 Iterator c_rotate(C& sequence,
+                                                      Iterator middle) {
   return absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
                       container_algorithm_internal::c_end(sequence));
 }
@@ -816,10 +829,10 @@ Iterator c_rotate(C& sequence, Iterator middle) {
 // shift a container's elements leftward such that the `middle` element becomes
 // the first element in a new iterator range.
 template <typename C, typename OutputIterator>
-OutputIterator c_rotate_copy(
-    const C& sequence,
-    container_algorithm_internal::ContainerIter<const C> middle,
-    OutputIterator result) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_rotate_copy(const C& sequence,
+              container_algorithm_internal::ContainerIter<const C> middle,
+              OutputIterator result) {
   return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
                           middle, container_algorithm_internal::c_end(sequence),
                           result);
@@ -861,7 +874,8 @@ OutputIterator c_sample(const C& c, OutputIterator result, Distance n,
 // to test whether all elements in the container for which `pred` returns `true`
 // precede those for which `pred` is `false`.
 template <typename C, typename Pred>
-bool c_is_partitioned(const C& c, Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_partitioned(const C& c,
+                                                          Pred&& pred) {
   return std::is_partitioned(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c),
                              std::forward<Pred>(pred));
@@ -874,7 +888,9 @@ bool c_is_partitioned(const C& c, Pred&& pred) {
 // which `pred` returns `true` precede all those for which it returns `false`,
 // returning an iterator to the first element of the second group.
 template <typename C, typename Pred>
-container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C>
+    c_partition(C& c, Pred&& pred) {
   return std::partition(container_algorithm_internal::c_begin(c),
                         container_algorithm_internal::c_end(c),
                         std::forward<Pred>(pred));
@@ -903,9 +919,9 @@ container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
 template <typename C, typename OutputIterator1, typename OutputIterator2,
           typename Pred>
-std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
-    const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
-    Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 std::pair<OutputIterator1, OutputIterator2>
+c_partition_copy(const C& c, OutputIterator1 out_true,
+                 OutputIterator2 out_false, Pred&& pred) {
   return std::partition_copy(container_algorithm_internal::c_begin(c),
                              container_algorithm_internal::c_end(c), out_true,
                              out_false, std::forward<Pred>(pred));
@@ -917,8 +933,9 @@ std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
 // to return the first element of an already partitioned container for which
 // the given `pred` is not `true`.
 template <typename C, typename Pred>
-container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
-                                                                 Pred&& pred) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C>
+    c_partition_point(C& c, Pred&& pred) {
   return std::partition_point(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c),
                               std::forward<Pred>(pred));
@@ -933,7 +950,7 @@ container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
 // Container-based version of the `std::sort()` function
 // to sort elements in ascending order of their values.
 template <typename C>
-void c_sort(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort(C& c) {
   std::sort(container_algorithm_internal::c_begin(c),
             container_algorithm_internal::c_end(c));
 }
@@ -941,7 +958,7 @@ void c_sort(C& c) {
 // Overload of c_sort() for performing a `comp` comparison other than the
 // default `operator<`.
 template <typename C, typename LessThan>
-void c_sort(C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort(C& c, LessThan&& comp) {
   std::sort(container_algorithm_internal::c_begin(c),
             container_algorithm_internal::c_end(c),
             std::forward<LessThan>(comp));
@@ -972,7 +989,7 @@ void c_stable_sort(C& c, LessThan&& comp) {
 // Container-based version of the `std::is_sorted()` function
 // to evaluate whether the given container is sorted in ascending order.
 template <typename C>
-bool c_is_sorted(const C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_sorted(const C& c) {
   return std::is_sorted(container_algorithm_internal::c_begin(c),
                         container_algorithm_internal::c_end(c));
 }
@@ -980,7 +997,8 @@ bool c_is_sorted(const C& c) {
 // c_is_sorted() overload for performing a `comp` comparison other than the
 // default `operator<`.
 template <typename C, typename LessThan>
-bool c_is_sorted(const C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_sorted(const C& c,
+                                                     LessThan&& comp) {
   return std::is_sorted(container_algorithm_internal::c_begin(c),
                         container_algorithm_internal::c_end(c),
                         std::forward<LessThan>(comp));
@@ -992,7 +1010,7 @@ bool c_is_sorted(const C& c, LessThan&& comp) {
 // to rearrange elements within a container such that elements before `middle`
 // are sorted in ascending order.
 template <typename RandomAccessContainer>
-void c_partial_sort(
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_partial_sort(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> middle) {
   std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
@@ -1002,7 +1020,7 @@ void c_partial_sort(
 // Overload of c_partial_sort() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename RandomAccessContainer, typename LessThan>
-void c_partial_sort(
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_partial_sort(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
     LessThan&& comp) {
   std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
@@ -1019,8 +1037,9 @@ void c_partial_sort(
 // At most min(result.last - result.first, sequence.last - sequence.first)
 // elements from the sequence will be stored in the result.
 template <typename C, typename RandomAccessContainer>
-container_algorithm_internal::ContainerIter<RandomAccessContainer>
-c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
   return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
                                 container_algorithm_internal::c_end(sequence),
                                 container_algorithm_internal::c_begin(result),
@@ -1030,9 +1049,10 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
 // Overload of c_partial_sort_copy() for performing a `comp` comparison other
 // than the default `operator<`.
 template <typename C, typename RandomAccessContainer, typename LessThan>
-container_algorithm_internal::ContainerIter<RandomAccessContainer>
-c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
-                    LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
+                        LessThan&& comp) {
   return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
                                 container_algorithm_internal::c_end(sequence),
                                 container_algorithm_internal::c_begin(result),
@@ -1046,7 +1066,9 @@ c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
 // to return the first element within a container that is not sorted in
 // ascending order as an iterator.
 template <typename C>
-container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C>
+    c_is_sorted_until(C& c) {
   return std::is_sorted_until(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c));
 }
@@ -1054,8 +1076,9 @@ container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
 // Overload of c_is_sorted_until() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename C, typename LessThan>
-container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
-    C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<C>
+    c_is_sorted_until(C& c, LessThan&& comp) {
   return std::is_sorted_until(container_algorithm_internal::c_begin(c),
                               container_algorithm_internal::c_end(c),
                               std::forward<LessThan>(comp));
@@ -1069,7 +1092,7 @@ container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
 // any order, except that all preceding `nth` will be less than that element,
 // and all following `nth` will be greater than that element.
 template <typename RandomAccessContainer>
-void c_nth_element(
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_nth_element(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
   std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
@@ -1079,7 +1102,7 @@ void c_nth_element(
 // Overload of c_nth_element() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename RandomAccessContainer, typename LessThan>
-void c_nth_element(
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_nth_element(
     RandomAccessContainer& sequence,
     container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
     LessThan&& comp) {
@@ -1098,8 +1121,9 @@ void c_nth_element(
 // to return an iterator pointing to the first element in a sorted container
 // which does not compare less than `value`.
 template <typename Sequence, typename T>
-container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
-    Sequence& sequence, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_lower_bound(Sequence& sequence, const T& value) {
   return std::lower_bound(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value);
 }
@@ -1107,8 +1131,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
 // Overload of c_lower_bound() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename Sequence, typename T, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
-    Sequence& sequence, const T& value, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_lower_bound(Sequence& sequence, const T& value, LessThan&& comp) {
   return std::lower_bound(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value,
                           std::forward<LessThan>(comp));
@@ -1120,8 +1145,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
 // to return an iterator pointing to the first element in a sorted container
 // which is greater than `value`.
 template <typename Sequence, typename T>
-container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
-    Sequence& sequence, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_upper_bound(Sequence& sequence, const T& value) {
   return std::upper_bound(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value);
 }
@@ -1129,8 +1155,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
 // Overload of c_upper_bound() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename Sequence, typename T, typename LessThan>
-container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
-    Sequence& sequence, const T& value, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<Sequence>
+    c_upper_bound(Sequence& sequence, const T& value, LessThan&& comp) {
   return std::upper_bound(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value,
                           std::forward<LessThan>(comp));
@@ -1142,8 +1169,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
 // to return an iterator pair pointing to the first and last elements in a
 // sorted container which compare equal to `value`.
 template <typename Sequence, typename T>
-container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
-c_equal_range(Sequence& sequence, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+    c_equal_range(Sequence& sequence, const T& value) {
   return std::equal_range(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value);
 }
@@ -1151,8 +1179,9 @@ c_equal_range(Sequence& sequence, const T& value) {
 // Overload of c_equal_range() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename Sequence, typename T, typename LessThan>
-container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
-c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+    c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
   return std::equal_range(container_algorithm_internal::c_begin(sequence),
                           container_algorithm_internal::c_end(sequence), value,
                           std::forward<LessThan>(comp));
@@ -1164,7 +1193,8 @@ c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
 // to test if any element in the sorted container contains a value equivalent to
 // 'value'.
 template <typename Sequence, typename T>
-bool c_binary_search(const Sequence& sequence, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_binary_search(
    const Sequence& sequence, const T& value) {
   return std::binary_search(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
                             value);
@@ -1173,8 +1203,8 @@ bool c_binary_search(const Sequence& sequence, const T& value) {
 // Overload of c_binary_search() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename Sequence, typename T, typename LessThan>
-bool c_binary_search(const Sequence& sequence, const T& value,
-                     LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_binary_search(
+    const Sequence& sequence, const T& value, LessThan&& comp) {
   return std::binary_search(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
                             value, std::forward<LessThan>(comp));
@@ -1189,7 +1219,8 @@ bool c_binary_search(const Sequence& sequence, const T& value,
 // Container-based version of the `std::merge()` function
 // to merge two sorted containers into a single sorted iterator.
 template <typename C1, typename C2, typename OutputIterator>
-OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_merge(const C1& c1, const C2& c2, OutputIterator result) {
   return std::merge(container_algorithm_internal::c_begin(c1),
                     container_algorithm_internal::c_end(c1),
                     container_algorithm_internal::c_begin(c2),
@@ -1199,8 +1230,8 @@ OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
 // Overload of c_merge() for performing a `comp` comparison other than
 // the default `operator<`.
 template <typename C1, typename C2, typename OutputIterator, typename LessThan>
-OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
-                       LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_merge(const C1& c1, const C2& c2, OutputIterator result, LessThan&& comp) {
   return std::merge(container_algorithm_internal::c_begin(c1),
                     container_algorithm_internal::c_end(c1),
                     container_algorithm_internal::c_begin(c2),
@@ -1236,7 +1267,8 @@ void c_inplace_merge(C& c,
 // to test whether a sorted container `c1` entirely contains another sorted
 // container `c2`.
 template <typename C1, typename C2>
-bool c_includes(const C1& c1, const C2& c2) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_includes(const C1& c1,
+                                                    const C2& c2) {
   return std::includes(container_algorithm_internal::c_begin(c1),
                        container_algorithm_internal::c_end(c1),
                        container_algorithm_internal::c_begin(c2),
@@ -1246,7 +1278,8 @@ bool c_includes(const C1& c1, const C2& c2) {
 // Overload of c_includes() for performing a merge using a `comp` other than
 // `operator<`.
 template <typename C1, typename C2, typename LessThan>
-bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_includes(const C1& c1, const C2& c2,
+                                                    LessThan&& comp) {
   return std::includes(container_algorithm_internal::c_begin(c1),
                        container_algorithm_internal::c_end(c1),
                        container_algorithm_internal::c_begin(c2),
@@ -1266,7 +1299,8 @@ template <typename C1, typename C2, typename OutputIterator,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
   return std::set_union(container_algorithm_internal::c_begin(c1),
                         container_algorithm_internal::c_end(c1),
                         container_algorithm_internal::c_begin(c2),
@@ -1282,8 +1316,8 @@ template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
-                           LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_union(
+    const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) {
   return std::set_union(container_algorithm_internal::c_begin(c1),
                         container_algorithm_internal::c_end(c1),
                         container_algorithm_internal::c_begin(c2),
@@ -1302,13 +1336,13 @@ template <typename C1, typename C2, typename OutputIterator,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_intersection(const C1& c1, const C2& c2,
-                                  OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) {
   // In debug builds, ensure that both containers are sorted with respect to the
   // default comparator. std::set_intersection requires the containers be sorted
   // using operator<.
-  assert(absl::c_is_sorted(c1));
-  assert(absl::c_is_sorted(c2));
+  ABSL_ASSERT(absl::c_is_sorted(c1));
+  ABSL_ASSERT(absl::c_is_sorted(c2));
   return std::set_intersection(container_algorithm_internal::c_begin(c1),
                                container_algorithm_internal::c_end(c1),
                                container_algorithm_internal::c_begin(c2),
@@ -1324,13 +1358,13 @@ template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_intersection(const C1& c1, const C2& c2,
-                                  OutputIterator output, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_intersection(
+    const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) {
   // In debug builds, ensure that both containers are sorted with respect to the
   // default comparator. std::set_intersection requires the containers be sorted
   // using the same comparator.
-  assert(absl::c_is_sorted(c1, comp));
-  assert(absl::c_is_sorted(c2, comp));
+  ABSL_ASSERT(absl::c_is_sorted(c1, comp));
+  ABSL_ASSERT(absl::c_is_sorted(c2, comp));
   return std::set_intersection(container_algorithm_internal::c_begin(c1),
                                container_algorithm_internal::c_end(c1),
                                container_algorithm_internal::c_begin(c2),
@@ -1350,8 +1384,8 @@ template <typename C1, typename C2, typename OutputIterator,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_difference(const C1& c1, const C2& c2,
-                                OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_set_difference(const C1& c1, const C2& c2, OutputIterator output) {
   return std::set_difference(container_algorithm_internal::c_begin(c1),
                              container_algorithm_internal::c_end(c1),
                              container_algorithm_internal::c_begin(c2),
@@ -1367,8 +1401,8 @@ template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_difference(const C1& c1, const C2& c2,
-                                OutputIterator output, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_difference(
+    const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) {
   return std::set_difference(container_algorithm_internal::c_begin(c1),
                              container_algorithm_internal::c_end(c1),
                              container_algorithm_internal::c_begin(c2),
@@ -1388,8 +1422,8 @@ template <typename C1, typename C2, typename OutputIterator,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
-                                          OutputIterator output) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator
+c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output) {
   return std::set_symmetric_difference(
       container_algorithm_internal::c_begin(c1),
       container_algorithm_internal::c_end(c1),
@@ -1406,9 +1440,8 @@ template <typename C1, typename C2, typename OutputIterator, typename LessThan,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C1>::value,
               void>::type,
           typename = typename std::enable_if<
               !container_algorithm_internal::IsUnorderedContainer<C2>::value,
               void>::type>
-OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
-                                          OutputIterator output,
-                                          LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIterator c_set_symmetric_difference(
+    const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) {
   return std::set_symmetric_difference(
       container_algorithm_internal::c_begin(c1),
       container_algorithm_internal::c_end(c1),
@@ -1426,7 +1459,8 @@ OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
 // Container-based version of the `std::push_heap()` function
 // to push a value onto a container heap.
 template <typename RandomAccessContainer>
-void c_push_heap(RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_push_heap(
+    RandomAccessContainer& sequence) {
   std::push_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence));
 }
@@ -1434,7 +1468,8 @@ void c_push_heap(RandomAccessContainer& sequence) {
 // Overload of c_push_heap() for performing a push operation on a heap using a
 // `comp` other than `operator<`.
 template <typename RandomAccessContainer, typename LessThan>
-void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_push_heap(
+    RandomAccessContainer& sequence, LessThan&& comp) {
   std::push_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
                  std::forward<LessThan>(comp));
@@ -1445,7 +1480,8 @@ void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
 // Container-based version of the `std::pop_heap()` function
 // to pop a value from a heap container.
 template <typename RandomAccessContainer>
-void c_pop_heap(RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_pop_heap(
+    RandomAccessContainer& sequence) {
   std::pop_heap(container_algorithm_internal::c_begin(sequence),
                 container_algorithm_internal::c_end(sequence));
 }
@@ -1453,7 +1489,8 @@ void c_pop_heap(RandomAccessContainer& sequence) {
 // Overload of c_pop_heap() for performing a pop operation on a heap using a
 // `comp` other than `operator<`.
 template <typename RandomAccessContainer, typename LessThan>
-void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_pop_heap(
+    RandomAccessContainer& sequence, LessThan&& comp) {
   std::pop_heap(container_algorithm_internal::c_begin(sequence),
                 container_algorithm_internal::c_end(sequence),
                 std::forward<LessThan>(comp));
@@ -1464,7 +1501,8 @@ void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
 // Container-based version of the `std::make_heap()` function
 // to make a container a heap.
 template <typename RandomAccessContainer>
-void c_make_heap(RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_make_heap(
+    RandomAccessContainer& sequence) {
   std::make_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence));
 }
@@ -1472,7 +1510,8 @@ void c_make_heap(RandomAccessContainer& sequence) {
 // Overload of c_make_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
 template <typename RandomAccessContainer, typename LessThan>
-void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_make_heap(
+    RandomAccessContainer& sequence, LessThan&& comp) {
   std::make_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
                  std::forward<LessThan>(comp));
@@ -1483,7 +1522,8 @@ void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
 // Container-based version of the `std::sort_heap()` function
 // to sort a heap into ascending order (after which it is no longer a heap).
 template <typename RandomAccessContainer>
-void c_sort_heap(RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort_heap(
+    RandomAccessContainer& sequence) {
   std::sort_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence));
 }
@@ -1491,7 +1531,8 @@ void c_sort_heap(RandomAccessContainer& sequence) {
 // Overload of c_sort_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
 template <typename RandomAccessContainer, typename LessThan>
-void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_sort_heap(
+    RandomAccessContainer& sequence, LessThan&& comp) {
   std::sort_heap(container_algorithm_internal::c_begin(sequence),
                  container_algorithm_internal::c_end(sequence),
                  std::forward<LessThan>(comp));
@@ -1502,7 +1543,8 @@ void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
 // Container-based version of the `std::is_heap()` function
 // to check whether the given container is a heap.
 template <typename RandomAccessContainer>
-bool c_is_heap(const RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_heap(
+    const RandomAccessContainer& sequence) {
   return std::is_heap(container_algorithm_internal::c_begin(sequence),
                       container_algorithm_internal::c_end(sequence));
 }
@@ -1510,7 +1552,8 @@ bool c_is_heap(const RandomAccessContainer& sequence) {
 // Overload of c_is_heap() for performing heap comparisons using a
 // `comp` other than `operator<`
 template <typename RandomAccessContainer, typename LessThan>
-bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_heap(
+    const RandomAccessContainer& sequence, LessThan&& comp) {
   return std::is_heap(container_algorithm_internal::c_begin(sequence),
                       container_algorithm_internal::c_end(sequence),
                       std::forward<LessThan>(comp));
@@ -1521,8 +1564,9 @@ bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
 // Container-based version of the `std::is_heap_until()` function
 // to find the first element in a given container which is not in heap order.
 template <typename RandomAccessContainer>
-container_algorithm_internal::ContainerIter<RandomAccessContainer>
-c_is_heap_until(RandomAccessContainer& sequence) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_is_heap_until(RandomAccessContainer& sequence) {
   return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence));
 }
@@ -1530,8 +1574,9 @@ c_is_heap_until(RandomAccessContainer& sequence) {
 // Overload of c_is_heap_until() for performing heap comparisons using a
 // `comp` other than `operator<`
 template <typename RandomAccessContainer, typename LessThan>
-container_algorithm_internal::ContainerIter<RandomAccessContainer>
-c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
+    container_algorithm_internal::ContainerIter<RandomAccessContainer>
+    c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
   return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
                             container_algorithm_internal::c_end(sequence),
                             std::forward<LessThan>(comp));
@@ -1626,8 +1671,8 @@ ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
 // that capital letters ("A-Z") have ASCII values less than lowercase letters
 // ("a-z").
 template <typename Sequence1, typename Sequence2>
-bool c_lexicographical_compare(const Sequence1& sequence1,
-                               const Sequence2& sequence2) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_lexicographical_compare(
+    const Sequence1& sequence1, const Sequence2& sequence2) {
   return std::lexicographical_compare(
       container_algorithm_internal::c_begin(sequence1),
       container_algorithm_internal::c_end(sequence1),
@@ -1638,8 +1683,8 @@ bool c_lexicographical_compare(const Sequence1& sequence1,
 // Overload of c_lexicographical_compare() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
 template <typename Sequence1, typename Sequence2, typename LessThan>
-bool c_lexicographical_compare(const Sequence1& sequence1,
-                               const Sequence2& sequence2, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_lexicographical_compare(
+    const Sequence1& sequence1, const Sequence2& sequence2, LessThan&& comp) {
   return std::lexicographical_compare(
       container_algorithm_internal::c_begin(sequence1),
       container_algorithm_internal::c_end(sequence1),
@@ -1654,7 +1699,7 @@ bool c_lexicographical_compare(const Sequence1& sequence1,
 // to rearrange a container's elements into the next lexicographically greater
 // permutation.
 template <typename C>
-bool c_next_permutation(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_next_permutation(C& c) {
   return std::next_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c));
 }
@@ -1662,7 +1707,8 @@ bool c_next_permutation(C& c) {
 // Overload of c_next_permutation() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
 template <typename C, typename LessThan>
-bool c_next_permutation(C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_next_permutation(C& c,
+                                                            LessThan&& comp) {
   return std::next_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c),
                                std::forward<LessThan>(comp));
@@ -1674,7 +1720,7 @@ bool c_next_permutation(C& c, LessThan&& comp) {
 // to rearrange a container's elements into the next lexicographically lesser
 // permutation.
 template <typename C>
-bool c_prev_permutation(C& c) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_prev_permutation(C& c) {
   return std::prev_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c));
 }
@@ -1682,7 +1728,8 @@ bool c_prev_permutation(C& c) {
 // Overload of c_prev_permutation() for performing a lexicographical
 // comparison using a `comp` operator instead of `operator<`.
 template <typename C, typename LessThan>
-bool c_prev_permutation(C& c, LessThan&& comp) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_prev_permutation(C& c,
+                                                            LessThan&& comp) {
   return std::prev_permutation(container_algorithm_internal::c_begin(c),
                                container_algorithm_internal::c_end(c),
                                std::forward<LessThan>(comp));
@@ -1698,7 +1745,8 @@ bool c_prev_permutation(C& c, LessThan&& comp) {
 // to compute successive values of `value`, as if incremented with `++value`
 // after each element is written, and write them to the container.
 template <typename Sequence, typename T>
-void c_iota(Sequence& sequence, const T& value) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 void c_iota(Sequence& sequence,
+                                                const T& value) {
   std::iota(container_algorithm_internal::c_begin(sequence),
             container_algorithm_internal::c_end(sequence), value);
 }
@@ -1713,7 +1761,8 @@ void c_iota(Sequence& sequence, const T& value) {
 // absl::decay_t<T>. As a user of this function you can casually read
 // this as "returns T by value" and assume it does the right thing.
 template <typename Sequence, typename T>
-decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<T> c_accumulate(
+    const Sequence& sequence, T&& init) {
   return std::accumulate(container_algorithm_internal::c_begin(sequence),
                          container_algorithm_internal::c_end(sequence),
                          std::forward<T>(init));
@@ -1722,8 +1771,8 @@ decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
 // Overload of c_accumulate() for using a binary operations other than
 // addition for computing the accumulation.
 template <typename Sequence, typename T, typename BinaryOp>
-decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
-                        BinaryOp&& binary_op) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<T> c_accumulate(
+    const Sequence& sequence, T&& init, BinaryOp&& binary_op) {
   return std::accumulate(container_algorithm_internal::c_begin(sequence),
                          container_algorithm_internal::c_end(sequence),
                          std::forward<T>(init),
@@ -1739,8 +1788,8 @@ decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
 // absl::decay_t<T>. As a user of this function you can casually read
 // this as "returns T by value" and assume it does the right thing.
 template <typename Sequence1, typename Sequence2, typename T>
-decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
-                           T&& sum) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<T> c_inner_product(
+    const Sequence1& factors1, const Sequence2& factors2, T&& sum) {
   return std::inner_product(container_algorithm_internal::c_begin(factors1),
                             container_algorithm_internal::c_end(factors1),
                             container_algorithm_internal::c_begin(factors2),
@@ -1752,8 +1801,9 @@ decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
 // the product between the two container's element pair).
 template <typename Sequence1, typename Sequence2, typename T,
           typename BinaryOp1, typename BinaryOp2>
-decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
-                           T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<T> c_inner_product(
+    const Sequence1& factors1, const Sequence2& factors2, T&& sum,
+    BinaryOp1&& op1, BinaryOp2&& op2) {
   return std::inner_product(container_algorithm_internal::c_begin(factors1),
                             container_algorithm_internal::c_end(factors1),
                             container_algorithm_internal::c_begin(factors2),
@@ -1767,8 +1817,8 @@ decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
 // function to compute the difference between each element and the one preceding
 // it and write it to an iterator.
 template <typename InputSequence, typename OutputIt>
-OutputIt c_adjacent_difference(const InputSequence& input,
-                               OutputIt output_first) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt
+c_adjacent_difference(const InputSequence& input, OutputIt output_first) {
   return std::adjacent_difference(container_algorithm_internal::c_begin(input),
                                   container_algorithm_internal::c_end(input),
                                   output_first);
@@ -1777,8 +1827,8 @@ OutputIt c_adjacent_difference(const InputSequence& input,
 // Overload of c_adjacent_difference() for using a binary operation other than
 // subtraction to compute the adjacent difference.
 template <typename InputSequence, typename OutputIt, typename BinaryOp>
-OutputIt c_adjacent_difference(const InputSequence& input,
-                               OutputIt output_first, BinaryOp&& op) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt c_adjacent_difference(
+    const InputSequence& input, OutputIt output_first, BinaryOp&& op) {
   return std::adjacent_difference(container_algorithm_internal::c_begin(input),
                                   container_algorithm_internal::c_end(input),
                                   output_first, std::forward<BinaryOp>(op));
@@ -1791,7 +1841,8 @@ OutputIt c_adjacent_difference(const InputSequence& input,
 // to an iterator. The partial sum is the sum of all element values so far in
 // the sequence.
 template <typename InputSequence, typename OutputIt>
-OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt
+c_partial_sum(const InputSequence& input, OutputIt output_first) {
   return std::partial_sum(container_algorithm_internal::c_begin(input),
                           container_algorithm_internal::c_end(input),
                           output_first);
@@ -1800,8 +1851,8 @@ OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
 // Overload of c_partial_sum() for using a binary operation other than addition
 // to compute the "partial sum".
 template <typename InputSequence, typename OutputIt, typename BinaryOp>
-OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
-                       BinaryOp&& op) {
+ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 OutputIt c_partial_sum(
+    const InputSequence& input, OutputIt output_first, BinaryOp&& op) {
   return std::partial_sum(container_algorithm_internal::c_begin(input),
                           container_algorithm_internal::c_end(input),
                           output_first, std::forward<BinaryOp>(op));
diff --git a/absl/algorithm/container_test.cc b/absl/algorithm/container_test.cc
index cb063355b7b..347b41fd70e 100644
--- a/absl/algorithm/container_test.cc
+++ b/absl/algorithm/container_test.cc
@@ -1408,6 +1408,824 @@ TEST(ConstexprTest, SearchNWithPredicate) {
                       kArray.begin());
 }
 
+TEST(ConstexprTest, Copy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayCopy = [] {
+    std::array<int, 3> array;
+    absl::c_copy(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayCopy == kArray);
+}
+
+TEST(ConstexprTest, CopyN) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayCopy = [] {
+    std::array<int, 2> array;
+    absl::c_copy_n(kArray, 2, array.begin());
+    return array;
+  }();
+  static_assert(kArrayCopy == std::array<int, 2>{1, 2});
+}
+
+TEST(ConstexprTest, CopyIf) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr auto kArrayCopy = [] {
+    std::array<int, 3> array;
+    absl::c_copy_if(kArray, array.begin(), [](int x) { return x > 1; });
+    return array;
+  }();
+  static_assert(kArrayCopy == std::array<int, 3>{2, 3, 4});
+}
+
+TEST(ConstexprTest, CopyBackward) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayCopy = [] {
+    std::array<int, 3> array;
+    absl::c_copy_backward(kArray, array.end());
+    return array;
+  }();
+  static_assert(kArrayCopy == kArray);
+}
+
+TEST(ConstexprTest, Move) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayMove = [] {
+    std::array<int, 3> array;
+    absl::c_move(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayMove == kArray);
+}
+
+TEST(ConstexprTest, MoveBackward) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayMove = [] {
+    std::array<int, 3> array;
+    absl::c_move_backward(kArray, array.end());
+    return array;
+  }();
+  static_assert(kArrayMove == kArray);
+}
+
+TEST(ConstexprTest, SwapRanges) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {4, 5, 6};
+
+  static constexpr auto kSwapped = [] {
+    std::array<int, 3> arr1 = kArray1;
+    std::array<int, 3> arr2 = kArray2;
+    absl::c_swap_ranges(arr1, arr2);
+    return std::make_pair(arr1, arr2);
+  }();
+
+  static_assert(kSwapped.first == kArray2);
+  static_assert(kSwapped.second == kArray1);
+}
+
+TEST(ConstexprTest, Transform) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayTransform = [] {
+    std::array<int, 3> array;
+    absl::c_transform(kArray, array.begin(), [](int x) { return x + 1; });
+    return array;
+  }();
+  static_assert(kArrayTransform == std::array<int, 3>{2, 3, 4});
+}
+
+TEST(ConstexprTest, Replace) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReplace = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_replace(array, 1, 4);
+    return array;
+  }();
+  static_assert(kArrayReplace == std::array<int, 3>{4, 2, 3});
+}
+
+TEST(ConstexprTest, ReplaceIf) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReplaceIf = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_replace_if(array, [](int x) { return x == 1; }, 4);
+    return array;
+  }();
+  static_assert(kArrayReplaceIf == std::array<int, 3>{4, 2, 3});
+}
+
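+// Illustrative sketch, not part of the upstream patch: with the
+// ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 annotations added to container.h above,
+// these algorithms also compose inside a single constant expression (assumes
+// C++20 and the headers this test already includes):
+//
+//   static constexpr auto kSorted = [] {
+//     std::array<int, 5> a = {3, 1, 2, 3, 1};
+//     absl::c_sort(a);  // a becomes {1, 1, 2, 3, 3}
+//     return a;
+//   }();
+//   static_assert(absl::c_binary_search(kSorted, 2));
+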
+TEST(ConstexprTest, ReplaceCopy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReplaceCopy = [] {
+    std::array<int, 3> array;
+    absl::c_replace_copy(kArray, array.begin(), 1, 4);
+    return array;
+  }();
+  static_assert(kArrayReplaceCopy == std::array<int, 3>{4, 2, 3});
+}
+
+TEST(ConstexprTest, ReplaceCopyIf) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReplaceCopyIf = [] {
+    std::array<int, 3> array;
+    absl::c_replace_copy_if(
+        kArray, array.begin(), [](int x) { return x == 1; }, 4);
+    return array;
+  }();
+  static_assert(kArrayReplaceCopyIf == std::array<int, 3>{4, 2, 3});
+}
+
+TEST(ConstexprTest, Fill) {
+  static constexpr auto kArrayFill = [] {
+    std::array<int, 3> array;
+    absl::c_fill(array, 4);
+    return array;
+  }();
+  static_assert(kArrayFill == std::array<int, 3>{4, 4, 4});
+}
+
+TEST(ConstexprTest, FillN) {
+  static constexpr auto kArrayFillN = [] {
+    std::array<int, 3> array = {0, 0, 0};
+    absl::c_fill_n(array, 2, 4);
+    return array;
+  }();
+  static_assert(kArrayFillN == std::array<int, 3>{4, 4, 0});
+}
+
+TEST(ConstexprTest, Generate) {
+  static constexpr auto kArrayGenerate = [] {
+    std::array<int, 3> array;
+    absl::c_generate(array, []() { return 4; });
+    return array;
+  }();
+  static_assert(kArrayGenerate == std::array<int, 3>{4, 4, 4});
+}
+
+TEST(ConstexprTest, GenerateN) {
+  static constexpr auto kArrayGenerateN = [] {
+    std::array<int, 3> array = {0, 0, 0};
+    absl::c_generate_n(array, 2, []() { return 4; });
+    return array;
+  }();
+  static_assert(kArrayGenerateN == std::array<int, 3>{4, 4, 0});
+}
+
+TEST(ConstexprTest, RemoveCopy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayRemoveCopy = [] {
+    std::array<int, 2> array;
+    absl::c_remove_copy(kArray, array.begin(), 1);
+    return array;
+  }();
+  static_assert(kArrayRemoveCopy == std::array<int, 2>{2, 3});
+}
+
+TEST(ConstexprTest, RemoveCopyIf) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayRemoveCopyIf = [] {
+    std::array<int, 2> array;
+    absl::c_remove_copy_if(kArray, array.begin(), [](int x) { return x == 1; });
+    return array;
+  }();
+  static_assert(kArrayRemoveCopyIf == std::array<int, 2>{2, 3});
+}
+
+TEST(ConstexprTest, UniqueCopy) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
+  static constexpr auto kArrayUniqueCopy = [] {
+    std::array<int, 3> array;
+    absl::c_unique_copy(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayUniqueCopy == std::array<int, 3>{1, 2, 3});
+}
+
+TEST(ConstexprTest, UniqueCopyWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
+  static constexpr auto kArrayUniqueCopy = [] {
+    std::array<int, 3> array;
+    absl::c_unique_copy(kArray, array.begin(), std::equal_to<>());
+    return array;
+  }();
+  static_assert(kArrayUniqueCopy == std::array<int, 3>{1, 2, 3});
+}
+
+TEST(ConstexprTest, Reverse) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReverse = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_reverse(array);
+    return array;
+  }();
+  static_assert(kArrayReverse == std::array<int, 3>{3, 2, 1});
+}
+
+TEST(ConstexprTest, ReverseCopy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayReverseCopy = [] {
+    std::array<int, 3> array;
+    absl::c_reverse_copy(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayReverseCopy == std::array<int, 3>{3, 2, 1});
+}
+
+TEST(ConstexprTest, Rotate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayRotate = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_rotate(array, array.begin() + 1);
+    return array;
+  }();
+  static_assert(kArrayRotate ==
+
+TEST(ConstexprTest, RotateCopy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayRotateCopy = [] {
+    std::array<int, 3> array;
+    absl::c_rotate_copy(kArray, kArray.begin() + 1, array.begin());
+    return array;
+  }();
+  static_assert(kArrayRotateCopy == std::array{2, 3, 1});
+}
+
+TEST(ConstexprTest, IsPartitioned) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(!absl::c_is_partitioned(kArray, [](int x) { return x > 1; }));
+
+  static constexpr std::array<int, 3> kPartitionedArray = {2, 3, 1};
+  static_assert(
+      absl::c_is_partitioned(kPartitionedArray, [](int x) { return x > 1; }));
+}
+
+TEST(ConstexprTest, Partition) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayPartition = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_partition(array, [](int x) { return x > 1; });
+    return array;
+  }();
+  static_assert(
+      absl::c_is_partitioned(kArrayPartition, [](int x) { return x > 1; }));
+}
+
+TEST(ConstexprTest, PartitionCopy) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kPartitioned = [] {
+    std::array<int, 2> true_part;
+    std::array<int, 1> false_part;
+    absl::c_partition_copy(kArray, true_part.begin(), false_part.begin(),
+                           [](int x) { return x > 1; });
+    return std::make_pair(true_part, false_part);
+  }();
+  static_assert(kPartitioned.first == std::array{2, 3});
+  static_assert(kPartitioned.second == std::array{1});
+}
+
+TEST(ConstexprTest, PartitionPoint) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kPartitionPoint =
+      absl::c_partition_point(kArray, [](int x) { return x > 1; });
+  static_assert(kPartitionPoint == kArray.end());
+}
+
+TEST(ConstexprTest, Sort) {
+  static constexpr std::array<int, 3> kArray = {2, 1, 3};
+  static constexpr auto kArraySort = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_sort(array);
+    return array;
+  }();
+  static_assert(kArraySort == std::array{1, 2, 3});
+}
+
+TEST(ConstexprTest, SortWithPredicate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArraySort = [] {
+    std::array<int, 3> array = kArray;
+    absl::c_sort(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArraySort == std::array{3, 2, 1});
+}
+
+TEST(ConstexprTest, IsSorted) {
+  static constexpr std::array<int, 3> kSortedArray = {1, 2, 3};
+  static_assert(absl::c_is_sorted(kSortedArray));
+  static constexpr std::array<int, 3> kUnsortedArray = {1, 3, 2};
+  static_assert(!absl::c_is_sorted(kUnsortedArray));
+}
+
+TEST(ConstexprTest, IsSortedWithPredicate) {
+  static constexpr std::array<int, 3> kSortedArray = {3, 2, 1};
+  static_assert(absl::c_is_sorted(kSortedArray, std::greater<>()));
+  static constexpr std::array<int, 3> kUnsortedArray = {1, 3, 2};
+  static_assert(!absl::c_is_sorted(kUnsortedArray, std::greater<>()));
+}
+
+TEST(ConstexprTest, PartialSort) {
+  static constexpr std::array<int, 4> kArray = {3, 1, 4, 2};
+  static constexpr auto kArrayPartialSort = [] {
+    std::array<int, 4> array = kArray;
+    absl::c_partial_sort(array, array.begin() + 2);
+    return array;
+  }();
+  static_assert(kArrayPartialSort[0] == 1);
+  static_assert(kArrayPartialSort[1] == 2);
+}
+
+TEST(ConstexprTest, PartialSortWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {3, 1, 4, 2};
+  static constexpr auto kArrayPartialSort = [] {
+    std::array<int, 4> array = kArray;
+    absl::c_partial_sort(array, array.begin() + 2, std::greater<>());
+    return array;
+  }();
+  static_assert(kArrayPartialSort[0] == 4);
+  static_assert(kArrayPartialSort[1] == 3);
+}
+
+TEST(ConstexprTest, PartialSortCopy) {
+  static constexpr std::array<int, 4> kArray = {3, 1, 4, 2};
+  static constexpr auto kArrayPartialSort = [] {
+    std::array<int, 2> array;
+    absl::c_partial_sort_copy(kArray, array);
+    return array;
+  }();
+  static_assert(kArrayPartialSort[0] == 1);
+  static_assert(kArrayPartialSort[1] == 2);
+}
+
+TEST(ConstexprTest, PartialSortCopyWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {3, 1, 4, 2};
+  static constexpr auto kArrayPartialSort = [] {
+    std::array<int, 2> array;
+    absl::c_partial_sort_copy(kArray, array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArrayPartialSort[0] == 4);
+  static_assert(kArrayPartialSort[1] == 3);
+}
+
+TEST(ConstexprTest, IsSortedUntil) {
+  static constexpr std::array<int, 3> kSortedArray = {1, 2, 3};
+  static_assert(absl::c_is_sorted_until(kSortedArray) == kSortedArray.end());
+  static constexpr std::array<int, 3> kUnsortedArray = {1, 3, 2};
+  static_assert(absl::c_is_sorted_until(kUnsortedArray) ==
+                kUnsortedArray.begin() + 2);
+}
+
+TEST(ConstexprTest, IsSortedUntilWithPredicate) {
+  static constexpr std::array<int, 3> kSortedArray = {3, 2, 1};
+  static_assert(absl::c_is_sorted_until(kSortedArray, std::greater<>()) ==
+                kSortedArray.end());
+  static constexpr std::array<int, 3> kUnsortedArray = {1, 3, 2};
+  static_assert(absl::c_is_sorted_until(kUnsortedArray, std::greater<>()) ==
+                kUnsortedArray.begin() + 1);
+}
+
+TEST(ConstexprTest, NthElement) {
+  static constexpr std::array<int, 4> kArray = {2, 1, 3, 4};
+  static constexpr auto kArrayNthElement = [] {
+    std::array<int, 4> array = kArray;
+    absl::c_nth_element(array, array.begin() + 2);
+    return array;
+  }();
+  static_assert(kArrayNthElement[2] == 3);
+  static_assert(kArrayNthElement[0] <= kArrayNthElement[2]);
+  static_assert(kArrayNthElement[1] <= kArrayNthElement[2]);
+  static_assert(kArrayNthElement[3] >= kArrayNthElement[2]);
+}
+
+TEST(ConstexprTest, NthElementWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr auto kArrayNthElement = [] {
+    std::array<int, 4> array = kArray;
+    absl::c_nth_element(array, array.begin() + 2, std::greater<>());
+    return array;
+  }();
+  static_assert(kArrayNthElement[2] == 2);
+  static_assert(std::greater<>()(kArrayNthElement[0], kArrayNthElement[2]) ||
+                kArrayNthElement[0] == kArrayNthElement[2]);
+  static_assert(std::greater<>()(kArrayNthElement[1], kArrayNthElement[2]) ||
+                kArrayNthElement[1] == kArrayNthElement[2]);
+  static_assert(std::greater<>()(kArrayNthElement[2], kArrayNthElement[3]) ||
+                kArrayNthElement[2] == kArrayNthElement[3]);
+}
+
+TEST(ConstexprTest, LowerBound) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr auto kLowerBound = absl::c_lower_bound(kArray, 2);
+  static_assert(kLowerBound == kArray.begin() + 1);
+}
+
+TEST(ConstexprTest, LowerBoundWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {4, 3, 2, 1};
+  static constexpr auto kLowerBound =
+      absl::c_lower_bound(kArray, 2, std::greater<>());
+  static_assert(kLowerBound == kArray.begin() + 2);
+}
+
+TEST(ConstexprTest, UpperBound) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr auto kUpperBound = absl::c_upper_bound(kArray, 2);
+  static_assert(kUpperBound == kArray.begin() + 2);
+}
+
+TEST(ConstexprTest, UpperBoundWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {4, 3, 2, 1};
+  static constexpr auto kUpperBound =
+      absl::c_upper_bound(kArray, 2, std::greater<>());
+  static_assert(kUpperBound == kArray.begin() + 3);
+}
+
+TEST(ConstexprTest, EqualRange) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr auto kEqualRange = absl::c_equal_range(kArray, 2);
+  static_assert(kEqualRange.first == kArray.begin() + 1);
+  static_assert(kEqualRange.second == kArray.begin() + 2);
+}
+
+TEST(ConstexprTest, EqualRangeWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {4, 3, 2, 1};
+  static constexpr auto kEqualRange =
+      absl::c_equal_range(kArray, 2, std::greater<>());
+  static_assert(kEqualRange.first == kArray.begin() + 2);
+  static_assert(kEqualRange.second == kArray.begin() + 3);
+}
+
+TEST(ConstexprTest, BinarySearch) {
+  static constexpr std::array<int, 4> kArray = {1, 2, 3, 4};
+  static constexpr bool kBinarySearch = absl::c_binary_search(kArray, 2);
+  static_assert(kBinarySearch);
+}
+
+TEST(ConstexprTest, BinarySearchWithPredicate) {
+  static constexpr std::array<int, 4> kArray = {4, 3, 2, 1};
+  static constexpr bool kBinarySearch =
+      absl::c_binary_search(kArray, 2, std::greater<>());
+  static_assert(kBinarySearch);
+}
+
+TEST(ConstexprTest, Merge) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {4, 5, 6};
+  static constexpr auto kArrayMerge = [] {
+    std::array<int, 6> array;
+    absl::c_merge(kArray1, kArray2, array.begin());
+    return array;
+  }();
+  static_assert(kArrayMerge == std::array{1, 2, 3, 4, 5, 6});
+}
+
+TEST(ConstexprTest, MergeWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {6, 5, 4};
+  static constexpr auto kArrayMerge = [] {
+    std::array<int, 6> array;
+    absl::c_merge(kArray1, kArray2, array.begin(), std::greater<>());
+    return array;
+  }();
+  static_assert(kArrayMerge == std::array{6, 5, 4, 3, 2, 1});
+}
+
+TEST(ConstexprTest, Includes) {
+  static constexpr std::array<int, 6> kArray1 = {1, 2, 3, 4, 5, 6};
+  static constexpr std::array<int, 3> kArray2 = {2, 3, 5};
+  static constexpr bool kIncludes = absl::c_includes(kArray1, kArray2);
+  static_assert(kIncludes);
+}
+
+TEST(ConstexprTest, IncludesWithPredicate) {
+  static constexpr std::array<int, 6> kArray1 = {6, 5, 4, 3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {5, 3, 2};
+  static constexpr bool kIncludes =
+      absl::c_includes(kArray1, kArray2, std::greater<>());
+  static_assert(kIncludes);
+}
+
+TEST(ConstexprTest, SetUnion) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 3, 4};
+  static constexpr auto kArraySetUnion = [] {
+    std::array<int, 4> array;
+    absl::c_set_union(kArray1, kArray2, array.begin());
+    return array;
+  }();
+  static_assert(kArraySetUnion == std::array{1, 2, 3, 4});
+}
+
+TEST(ConstexprTest, SetUnionWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {4, 3, 1};
+  static constexpr auto kArraySetUnion = [] {
+    std::array<int, 4> array;
+    absl::c_set_union(kArray1, kArray2, array.begin(), std::greater<>());
+    return array;
+  }();
+  static_assert(kArraySetUnion == std::array{4, 3, 2, 1});
+}
+
+TEST(ConstexprTest, SetIntersection) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 3, 4};
+  static constexpr auto kArraySetIntersection = [] {
+    std::array<int, 2> array;
+    absl::c_set_intersection(kArray1, kArray2, array.begin());
+    return array;
+  }();
+  static_assert(kArraySetIntersection == std::array{1, 3});
+}
+
+TEST(ConstexprTest, SetIntersectionWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {4, 3, 1};
+  static constexpr auto kArraySetIntersection = [] {
+    std::array<int, 2> array;
+    absl::c_set_intersection(kArray1, kArray2, array.begin(), std::greater<>());
+    return array;
+  }();
+  static_assert(kArraySetIntersection == std::array{3, 1});
+}
+
+TEST(ConstexprTest, SetDifference) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 3, 4};
+  static constexpr auto kArraySetDifference = [] {
+    std::array<int, 1> array;
+    absl::c_set_difference(kArray1, kArray2, array.begin());
+    return array;
+  }();
+  static_assert(kArraySetDifference == std::array{2});
+}
+
+TEST(ConstexprTest, SetDifferenceWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {4, 3, 1};
+  static constexpr auto kArraySetDifference = [] {
+    std::array<int, 1> array;
+    absl::c_set_difference(kArray1, kArray2, array.begin(), std::greater<>());
+    return array;
+  }();
+  static_assert(kArraySetDifference == std::array{2});
+}
+
+TEST(ConstexprTest, SetSymmetricDifference) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 3, 4};
+  static constexpr auto kArraySetSymmetricDifference = [] {
+    std::array<int, 2> array;
+    absl::c_set_symmetric_difference(kArray1, kArray2, array.begin());
+    return array;
+  }();
+  static_assert(kArraySetSymmetricDifference == std::array{2, 4});
+}
+
+TEST(ConstexprTest, SetSymmetricDifferenceWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {3, 2, 1};
+  static constexpr std::array<int, 3> kArray2 = {4, 3, 1};
+  static constexpr auto kArraySetSymmetricDifference = [] {
+    std::array<int, 2> array;
+    absl::c_set_symmetric_difference(kArray1, kArray2, array.begin(),
+                                     std::greater<>());
+    return array;
+  }();
+  static_assert(kArraySetSymmetricDifference == std::array{4, 2});
+}
+
+TEST(ConstexprTest, PushHeap) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {1, 2, 3, 4};
+    absl::c_push_heap(array);
+    return array;
+  }();
+  static_assert(kArray[0] == 4);
+}
+
+TEST(ConstexprTest, PushHeapWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {4, 3, 2, 1};
+    absl::c_push_heap(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArray[0] == 1);
+}
+
+TEST(ConstexprTest, PopHeap) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {4, 3, 2, 1};
+    absl::c_pop_heap(array);
+    return array;
+  }();
+  static_assert(kArray[3] == 4);
+}
+
+TEST(ConstexprTest, PopHeapWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {1, 2, 3, 4};
+    absl::c_pop_heap(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArray[3] == 1);
+}
+
+TEST(ConstexprTest, MakeHeap) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {1, 2, 3, 4};
+    absl::c_make_heap(array);
+    return array;
+  }();
+  static_assert(absl::c_is_heap(kArray));
+}
+
+TEST(ConstexprTest, MakeHeapWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {4, 3, 2, 1};
+    absl::c_make_heap(array, std::greater<>());
+    return array;
+  }();
+  static_assert(absl::c_is_heap(kArray, std::greater<>()));
+}
+
+TEST(ConstexprTest, SortHeap) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {1, 2, 3, 4};
+    absl::c_make_heap(array);
+    absl::c_sort_heap(array);
+    return array;
+  }();
+  static_assert(kArray == std::array{1, 2, 3, 4});
+}
+
+TEST(ConstexprTest, SortHeapWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 4> array = {4, 3, 2, 1};
+    absl::c_make_heap(array, std::greater<>());
+    absl::c_sort_heap(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArray == std::array{4, 3, 2, 1});
+}
+
+TEST(ConstexprTest, IsHeap) {
+  static constexpr std::array<int, 4> kHeap = {4, 2, 3, 1};
+  static_assert(absl::c_is_heap(kHeap));
+  static constexpr std::array<int, 4> kNotHeap = {1, 2, 3, 4};
+  static_assert(!absl::c_is_heap(kNotHeap));
+}
+
+TEST(ConstexprTest, IsHeapWithPredicate) {
+  static constexpr std::array<int, 4> kHeap = {1, 2, 3, 4};
+  static_assert(absl::c_is_heap(kHeap, std::greater<>()));
+  static constexpr std::array<int, 4> kNotHeap = {4, 3, 2, 1};
+  static_assert(!absl::c_is_heap(kNotHeap, std::greater<>()));
+}
+
+TEST(ConstexprTest, IsHeapUntil) {
+  static constexpr std::array<int, 4> kHeap = {4, 2, 3, 1};
+  static_assert(absl::c_is_heap_until(kHeap) == kHeap.end());
+  static constexpr std::array<int, 4> kNotHeap = {4, 2, 3, 5};
+  static_assert(absl::c_is_heap_until(kNotHeap) == kNotHeap.begin() + 3);
+}
+
+TEST(ConstexprTest, IsHeapUntilWithPredicate) {
+  static constexpr std::array<int, 4> kHeap = {1, 2, 3, 4};
+  static_assert(absl::c_is_heap_until(kHeap, std::greater<>()) == kHeap.end());
+  static constexpr std::array<int, 4> kNotHeap = {1, 2, 3, 0};
+  static_assert(absl::c_is_heap_until(kNotHeap, std::greater<>()) ==
+                kNotHeap.begin() + 3);
+}
+
+TEST(ConstexprTest, LexicographicalCompare) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 2, 4};
+  static constexpr std::array<int, 3> kArray3 = {1, 2, 3};
+  static_assert(absl::c_lexicographical_compare(kArray1, kArray2));
+  static_assert(!absl::c_lexicographical_compare(kArray2, kArray1));
+  static_assert(!absl::c_lexicographical_compare(kArray1, kArray3));
+}
+
+TEST(ConstexprTest, LexicographicalCompareWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {1, 2, 4};
+  static constexpr std::array<int, 3> kArray3 = {1, 2, 3};
+  static_assert(
+      !absl::c_lexicographical_compare(kArray1, kArray2, std::greater<>()));
+  static_assert(
+      absl::c_lexicographical_compare(kArray2, kArray1, std::greater<>()));
+  static_assert(
+      !absl::c_lexicographical_compare(kArray1, kArray3, std::greater<>()));
+}
+
+TEST(ConstexprTest, NextPermutation) {
+  static constexpr auto kArray = [] {
+    std::array<int, 3> array = {1, 2, 3};
+    absl::c_next_permutation(array);
+    return array;
+  }();
+  static_assert(kArray == std::array{1, 3, 2});
+}
+
+TEST(ConstexprTest, NextPermutationWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 3> array = {3, 2, 1};
+    absl::c_next_permutation(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArray == std::array{3, 1, 2});
+}
+
+TEST(ConstexprTest, PrevPermutation) {
+  static constexpr auto kArray = [] {
+    std::array<int, 3> array = {1, 3, 2};
+    absl::c_prev_permutation(array);
+    return array;
+  }();
+  static_assert(kArray == std::array{1, 2, 3});
+}
+
+TEST(ConstexprTest, PrevPermutationWithPredicate) {
+  static constexpr auto kArray = [] {
+    std::array<int, 3> array = {1, 2, 3};
+    absl::c_prev_permutation(array, std::greater<>());
+    return array;
+  }();
+  static_assert(kArray == std::array{1, 3, 2});
+}
+
+TEST(ConstexprTest, Iota) {
+  static constexpr auto kArray = [] {
+    std::array<int, 3> array;
+    absl::c_iota(array, 1);
+    return array;
+  }();
+  static_assert(kArray == std::array{1, 2, 3});
+}
+
+TEST(ConstexprTest, Accumulate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(absl::c_accumulate(kArray, 0) == 6);
+}
+
+TEST(ConstexprTest, AccumulateWithPredicate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static_assert(absl::c_accumulate(kArray, 1, std::multiplies<>()) == 6);
+}
+
+TEST(ConstexprTest, InnerProduct) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {4, 5, 6};
+  static_assert(absl::c_inner_product(kArray1, kArray2, 0) == 32);
+}
+
+TEST(ConstexprTest, InnerProductWithPredicate) {
+  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
+  static constexpr std::array<int, 3> kArray2 = {4, 5, 6};
+  static_assert(absl::c_inner_product(kArray1, kArray2, 1, std::multiplies<>(),
+                                      std::plus<>()) == 315);
+}
+
+TEST(ConstexprTest, AdjacentDifference) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 4};
+  static constexpr auto kArrayAdjacentDifference = [] {
+    std::array<int, 3> array;
+    absl::c_adjacent_difference(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayAdjacentDifference == std::array{1, 1, 2});
+}
+
+TEST(ConstexprTest, AdjacentDifferenceWithPredicate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 4};
+  static constexpr auto kArrayAdjacentDifference = [] {
+    std::array<int, 3> array;
+    absl::c_adjacent_difference(kArray, array.begin(), std::multiplies<>());
+    return array;
+  }();
+  static_assert(kArrayAdjacentDifference == std::array{1, 2, 8});
+}
+
+TEST(ConstexprTest, PartialSum) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayPartialSum = [] {
+    std::array<int, 3> array;
+    absl::c_partial_sum(kArray, array.begin());
+    return array;
+  }();
+  static_assert(kArrayPartialSum == std::array{1, 3, 6});
+}
+
+TEST(ConstexprTest, PartialSumWithPredicate) {
+  static constexpr std::array<int, 3> kArray = {1, 2, 3};
+  static constexpr auto kArrayPartialSum = [] {
+    std::array<int, 3> array;
+    absl::c_partial_sum(kArray, array.begin(), std::multiplies<>());
+    return array;
+  }();
+  static_assert(kArrayPartialSum == std::array{1, 2, 6});
+}
+
 #endif  // defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&
         // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
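Taken together, the ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 annotations and the tests above mean these container-based algorithms can now run in constant evaluation under C++20. A minimal sketch of the kind of user code this enables (illustrative only, not part of the patch; assumes C++20 and an Abseil at or past this change):

    #include <array>
    #include "absl/algorithm/container.h"

    // Build a sorted lookup table entirely at compile time.
    inline constexpr std::array<int, 5> kSortedTable = [] {
      std::array<int, 5> table = {42, 7, 19, 3, 23};
      absl::c_sort(table);  // usable in a constant expression after this change
      return table;
    }();
    static_assert(absl::c_binary_search(kSortedTable, 19));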
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index ef97b4ee8b2..2b380e61835 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -14,6 +14,9 @@
 # limitations under the License.
 #
 
+load("@rules_cc//cc:cc_binary.bzl", "cc_binary")
+load("@rules_cc//cc:cc_library.bzl", "cc_library")
+load("@rules_cc//cc:cc_test.bzl", "cc_test")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
@@ -82,7 +85,6 @@ cc_library(
 
 cc_library(
     name = "nullability",
-    srcs = ["internal/nullability_deprecated.h"],
     hdrs = ["nullability.h"],
     copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -685,7 +687,6 @@ cc_test(
 
 cc_test(
     name = "thread_identity_test",
-    size = "small",
     srcs = ["internal/thread_identity_test.cc"],
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
@@ -721,6 +722,7 @@ cc_library(
     testonly = True,
    srcs = ["internal/scoped_set_env.cc"],
     hdrs = ["internal/scoped_set_env.h"],
+    copts = ABSL_DEFAULT_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     visibility = [
         "//absl:__subpackages__",
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index 23942c04b21..4eb1390b692 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -72,8 +72,6 @@ absl_cc_library(
     nullability
   HDRS
     "nullability.h"
-  SRCS
-    "internal/nullability_deprecated.h"
   DEPS
     absl::config
     absl::core_headers
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index d009f6d4912..33b2c284952 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -553,7 +553,7 @@
 //
 // Prevents the compiler from complaining about variables that appear unused.
 //
-// Deprecated: Use the standard C++17 `[[maybe_unused]` instead.
+// Deprecated: Use the standard C++17 `[[maybe_unused]]` instead.
 //
 // Due to differences in positioning requirements between the old, compiler
 // specific __attribute__ syntax and the now standard `[[maybe_unused]]`, this
diff --git a/absl/base/config.h b/absl/base/config.h
index 7514b86e5e7..a38c5826b82 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -237,6 +237,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 #error ABSL_HAVE_TLS cannot be directly set
 #elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
 #define ABSL_HAVE_TLS 1
+#elif defined(__INTEL_LLVM_COMPILER)
+#define ABSL_HAVE_TLS 1
 #endif
 
 // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
@@ -358,10 +360,10 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 //   Darwin (macOS and iOS)    __APPLE__
 //   Akaros (http://akaros.org)  __ros__
 //   Windows                   _WIN32
-//   NaCL                      __native_client__
 //   AsmJS                     __asmjs__
 //   WebAssembly (Emscripten)  __EMSCRIPTEN__
 //   Fuchsia                   __Fuchsia__
+//   WebAssembly (WASI)        _WASI_EMULATED_MMAN (implies __wasi__)
 //
 // Note that since Android defines both __ANDROID__ and __linux__, one
 // may probe for either Linux or Android by simply testing for __linux__.
@@ -372,12 +374,13 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
 // POSIX.1-2001.
 #ifdef ABSL_HAVE_MMAP
 #error ABSL_HAVE_MMAP cannot be directly set
-#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||   \
-    defined(_AIX) || defined(__ros__) || defined(__native_client__) ||      \
-    defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) ||  \
-    defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) ||         \
-    defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) ||      \
-    defined(__VXWORKS__) || defined(__hexagon__) || defined(__XTENSA__)
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||   \
+    defined(_AIX) || defined(__ros__) || defined(__asmjs__) ||              \
+    defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || defined(__sun) ||    \
+    defined(__myriad2__) || defined(__HAIKU__) || defined(__OpenBSD__) ||   \
+    defined(__NetBSD__) || defined(__QNX__) || defined(__VXWORKS__) ||      \
+    defined(__hexagon__) || defined(__XTENSA__) ||                          \
+    defined(_WASI_EMULATED_MMAN)
 #define ABSL_HAVE_MMAP 1
 #endif
 
@@ -453,8 +456,6 @@
 // WASI doesn't support signals
 #elif defined(__Fuchsia__)
 // Signals don't exist on fuchsia.
-#elif defined(__native_client__)
-// Signals don't exist on hexagon/QuRT
 #elif defined(__hexagon__)
 #else
 // other standard libraries
@@ -530,13 +531,12 @@
 
 // ABSL_HAVE_STD_STRING_VIEW
 //
-// Checks whether C++17 std::string_view is available.
+// Deprecated: always defined to 1.
+// std::string_view was added in C++17, which means all versions of C++
+// supported by Abseil have it.
 #ifdef ABSL_HAVE_STD_STRING_VIEW
 #error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
-#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L
-#define ABSL_HAVE_STD_STRING_VIEW 1
-#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
-    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#else
 #define ABSL_HAVE_STD_STRING_VIEW 1
 #endif
 
@@ -561,13 +561,10 @@
 // Indicates whether absl::string_view is an alias for std::string_view.
 #if !defined(ABSL_OPTION_USE_STD_STRING_VIEW)
 #error options.h is misconfigured.
-#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \
-    (ABSL_OPTION_USE_STD_STRING_VIEW == 2 &&  \
-     !defined(ABSL_HAVE_STD_STRING_VIEW))
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0
 #undef ABSL_USES_STD_STRING_VIEW
 #elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \
-    (ABSL_OPTION_USE_STD_STRING_VIEW == 2 &&  \
-     defined(ABSL_HAVE_STD_STRING_VIEW))
+    ABSL_OPTION_USE_STD_STRING_VIEW == 2
 #define ABSL_USES_STD_STRING_VIEW 1
 #else
 #error options.h is misconfigured.
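The net effect of the two string_view hunks above: ABSL_HAVE_STD_STRING_VIEW is now unconditionally 1, and ABSL_USES_STD_STRING_VIEW depends only on the option value. Code that used to carry a pre-C++17 fallback can now assume std::string_view exists; a small illustrative sketch (not part of the patch):

    #include <string_view>
    #include "absl/strings/string_view.h"

    // With ABSL_HAVE_STD_STRING_VIEW always defined, no fallback path for
    // pre-C++17 toolchains is needed; this conversion always compiles.
    std::string_view ToStd(absl::string_view v) { return {v.data(), v.size()}; }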
diff --git a/absl/base/internal/dynamic_annotations.h b/absl/base/internal/dynamic_annotations.h
index b23c5ec1c41..537a2fe677e 100644
--- a/absl/base/internal/dynamic_annotations.h
+++ b/absl/base/internal/dynamic_annotations.h
@@ -89,7 +89,7 @@
 #endif
 
 // Memory annotations are also made available to LLVM's Memory Sanitizer
-#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#if defined(ABSL_HAVE_MEMORY_SANITIZER)
 #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
 #endif
diff --git a/absl/base/internal/low_level_alloc.cc b/absl/base/internal/low_level_alloc.cc
index 158b60982f1..a5bd71da6de 100644
--- a/absl/base/internal/low_level_alloc.cc
+++ b/absl/base/internal/low_level_alloc.cc
@@ -19,6 +19,9 @@
 
 #include "absl/base/internal/low_level_alloc.h"
 
+#include <stdint.h>
+
+#include <atomic>
 #include <type_traits>
 
 #include "absl/base/call_once.h"
@@ -219,6 +222,32 @@ struct LowLevelAlloc::Arena {
   uint32_t random ABSL_GUARDED_BY(mu);
 };
 
+// ---------------------------------------------------------------
+// An async-signal-safe arena for LowLevelAlloc
+static std::atomic<base_internal::LowLevelAlloc::Arena *> g_sig_safe_arena;
+
+base_internal::LowLevelAlloc::Arena *SigSafeArena() {
+  return g_sig_safe_arena.load(std::memory_order_acquire);
+}
+
+void InitSigSafeArena() {
+  if (SigSafeArena() == nullptr) {
+    uint32_t flags = 0;
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    flags |= base_internal::LowLevelAlloc::kAsyncSignalSafe;
+#endif
+    base_internal::LowLevelAlloc::Arena *new_arena =
+        base_internal::LowLevelAlloc::NewArena(flags);
+    base_internal::LowLevelAlloc::Arena *old_value = nullptr;
+    if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena,
+                                                  std::memory_order_release,
+                                                  std::memory_order_relaxed)) {
+      // We lost a race to allocate an arena; deallocate.
+      base_internal::LowLevelAlloc::DeleteArena(new_arena);
+    }
+  }
+}
+
 namespace {
 // Static storage space for the lazily-constructed, default global arena
 // instances.  We require this space because the whole point of LowLevelAlloc
@@ -289,11 +318,11 @@ class ABSL_SCOPED_LOCKABLE ArenaLock {
       mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
     }
 #endif
-    arena_->mu.Lock();
+    arena_->mu.lock();
   }
   ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
   void Leave() ABSL_UNLOCK_FUNCTION() {
-    arena_->mu.Unlock();
+    arena_->mu.unlock();
 #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
     if (mask_valid_) {
       const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
@@ -544,7 +573,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
     }
     // we unlock before mmap() both because mmap() may call a callback hook,
     // and because it may be slow.
-    arena->mu.Unlock();
+    arena->mu.unlock();
     // mmap generous 64K chunks to decrease
     // the chances/impact of fragmentation:
     size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
@@ -583,7 +612,7 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
 #endif
 #endif  // __linux__
 #endif  // _WIN32
-    arena->mu.Lock();
+    arena->mu.lock();
     s = reinterpret_cast<AllocList *>(new_pages);
     s->header.size = new_pages_size;
     // Pretend the block is allocated; call AddToFreelist() to free it.
diff --git a/absl/base/internal/low_level_alloc.h b/absl/base/internal/low_level_alloc.h
index c2f1f25d8e3..23218dd5a6f 100644
--- a/absl/base/internal/low_level_alloc.h
+++ b/absl/base/internal/low_level_alloc.h
@@ -120,6 +120,12 @@ class LowLevelAlloc {
   LowLevelAlloc();  // no instances
 };
 
+// Returns a global async-signal-safe arena for LowLevelAlloc.
+LowLevelAlloc::Arena *SigSafeArena();
+
+// Ensures the global async-signal-safe arena for LowLevelAlloc is initialized.
+void InitSigSafeArena();
+
 }  // namespace base_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
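SigSafeArena()/InitSigSafeArena() give Abseil-internal code one shared async-signal-safe arena: initialization happens once from a normal context (it may lock and allocate), after which the arena is read with an acquire load from contexts where ordinary allocation is off-limits. A rough usage sketch under those assumptions (internal API, illustrative only):

    #include <cstddef>
    #include "absl/base/internal/low_level_alloc.h"

    void InitBeforeInstallingSignalHandler() {
      // Safe context: may lock and allocate while creating the arena.
      absl::base_internal::InitSigSafeArena();
    }

    void* AllocInSignalContext(std::size_t n) {
      // The arena was created with kAsyncSignalSafe where the platform
      // supports it, so allocation here avoids non-signal-safe locking.
      return absl::base_internal::LowLevelAlloc::AllocWithArena(
          n, absl::base_internal::SigSafeArena());
    }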
diff --git a/absl/base/internal/nullability_deprecated.h b/absl/base/internal/nullability_deprecated.h
deleted file mode 100644
index 1174a96eaa3..00000000000
--- a/absl/base/internal/nullability_deprecated.h
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2023 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
-#define ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
-
-#include "absl/base/attributes.h"
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace nullability_internal {
-
-template <typename T>
-using NullableImpl
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
-    [[clang::annotate("Nullable")]]
-#endif
-// Don't add the _Nullable attribute in Objective-C compiles. Many Objective-C
-// projects enable the `-Wnullable-to-nonnull-conversion warning`, which is
-// liable to produce false positives.
-#if ABSL_HAVE_FEATURE(nullability_on_classes) && !defined(__OBJC__)
-    = T _Nullable;
-#else
-    = T;
-#endif
-
-template <typename T>
-using NonnullImpl
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
-    [[clang::annotate("Nonnull")]]
-#endif
-#if ABSL_HAVE_FEATURE(nullability_on_classes) && !defined(__OBJC__)
-    = T _Nonnull;
-#else
-    = T;
-#endif
-
-template <typename T>
-using NullabilityUnknownImpl
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
-    [[clang::annotate("Nullability_Unspecified")]]
-#endif
-#if ABSL_HAVE_FEATURE(nullability_on_classes) && !defined(__OBJC__)
-    = T _Null_unspecified;
-#else
-    = T;
-#endif
-
-}  // namespace nullability_internal
-
-// The following template aliases are deprecated forms of nullability
-// annotations. They have some limitations, for example, an incompatibility with
-// `auto*` pointers, as `auto` cannot be used in a template argument.
-//
-// It is important to note that these annotations are not distinct strong
-// *types*. They are alias templates defined to be equal to the underlying
-// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
-// pointer of type `T*`.
-//
-// Prefer the macro style annotations in `absl/base/nullability.h` instead.
-
-// absl::Nonnull, analogous to absl_nonnull
-//
-// Example:
-// absl::Nonnull<int*> foo;
-// Is equivalent to:
-// int* absl_nonnull foo;
-template <typename T>
-using Nonnull [[deprecated("Use `absl_nonnull`.")]] =
-    nullability_internal::NonnullImpl<T>;
-
-// absl::Nullable, analogous to absl_nullable
-//
-// Example:
-// absl::Nullable<int*> foo;
-// Is equivalent to:
-// int* absl_nullable foo;
-template <typename T>
-using Nullable [[deprecated("Use `absl_nullable`.")]] =
-    nullability_internal::NullableImpl<T>;
-
-// absl::NullabilityUnknown, analogous to absl_nullability_unknown
-//
-// Example:
-// absl::NullabilityUnknown<int*> foo;
-// Is equivalent to:
-// int* absl_nullability_unknown foo;
-template <typename T>
-using NullabilityUnknown [[deprecated("Use `absl_nullability_unknown`.")]] =
-    nullability_internal::NullabilityUnknownImpl<T>;
-
-ABSL_NAMESPACE_END
-}  // namespace absl
-
-#endif  // ABSL_BASE_INTERNAL_NULLABILITY_DEPRECATED_H_
diff --git a/absl/base/internal/poison.cc b/absl/base/internal/poison.cc
index b33d4c2d3d5..c639c966628 100644
--- a/absl/base/internal/poison.cc
+++ b/absl/base/internal/poison.cc
@@ -57,19 +57,20 @@ size_t GetPageSize() {
 
 void* InitializePoisonedPointerInternal() {
   const size_t block_size = GetPageSize();
+  void* data = nullptr;
 #if defined(ABSL_HAVE_ADDRESS_SANITIZER)
-  void* data = malloc(block_size);
+  data = malloc(block_size);
   ASAN_POISON_MEMORY_REGION(data, block_size);
 #elif defined(ABSL_HAVE_MEMORY_SANITIZER)
-  void* data = malloc(block_size);
+  data = malloc(block_size);
   __msan_poison(data, block_size);
 #elif defined(ABSL_HAVE_MMAP)
-  void* data = DirectMmap(nullptr, block_size, PROT_NONE,
-                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  data = DirectMmap(nullptr, block_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+                    -1, 0);
   if (data == MAP_FAILED) return GetBadPointerInternal();
 #elif defined(_WIN32)
-  void* data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT,
-                            PAGE_NOACCESS);
+  data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT,
+                      PAGE_NOACCESS);
   if (data == nullptr) return GetBadPointerInternal();
 #else
   return GetBadPointerInternal();
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index 35a08f0ac03..8537f3ec520 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -41,9 +41,8 @@
 //
 // This preprocessor token is also defined in raw_io.cc.  If you need to copy
 // this, consider moving both to config.h instead.
-#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
-    defined(__hexagon__) || defined(__Fuchsia__) ||                     \
-    defined(__native_client__) || defined(__OpenBSD__) ||               \
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||     \
+    defined(__hexagon__) || defined(__Fuchsia__) || defined(__OpenBSD__) || \
     defined(__EMSCRIPTEN__) || defined(__ASYLO__)
 
 #include <unistd.h>
 
@@ -158,7 +157,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
 #endif
 
 #ifdef ABSL_MIN_LOG_LEVEL
-  if (severity < static_cast<LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
       severity < absl::LogSeverity::kFatal) {
     enabled = false;
   }
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 430f775bdf9..4168b8b728a 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -16,15 +16,18 @@
 
 #include <algorithm>
 #include <atomic>
+#include <cstdint>
 #include <limits>
 
 #include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
 #include "absl/base/config.h"
 #include "absl/base/internal/atomic_hook.h"
 #include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/internal/spinlock_wait.h"
 #include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
-#include "absl/base/call_once.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
 
 // Description of lock-word:
 //  31..00: [............................3][2][1][0]
@@ -58,7 +61,7 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace base_internal {
 
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<
    void (*)(const void *lock, int64_t wait_cycles)>
     submit_profile_data;
 
@@ -67,12 +70,6 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
   submit_profile_data.Store(fn);
 }
 
-// Uncommon constructors.
-SpinLock::SpinLock(base_internal::SchedulingMode mode)
-    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
-  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
-}
-
 // Monitor the lock to see if its value changes within some time period
 // (adaptive_spin_count loop iterations). The last value read from the lock
 // is returned from the method.
@@ -81,9 +78,8 @@ uint32_t SpinLock::SpinLoop() {
   // adaptive_spin_count here.
   ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
   ABSL_CONST_INIT static int adaptive_spin_count = 0;
-  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
-    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
-  });
+  LowLevelCallOnce(&init_adaptive_spin_count,
+                   []() { adaptive_spin_count = NumCPUs() > 1 ? 1000 : 1; });
 
   int c = adaptive_spin_count;
   uint32_t lock_value;
@@ -100,11 +96,11 @@ void SpinLock::SlowLock() {
     return;
   }
 
-  base_internal::SchedulingMode scheduling_mode;
+  SchedulingMode scheduling_mode;
   if ((lock_value & kSpinLockCooperative) != 0) {
-    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+    scheduling_mode = SCHEDULE_COOPERATIVE_AND_KERNEL;
   } else {
-    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+    scheduling_mode = SCHEDULE_KERNEL_ONLY;
   }
 
   // The lock was not obtained initially, so this thread needs to wait for
@@ -134,7 +130,7 @@ void SpinLock::SlowLock() {
       // new lock state will be the number of cycles this thread waited if
       // this thread obtains the lock.
       lock_value = TryLockInternal(lock_value, wait_cycles);
-      continue;   // Skip the delay at the end of the loop.
+      continue;  // Skip the delay at the end of the loop.
     } else if ((lock_value & kWaitTimeMask) == 0) {
       // The lock is still held, without a waiter being marked, but something
       // else about the lock word changed, causing our CAS to fail. For
@@ -150,8 +146,8 @@ void SpinLock::SlowLock() {
     // synchronization there to avoid false positives.
     ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
     // Wait for an OS specific delay.
-    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
-                                 scheduling_mode);
+    SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                  scheduling_mode);
     ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
     // Spin again after returning from the wait routine to give this thread
     // some chance of obtaining the lock.
@@ -162,8 +158,8 @@ void SpinLock::SlowLock() {
 }
 
 void SpinLock::SlowUnlock(uint32_t lock_value) {
-  base_internal::SpinLockWake(&lockword_,
-                              false);  // wake waiter if necessary
+  SpinLockWake(&lockword_,
+               false);  // wake waiter if necessary
 
   // If our acquisition was contended, collect contentionz profile info.  We
   // reserve a unitary wait time to represent that a waiter exists without our
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index 2a108969767..ff2b08726e5 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -19,7 +19,7 @@
 //   - for use by Abseil internal code that Mutex itself depends on
 //   - for async signal safety (see below)
 
-// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
+// SpinLock with a SchedulingMode::SCHEDULE_KERNEL_ONLY is async
 // signal safe. If a spinlock is used within a signal handler, all code that
 // acquires the lock must ensure that the signal cannot arrive while they are
 // holding the lock. Typically, this is done by blocking the signal.
@@ -31,14 +31,16 @@
 
 #include <stdint.h>
 #include <sys/types.h>
+#include <atomic>
 
 #include "absl/base/attributes.h"
+#include "absl/base/config.h"
 #include "absl/base/const_init.h"
-#include "absl/base/dynamic_annotations.h"
 #include "absl/base/internal/low_level_scheduling.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/base/internal/scheduling_mode.h"
 #include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/macros.h"
 #include "absl/base/thread_annotations.h"
 
 namespace tcmalloc {
@@ -55,17 +57,31 @@ namespace base_internal {
 
 class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
  public:
-  SpinLock() : lockword_(kSpinLockCooperative) {
-    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
-  }
+  constexpr SpinLock() : lockword_(kSpinLockCooperative) { RegisterWithTsan(); }
 
   // Constructors that allow non-cooperative spinlocks to be created for use
   // inside thread schedulers.  Normal clients should not use these.
-  explicit SpinLock(base_internal::SchedulingMode mode);
+  constexpr explicit SpinLock(SchedulingMode mode)
+      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+    RegisterWithTsan();
+  }
+
+#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(_WIN32)
+  // Constructor to inline users of the default scheduling mode.
+  //
+  // This only needs to exists for inliner runs, but doesn't work correctly in
+  // clang+windows builds, likely due to mangling differences.
+  ABSL_DEPRECATE_AND_INLINE()
+  constexpr explicit SpinLock(SchedulingMode mode)
+      __attribute__((enable_if(mode == SCHEDULE_COOPERATIVE_AND_KERNEL,
+                               "Cooperative use default constructor")))
+      : SpinLock() {}
+#endif
 
   // Constructor for global SpinLock instances.  See absl/base/const_init.h.
-  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
-      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+  ABSL_DEPRECATE_AND_INLINE()
+  constexpr SpinLock(absl::ConstInitType, SchedulingMode mode)
+      : SpinLock(mode) {}
 
   // For global SpinLock instances prefer trivial destructor when possible.
   // Default but non-trivial destructor in some build configurations causes an
@@ -77,7 +93,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 #endif
 
   // Acquire this SpinLock.
-  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+  inline void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
     ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
     if (!TryLockImpl()) {
       SlowLock();
@@ -85,11 +101,14 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
     ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
   }
 
+  ABSL_DEPRECATE_AND_INLINE()
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { return lock(); }
+
   // Try to acquire this SpinLock without blocking and return true if the
   // acquisition was successful.  If the lock was not acquired, false is
-  // returned.  If this SpinLock is free at the time of the call, TryLock
-  // will return true with high probability.
-  [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+  // returned.  If this SpinLock is free at the time of the call, try_lock will
+  // return true with high probability.
+  [[nodiscard]] inline bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
     ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
     bool res = TryLockImpl();
     ABSL_TSAN_MUTEX_POST_LOCK(
@@ -98,15 +117,20 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
     return res;
   }
 
+  ABSL_DEPRECATE_AND_INLINE()
+  [[nodiscard]] inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    return try_lock();
+  }
+
   // Release this SpinLock, which must be held by the calling thread.
-  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+  inline void unlock() ABSL_UNLOCK_FUNCTION() {
     ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
     uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
     lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
                                     std::memory_order_release);
 
     if ((lock_value & kSpinLockDisabledScheduling) != 0) {
-      base_internal::SchedulingGuard::EnableRescheduling(true);
+      SchedulingGuard::EnableRescheduling(true);
     }
     if ((lock_value & kWaitTimeMask) != 0) {
       // Collect contentionz profile info, and speed the wakeup of any waiter.
@@ -117,6 +141,9 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
     ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
   }
 
+  ABSL_DEPRECATE_AND_INLINE()
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); }
+
   // Determine if the lock is held.  When the lock is held by the invoking
   // thread, true will always be returned.  Intended to be used as
   // CHECK(lock.IsHeld()).
@@ -175,9 +202,16 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
       ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
 
   // Returns true if the provided scheduling mode is cooperative.
-  static constexpr bool IsCooperative(
-      base_internal::SchedulingMode scheduling_mode) {
-    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  static constexpr bool IsCooperative(SchedulingMode scheduling_mode) {
+    return scheduling_mode == SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  constexpr void RegisterWithTsan() {
+#if ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
+    if (!__builtin_is_constant_evaluated()) {
+      ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+    }
+#endif
   }
 
   bool IsCooperative() const {
@@ -204,17 +238,23 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
 // the duration of a C++ scope.
 class ABSL_SCOPED_LOCKABLE [[nodiscard]] SpinLockHolder {
  public:
-  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+  inline explicit SpinLockHolder(
+      SpinLock& l ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this))
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
       : lock_(l) {
-    l->Lock();
+    l.lock();
   }
 
-  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+  ABSL_DEPRECATE_AND_INLINE()
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : SpinLockHolder(*l) {}
+
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_.unlock(); }
 
   SpinLockHolder(const SpinLockHolder&) = delete;
   SpinLockHolder& operator=(const SpinLockHolder&) = delete;
 
  private:
-  SpinLock* lock_;
+  SpinLock& lock_;
 };
 
 // Register a hook for profiling support.
@@ -243,7 +283,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
   if ((lock_value & kSpinLockCooperative) == 0) {
     // For non-cooperative locks we must make sure we mark ourselves as
     // non-reschedulable before we attempt to CompareAndSwap.
-    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+    if (SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
     }
   }
@@ -252,7 +292,7 @@ inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
           lock_value,
           kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
           std::memory_order_acquire, std::memory_order_relaxed)) {
-    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+    SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
   }
 
   return lock_value;
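The renamed lowercase members are the significant change here: with lock(), try_lock(), and unlock(), SpinLock now satisfies the standard Lockable requirements, and SpinLockHolder takes a reference instead of a pointer (the pointer forms remain, but deprecated via ABSL_DEPRECATE_AND_INLINE). A short sketch of what callers migrate to (illustrative only; SpinLock is an Abseil-internal type):

    #include <mutex>  // NOLINT(build/c++11)
    #include "absl/base/internal/spinlock.h"

    void Example() {
      absl::base_internal::SpinLock lock;
      {
        // Old: SpinLockHolder h(&lock);  New: pass by reference.
        absl::base_internal::SpinLockHolder h(lock);
      }
      {
        // Lockable conformance means the standard wrappers also work,
        // as the new ScopedLock test later in this patch exercises.
        std::scoped_lock h(lock);
      }
    }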
diff --git a/absl/base/internal/spinlock_benchmark.cc b/absl/base/internal/spinlock_benchmark.cc
index 1790d9678e5..c79f49fefb0 100644
--- a/absl/base/internal/spinlock_benchmark.cc
+++ b/absl/base/internal/spinlock_benchmark.cc
@@ -35,7 +35,7 @@ static void BM_TryLock(benchmark::State& state) {
   static absl::NoDestructor<SpinLock> spinlock(
       scheduling_mode);
   for (auto _ : state) {
-    if (spinlock->TryLock()) spinlock->Unlock();
+    if (spinlock->try_lock()) spinlock->unlock();
   }
 }
 
@@ -50,7 +50,7 @@ static void BM_SpinLock(benchmark::State& state) {
   static absl::NoDestructor<SpinLock> spinlock(
       scheduling_mode);
   for (auto _ : state) {
-    absl::base_internal::SpinLockHolder holder(spinlock.get());
+    absl::base_internal::SpinLockHolder holder(*spinlock.get());
   }
 }
 
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index 1937db30796..a62dd31c22e 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -456,15 +456,6 @@ pid_t GetTID() { return getthrid(); }
 
 pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
 
-#elif defined(__native_client__)
-
-pid_t GetTID() {
-  auto* thread = pthread_self();
-  static_assert(sizeof(pid_t) == sizeof(thread),
-                "In NaCL int expected to be the same size as a pointer");
-  return reinterpret_cast<pid_t>(thread);
-}
-
 #elif defined(__Fuchsia__)
 
 pid_t GetTID() {
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
index c2b59aa6fa4..b4c75f50ee9 100644
--- a/absl/base/internal/sysinfo_test.cc
+++ b/absl/base/internal/sysinfo_test.cc
@@ -39,12 +39,6 @@ TEST(SysinfoTest, NumCPUs) {
 
 TEST(SysinfoTest, GetTID) {
   EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
-#ifdef __native_client__
-  // Native Client has a race condition bug that leads to memory
-  // exhaustion when repeatedly creating and joining threads.
-  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
-  return;
-#endif
   // Test that TIDs are unique to each thread.
   // Uses a few loops to exercise implementations that reallocate IDs.
   for (int i = 0; i < 10; ++i) {
@@ -59,7 +53,7 @@ TEST(SysinfoTest, GetTID) {
       threads.push_back(std::thread([&]() {
         pid_t id = GetTID();
         {
-          MutexLock lock(&mutex);
+          MutexLock lock(mutex);
           ASSERT_TRUE(tids.find(id) == tids.end());
           tids.insert(id);
         }
diff --git a/absl/base/internal/thread_identity_test.cc b/absl/base/internal/thread_identity_test.cc
index 5f17553e648..3ef2ffe540e 100644
--- a/absl/base/internal/thread_identity_test.cc
+++ b/absl/base/internal/thread_identity_test.cc
@@ -31,7 +31,7 @@ namespace base_internal {
 namespace {
 
 ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock(
-    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+    base_internal::SCHEDULE_KERNEL_ONLY);
 ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock);
 
 static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
@@ -58,7 +58,7 @@ static void TestThreadIdentityCurrent(const void* assert_no_identity) {
             PerThreadSynch::kAlignment);
   EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
 
-  absl::base_internal::SpinLockHolder l(&map_lock);
+  absl::base_internal::SpinLockHolder l(map_lock);
   num_identities_reused++;
 }
 
@@ -90,7 +90,7 @@ TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
   // We should have recycled ThreadIdentity objects above; while (external)
   // library threads allocating their own identities may preclude some
   // reuse, we should have sufficient repetitions to exclude this.
-  absl::base_internal::SpinLockHolder l(&map_lock);
+  absl::base_internal::SpinLockHolder l(map_lock);
   EXPECT_LT(kNumThreads, num_identities_reused);
 }
 
@@ -112,7 +112,7 @@ TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
     threads.push_back(std::thread([&]() {
       for (int l = 0; l < kNumLockLoops; ++l) {
        for (int m = 0; m < kNumMutexes; ++m) {
-          MutexLock lock(&mutexes[m]);
+          MutexLock lock(mutexes[m]);
         }
       }
     }));
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index 68f92730a8a..dca7cbaa4c0 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -85,6 +85,10 @@ int64_t UnscaledCycleClock::Now() {
 double UnscaledCycleClock::Frequency() {
 #ifdef __GLIBC__
   return __ppc_get_timebase_freq();
+#elif defined(__linux__)
+  // Fallback for musl + ppc64le: use constant timebase frequency (512 MHz)
+  // Must come after __GLIBC__.
+  return static_cast<double>(512000000);
 #elif defined(_AIX)
   // This is the same constant value as returned by
   // __ppc_get_timebase_freq().
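On glibc the POWER timebase frequency is queried at run time; the new __linux__ branch hard-codes it so musl-based ppc64le builds still get a working Frequency() (the targeted parts use a 512 MHz timebase). Converting a cycle delta into seconds is then just a division; an illustrative sketch (not part of the patch):

    #include <cstdint>

    // frequency_hz comes from UnscaledCycleClock::Frequency(), e.g. 512e6
    // on a musl/ppc64le build after this change.
    double CyclesToSeconds(int64_t cycle_delta, double frequency_hz) {
      return static_cast<double>(cycle_delta) / frequency_hz;
    }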
diff --git a/absl/base/internal/unscaledcycleclock_config.h b/absl/base/internal/unscaledcycleclock_config.h
index 43a3dabeeab..9a0841dfa79 100644
--- a/absl/base/internal/unscaledcycleclock_config.h
+++ b/absl/base/internal/unscaledcycleclock_config.h
@@ -34,7 +34,7 @@
 // CycleClock that runs at atleast 1 MHz.  We've found some Android
 // ARM64 devices where this is not the case, so we disable it by
 // default on Android ARM64.
-#if defined(__native_client__) || (defined(__APPLE__)) || \
+#if defined(__APPLE__) || \
     (defined(__ANDROID__) && defined(__aarch64__))
 #define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
 #else
diff --git a/absl/base/nullability.h b/absl/base/nullability.h
index 3a5d6e83e20..2796a36125b 100644
--- a/absl/base/nullability.h
+++ b/absl/base/nullability.h
@@ -184,7 +184,6 @@
 #define ABSL_BASE_NULLABILITY_H_
 
 #include "absl/base/config.h"
-#include "absl/base/internal/nullability_deprecated.h"
 
 // ABSL_POINTERS_DEFAULT_NONNULL
 //
diff --git a/absl/base/nullability_test.cc b/absl/base/nullability_test.cc
index bccc388beb9..bccf1af45fe 100644
--- a/absl/base/nullability_test.cc
+++ b/absl/base/nullability_test.cc
@@ -14,16 +14,13 @@
 
 #include "absl/base/nullability.h"
 
-#include <cassert>
 #include <memory>
 #include <type_traits>
 #include <utility>
 
 #include "gtest/gtest.h"
-#include "absl/base/attributes.h"
 
 namespace {
-namespace macro_annotations {
 void funcWithNonnullArg(int* absl_nonnull /*arg*/) {}
 template <typename T>
 void funcWithDeducedNonnullArg(T* absl_nonnull /*arg*/) {}
@@ -90,117 +87,4 @@ TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
   EXPECT_TRUE((std::is_same<T absl_nullable, T>::value));
   EXPECT_TRUE((std::is_same<T absl_nullability_unknown, T>::value));
 }
-}  // namespace macro_annotations
-
-// Allow testing of the deprecated type alias annotations.
-ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
-
-using ::absl::Nonnull;
-using ::absl::NullabilityUnknown;
-using ::absl::Nullable;
-namespace type_alias_annotations {
-
-void funcWithNonnullArg(Nonnull<int*> /*arg*/) {}
-template <typename T>
-void funcWithDeducedNonnullArg(Nonnull<T*> /*arg*/) {}
-
-TEST(NonnullTest, NonnullArgument) {
-  int var = 0;
-  funcWithNonnullArg(&var);
-  funcWithDeducedNonnullArg(&var);
-}
-
-Nonnull<int*> funcWithNonnullReturn() {
-  static int var = 0;
-  return &var;
-}
-
-TEST(NonnullTest, NonnullReturn) {
-  auto var = funcWithNonnullReturn();
-  (void)var;
-}
-
-TEST(PassThroughTest, PassesThroughRawPointerToInt) {
-  EXPECT_TRUE((std::is_same<Nonnull<int*>, int*>::value));
-  EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value));
-}
-
-TEST(PassThroughTest, PassesThroughRawPointerToVoid) {
-  EXPECT_TRUE((std::is_same<Nonnull<void*>, void*>::value));
-  EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value));
-}
-
-TEST(PassThroughTest, PassesThroughUniquePointerToInt) {
-  using T = std::unique_ptr<int>;
-  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
-  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
-}
-
-TEST(PassThroughTest, PassesThroughSharedPointerToInt) {
-  using T = std::shared_ptr<int>;
-  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
-  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
-}
-
-TEST(PassThroughTest, PassesThroughSharedPointerToVoid) {
-  using T = std::shared_ptr<void>;
-  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
-  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
-}
-
-TEST(PassThroughTest, PassesThroughPointerToMemberObject) {
-  using T = decltype(&std::pair<int, int>::first);
-  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
-  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
-}
-
-TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
-  using T = decltype(&std::unique_ptr<int>::reset);
-  EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
-  EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
-  EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
-}
-
-}  // namespace type_alias_annotations
-}  // namespace
-
-// Nullable ADL lookup test
-namespace util {
-// Helper for NullableAdlTest.  Returns true, denoting that argument-dependent
-// lookup found this implementation of DidAdlWin.  Must be in namespace
-// util itself, not a nested anonymous namespace.
-template <typename T>
-bool DidAdlWin(T*) {
-  return true;
-}
-
-// Because this type is defined in namespace util, an unqualified call to
-// DidAdlWin with a pointer to MakeAdlWin will find the above implementation.
-struct MakeAdlWin {};
-}  // namespace util
-
-namespace {
-// Returns false, denoting that ADL did not inspect namespace util.  If it
-// had, the better match (T*) above would have won out over the (...) here.
-bool DidAdlWin(...) { return false; }
-
-TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) {
-  // Treatment: util::Nullable<int*> contributes nothing to ADL because
-  // int* itself doesn't.
-  EXPECT_FALSE(DidAdlWin((int*)nullptr));
-  EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr));
-
-  // Control: Argument-dependent lookup does find the implementation in
-  // namespace util when the underlying pointee type resides there.
-  EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr));
-  EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr));
-}
-
-ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
 }  // namespace
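With the alias templates deleted, the macro-style qualifiers are the only supported spelling. Because they attach to the pointer declarator rather than wrapping the type, they compose with deduced types, which the old aliases could not (auto cannot appear as a template argument, as the deleted header's own comment noted). A before/after sketch (illustrative only):

    #include "absl/base/nullability.h"

    // Removed spelling:   absl::Nullable<int*> p = nullptr;
    // Supported spelling: the qualifier follows the pointer it annotates.
    int* absl_nullable p = nullptr;

    // Also works with deduced pointer types:
    int x = 0;
    auto* absl_nonnull q = &x;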
- a -= b; a -= c; a ^= (c >> 13); - b -= c; b -= a; b ^= (a << 8); - c -= a; c -= b; c ^= (b >> 13); - a -= b; a -= c; a ^= (c >> 12); - b -= c; b -= a; b ^= (a << 16); - c -= a; c -= b; c ^= (b >> 5); - a -= b; a -= c; a ^= (c >> 3); - b -= c; b -= a; b ^= (a << 10); - c -= a; c -= b; c ^= (b >> 15); + a -= b; + a -= c; + a ^= (c >> 13); + b -= c; + b -= a; + b ^= (a << 8); + c -= a; + c -= b; + c ^= (b >> 13); + a -= b; + a -= c; + a ^= (c >> 12); + b -= c; + b -= a; + b ^= (a << 16); + c -= a; + c -= b; + c ^= (b >> 5); + a -= b; + a -= c; + a ^= (c >> 3); + b -= c; + b -= a; + b ^= (a << 10); + c -= a; + c -= b; + c ^= (b >> 15); return c; } static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) { for (int i = 0; i < kIters; i++) { - SpinLockHolder h(spinlock); + SpinLockHolder h(*spinlock); for (size_t j = 0; j < kArrayLength; j++) { const size_t index = (j + thread_salt) % kArrayLength; values[index] = Hash32(values[index], thread_salt); @@ -102,7 +120,7 @@ static void ThreadedTest(SpinLock* spinlock) { thread.join(); } - SpinLockHolder h(spinlock); + SpinLockHolder h(*spinlock); for (size_t i = 1; i < kArrayLength; i++) { EXPECT_EQ(values[0], values[i]); } @@ -114,15 +132,13 @@ static_assert(std::is_trivially_destructible(), ""); TEST(SpinLock, StackNonCooperativeDisablesScheduling) { SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); - spinlock.Lock(); + SpinLockHolder l(spinlock); EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); - spinlock.Unlock(); } TEST(SpinLock, StaticNonCooperativeDisablesScheduling) { - static_noncooperative_spinlock.Lock(); + SpinLockHolder l(static_noncooperative_spinlock); EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); - static_noncooperative_spinlock.Unlock(); } TEST(SpinLock, WaitCyclesEncoding) { @@ -134,7 +150,7 @@ TEST(SpinLock, WaitCyclesEncoding) { // We should be able to encode up to (1^kMaxCycleBits - 1) without clamping // but the lower kProfileTimestampShift will be dropped. const int kMaxCyclesShift = - 32 - kLockwordReservedShift + kProfileTimestampShift; + 32 - kLockwordReservedShift + kProfileTimestampShift; const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; // These bits should be zero after encoding. @@ -171,22 +187,22 @@ TEST(SpinLock, WaitCyclesEncoding) { SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask)); // Check that we cannot produce kSpinLockSleeper during encoding. 
- int64_t sleeper_cycles = - kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift); + int64_t sleeper_cycles = kSpinLockSleeper + << (kProfileTimestampShift - kLockwordReservedShift); uint32_t sleeper_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles); EXPECT_NE(sleeper_value, kSpinLockSleeper); // Test clamping uint32_t max_value = - SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); + SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; EXPECT_EQ(expected_max_value_decoded, max_value_decoded); const int64_t step = (1 << kProfileTimestampShift); - uint32_t after_max_value = - SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step); + uint32_t after_max_value = SpinLockTest::EncodeWaitCycles( + start_time, start_time + kMaxCycles + step); int64_t after_max_value_decoded = SpinLockTest::DecodeWaitCycles(after_max_value); EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded); @@ -204,7 +220,7 @@ TEST(SpinLockWithThreads, StackSpinLock) { } TEST(SpinLockWithThreads, StackCooperativeSpinLock) { - SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + SpinLock spinlock; ThreadedTest(&spinlock); } @@ -227,12 +243,12 @@ TEST(SpinLockWithThreads, DoesNotDeadlock) { BlockingCounter* b) { locked->WaitForNotification(); // Wait for LockThenWait() to hold "s". b->DecrementCount(); - SpinLockHolder l(spinlock); + SpinLockHolder l(*spinlock); } static void LockThenWait(Notification* locked, SpinLock* spinlock, BlockingCounter* b) { - SpinLockHolder l(spinlock); + SpinLockHolder l(*spinlock); locked->Notify(); b->Wait(); } @@ -255,8 +271,7 @@ TEST(SpinLockWithThreads, DoesNotDeadlock) { } }; - SpinLock stack_cooperative_spinlock( - base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + SpinLock stack_cooperative_spinlock; SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY); Helper::DeadlockTest(&stack_cooperative_spinlock, base_internal::NumCPUs() * 2); @@ -272,13 +287,18 @@ TEST(SpinLockTest, IsCooperative) { SpinLock default_constructor; EXPECT_TRUE(SpinLockTest::IsCooperative(default_constructor)); - SpinLock cooperative(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); + SpinLock cooperative; EXPECT_TRUE(SpinLockTest::IsCooperative(cooperative)); SpinLock kernel_only(base_internal::SCHEDULE_KERNEL_ONLY); EXPECT_FALSE(SpinLockTest::IsCooperative(kernel_only)); } +TEST(SpinLockTest, ScopedLock) { + SpinLock s; + std::scoped_lock l(s); +} + } // namespace } // namespace base_internal ABSL_NAMESPACE_END diff --git a/absl/cleanup/BUILD.bazel b/absl/cleanup/BUILD.bazel index d5797816fdc..5475439250c 100644 --- a/absl/cleanup/BUILD.bazel +++ b/absl/cleanup/BUILD.bazel @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 61e816fc83a..68a90087918 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. 
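Aside from the reformatting, the spinlock hunks above change SpinLockHolder to be constructed from a reference rather than a pointer, and SpinLock evidently satisfies the BasicLockable requirements now, since the new ScopedLock test constructs a std::scoped_lock directly. A minimal usage sketch under those assumptions (SpinLock is an Abseil-internal type, so this only mirrors what the tests themselves do):

#include <mutex>

#include "absl/base/internal/spinlock.h"

void LockingStyles() {
  absl::base_internal::SpinLock s;
  {
    absl::base_internal::SpinLockHolder h(s);  // now takes a reference, not &s
  }
  std::scoped_lock l(s);  // SpinLock works with the standard lock adapters
}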
# +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -410,6 +413,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", + "//absl/hash", "//absl/memory", "//absl/meta:type_traits", "//absl/utility", @@ -530,6 +534,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":common_policy_traits", + ":container_memory", "//absl/meta:type_traits", ], ) @@ -697,6 +702,19 @@ cc_library( ], ) +cc_test( + name = "hashtable_control_bytes_test", + srcs = ["internal/hashtable_control_bytes_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":hashtable_control_bytes", + "//absl/base:config", + "@googletest//:gtest", + "@googletest//:gtest_main", + ], +) + cc_library( name = "raw_hash_set_resize_impl", hdrs = ["internal/raw_hash_set_resize_impl.h"], @@ -735,6 +753,7 @@ cc_library( ":hashtable_debug_hooks", ":hashtablez_sampler", ":raw_hash_set_resize_impl", + "//absl/base", "//absl/base:config", "//absl/base:core_headers", "//absl/base:dynamic_annotations", diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index d8cd7d08984..6adba18c383 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -469,6 +469,7 @@ absl_cc_library( ${ABSL_DEFAULT_COPTS} DEPS absl::config + absl::hash absl::memory absl::type_traits absl::utility @@ -583,6 +584,7 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::container_memory absl::common_policy_traits absl::meta PUBLIC @@ -761,6 +763,19 @@ absl_cc_library( absl::endian ) +absl_cc_test( + NAME + hashtable_control_bytes_test + SRCS + "internal/hashtable_control_bytes_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::config + absl::hashtable_control_bytes + GTest::gmock_main +) + # Internal-only target, do not depend on directly. 
absl_cc_library( NAME @@ -772,6 +787,7 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::base absl::bits absl::common_policy_traits absl::compressed_tuple diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h index 32a82ef062c..131f622fef5 100644 --- a/absl/container/btree_map.h +++ b/absl/container/btree_map.h @@ -117,8 +117,8 @@ class ABSL_ATTRIBUTE_OWNER btree_map // // * Copy assignment operator // - // absl::btree_map map4; - // map4 = map3; + // absl::btree_map map4; + // map4 = map3; // // * Move constructor // @@ -555,8 +555,8 @@ class ABSL_ATTRIBUTE_OWNER btree_multimap // // * Copy assignment operator // - // absl::btree_multimap map4; - // map4 = map3; + // absl::btree_multimap map4; + // map4 = map3; // // * Move constructor // diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h index 16181de577f..44a39cf8071 100644 --- a/absl/container/btree_set.h +++ b/absl/container/btree_set.h @@ -119,8 +119,8 @@ class ABSL_ATTRIBUTE_OWNER btree_set // // * Copy assignment operator // - // absl::btree_set set4; - // set4 = set3; + // absl::btree_set set4; + // set4 = set3; // // * Move constructor // @@ -475,8 +475,8 @@ class ABSL_ATTRIBUTE_OWNER btree_multiset // // * Copy assignment operator // - // absl::btree_multiset set4; - // set4 = set3; + // absl::btree_multiset set4; + // set4 = set3; // // * Move constructor // diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h index 6c238fc381f..b08735f4619 100644 --- a/absl/container/fixed_array.h +++ b/absl/container/fixed_array.h @@ -392,8 +392,7 @@ class ABSL_ATTRIBUTE_WARN_UNUSED FixedArray { template friend H AbslHashValue(H h, const FixedArray& v) { - return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), - hash_internal::WeaklyMixedInteger{v.size()}); + return H::combine_contiguous(std::move(h), v.data(), v.size()); } private: diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h index bc86ced9971..a1f4f24aae0 100644 --- a/absl/container/flat_hash_map.h +++ b/absl/container/flat_hash_map.h @@ -115,18 +115,18 @@ struct FlatHashMapPolicy; // absl::flat_hash_map ducks = // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; // -// // Insert a new element into the flat hash map -// ducks.insert({"d", "donald"}); +// // Insert a new element into the flat hash map +// ducks.insert({"d", "donald"}); // -// // Force a rehash of the flat hash map -// ducks.rehash(0); +// // Force a rehash of the flat hash map +// ducks.rehash(0); // -// // Find the element with the key "b" -// std::string search_key = "b"; -// auto result = ducks.find(search_key); -// if (result != ducks.end()) { -// std::cout << "Result: " << result->second << std::endl; -// } +// // Find the element with the key "b" +// std::string search_key = "b"; +// auto result = ducks.find(search_key); +// if (result != ducks.end()) { +// std::cout << "Result: " << result->second << std::endl; +// } template , class Eq = DefaultHashContainerEq, class Allocator = std::allocator>> @@ -158,9 +158,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_map // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_map map4; - // map4 = map3; + // // Hash functor and Comparator are copied as well + // absl::flat_hash_map map4; + // map4 = map3; // // * Move constructor // @@ -660,10 +660,10 @@ struct FlatHashMapPolicy { std::forward(args)...); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return 
memory_internal::IsLayoutCompatible::value - ? &TypeErasedApplyToSlotFn + ? &TypeErasedApplyToSlotFn : nullptr; } diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc index 5c83c94136f..e1d9382a231 100644 --- a/absl/container/flat_hash_map_test.cc +++ b/absl/container/flat_hash_map_test.cc @@ -116,15 +116,6 @@ TEST(FlatHashMap, StandardLayout) { TEST(FlatHashMap, Relocatability) { static_assert(absl::is_trivially_relocatable::value); -#if ABSL_INTERNAL_CPLUSPLUS_LANG <= 202002L - // std::pair is not trivially copyable in C++23 in some standard - // library versions. - // See https://github.com/llvm/llvm-project/pull/95444 for instance. - // container_memory.h contains a workaround so what really matters - // is the transfer test below. - static_assert( - absl::is_trivially_relocatable>::value); -#endif static_assert( std::is_same::transfer>(nullptr, diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h index bf63eb59867..2d255529ba1 100644 --- a/absl/container/flat_hash_set.h +++ b/absl/container/flat_hash_set.h @@ -114,16 +114,16 @@ struct FlatHashSetPolicy; // absl::flat_hash_set ducks = // {"huey", "dewey", "louie"}; // -// // Insert a new element into the flat hash set -// ducks.insert("donald"); +// // Insert a new element into the flat hash set +// ducks.insert("donald"); // -// // Force a rehash of the flat hash set -// ducks.rehash(0); +// // Force a rehash of the flat hash set +// ducks.rehash(0); // -// // See if "dewey" is present -// if (ducks.contains("dewey")) { -// std::cout << "We found dewey!" << std::endl; -// } +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" << std::endl; +// } template , class Eq = DefaultHashContainerEq, class Allocator = std::allocator> @@ -154,9 +154,9 @@ class ABSL_ATTRIBUTE_OWNER flat_hash_set // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::flat_hash_set set4; - // set4 = set3; + // // Hash functor and Comparator are copied as well + // absl::flat_hash_set set4; + // set4 = set3; // // * Move constructor // @@ -558,9 +558,9 @@ struct FlatHashSetPolicy { static size_t space_used(const T*) { return 0; } - template + template static constexpr HashSlotFn get_hash_slot_fn() { - return &TypeErasedApplyToSlotFn; + return &TypeErasedApplyToSlotFn; } }; } // namespace container_internal diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc index bb90efa2a09..ca069b402ad 100644 --- a/absl/container/flat_hash_set_test.cc +++ b/absl/container/flat_hash_set_test.cc @@ -383,6 +383,20 @@ TEST(FlatHashSet, MoveOnlyKey) { EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3)); } +TEST(FlatHashSet, IsDefaultHash) { + using absl::container_internal::hashtable_debug_internal:: + HashtableDebugAccess; + EXPECT_EQ(HashtableDebugAccess>::kIsDefaultHash, true); + EXPECT_EQ(HashtableDebugAccess>::kIsDefaultHash, + true); + + struct Hash { + size_t operator()(size_t i) const { return i; } + }; + EXPECT_EQ((HashtableDebugAccess>::kIsDefaultHash), + false); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h index f871b349134..6b05d92c56f 100644 --- a/absl/container/inlined_vector.h +++ b/absl/container/inlined_vector.h @@ -815,13 +815,11 @@ class ABSL_ATTRIBUTE_WARN_UNUSED InlinedVector { // `InlinedVector::clear()` // // Destroys all elements in the inlined vector, setting the size to 
`0` and
-  // deallocating any held memory.
+  // preserving capacity.
   void clear() noexcept {
     inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
         storage_.GetAllocator(), data(), size());
-    storage_.DeallocateIfAllocated();
-
-    storage_.SetInlinedSize(0);
+    storage_.SetSize(0);
   }

   // `InlinedVector::reserve(...)`
@@ -1008,9 +1006,17 @@ bool operator>=(const absl::InlinedVector<T, N, A>& a,
 // call this directly.
 template <typename H, typename T, size_t N, typename A>
 H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
-  auto size = a.size();
-  return H::combine(H::combine_contiguous(std::move(h), a.data(), size),
-                    hash_internal::WeaklyMixedInteger{size});
+  return H::combine_contiguous(std::move(h), a.data(), a.size());
+}
+
+template <typename T, size_t N, typename A, typename Predicate>
+constexpr typename InlinedVector<T, N, A>::size_type erase_if(
+    InlinedVector<T, N, A>& v, Predicate pred) {
+  const auto it = std::remove_if(v.begin(), v.end(), std::move(pred));
+  const auto removed = static_cast<typename InlinedVector<T, N, A>::size_type>(
+      std::distance(it, v.end()));
+  v.erase(it, v.end());
+  return removed;
 }

 ABSL_NAMESPACE_END
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index ff0e77b50b1..1e3ff823f1a 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -51,6 +51,7 @@ using testing::AllOf;
 using testing::Each;
 using testing::ElementsAre;
 using testing::ElementsAreArray;
+using testing::IsEmpty;
 using testing::Eq;
 using testing::Gt;
 using testing::Pointee;
@@ -919,7 +920,9 @@ TEST(IntVec, Clear) {
     SCOPED_TRACE(len);
     IntVec v;
     Fill(&v, len);
+    size_t capacity_before_clear = v.capacity();
     v.clear();
+    EXPECT_EQ(v.capacity(), capacity_before_clear);
     EXPECT_EQ(0u, v.size());
     EXPECT_EQ(v.begin(), v.end());
   }
@@ -2254,4 +2257,22 @@ TEST(StorageTest, InlinedCapacityAutoIncrease) {
             sizeof(MySpan) / sizeof(int));
 }

+TEST(IntVec, EraseIf) {
+  IntVec v = {3, 1, 2, 0};
+  EXPECT_EQ(absl::erase_if(v, [](int i) { return i > 1; }), 2u);
+  EXPECT_THAT(v, ElementsAre(1, 0));
+}
+
+TEST(IntVec, EraseIfMatchesNone) {
+  IntVec v = {1, 2, 3};
+  EXPECT_EQ(absl::erase_if(v, [](int i) { return i > 10; }), 0u);
+  EXPECT_THAT(v, ElementsAre(1, 2, 3));
+}
+
+TEST(IntVec, EraseIfMatchesAll) {
+  IntVec v = {1, 2, 3};
+  EXPECT_EQ(absl::erase_if(v, [](int i) { return i > 0; }), 3u);
+  EXPECT_THAT(v, IsEmpty());
+}
+
 }  // anonymous namespace
diff --git a/absl/container/internal/compressed_tuple.h b/absl/container/internal/compressed_tuple.h
index 6db0468d99a..2dd8d6c323c 100644
--- a/absl/container/internal/compressed_tuple.h
+++ b/absl/container/internal/compressed_tuple.h
@@ -64,24 +64,24 @@ struct Elem<CompressedTuple<B...>, I>
 template <typename D, size_t I>
 using ElemT = typename Elem<D, I>::type;

-// We can't use EBCO on other CompressedTuples because that would mean that we
-// derive from multiple Storage<> instantiations with the same I parameter,
-// and potentially from multiple identical Storage<> instantiations. So anytime
-// we use type inheritance rather than encapsulation, we mark
-// CompressedTupleImpl, to make this easy to detect.
-struct uses_inheritance {};

 template <typename T>
 constexpr bool ShouldUseBase() {
   return std::is_class<T>::value && std::is_empty<T>::value &&
-         !std::is_final<T>::value &&
-         !std::is_base_of<uses_inheritance, T>::value;
+         !std::is_final<T>::value;
 }

+// Tag type used to disambiguate Storage types for different CompressedTuples.
+// Without it, CompressedTuple<T, CompressedTuple<T>> would inherit from
+// Storage<T, 0> twice.
+template <typename... Ts>
+struct StorageTag;
+
 // The storage class provides two specializations:
 //   - For empty classes, it stores T as a base class.
 //   - For everything else, it stores T as a member.
-template ()> +// Tag should be set to StorageTag. +template ()> struct Storage { T value; constexpr Storage() = default; @@ -94,8 +94,8 @@ struct Storage { constexpr T&& get() && { return std::move(*this).value; } }; -template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { +template +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { constexpr Storage() = default; template @@ -111,30 +111,35 @@ template struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< - CompressedTuple, absl::index_sequence, ShouldAnyUseBase> +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + CompressedTupleImpl, absl::index_sequence, + ShouldAnyUseBase> // We use the dummy identity function through std::integral_constant to // convince MSVC of accepting and expanding I in that context. Without it // you would get: // error C3548: 'I': parameter pack cannot be used in this context - : uses_inheritance, - Storage::value>... { + : Storage::value, + StorageTag>... { constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, std::forward(args))... {} + : Storage>(absl::in_place, + std::forward(args))... {} friend CompressedTuple; }; template -struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< - CompressedTuple, absl::index_sequence, false> +struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC + CompressedTupleImpl, absl::index_sequence, + false> // We use the dummy identity function as above... - : Storage::value, false>... { + : Storage::value, StorageTag, + false>... { constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) - : Storage(absl::in_place, std::forward(args))... {} + : Storage, false>(absl::in_place, + std::forward(args))... {} friend CompressedTuple; }; @@ -183,9 +188,7 @@ struct TupleItemsMoveConstructible // Helper class to perform the Empty Base Class Optimization. // Ts can contain classes and non-classes, empty or not. For the ones that // are empty classes, we perform the CompressedTuple. If all types in Ts are -// empty classes, then CompressedTuple is itself an empty class. (This -// does not apply when one or more of those empty classes is itself an empty -// CompressedTuple.) +// empty classes, then CompressedTuple is itself an empty class. // // To access the members, use member .get() function. // @@ -208,7 +211,8 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple using ElemT = internal_compressed_tuple::ElemT; template - using StorageT = internal_compressed_tuple::Storage, I>; + using StorageT = internal_compressed_tuple::Storage< + ElemT, I, internal_compressed_tuple::StorageTag>; public: // There seems to be a bug in MSVC dealing in which using '=default' here will diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc index 01b334e1fac..662f944640b 100644 --- a/absl/container/internal/compressed_tuple_test.cc +++ b/absl/container/internal/compressed_tuple_test.cc @@ -452,14 +452,15 @@ TEST(CompressedTupleTest, EmptyFinalClass) { } #endif -// TODO(b/214288561): enable this test. -TEST(CompressedTupleTest, DISABLED_NestedEbo) { +TEST(CompressedTupleTest, NestedEbo) { struct Empty1 {}; struct Empty2 {}; CompressedTuple, int> x; CompressedTuple y; - // Currently fails with sizeof(x) == 8, sizeof(y) == 4. 
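// Why this assertion can now pass (explanatory note, not from the original
// source): StorageTag<Ts...> gives each CompressedTuple instantiation its own
// distinct Storage bases, so a nested empty CompressedTuple can itself be
// inherited from instead of stored as a member, and no longer adds size.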
EXPECT_EQ(sizeof(x), sizeof(y)); + + using NestedEmpty = CompressedTuple>; + EXPECT_TRUE(std::is_empty_v); } } // namespace diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h index e7ac1dba43b..8c974698b11 100644 --- a/absl/container/internal/container_memory.h +++ b/absl/container/internal/container_memory.h @@ -26,6 +26,7 @@ #include #include "absl/base/config.h" +#include "absl/hash/hash.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" @@ -464,24 +465,87 @@ struct map_slot_policy { } }; +// Suppress erroneous uninitialized memory errors on GCC. For example, GCC +// thinks that the call to slot_array() in find_or_prepare_insert() is reading +// uninitialized memory, but slot_array is only called there when the table is +// non-empty and this memory is initialized when the table is non-empty. +#if !defined(__clang__) && defined(__GNUC__) +#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \ + _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \ + _Pragma("GCC diagnostic pop") +#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \ + ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x) +#else +#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x +#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x +#endif + +// Variadic arguments hash function that ignore the rest of the arguments. +// Useful for usage with policy traits. +template +struct HashElement { + HashElement(const Hash& h, size_t s) : hash(h), seed(s) {} + + template + size_t operator()(const K& key, Args&&...) const { + if constexpr (kIsDefault) { + // TODO(b/384509507): resolve `no header providing + // "absl::hash_internal::SupportsHashWithSeed" is directly included`. + // Maybe we should make "internal/hash.h" be a separate library. + return absl::hash_internal::HashWithSeed().hash(hash, key, seed); + } + // NOLINTNEXTLINE(clang-diagnostic-sign-conversion) + return hash(key) ^ seed; + } + const Hash& hash; + size_t seed; +}; + +// No arguments function hash function for a specific key. +template +struct HashKey { + HashKey(const Hash& h, const Key& k) : hash(h), key(k) {} + + size_t operator()(size_t seed) const { + return HashElement{hash, seed}(key); + } + const Hash& hash; + const Key& key; +}; + +// Variadic arguments equality function that ignore the rest of the arguments. +// Useful for usage with policy traits. +template +struct EqualElement { + template + bool operator()(const K2& lhs, Args&&...) const { + ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs)); + } + const K1& rhs; + const KeyEqual& eq; +}; + // Type erased function for computing hash of the slot. -using HashSlotFn = size_t (*)(const void* hash_fn, void* slot); +using HashSlotFn = size_t (*)(const void* hash_fn, void* slot, size_t seed); // Type erased function to apply `Fn` to data inside of the `slot`. // The data is expected to have type `T`. -template -size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) { +template +size_t TypeErasedApplyToSlotFn(const void* fn, void* slot, size_t seed) { const auto* f = static_cast(fn); - return (*f)(*static_cast(slot)); + return HashElement{*f, seed}(*static_cast(slot)); } // Type erased function to apply `Fn` to data inside of the `*slot_ptr`. // The data is expected to have type `T`. 
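// Aside (illustrative, not part of this diff): all of these type-erased
// helpers now thread a per-table `seed` through HashElement. For a
// non-default hasher the mix is a plain XOR, e.g. with
//   struct Id { size_t operator()(size_t v) const { return v; } };
// HashElement<Id, /*kIsDefault=*/false>{Id{}, /*seed=*/0x2a}(size_t{7})
// returns 7 ^ 0x2a, while kIsDefault == true routes through
// absl::hash_internal::HashWithSeed so the seed feeds the hash itself.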
-template -size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) { +template +size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr, + size_t seed) { const auto* f = static_cast(fn); const T* slot = *static_cast(slot_ptr); - return (*f)(*slot); + return HashElement{*f, seed}(*slot); } } // namespace container_internal diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc index 7e4357d5ed5..97b09f758e7 100644 --- a/absl/container/internal/container_memory_test.cc +++ b/absl/container/internal/container_memory_test.cc @@ -300,16 +300,46 @@ TEST(MapSlotPolicy, DestroyReturnsTrue) { TEST(ApplyTest, TypeErasedApplyToSlotFn) { size_t x = 7; + size_t seed = 100; auto fn = [](size_t v) { return v * 2; }; - EXPECT_EQ((TypeErasedApplyToSlotFn(&fn, &x)), 14); + EXPECT_EQ( + (TypeErasedApplyToSlotFn( + &fn, &x, seed)), + (HashElement(fn, seed)(x))); } TEST(ApplyTest, TypeErasedDerefAndApplyToSlotFn) { size_t x = 7; + size_t seed = 100; auto fn = [](size_t v) { return v * 2; }; size_t* x_ptr = &x; + EXPECT_EQ((TypeErasedDerefAndApplyToSlotFn(&fn, &x_ptr, + seed)), + (HashElement(fn, seed)(x))); +} + +TEST(HashElement, DefaultHash) { + size_t x = 7; + size_t seed = 100; + struct HashWithSeed { + size_t operator()(size_t v) const { return v * 2; } + size_t hash_with_seed(size_t v, size_t seed) const { + return v * 2 + seed * 3; + } + } hash; + EXPECT_EQ((HashElement(hash, seed)(x)), + hash.hash_with_seed(x, seed)); +} + +TEST(HashElement, NonDefaultHash) { + size_t x = 7; + size_t seed = 100; + auto fn = [](size_t v) { return v * 2; }; EXPECT_EQ( - (TypeErasedDerefAndApplyToSlotFn(&fn, &x_ptr)), 14); + (HashElement( + fn, seed)(x)), + fn(x) ^ seed); } } // namespace diff --git a/absl/container/internal/hash_function_defaults.h b/absl/container/internal/hash_function_defaults.h index 0f07bcfe294..eefecabceb7 100644 --- a/absl/container/internal/hash_function_defaults.h +++ b/absl/container/internal/hash_function_defaults.h @@ -49,6 +49,7 @@ #include #include #include +#include #include #include "absl/base/config.h" @@ -58,10 +59,6 @@ #include "absl/strings/cord.h" #include "absl/strings/string_view.h" -#ifdef ABSL_HAVE_STD_STRING_VIEW -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -82,6 +79,18 @@ struct StringHash { size_t operator()(const absl::Cord& v) const { return absl::Hash{}(v); } + + private: + friend struct absl::hash_internal::HashWithSeed; + + size_t hash_with_seed(absl::string_view v, size_t seed) const { + return absl::hash_internal::HashWithSeed().hash( + absl::Hash{}, v, seed); + } + size_t hash_with_seed(const absl::Cord& v, size_t seed) const { + return absl::hash_internal::HashWithSeed().hash(absl::Hash{}, v, + seed); + } }; struct StringEq { @@ -113,8 +122,6 @@ struct HashEq : StringHashEq {}; template <> struct HashEq : StringHashEq {}; -#ifdef ABSL_HAVE_STD_STRING_VIEW - template struct BasicStringHash { using is_transparent = void; @@ -153,8 +160,6 @@ struct HashEq : BasicStringHashEq {}; template <> struct HashEq : BasicStringHashEq {}; -#endif // ABSL_HAVE_STD_STRING_VIEW - // Supports heterogeneous lookup for pointers and smart pointers. 
template struct HashEq { diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc index 912d1190a6b..9a39b0719cd 100644 --- a/absl/container/internal/hash_function_defaults_test.cc +++ b/absl/container/internal/hash_function_defaults_test.cc @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -28,10 +29,6 @@ #include "absl/strings/cord_test_helpers.h" #include "absl/strings/string_view.h" -#ifdef ABSL_HAVE_STD_STRING_VIEW -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { @@ -118,9 +115,6 @@ TYPED_TEST(HashString, Works) { } TEST(BasicStringViewTest, WStringEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(L"a", L"a")); EXPECT_TRUE(eq(L"a", std::wstring_view(L"a"))); @@ -128,13 +122,9 @@ TEST(BasicStringViewTest, WStringEqWorks) { EXPECT_FALSE(eq(L"a", L"b")); EXPECT_FALSE(eq(L"a", std::wstring_view(L"b"))); EXPECT_FALSE(eq(L"a", std::wstring(L"b"))); -#endif } TEST(BasicStringViewTest, WStringViewEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(L"a", L"a")); EXPECT_TRUE(eq(L"a", std::wstring_view(L"a"))); @@ -142,13 +132,9 @@ TEST(BasicStringViewTest, WStringViewEqWorks) { EXPECT_FALSE(eq(L"a", L"b")); EXPECT_FALSE(eq(L"a", std::wstring_view(L"b"))); EXPECT_FALSE(eq(L"a", std::wstring(L"b"))); -#endif } TEST(BasicStringViewTest, U16StringEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(u"a", u"a")); EXPECT_TRUE(eq(u"a", std::u16string_view(u"a"))); @@ -156,13 +142,9 @@ TEST(BasicStringViewTest, U16StringEqWorks) { EXPECT_FALSE(eq(u"a", u"b")); EXPECT_FALSE(eq(u"a", std::u16string_view(u"b"))); EXPECT_FALSE(eq(u"a", std::u16string(u"b"))); -#endif } TEST(BasicStringViewTest, U16StringViewEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(u"a", u"a")); EXPECT_TRUE(eq(u"a", std::u16string_view(u"a"))); @@ -170,13 +152,9 @@ TEST(BasicStringViewTest, U16StringViewEqWorks) { EXPECT_FALSE(eq(u"a", u"b")); EXPECT_FALSE(eq(u"a", std::u16string_view(u"b"))); EXPECT_FALSE(eq(u"a", std::u16string(u"b"))); -#endif } TEST(BasicStringViewTest, U32StringEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(U"a", U"a")); EXPECT_TRUE(eq(U"a", std::u32string_view(U"a"))); @@ -184,13 +162,9 @@ TEST(BasicStringViewTest, U32StringEqWorks) { EXPECT_FALSE(eq(U"a", U"b")); EXPECT_FALSE(eq(U"a", std::u32string_view(U"b"))); EXPECT_FALSE(eq(U"a", std::u32string(U"b"))); -#endif } TEST(BasicStringViewTest, U32StringViewEqWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_eq eq; EXPECT_TRUE(eq(U"a", U"a")); EXPECT_TRUE(eq(U"a", std::u32string_view(U"a"))); @@ -198,85 +172,60 @@ TEST(BasicStringViewTest, U32StringViewEqWorks) { EXPECT_FALSE(eq(U"a", U"b")); EXPECT_FALSE(eq(U"a", std::u32string_view(U"b"))); EXPECT_FALSE(eq(U"a", std::u32string(U"b"))); -#endif } TEST(BasicStringViewTest, WStringHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash hash; auto h = hash(L"a"); EXPECT_EQ(h, hash(std::wstring_view(L"a"))); EXPECT_EQ(h, hash(std::wstring(L"a"))); EXPECT_NE(h, hash(std::wstring_view(L"b"))); EXPECT_NE(h, hash(std::wstring(L"b"))); -#endif } TEST(BasicStringViewTest, WStringViewHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash 
hash; auto h = hash(L"a"); EXPECT_EQ(h, hash(std::wstring_view(L"a"))); EXPECT_EQ(h, hash(std::wstring(L"a"))); EXPECT_NE(h, hash(std::wstring_view(L"b"))); EXPECT_NE(h, hash(std::wstring(L"b"))); -#endif } TEST(BasicStringViewTest, U16StringHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash hash; auto h = hash(u"a"); EXPECT_EQ(h, hash(std::u16string_view(u"a"))); EXPECT_EQ(h, hash(std::u16string(u"a"))); EXPECT_NE(h, hash(std::u16string_view(u"b"))); EXPECT_NE(h, hash(std::u16string(u"b"))); -#endif } TEST(BasicStringViewTest, U16StringViewHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash hash; auto h = hash(u"a"); EXPECT_EQ(h, hash(std::u16string_view(u"a"))); EXPECT_EQ(h, hash(std::u16string(u"a"))); EXPECT_NE(h, hash(std::u16string_view(u"b"))); EXPECT_NE(h, hash(std::u16string(u"b"))); -#endif } TEST(BasicStringViewTest, U32StringHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash hash; auto h = hash(U"a"); EXPECT_EQ(h, hash(std::u32string_view(U"a"))); EXPECT_EQ(h, hash(std::u32string(U"a"))); EXPECT_NE(h, hash(std::u32string_view(U"b"))); EXPECT_NE(h, hash(std::u32string(U"b"))); -#endif } TEST(BasicStringViewTest, U32StringViewHashWorks) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else hash_default_hash hash; auto h = hash(U"a"); EXPECT_EQ(h, hash(std::u32string_view(U"a"))); EXPECT_EQ(h, hash(std::u32string(U"a"))); EXPECT_NE(h, hash(std::u32string_view(U"b"))); EXPECT_NE(h, hash(std::u32string(U"b"))); -#endif } struct NoDeleter { diff --git a/absl/container/internal/hash_policy_testing.h b/absl/container/internal/hash_policy_testing.h index 66bb12ec457..e9f57579ed7 100644 --- a/absl/container/internal/hash_policy_testing.h +++ b/absl/container/internal/hash_policy_testing.h @@ -119,7 +119,11 @@ struct Alloc : std::allocator { using propagate_on_container_swap = std::true_type; // Using old paradigm for this to ensure compatibility. - explicit Alloc(size_t id = 0) : id_(id) {} + // + // NOTE: As of 2025-05, this constructor cannot be explicit in order to work + // with the libstdc++ that ships with GCC15. + // NOLINTNEXTLINE(google-explicit-constructor) + Alloc(size_t id = 0) : id_(id) {} Alloc(const Alloc&) = default; Alloc& operator=(const Alloc&) = default; diff --git a/absl/container/internal/hash_policy_traits.h b/absl/container/internal/hash_policy_traits.h index cd6b42f9ec6..82eed2a9777 100644 --- a/absl/container/internal/hash_policy_traits.h +++ b/absl/container/internal/hash_policy_traits.h @@ -22,6 +22,7 @@ #include #include "absl/container/internal/common_policy_traits.h" +#include "absl/container/internal/container_memory.h" #include "absl/meta/type_traits.h" namespace absl { @@ -145,9 +146,7 @@ struct hash_policy_traits : common_policy_traits { return P::value(elem); } - using HashSlotFn = size_t (*)(const void* hash_fn, void* slot); - - template + template static constexpr HashSlotFn get_hash_slot_fn() { // get_hash_slot_fn may return nullptr to signal that non type erased function // should be used. GCC warns against comparing function address with nullptr. @@ -156,9 +155,9 @@ struct hash_policy_traits : common_policy_traits { // silent error: the address of * will never be NULL [-Werror=address] #pragma GCC diagnostic ignored "-Waddress" #endif - return Policy::template get_hash_slot_fn() == nullptr - ? &hash_slot_fn_non_type_erased - : Policy::template get_hash_slot_fn(); + return Policy::template get_hash_slot_fn() == nullptr + ? 
&hash_slot_fn_non_type_erased + : Policy::template get_hash_slot_fn(); #if defined(__GNUC__) && !defined(__clang__) #pragma GCC diagnostic pop #endif @@ -168,19 +167,12 @@ struct hash_policy_traits : common_policy_traits { static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); } private: - template - struct HashElement { - template - size_t operator()(const K& key, Args&&...) const { - return h(key); - } - const Hash& h; - }; - - template - static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot) { - return Policy::apply(HashElement{*static_cast(hash_fn)}, - Policy::element(static_cast(slot))); + template + static size_t hash_slot_fn_non_type_erased(const void* hash_fn, void* slot, + size_t seed) { + return Policy::apply( + HashElement{*static_cast(hash_fn), seed}, + Policy::element(static_cast(slot))); } // Use go/ranked-overloads for dispatching. Rank1 is preferred. diff --git a/absl/container/internal/hash_policy_traits_test.cc b/absl/container/internal/hash_policy_traits_test.cc index 2d2c7c2c38a..03de1322a1f 100644 --- a/absl/container/internal/hash_policy_traits_test.cc +++ b/absl/container/internal/hash_policy_traits_test.cc @@ -45,7 +45,7 @@ struct PolicyWithoutOptionalOps { static std::function apply_impl; static std::function value; - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -99,7 +99,7 @@ struct PolicyNoHashFn { return fn(v); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -108,9 +108,9 @@ struct PolicyNoHashFn { size_t* PolicyNoHashFn::apply_called_count; struct PolicyCustomHashFn : PolicyNoHashFn { - template + template static constexpr HashSlotFn get_hash_slot_fn() { - return &TypeErasedApplyToSlotFn; + return &TypeErasedApplyToSlotFn; } }; @@ -120,9 +120,11 @@ TEST(HashTest, PolicyNoHashFn_get_hash_slot_fn) { Hash hasher; Slot value = 7; - auto* fn = hash_policy_traits::get_hash_slot_fn(); + auto* fn = hash_policy_traits::get_hash_slot_fn< + Hash, /*kIsDefault=*/false>(); EXPECT_NE(fn, nullptr); - EXPECT_EQ(fn(&hasher, &value), hasher(value)); + EXPECT_EQ(fn(&hasher, &value, 100), + (HashElement(hasher, 100)(value))); EXPECT_EQ(apply_called_count, 1); } @@ -132,9 +134,12 @@ TEST(HashTest, PolicyCustomHashFn_get_hash_slot_fn) { Hash hasher; Slot value = 7; - auto* fn = hash_policy_traits::get_hash_slot_fn(); - EXPECT_EQ(fn, PolicyCustomHashFn::get_hash_slot_fn()); - EXPECT_EQ(fn(&hasher, &value), hasher(value)); + auto* fn = hash_policy_traits::get_hash_slot_fn< + Hash, /*kIsDefault=*/false>(); + EXPECT_EQ( + fn, (PolicyCustomHashFn::get_hash_slot_fn())); + EXPECT_EQ(fn(&hasher, &value, 100), + (HashElement(hasher, 100)(value))); EXPECT_EQ(apply_called_count, 0); } diff --git a/absl/container/internal/hashtable_control_bytes.h b/absl/container/internal/hashtable_control_bytes.h index abaadc3bae2..0501e48d32a 100644 --- a/absl/container/internal/hashtable_control_bytes.h +++ b/absl/container/internal/hashtable_control_bytes.h @@ -91,11 +91,6 @@ class NonIterableBitMask { return container_internal::TrailingZeros(mask_) >> Shift; } - // Returns the index of the highest *abstract* bit set in `self`. - uint32_t HighestBitSet() const { - return static_cast((bit_width(mask_) - 1) >> Shift); - } - // Returns the number of trailing zero *abstract* bits. 
uint32_t TrailingZeros() const { return container_internal::TrailingZeros(mask_) >> Shift; @@ -324,11 +319,13 @@ struct GroupSse2Impl { _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } - // Returns the number of trailing empty or deleted elements in the group. - uint32_t CountLeadingEmptyOrDeleted() const { - auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); - return TrailingZeros(static_cast( - _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); + // Returns a bitmask representing the positions of full or sentinel slots. + // Note: for `is_small()` tables group may contain the "same" slot twice: + // original and mirrored. + NonIterableBitMaskType MaskFullOrSentinel() const { + auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel) - 1); + return NonIterableBitMaskType(static_cast( + _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(ctrl, special)))); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { @@ -406,17 +403,13 @@ struct GroupAArch64Impl { return NonIterableBitMaskType(mask); } - uint32_t CountLeadingEmptyOrDeleted() const { - uint64_t mask = - vget_lane_u64(vreinterpret_u64_u8(vcle_s8( - vdup_n_s8(static_cast(ctrl_t::kSentinel)), - vreinterpret_s8_u8(ctrl))), - 0); - // Similar to MaskEmptyorDeleted() but we invert the logic to invert the - // produced bitfield. We then count number of trailing zeros. - // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, - // so we should be fine. - return static_cast(countr_zero(mask)) >> 3; + NonIterableBitMaskType MaskFullOrSentinel() const { + uint64_t mask = vget_lane_u64( + vreinterpret_u64_u8( + vcgt_s8(vreinterpret_s8_u8(ctrl), + vdup_n_s8(static_cast(ctrl_t::kSentinel) - 1))), + 0); + return NonIterableBitMaskType(mask); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { @@ -481,12 +474,8 @@ struct GroupPortableImpl { return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes); } - uint32_t CountLeadingEmptyOrDeleted() const { - // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and - // kDeleted. We lower all other bits and count number of trailing zeros. - constexpr uint64_t bits = 0x0101010101010101ULL; - return static_cast(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> - 3); + auto MaskFullOrSentinel() const { + return NonIterableBitMaskType((~ctrl | (ctrl << 7)) & kMsbs8Bytes); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { diff --git a/absl/container/internal/hashtable_control_bytes_test.cc b/absl/container/internal/hashtable_control_bytes_test.cc new file mode 100644 index 00000000000..a4aa3a9669a --- /dev/null +++ b/absl/container/internal/hashtable_control_bytes_test.cc @@ -0,0 +1,259 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
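// Worked example (explanatory note, not part of the original file) for the
// portable MaskFullOrSentinel expression `(~ctrl | (ctrl << 7)) & kMsbs8Bytes`
// exercised below, evaluated per byte:
//   full (0b0xxxxxxx):  ~ctrl sets the high bit, so the mask bit is 1.
//   kSentinel (0xff):   ~ctrl clears the high bit, but bit 0 shifted left by
//                       7 lands on it, so the mask bit is still 1.
//   kEmpty (0x80) and kDeleted (0xfe): the high bit is 1 and bit 0 is 0, so
//                       neither term contributes and the mask bit is 0.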
+ +#include "absl/container/internal/hashtable_control_bytes.h" + +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace container_internal { +namespace { + +using ::testing::ElementsAre; +using ::testing::ElementsAreArray; + +// Convenience function to static cast to ctrl_t. +ctrl_t CtrlT(int i) { return static_cast(i); } + +TEST(BitMask, Smoke) { + EXPECT_FALSE((BitMask(0))); + EXPECT_TRUE((BitMask(5))); + + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); + EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); + EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); + EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); + EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); + EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); + EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); +} + +TEST(BitMask, WithShift_MatchPortable) { + // See the non-SSE version of Group for details on what this math is for. + uint64_t ctrl = 0x1716151413121110; + uint64_t hash = 0x12; + constexpr uint64_t lsbs = 0x0101010101010101ULL; + auto x = ctrl ^ (lsbs * hash); + uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes; + EXPECT_EQ(0x0000000080800000, mask); + + BitMask b(mask); + EXPECT_EQ(*b, 2); +} + +constexpr uint64_t kSome8BytesMask = /* */ 0x8000808080008000ULL; +constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL; +constexpr auto kSome8BytesMaskBits = std::array{1, 3, 4, 5, 7}; + +TEST(BitMask, WithShift_FullMask) { + EXPECT_THAT((BitMask(kMsbs8Bytes)), + ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); + EXPECT_THAT( + (BitMask(kMsbs8Bytes)), + ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); + EXPECT_THAT( + (BitMask(~uint64_t{0})), + ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); +} + +TEST(BitMask, WithShift_EmptyMask) { + EXPECT_THAT((BitMask(0)), ElementsAre()); + EXPECT_THAT((BitMask(0)), + ElementsAre()); +} + +TEST(BitMask, WithShift_SomeMask) { + EXPECT_THAT((BitMask(kSome8BytesMask)), + ElementsAreArray(kSome8BytesMaskBits)); + EXPECT_THAT((BitMask( + kSome8BytesMask)), + ElementsAreArray(kSome8BytesMaskBits)); + EXPECT_THAT((BitMask( + kSome8BytesMaskAllOnes)), + ElementsAreArray(kSome8BytesMaskBits)); +} + +TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) { + // Verify that adding extra bits into non zero bytes is fine. + uint64_t extra_bits = 77; + for (int i = 0; i < 100; ++i) { + // Add extra bits, but keep zero bytes untouched. 
+ uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes; + EXPECT_THAT((BitMask( + kSome8BytesMask | extra_mask)), + ElementsAreArray(kSome8BytesMaskBits)) + << i << " " << extra_mask; + extra_bits = (extra_bits + 1) * 3; + } +} + +TEST(BitMask, LeadingTrailing) { + EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); + + EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); + EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); + + EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); + EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); + + EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); + EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); + + EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); + EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); +} + +template +class GroupTest : public testing::Test {}; +using GroupTypes = + ::testing::Types; +TYPED_TEST_SUITE(GroupTest, GroupTypes); + +TYPED_TEST(GroupTest, Match) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(GroupType{group}.Match(0), ElementsAre()); + EXPECT_THAT(GroupType{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); + EXPECT_THAT(GroupType{group}.Match(3), ElementsAre(3, 10)); + EXPECT_THAT(GroupType{group}.Match(5), ElementsAre(5, 9)); + EXPECT_THAT(GroupType{group}.Match(7), ElementsAre(7, 8)); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.Match(0), ElementsAre()); + EXPECT_THAT(GroupType{group}.Match(1), ElementsAre(1, 5, 7)); + EXPECT_THAT(GroupType{group}.Match(2), ElementsAre(2, 4)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +TYPED_TEST(GroupTest, MaskEmpty) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskEmpty().LowestBitSet(), 0); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskEmpty().LowestBitSet(), 0); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +TYPED_TEST(GroupTest, MaskFull) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = { + ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1), + CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskFull(), + ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15)); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, + ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, + ctrl_t::kSentinel, CtrlT(1)}; + 
EXPECT_THAT(GroupType{group}.MaskFull(), ElementsAre(1, 4, 7)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +TYPED_TEST(GroupTest, MaskNonFull) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = { + ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), + ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1), + CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskNonFull(), + ElementsAre(0, 2, 4, 6, 10, 13, 14)); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, + ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6)); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +TYPED_TEST(GroupTest, MaskEmptyOrDeleted) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), + ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), + CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), + CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), CtrlT(1), + ctrl_t::kSentinel, CtrlT(1)}; + EXPECT_THAT(GroupType{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +TYPED_TEST(GroupTest, MaskFullOrSentinel) { + using GroupType = TypeParam; + if (GroupType::kWidth == 16) { + ctrl_t group[] = { + ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kEmpty, CtrlT(3), + ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, ctrl_t::kEmpty, + ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kDeleted, ctrl_t::kDeleted, + ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kDeleted, ctrl_t::kDeleted, + }; + EXPECT_THAT(GroupType{group}.MaskFullOrSentinel().LowestBitSet(), 3); + } else if (GroupType::kWidth == 8) { + ctrl_t group[] = {ctrl_t::kEmpty, ctrl_t::kDeleted, CtrlT(2), + ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, + ctrl_t::kDeleted, ctrl_t::kEmpty}; + EXPECT_THAT(GroupType{group}.MaskFullOrSentinel().LowestBitSet(), 2); + } else { + FAIL() << "No test coverage for Group::kWidth==" << GroupType::kWidth; + } +} + +} // namespace +} // namespace container_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc index c0fce8794d4..965476a09ad 100644 --- a/absl/container/internal/hashtablez_sampler.cc +++ b/absl/container/internal/hashtablez_sampler.cc @@ -94,8 +94,9 @@ void HashtablezInfo::PrepareForSampling(int64_t stride, // The inliner makes hardcoded skip_count difficult (especially when combined // with LTO). We use the ability to exclude stacks by regex when encoding // instead. 
- depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, - /* skip_count= */ 0); + depth = static_cast( + absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, + /* skip_count= */ 0)); inline_element_size = inline_element_size_value; key_size = key_size_value; value_size = value_size_value; diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h index 305dc855b82..55ce7ed86b5 100644 --- a/absl/container/internal/hashtablez_sampler.h +++ b/absl/container/internal/hashtablez_sampler.h @@ -97,7 +97,7 @@ struct HashtablezInfo : public profiling_internal::Sample { // the lock. static constexpr int kMaxStackDepth = 64; absl::Time create_time; - int32_t depth; + uint32_t depth; // The SOO capacity for this table in elements (not bytes). Note that sampled // tables are never SOO because we need to store the infoz handle on the heap. // Tables that would be SOO if not sampled should have: soo_capacity > 0 && diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc index 0de1e29d328..ef80cb01a88 100644 --- a/absl/container/internal/hashtablez_sampler_test.cc +++ b/absl/container/internal/hashtablez_sampler_test.cc @@ -90,7 +90,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) { const size_t test_value_size = 13; HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); info.PrepareForSampling(test_stride, test_element_size, /*key_size=*/test_key_size, /*value_size=*/test_value_size, @@ -148,7 +148,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) { TEST(HashtablezInfoTest, RecordStorageChanged) { HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); const int64_t test_stride = 21; const size_t test_element_size = 19; const size_t test_key_size = 17; @@ -168,7 +168,7 @@ TEST(HashtablezInfoTest, RecordStorageChanged) { TEST(HashtablezInfoTest, RecordInsert) { HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); const int64_t test_stride = 25; const size_t test_element_size = 23; const size_t test_key_size = 21; @@ -203,7 +203,7 @@ TEST(HashtablezInfoTest, RecordErase) { const size_t test_value_size = 25; HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); info.PrepareForSampling(test_stride, test_element_size, /*key_size=*/test_key_size, /*value_size=*/test_value_size, @@ -227,7 +227,7 @@ TEST(HashtablezInfoTest, RecordRehash) { const size_t test_key_size = 29; const size_t test_value_size = 27; HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); info.PrepareForSampling(test_stride, test_element_size, /*key_size=*/test_key_size, /*value_size=*/test_value_size, @@ -259,7 +259,7 @@ TEST(HashtablezInfoTest, RecordRehash) { TEST(HashtablezInfoTest, RecordReservation) { HashtablezInfo info; - absl::MutexLock l(&info.init_mu); + absl::MutexLock l(info.init_mu); const int64_t test_stride = 35; const size_t test_element_size = 33; const size_t test_key_size = 31; diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc index f19e87b3fa5..875a36cf721 100644 --- a/absl/container/internal/raw_hash_set.cc +++ b/absl/container/internal/raw_hash_set.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include "absl/base/attributes.h" #include "absl/base/config.h" @@ -40,32 +41,13 @@ namespace container_internal { // Represents a control byte corresponding to a 
full slot with arbitrary hash. constexpr ctrl_t ZeroCtrlT() { return static_cast(0); } -// We have space for `growth_info` before a single block of control bytes. A -// single block of empty control bytes for tables without any slots allocated. -// This enables removing a branch in the hot path of find(). In order to ensure -// that the control bytes are aligned to 16, we have 16 bytes before the control -// bytes even though growth_info only needs 8. -alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = { - ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), - ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), - ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), - ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), - ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; - -// We need one full byte followed by a sentinel byte for iterator::operator++ to -// work. We have a full group after kSentinel to be safe (in case operator++ is -// changed to read a full group). -ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = { - ZeroCtrlT(), ctrl_t::kSentinel, ZeroCtrlT(), ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, - ctrl_t::kEmpty}; -static_assert(NumControlBytes(SooCapacity()) <= 17, - "kSooControl capacity too small"); +// A single control byte for default-constructed iterators. We leave it +// uninitialized because reading this memory is a bug. +ABSL_DLL ctrl_t kDefaultIterControl; + +// We need one full byte followed by a sentinel byte for iterator::operator++. +ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[2] = {ZeroCtrlT(), + ctrl_t::kSentinel}; namespace { @@ -100,13 +82,13 @@ inline size_t RandomSeed() { return value ^ static_cast(reinterpret_cast(&counter)); } -bool ShouldRehashForBugDetection(PerTableSeed seed, size_t capacity) { +bool ShouldRehashForBugDetection(size_t capacity) { // Note: we can't use the abseil-random library because abseil-random // depends on swisstable. We want to return true with probability // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this, // we probe based on a random hash and see if the offset is less than // RehashProbabilityConstant(). - return probe(seed, capacity, absl::HashOf(RandomSeed())).offset() < + return probe(capacity, absl::HashOf(RandomSeed())).offset() < RehashProbabilityConstant(); } @@ -118,6 +100,21 @@ size_t SingleGroupTableH1(size_t hash, PerTableSeed seed) { return hash ^ seed.seed(); } +// Returns the offset of the new element after resize from capacity 1 to 3. +size_t Resize1To3NewOffset(size_t hash, PerTableSeed seed) { + // After resize from capacity 1 to 3, we always have exactly the slot with + // index 1 occupied, so we need to insert either at index 0 or index 2. + static_assert(SooSlotIndex() == 1); + return SingleGroupTableH1(hash, seed) & 2; +} + +// Returns the address of the ith slot in slots where each slot occupies +// slot_size. +inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { + return static_cast(static_cast(slot_array) + + (slot * slot_size)); +} + // Returns the address of the slot `i` iterations after `slot` assuming each // slot has the specified size. 
inline void* NextSlot(void* slot, size_t slot_size, size_t i = 1) { @@ -144,23 +141,22 @@ GenerationType* EmptyGeneration() { } bool CommonFieldsGenerationInfoEnabled:: - should_rehash_for_bug_detection_on_insert(PerTableSeed seed, - size_t capacity) const { + should_rehash_for_bug_detection_on_insert(size_t capacity) const { if (reserved_growth_ == kReservedGrowthJustRanOut) return true; if (reserved_growth_ > 0) return false; - return ShouldRehashForBugDetection(seed, capacity); + return ShouldRehashForBugDetection(capacity); } bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move( - PerTableSeed seed, size_t capacity) const { - return ShouldRehashForBugDetection(seed, capacity); + size_t capacity) const { + return ShouldRehashForBugDetection(capacity); } namespace { FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1, size_t capacity) { - auto seq = probe(h1, capacity); + auto seq = probe_h1(capacity, h1); if (IsEmptyOrDeleted(ctrl[seq.offset()])) { return {seq.offset(), /*probe_length=*/0}; } @@ -175,35 +171,48 @@ FindInfo find_first_non_full_from_h1(const ctrl_t* ctrl, size_t h1, } } -// Whether a table is "small". A small table fits entirely into a probing -// group, i.e., has a capacity < `Group::kWidth`. +// Probes an array of control bits using a probe sequence derived from `hash`, +// and returns the offset corresponding to the first deleted or empty slot. // -// In small mode we are able to use the whole capacity. The extra control +// Behavior when the entire table is full is undefined. +// +// NOTE: this function must work with tables having both empty and deleted +// slots in the same group. Such tables appear during `erase()`. +FindInfo find_first_non_full(const CommonFields& common, size_t hash) { + return find_first_non_full_from_h1(common.control(), H1(hash), + common.capacity()); +} + +// Whether a table fits in half a group. A half-group table fits entirely into a +// probing group, i.e., has a capacity < `Group::kWidth`. +// +// In half-group mode we are able to use the whole capacity. The extra control // bytes give us at least one "empty" control byte to stop the iteration. // This is important to make 1 a valid capacity. // -// In small mode only the first `capacity` control bytes after the sentinel +// In half-group mode only the first `capacity` control bytes after the sentinel // are valid. The rest contain dummy ctrl_t::kEmpty values that do not // represent a real slot. -constexpr bool is_small(size_t capacity) { +constexpr bool is_half_group(size_t capacity) { return capacity < Group::kWidth - 1; } template void IterateOverFullSlotsImpl(const CommonFields& c, size_t slot_size, Fn cb) { const size_t cap = c.capacity(); + ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(cap)); const ctrl_t* ctrl = c.control(); void* slot = c.slot_array(); - if (is_small(cap)) { - // Mirrored/cloned control bytes in small table are also located in the + if (is_half_group(cap)) { + // Mirrored/cloned control bytes in half-group table are also located in the // first group (starting from position 0). We are taking group from position // `capacity` in order to avoid duplicates. - // Small tables capacity fits into portable group, where + // Half-group tables capacity fits into portable group, where // GroupPortableImpl::MaskFull is more efficient for the // capacity <= GroupPortableImpl::kWidth. 
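A quick worked check of the renamed predicate above, assuming the 16-byte SSE2 group width: valid capacities have the form 2^k - 1, so 1, 3, and 7 are half-group tables while 15 and above are not.

#include <cstddef>

constexpr size_t kGroupWidthSketch = 16;  // assumed Group::kWidth
constexpr bool IsHalfGroupSketch(size_t capacity) {
  return capacity < kGroupWidthSketch - 1;
}
static_assert(IsHalfGroupSketch(1) && IsHalfGroupSketch(7) &&
              !IsHalfGroupSketch(15));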
ABSL_SWISSTABLE_ASSERT(cap <= GroupPortableImpl::kWidth && - "unexpectedly large small capacity"); + "unexpectedly large half-group capacity"); static_assert(Group::kWidth >= GroupPortableImpl::kWidth, "unexpected group width"); // Group starts from kSentinel slot, so indices in the mask will @@ -250,11 +259,6 @@ void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { ctrl[capacity] = ctrl_t::kSentinel; } -FindInfo find_first_non_full(const CommonFields& common, size_t hash) { - return find_first_non_full_from_h1(common.control(), H1(hash, common.seed()), - common.capacity()); - } - void IterateOverFullSlots(const CommonFields& c, size_t slot_size, absl::FunctionRef<void(const ctrl_t*, void*)> cb) { IterateOverFullSlotsImpl(c, slot_size, cb); @@ -295,9 +299,77 @@ size_t FindFirstFullSlot(size_t start, size_t end, const ctrl_t* ctrl) { ABSL_UNREACHABLE(); } -size_t DropDeletesWithoutResizeAndPrepareInsert(CommonFields& common, - const PolicyFunctions& policy, - size_t new_hash) { +void PrepareInsertCommon(CommonFields& common) { + common.increment_size(); + common.maybe_increment_generation_on_insert(); +} + +// Sets sanitizer poisoning for slot corresponding to control byte being set. +inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(i < c.capacity()); + auto* slot_i = static_cast<char*>(c.slot_array()) + i * slot_size; + if (IsFull(h)) { + SanitizerUnpoisonMemoryRegion(slot_i, slot_size); + } else { + SanitizerPoisonMemoryRegion(slot_i, slot_size); + } +} + +// Sets `ctrl[i]` to `h`. +// +// Unlike setting it directly, this function will perform bounds checks and +// mirror the value to the cloned tail if necessary. +inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(!c.is_small()); + DoSanitizeOnSetCtrl(c, i, h, slot_size); + ctrl_t* ctrl = c.control(); + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & c.capacity()) + + (NumClonedBytes() & c.capacity())] = h; +} +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. +inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) { + SetCtrl(c, i, static_cast<ctrl_t>(h), slot_size); +} + +// Like SetCtrl, but in a single group table, we can save some operations when +// setting the cloned control byte. +inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(!c.is_small()); + ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity())); + DoSanitizeOnSetCtrl(c, i, h, slot_size); + ctrl_t* ctrl = c.control(); + ctrl[i] = h; + ctrl[i + c.capacity() + 1] = h; +} +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. +inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h, + size_t slot_size) { + SetCtrlInSingleGroupTable(c, i, static_cast<ctrl_t>(h), slot_size); +} + +// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1, +// we can save some operations when setting the cloned control byte. +inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h, + size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1); + DoSanitizeOnSetCtrl(c, i, h, slot_size); + ctrl_t* ctrl = c.control(); + ctrl[i] = h; + ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h; +} +// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
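The cloned-byte index computed by SetCtrl above can be exercised in isolation. A sketch assuming NumClonedBytes() == Group::kWidth - 1 == 15 and a 2^k - 1 capacity: byte 0 mirrors to the byte just past the sentinel, while indices beyond the cloned prefix map back onto themselves.

#include <cstddef>

constexpr size_t kClonedSketch = 15;  // assumed NumClonedBytes()
constexpr size_t MirrorIndexSketch(size_t i, size_t capacity) {
  return ((i - kClonedSketch) & capacity) + (kClonedSketch & capacity);
}
// capacity 15: index 0 mirrors to 16, right after the sentinel at index 15;
// capacity 31: index 20 maps onto itself, so the second store is benign.
static_assert(MirrorIndexSketch(0, 15) == 16 && MirrorIndexSketch(20, 31) == 20);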
+inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h, + size_t slot_size) { + SetCtrlInLargeTable(c, i, static_cast<ctrl_t>(h), slot_size); +} + +size_t DropDeletesWithoutResizeAndPrepareInsert( + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_hash) { void* set = &common; void* slot_array = common.slot_array(); const size_t capacity = common.capacity(); @@ -342,7 +414,7 @@ size_t DropDeletesWithoutResizeAndPrepareInsert(CommonFields& common, continue; } if (!IsDeleted(ctrl[i])) continue; - const size_t hash = (*hasher)(hash_fn, slot_ptr); + const size_t hash = (*hasher)(hash_fn, slot_ptr, common.seed().seed()); const FindInfo target = find_first_non_full(common, hash); const size_t new_i = target.offset; total_probe_length += target.probe_length; @@ -399,13 +471,13 @@ size_t DropDeletesWithoutResizeAndPrepareInsert(CommonFields& common, PrepareInsertCommon(common); ResetGrowthLeft(common); FindInfo find_info = find_first_non_full(common, new_hash); - SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), policy.slot_size); + SetCtrlInLargeTable(common, find_info.offset, H2(new_hash), slot_size); common.infoz().RecordInsert(new_hash, find_info.probe_length); common.infoz().RecordRehash(total_probe_length); return find_info.offset; } -static bool WasNeverFull(CommonFields& c, size_t index) { +bool WasNeverFull(CommonFields& c, size_t index) { if (is_single_group(c.capacity())) { return true; } @@ -429,6 +501,7 @@ void ResetCtrl(CommonFields& common, size_t slot_size) { ctrl_t* ctrl = common.control(); static constexpr size_t kTwoGroupCapacity = 2 * Group::kWidth - 1; if (ABSL_PREDICT_TRUE(capacity <= kTwoGroupCapacity)) { + if (IsSmallCapacity(capacity)) return; std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth); std::memset(ctrl + capacity, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth); @@ -444,38 +517,11 @@ void ResetCtrl(CommonFields& common, size_t slot_size) { SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity); } -// Initializes control bytes for single element table. -// Capacity of the table must be 1. -ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeSingleElementControlBytes( - uint64_t h2, ctrl_t* new_ctrl) { - static constexpr uint64_t kEmptyXorSentinel = - static_cast<uint64_t>(ctrl_t::kEmpty) ^ - static_cast<uint64_t>(ctrl_t::kSentinel); - static constexpr uint64_t kEmpty64 = static_cast<uint64_t>(ctrl_t::kEmpty); - // The first 8 bytes, where present slot positions are replaced with 0. - static constexpr uint64_t kFirstCtrlBytesWithZeroes = - k8EmptyBytes ^ kEmpty64 ^ (kEmptyXorSentinel << 8) ^ (kEmpty64 << 16); - - // Fill the original 0th and mirrored 2nd bytes with the hash. - // Result will look like: - // HSHEEEEE - // Where H = h2, E = kEmpty, S = kSentinel. - const uint64_t first_ctrl_bytes = - (h2 | kFirstCtrlBytesWithZeroes) | (h2 << 16); - // Fill last bytes with kEmpty. - std::memset(new_ctrl + 1, static_cast<int8_t>(ctrl_t::kEmpty), Group::kWidth); - // Overwrite the first 3 bytes with HSH. Other bytes will not be changed. - absl::little_endian::Store64(new_ctrl, first_ctrl_bytes); -} - -// Initializes control bytes for growing after SOO to the next capacity. -// `soo_ctrl` is placed in the position `SooSlotIndex()`. -// `new_hash` is placed in the position `new_offset`. -// The table must be non-empty SOO.
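The InitializeSingleElementControlBytes helper deleted above packed a capacity-1 control array into a single 64-bit store. A runnable sketch of the byte layout it produced, assuming the raw byte values kEmpty == 0x80 and kSentinel == 0xFF and a little-endian target (the original used absl::little_endian::Store64):

#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kEmptyByte = 0x80, kSentinelByte = 0xFF, h2 = 0x42;
  uint64_t word = 0;
  for (int i = 0; i < 8; ++i) word |= kEmptyByte << (8 * i);  // EEEEEEEE
  // Patch bytes 0..2 to H, S, H, producing the "HSHEEEEE" layout.
  word = (word & ~uint64_t{0xFFFFFF}) | h2 | (kSentinelByte << 8) | (h2 << 16);
  unsigned char ctrl[8];
  std::memcpy(ctrl, &word, sizeof(word));  // little-endian store assumed
  return ctrl[0] == 0x42 && ctrl[1] == 0xFF && ctrl[2] == 0x42 &&
                 ctrl[3] == 0x80
             ? 0
             : 1;
}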
-ABSL_ATTRIBUTE_ALWAYS_INLINE inline void -InitializeThreeElementsControlBytesAfterSoo(ctrl_t soo_ctrl, size_t new_hash, - size_t new_offset, - ctrl_t* new_ctrl) { +// Initializes control bytes for growing from capacity 1 to 3. +// `orig_h2` is placed in the position `SooSlotIndex()`. +// `new_h2` is placed in the position `new_offset`. +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void InitializeThreeElementsControlBytes( + h2_t orig_h2, h2_t new_h2, size_t new_offset, ctrl_t* new_ctrl) { static constexpr size_t kNewCapacity = NextCapacity(SooCapacity()); static_assert(kNewCapacity == 3); static_assert(is_single_group(kNewCapacity)); @@ -496,9 +542,9 @@ InitializeThreeElementsControlBytesAfterSoo(ctrl_t soo_ctrl, size_t new_hash, (kEmptyXorSentinel << (8 * kNewCapacity)) ^ (kEmpty64 << (8 * kMirroredSooSlotIndex)); - const uint64_t soo_h2 = static_cast(soo_ctrl); - const uint64_t new_h2_xor_empty = static_cast( - H2(new_hash) ^ static_cast(ctrl_t::kEmpty)); + const uint64_t soo_h2 = static_cast(orig_h2); + const uint64_t new_h2_xor_empty = + static_cast(new_h2 ^ static_cast(ctrl_t::kEmpty)); // Fill the original and mirrored bytes for SOO slot. // Result will look like: // EHESEHEE @@ -539,12 +585,25 @@ InitializeThreeElementsControlBytesAfterSoo(ctrl_t soo_ctrl, size_t new_hash, } // namespace -void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) { - ABSL_SWISSTABLE_ASSERT(IsFull(c.control()[index]) && - "erasing a dangling iterator"); +void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(c.is_small()); + if (soo_enabled) { + c.set_empty_soo(); + return; + } + c.decrement_size(); + c.infoz().RecordErase(); + SanitizerPoisonMemoryRegion(c.slot_array(), slot_size); +} + +void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size) { + ABSL_SWISSTABLE_ASSERT(!c.is_small()); + ABSL_SWISSTABLE_ASSERT(IsFull(*ctrl) && "erasing a dangling iterator"); c.decrement_size(); c.infoz().RecordErase(); + size_t index = static_cast(ctrl - c.control()); + if (WasNeverFull(c, index)) { SetCtrl(c, index, ctrl_t::kEmpty, slot_size); c.growth_info().OverwriteFullAsEmpty(); @@ -555,8 +614,9 @@ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) { SetCtrlInLargeTable(c, index, ctrl_t::kDeleted, slot_size); } -void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, - void* alloc, bool reuse, bool soo_enabled) { +void ClearBackingArray(CommonFields& c, + const PolicyFunctions& __restrict policy, void* alloc, + bool reuse, bool soo_enabled) { if (reuse) { c.set_size_to_zero(); ABSL_SWISSTABLE_ASSERT(!soo_enabled || c.capacity() > SooCapacity()); @@ -587,22 +647,31 @@ enum class ResizeNonSooMode { // This function is used for reserving or rehashing non-empty tables. // This use case is rare so the function is type erased. // Returns the total probe length. 
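The new_h2_xor_empty value above works because XOR patching flips a byte known to hold kEmpty into h2: XOR-ing with h2 ^ kEmpty cancels the kEmpty bits, which is what lets several control bytes be rewritten with one 64-bit XOR. A one-line check with assumed byte values:

#include <cstdint>

static_assert((uint8_t{0x80} ^ (uint8_t{0x42} ^ uint8_t{0x80})) == 0x42);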
-size_t FindNewPositionsAndTransferSlots(CommonFields& common, - const PolicyFunctions& policy, - ctrl_t* old_ctrl, void* old_slots, - size_t old_capacity) { +size_t FindNewPositionsAndTransferSlots( + CommonFields& common, const PolicyFunctions& __restrict policy, + ctrl_t* old_ctrl, void* old_slots, size_t old_capacity) { void* new_slots = common.slot_array(); const void* hash_fn = policy.hash_fn(common); const size_t slot_size = policy.slot_size; + const size_t seed = common.seed().seed(); const auto insert_slot = [&](void* slot) { - size_t hash = policy.hash_slot(hash_fn, slot); - auto target = find_first_non_full(common, hash); - SetCtrl(common, target.offset, H2(hash), slot_size); + size_t hash = policy.hash_slot(hash_fn, slot, seed); + FindInfo target; + if (common.is_small()) { + target = FindInfo{0, 0}; + } else { + target = find_first_non_full(common, hash); + SetCtrl(common, target.offset, H2(hash), slot_size); + } policy.transfer_n(&common, SlotAddress(new_slots, target.offset, slot_size), slot, 1); return target.probe_length; }; + if (IsSmallCapacity(old_capacity)) { + if (common.size() == 1) insert_slot(old_slots); + return 0; + } size_t total_probe_length = 0; for (size_t i = 0; i < old_capacity; ++i) { if (IsFull(old_ctrl[i])) { @@ -613,32 +682,92 @@ size_t FindNewPositionsAndTransferSlots(CommonFields& common, return total_probe_length; } +void ReportGrowthToInfozImpl(CommonFields& common, HashtablezInfoHandle infoz, + size_t hash, size_t total_probe_length, + size_t distance_from_desired) { + ABSL_SWISSTABLE_ASSERT(infoz.IsSampled()); + infoz.RecordStorageChanged(common.size() - 1, common.capacity()); + infoz.RecordRehash(total_probe_length); + infoz.RecordInsert(hash, distance_from_desired); + common.set_has_infoz(); + // TODO(b/413062340): we could potentially store infoz in place of the + // control pointer for the capacity 1 case. + common.set_infoz(infoz); +} + +// Specialization to avoid passing two 0s from hot function. 
+ABSL_ATTRIBUTE_NOINLINE void ReportSingleGroupTableGrowthToInfoz( + CommonFields& common, HashtablezInfoHandle infoz, size_t hash) { + ReportGrowthToInfozImpl(common, infoz, hash, /*total_probe_length=*/0, + /*distance_from_desired=*/0); +} + +ABSL_ATTRIBUTE_NOINLINE void ReportGrowthToInfoz(CommonFields& common, + HashtablezInfoHandle infoz, + size_t hash, + size_t total_probe_length, + size_t distance_from_desired) { + ReportGrowthToInfozImpl(common, infoz, hash, total_probe_length, + distance_from_desired); +} + +ABSL_ATTRIBUTE_NOINLINE void ReportResizeToInfoz(CommonFields& common, + HashtablezInfoHandle infoz, + size_t total_probe_length) { + ABSL_SWISSTABLE_ASSERT(infoz.IsSampled()); + infoz.RecordStorageChanged(common.size(), common.capacity()); + infoz.RecordRehash(total_probe_length); + common.set_has_infoz(); + common.set_infoz(infoz); +} + +struct BackingArrayPtrs { + ctrl_t* ctrl; + void* slots; +}; + +BackingArrayPtrs AllocBackingArray(CommonFields& common, + const PolicyFunctions& __restrict policy, + size_t new_capacity, bool has_infoz, + void* alloc) { + RawHashSetLayout layout(new_capacity, policy.slot_size, policy.slot_align, + has_infoz); + char* mem = static_cast(policy.alloc(alloc, layout.alloc_size())); + const GenerationType old_generation = common.generation(); + common.set_generation_ptr( + reinterpret_cast(mem + layout.generation_offset())); + common.set_generation(NextGeneration(old_generation)); + + return {reinterpret_cast(mem + layout.control_offset()), + mem + layout.slot_offset()}; +} + template -void ResizeNonSooImpl(CommonFields& common, const PolicyFunctions& policy, +void ResizeNonSooImpl(CommonFields& common, + const PolicyFunctions& __restrict policy, size_t new_capacity, HashtablezInfoHandle infoz) { ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity)); ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity()); const size_t old_capacity = common.capacity(); - [[maybe_unused]] ctrl_t* old_ctrl = common.control(); - [[maybe_unused]] void* old_slots = common.slot_array(); + [[maybe_unused]] ctrl_t* old_ctrl; + [[maybe_unused]] void* old_slots; + if constexpr (kMode == ResizeNonSooMode::kGuaranteedAllocated) { + old_ctrl = common.control(); + old_slots = common.slot_array(); + } const size_t slot_size = policy.slot_size; const size_t slot_align = policy.slot_align; const bool has_infoz = infoz.IsSampled(); - - common.set_capacity(new_capacity); - RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz); void* alloc = policy.get_char_alloc(common); - char* mem = static_cast(policy.alloc(alloc, layout.alloc_size())); - const GenerationType old_generation = common.generation(); - common.set_generation_ptr( - reinterpret_cast(mem + layout.generation_offset())); - common.set_generation(NextGeneration(old_generation)); - ctrl_t* new_ctrl = reinterpret_cast(mem + layout.control_offset()); - common.set_control(new_ctrl); - common.set_slots(mem + layout.slot_offset()); + common.set_capacity(new_capacity); + const auto [new_ctrl, new_slots] = + AllocBackingArray(common, policy, new_capacity, has_infoz, alloc); + common.set_control(new_ctrl); + common.set_slots(new_slots); + common.generate_new_seed(has_infoz); size_t total_probe_length = 0; ResetCtrl(common, slot_size); @@ -658,16 +787,13 @@ void ResizeNonSooImpl(CommonFields& common, const PolicyFunctions& policy, CapacityToGrowth(new_capacity)); } - if (has_infoz) { - common.set_has_infoz(); - infoz.RecordStorageChanged(common.size(), new_capacity); - infoz.RecordRehash(total_probe_length); - 
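AllocBackingArray above delegates the offset arithmetic to RawHashSetLayout. A rough, non-authoritative sketch of the kind of computation involved (the real layout also places the generation and optional infoz, and its padding rules differ):

#include <cstddef>

size_t AllocSizeSketch(size_t capacity, size_t slot_size, size_t slot_align) {
  // Control bytes: `capacity` real bytes + 1 sentinel + 15 cloned (assumed).
  const size_t ctrl_bytes = capacity + 1 + 15;
  size_t offset = 16 /* growth metadata block, assumed */ + ctrl_bytes;
  offset = (offset + slot_align - 1) & ~(slot_align - 1);  // align slot array
  return offset + capacity * slot_size;
}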
common.set_infoz(infoz); + if (ABSL_PREDICT_FALSE(has_infoz)) { + ReportResizeToInfoz(common, infoz, total_probe_length); } } void ResizeEmptyNonAllocatedTableImpl(CommonFields& common, - const PolicyFunctions& policy, + const PolicyFunctions& __restrict policy, size_t new_capacity, bool force_infoz) { ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity)); ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity()); @@ -690,25 +816,27 @@ void ResizeEmptyNonAllocatedTableImpl(CommonFields& common, // After transferring the slot, sets control and slots in CommonFields. // It is rare to resize an SOO table with one element to a large size. // Requires: `c` contains SOO data. -void InsertOldSooSlotAndInitializeControlBytes(CommonFields& c, - const PolicyFunctions& policy, - size_t hash, ctrl_t* new_ctrl, - void* new_slots) { +void InsertOldSooSlotAndInitializeControlBytes( + CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* new_ctrl, + void* new_slots, bool has_infoz) { ABSL_SWISSTABLE_ASSERT(c.size() == policy.soo_capacity()); ABSL_SWISSTABLE_ASSERT(policy.soo_enabled); size_t new_capacity = c.capacity(); - c.generate_new_seed(); - size_t offset = probe(c.seed(), new_capacity, hash).offset(); + c.generate_new_seed(has_infoz); + + const size_t soo_slot_hash = + policy.hash_slot(policy.hash_fn(c), c.soo_data(), c.seed().seed()); + size_t offset = probe(new_capacity, soo_slot_hash).offset(); offset = offset == new_capacity ? 0 : offset; SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity); void* target_slot = SlotAddress(new_slots, offset, policy.slot_size); SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size); policy.transfer_n(&c, target_slot, c.soo_data(), 1); - c.set_control(new_ctrl); + c.set_control(new_ctrl); c.set_slots(new_slots); ResetCtrl(c, policy.slot_size); - SetCtrl(c, offset, H2(hash), policy.slot_size); + SetCtrl(c, offset, H2(soo_slot_hash), policy.slot_size); } enum class ResizeFullSooTableSamplingMode { @@ -728,14 +856,16 @@ void AssertFullSoo([[maybe_unused]] CommonFields& common, ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity()); } -void ResizeFullSooTable(CommonFields& common, const PolicyFunctions& policy, +void ResizeFullSooTable(CommonFields& common, + const PolicyFunctions& __restrict policy, size_t new_capacity, ResizeFullSooTableSamplingMode sampling_mode) { AssertFullSoo(common, policy); const size_t slot_size = policy.slot_size; - const size_t slot_align = policy.slot_align; + void* alloc = policy.get_char_alloc(common); HashtablezInfoHandle infoz; + bool has_infoz = false; if (sampling_mode == ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled) { if (ABSL_PREDICT_FALSE(policy.is_hashtablez_eligible)) { @@ -743,33 +873,19 @@ void ResizeFullSooTable(CommonFields& common, const PolicyFunctions& policy, policy.soo_capacity()); } - if (!infoz.IsSampled()) { - return; - } + if (!infoz.IsSampled()) return; + has_infoz = true; } - const bool has_infoz = infoz.IsSampled(); - common.set_capacity(new_capacity); - RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz); - void* alloc = policy.get_char_alloc(common); - char* mem = static_cast(policy.alloc(alloc, layout.alloc_size())); - const GenerationType old_generation = common.generation(); - common.set_generation_ptr( - reinterpret_cast(mem + layout.generation_offset())); - common.set_generation(NextGeneration(old_generation)); - // We do not set control and slots in CommonFields yet to avoid overriding // SOO data. 
- ctrl_t* new_ctrl = reinterpret_cast(mem + layout.control_offset()); - void* new_slots = mem + layout.slot_offset(); + const auto [new_ctrl, new_slots] = + AllocBackingArray(common, policy, new_capacity, has_infoz, alloc); - const size_t soo_slot_hash = - policy.hash_slot(policy.hash_fn(common), common.soo_data()); - - InsertOldSooSlotAndInitializeControlBytes(common, policy, soo_slot_hash, - new_ctrl, new_slots); + InsertOldSooSlotAndInitializeControlBytes(common, policy, new_ctrl, new_slots, + has_infoz); ResetGrowthLeft(common); if (has_infoz) { common.set_has_infoz(); @@ -847,7 +963,7 @@ void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* __restrict old_ctrl, return; } - ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16); + ABSL_SWISSTABLE_ASSERT(Group::kWidth == 16); // NOLINT(misc-static-assert) // Fill the second half of the main control bytes with kEmpty. // For small capacity that may write into mirrored control bytes. @@ -912,8 +1028,8 @@ constexpr size_t kProbedElementsBufferSize = 512; // Returns the total probe length. template ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertImpl( - CommonFields& c, const PolicyFunctions& policy, const ProbedItem* start, - const ProbedItem* end, void* old_slots) { + CommonFields& c, const PolicyFunctions& __restrict policy, + const ProbedItem* start, const ProbedItem* end, void* old_slots) { const size_t new_capacity = c.capacity(); void* new_slots = c.slot_array(); @@ -949,9 +1065,9 @@ constexpr size_t kNoMarkedElementsSentinel = ~size_t{}; // We marked them in control bytes as kSentinel. // Hash recomputation and full probing is done here. // This use case should be extremely rare. -ABSL_ATTRIBUTE_NOINLINE size_t -ProcessProbedMarkedElements(CommonFields& c, const PolicyFunctions& policy, - ctrl_t* old_ctrl, void* old_slots, size_t start) { +ABSL_ATTRIBUTE_NOINLINE size_t ProcessProbedMarkedElements( + CommonFields& c, const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl, + void* old_slots, size_t start) { size_t old_capacity = PreviousCapacity(c.capacity()); const size_t slot_size = policy.slot_size; void* new_slots = c.slot_array(); @@ -959,12 +1075,13 @@ ProcessProbedMarkedElements(CommonFields& c, const PolicyFunctions& policy, const void* hash_fn = policy.hash_fn(c); auto hash_slot = policy.hash_slot; auto transfer_n = policy.transfer_n; + const size_t seed = c.seed().seed(); for (size_t old_index = start; old_index < old_capacity; ++old_index) { if (old_ctrl[old_index] != ctrl_t::kSentinel) { continue; } void* src_slot = SlotAddress(old_slots, old_index, slot_size); - const size_t hash = hash_slot(hash_fn, src_slot); + const size_t hash = hash_slot(hash_fn, src_slot, seed); const FindInfo target = find_first_non_full(c, hash); total_probe_length += target.probe_length; const size_t new_i = target.offset; @@ -1029,7 +1146,7 @@ class ProbedItemEncoder { // Finds new position for each element and transfers it to the new slots. // Returns the total probe length. size_t DecodeAndInsertToTable(CommonFields& common, - const PolicyFunctions& policy, + const PolicyFunctions& __restrict policy, void* old_slots) const { if (pos_ == buffer_) { return 0; @@ -1103,7 +1220,7 @@ class ProbedItemEncoder { // Finds new position for each element and transfers it to the new slots. // Returns the total probe length. 
ABSL_ATTRIBUTE_NOINLINE size_t DecodeAndInsertToTableOverflow( - CommonFields& common, const PolicyFunctions& policy, + CommonFields& common, const PolicyFunctions& __restrict policy, void* old_slots) const { ABSL_SWISSTABLE_ASSERT(local_buffer_full_ && "must not be called when local buffer is not full"); @@ -1136,7 +1253,8 @@ class ProbedItemEncoder { // Different encoder is used depending on the capacity of the table. // Returns total probe length. template -size_t GrowToNextCapacity(CommonFields& common, const PolicyFunctions& policy, +size_t GrowToNextCapacity(CommonFields& common, + const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl, void* old_slots) { using ProbedItem = typename Encoder::ProbedItem; ABSL_SWISSTABLE_ASSERT(common.capacity() <= ProbedItem::kMaxNewCapacity); @@ -1154,10 +1272,9 @@ size_t GrowToNextCapacity(CommonFields& common, const PolicyFunctions& policy, // Grows to next capacity for relatively small tables so that even if all // elements are probed, we don't need to overflow the local buffer. // Returns total probe length. -size_t GrowToNextCapacityThatFitsInLocalBuffer(CommonFields& common, - const PolicyFunctions& policy, - ctrl_t* old_ctrl, - void* old_slots) { +size_t GrowToNextCapacityThatFitsInLocalBuffer( + CommonFields& common, const PolicyFunctions& __restrict policy, + ctrl_t* old_ctrl, void* old_slots) { ABSL_SWISSTABLE_ASSERT(common.capacity() <= kMaxLocalBufferNewCapacity); return GrowToNextCapacity< ProbedItemEncoder>( @@ -1167,20 +1284,20 @@ size_t GrowToNextCapacityThatFitsInLocalBuffer(CommonFields& common, // Grows to next capacity with different encodings. Returns total probe length. // These functions are useful to simplify profile analysis. size_t GrowToNextCapacity4BytesEncoder(CommonFields& common, - const PolicyFunctions& policy, + const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl, void* old_slots) { return GrowToNextCapacity>( common, policy, old_ctrl, old_slots); } size_t GrowToNextCapacity8BytesEncoder(CommonFields& common, - const PolicyFunctions& policy, + const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl, void* old_slots) { return GrowToNextCapacity>( common, policy, old_ctrl, old_slots); } -size_t GrowToNextCapacity16BytesEncoder(CommonFields& common, - const PolicyFunctions& policy, - ctrl_t* old_ctrl, void* old_slots) { +size_t GrowToNextCapacity16BytesEncoder( + CommonFields& common, const PolicyFunctions& __restrict policy, + ctrl_t* old_ctrl, void* old_slots) { return GrowToNextCapacity>( common, policy, old_ctrl, old_slots); } @@ -1188,10 +1305,9 @@ size_t GrowToNextCapacity16BytesEncoder(CommonFields& common, // Grows to next capacity for tables with relatively large capacity so that we // can't guarantee that all probed elements fit in the local buffer. Returns // total probe length. -size_t GrowToNextCapacityOverflowLocalBuffer(CommonFields& common, - const PolicyFunctions& policy, - ctrl_t* old_ctrl, - void* old_slots) { +size_t GrowToNextCapacityOverflowLocalBuffer( + CommonFields& common, const PolicyFunctions& __restrict policy, + ctrl_t* old_ctrl, void* old_slots) { const size_t new_capacity = common.capacity(); if (ABSL_PREDICT_TRUE(new_capacity <= ProbedItem4Bytes::kMaxNewCapacity)) { return GrowToNextCapacity4BytesEncoder(common, policy, old_ctrl, old_slots); @@ -1207,7 +1323,7 @@ size_t GrowToNextCapacityOverflowLocalBuffer(CommonFields& common, // capacity of the table. Returns total probe length. 
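As a quick capacity check for the local buffer these encoders spill into: with the 512-byte kProbedElementsBufferSize above and an assumed 4 bytes per encoded item, 128 elements can probe before the noinline overflow path has to take over.

#include <cstddef>
#include <cstdint>

constexpr size_t kProbedBufferBytesSketch = 512;  // kProbedElementsBufferSize
static_assert(kProbedBufferBytesSketch / sizeof(uint32_t) == 128,
              "a 4-byte encoding fits 128 probed items in the local buffer");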
ABSL_ATTRIBUTE_NOINLINE size_t GrowToNextCapacityDispatch(CommonFields& common, - const PolicyFunctions& policy, + const PolicyFunctions& __restrict policy, ctrl_t* old_ctrl, void* old_slots) { const size_t new_capacity = common.capacity(); if (ABSL_PREDICT_TRUE(new_capacity <= kMaxLocalBufferNewCapacity)) { @@ -1219,111 +1335,202 @@ size_t GrowToNextCapacityDispatch(CommonFields& common, } } +void IncrementSmallSizeNonSoo(CommonFields& common, + const PolicyFunctions& __restrict policy) { + ABSL_SWISSTABLE_ASSERT(common.is_small()); + common.increment_size(); + SanitizerUnpoisonMemoryRegion(common.slot_array(), policy.slot_size); +} + +void IncrementSmallSize(CommonFields& common, + const PolicyFunctions& __restrict policy) { + ABSL_SWISSTABLE_ASSERT(common.is_small()); + if (policy.soo_enabled) { + common.set_full_soo(); + } else { + IncrementSmallSizeNonSoo(common, policy); + } +} + +std::pair Grow1To3AndPrepareInsert( + CommonFields& common, const PolicyFunctions& __restrict policy, + absl::FunctionRef get_hash) { + // TODO(b/413062340): Refactor to reuse more code with + // GrowSooTableToNextCapacityAndPrepareInsert. + ABSL_SWISSTABLE_ASSERT(common.capacity() == 1); + ABSL_SWISSTABLE_ASSERT(!common.empty()); + ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled); + constexpr size_t kOldCapacity = 1; + constexpr size_t kNewCapacity = NextCapacity(kOldCapacity); + ctrl_t* old_ctrl = common.control(); + void* old_slots = common.slot_array(); + + const size_t slot_size = policy.slot_size; + const size_t slot_align = policy.slot_align; + void* alloc = policy.get_char_alloc(common); + HashtablezInfoHandle infoz = common.infoz(); + const bool has_infoz = infoz.IsSampled(); + common.set_capacity(kNewCapacity); + + const auto [new_ctrl, new_slots] = + AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc); + common.set_control(new_ctrl); + common.set_slots(new_slots); + SanitizerPoisonMemoryRegion(new_slots, kNewCapacity * slot_size); + + if (ABSL_PREDICT_TRUE(!has_infoz)) { + // When we're sampled, we already have a seed. + common.generate_new_seed(/*has_infoz=*/false); + } + const size_t new_hash = get_hash(common.seed().seed()); + h2_t new_h2 = H2(new_hash); + size_t orig_hash = + policy.hash_slot(policy.hash_fn(common), old_slots, common.seed().seed()); + size_t offset = Resize1To3NewOffset(new_hash, common.seed()); + InitializeThreeElementsControlBytes(H2(orig_hash), new_h2, offset, new_ctrl); + + void* old_element_target = NextSlot(new_slots, slot_size); + SanitizerUnpoisonMemoryRegion(old_element_target, slot_size); + policy.transfer_n(&common, old_element_target, old_slots, 1); + + void* new_element_target_slot = SlotAddress(new_slots, offset, slot_size); + SanitizerUnpoisonMemoryRegion(new_element_target_slot, slot_size); + + policy.dealloc(alloc, kOldCapacity, old_ctrl, slot_size, slot_align, + has_infoz); + PrepareInsertCommon(common); + ABSL_SWISSTABLE_ASSERT(common.size() == 2); + GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2); + + if (ABSL_PREDICT_FALSE(has_infoz)) { + ReportSingleGroupTableGrowthToInfoz(common, infoz, new_hash); + } + return {new_ctrl + offset, new_element_target_slot}; +} + // Grows to next capacity and prepares insert for the given new_hash. // Returns the offset of the new element. 
-size_t GrowToNextCapacityAndPrepareInsert(CommonFields& common, - const PolicyFunctions& policy, - size_t new_hash) { +size_t GrowToNextCapacityAndPrepareInsert( + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_hash) { ABSL_SWISSTABLE_ASSERT(common.growth_left() == 0); const size_t old_capacity = common.capacity(); - ABSL_SWISSTABLE_ASSERT(old_capacity == 0 || - old_capacity > policy.soo_capacity()); + ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity()); + ABSL_SWISSTABLE_ASSERT(!IsSmallCapacity(old_capacity)); const size_t new_capacity = NextCapacity(old_capacity); - ABSL_SWISSTABLE_ASSERT(IsValidCapacity(new_capacity)); - ABSL_SWISSTABLE_ASSERT(new_capacity > policy.soo_capacity()); - ctrl_t* old_ctrl = common.control(); void* old_slots = common.slot_array(); common.set_capacity(new_capacity); const size_t slot_size = policy.slot_size; const size_t slot_align = policy.slot_align; - HashtablezInfoHandle infoz; - if (old_capacity > 0) { - infoz = common.infoz(); - } else { - const bool should_sample = - policy.is_hashtablez_eligible && ShouldSampleNextTable(); - if (ABSL_PREDICT_FALSE(should_sample)) { - infoz = ForcedTrySample(slot_size, policy.key_size, policy.value_size, - policy.soo_capacity()); - } - } - const bool has_infoz = infoz.IsSampled(); - - RawHashSetLayout layout(new_capacity, slot_size, slot_align, has_infoz); void* alloc = policy.get_char_alloc(common); - char* mem = static_cast(policy.alloc(alloc, layout.alloc_size())); - const GenerationType old_generation = common.generation(); - common.set_generation_ptr( - reinterpret_cast(mem + layout.generation_offset())); - common.set_generation(NextGeneration(old_generation)); + HashtablezInfoHandle infoz = common.infoz(); + const bool has_infoz = infoz.IsSampled(); - ctrl_t* new_ctrl = reinterpret_cast(mem + layout.control_offset()); - void* new_slots = mem + layout.slot_offset(); - common.set_control(new_ctrl); + const auto [new_ctrl, new_slots] = + AllocBackingArray(common, policy, new_capacity, has_infoz, alloc); + common.set_control(new_ctrl); common.set_slots(new_slots); SanitizerPoisonMemoryRegion(new_slots, new_capacity * slot_size); h2_t new_h2 = H2(new_hash); size_t total_probe_length = 0; FindInfo find_info; - if (old_capacity == 0) { - static_assert(NextCapacity(0) == 1); - InitializeSingleElementControlBytes(new_h2, new_ctrl); - common.generate_new_seed(); - find_info = FindInfo{0, 0}; - SanitizerUnpoisonMemoryRegion(new_slots, slot_size); + if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) { + size_t offset; + GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl, + new_capacity); + // We put the new element either at the beginning or at the end of the + // table with approximately equal probability. + offset = + SingleGroupTableH1(new_hash, common.seed()) & 1 ? 0 : new_capacity - 1; + + ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset])); + SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size); + find_info = FindInfo{offset, 0}; + // Single group tables have all slots full on resize. So we can transfer + // all slots without checking the control bytes. 
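The single-group branch above drops the incoming element at either end of the new table so iteration order stays non-deterministic. A sketch of that placement rule (the selecting bit comes from the seeded SingleGroupTableH1 mix, simplified here to a parameter):

#include <cstddef>

size_t NewElementOffsetSketch(size_t h1, size_t new_capacity) {
  return (h1 & 1) ? 0 : new_capacity - 1;  // front or back, roughly 50/50
}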
+ ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity); + void* target = NextSlot(new_slots, slot_size); + SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size); + policy.transfer_n(&common, target, old_slots, old_capacity); } else { - if (ABSL_PREDICT_TRUE(is_single_group(new_capacity))) { - GrowIntoSingleGroupShuffleControlBytes(old_ctrl, old_capacity, new_ctrl, - new_capacity); - // Single group tables have all slots full on resize. So we can transfer - // all slots without checking the control bytes. - ABSL_SWISSTABLE_ASSERT(common.size() == old_capacity); - auto* target = NextSlot(new_slots, slot_size); - SanitizerUnpoisonMemoryRegion(target, old_capacity * slot_size); - policy.transfer_n(&common, target, old_slots, old_capacity); - // We put the new element either at the beginning or at the end of the - // table with approximately equal probability. - size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 1 - ? 0 - : new_capacity - 1; - - ABSL_SWISSTABLE_ASSERT(IsEmpty(new_ctrl[offset])); - SetCtrlInSingleGroupTable(common, offset, new_h2, policy.slot_size); - find_info = FindInfo{offset, 0}; - } else { - total_probe_length = - GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots); - find_info = find_first_non_full(common, new_hash); - SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size); - } - ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity()); - (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align, - has_infoz); + total_probe_length = + GrowToNextCapacityDispatch(common, policy, old_ctrl, old_slots); + find_info = find_first_non_full(common, new_hash); + SetCtrlInLargeTable(common, find_info.offset, new_h2, policy.slot_size); } + ABSL_SWISSTABLE_ASSERT(old_capacity > policy.soo_capacity()); + (*policy.dealloc)(alloc, old_capacity, old_ctrl, slot_size, slot_align, + has_infoz); PrepareInsertCommon(common); ResetGrowthLeft(GetGrowthInfoFromControl(new_ctrl), new_capacity, common.size()); if (ABSL_PREDICT_FALSE(has_infoz)) { - common.set_has_infoz(); - infoz.RecordStorageChanged(common.size() - 1, new_capacity); - infoz.RecordRehash(total_probe_length); - infoz.RecordInsert(new_hash, find_info.probe_length); - common.set_infoz(infoz); + ReportGrowthToInfoz(common, infoz, new_hash, total_probe_length, + find_info.probe_length); } return find_info.offset; } +} // namespace + +std::pair PrepareInsertSmallNonSoo( + CommonFields& common, const PolicyFunctions& __restrict policy, + absl::FunctionRef get_hash) { + ABSL_SWISSTABLE_ASSERT(common.is_small()); + ABSL_SWISSTABLE_ASSERT(!policy.soo_enabled); + if (common.capacity() == 1) { + if (common.empty()) { + IncrementSmallSizeNonSoo(common, policy); + return {SooControl(), common.slot_array()}; + } else { + return Grow1To3AndPrepareInsert(common, policy, get_hash); + } + } + + // Growing from 0 to 1 capacity. 
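PrepareInsertSmallNonSoo above dispatches on the three possible small-table states. A compact model of that decision tree (enumerator names hypothetical):

#include <cstddef>

enum class SmallInsertPathSketch {
  kFillExistingCapacity1,  // capacity 1, still empty: reuse the slot
  kGrow1To3,               // capacity 1, full: Grow1To3AndPrepareInsert
  kAllocateCapacity1       // capacity 0: first heap allocation
};

SmallInsertPathSketch ClassifySmallInsert(size_t capacity, bool empty) {
  if (capacity == 1) {
    return empty ? SmallInsertPathSketch::kFillExistingCapacity1
                 : SmallInsertPathSketch::kGrow1To3;
  }
  return SmallInsertPathSketch::kAllocateCapacity1;
}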
+ ABSL_SWISSTABLE_ASSERT(common.capacity() == 0); + constexpr size_t kNewCapacity = 1; + + common.set_capacity(kNewCapacity); + HashtablezInfoHandle infoz; + const bool should_sample = + policy.is_hashtablez_eligible && ShouldSampleNextTable(); + if (ABSL_PREDICT_FALSE(should_sample)) { + infoz = ForcedTrySample(policy.slot_size, policy.key_size, + policy.value_size, policy.soo_capacity()); + } + const bool has_infoz = infoz.IsSampled(); + void* alloc = policy.get_char_alloc(common); + + const auto [new_ctrl, new_slots] = + AllocBackingArray(common, policy, kNewCapacity, has_infoz, alloc); + common.set_control(new_ctrl); + common.set_slots(new_slots); + + static_assert(NextCapacity(0) == 1); + PrepareInsertCommon(common); + + if (ABSL_PREDICT_FALSE(has_infoz)) { + common.generate_new_seed(/*has_infoz=*/true); + ReportSingleGroupTableGrowthToInfoz(common, infoz, + get_hash(common.seed().seed())); + } + return {SooControl(), new_slots}; +} + +namespace { + // Called whenever the table needs to vacate empty slots either by removing // tombstones via rehash or growth to next capacity. ABSL_ATTRIBUTE_NOINLINE -size_t RehashOrGrowToNextCapacityAndPrepareInsert(CommonFields& common, - const PolicyFunctions& policy, - size_t new_hash) { +size_t RehashOrGrowToNextCapacityAndPrepareInsert( + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_hash) { const size_t cap = common.capacity(); ABSL_ASSUME(cap > 0); if (cap > Group::kWidth && @@ -1377,10 +1584,11 @@ size_t RehashOrGrowToNextCapacityAndPrepareInsert(CommonFields& common, } } -// Slow path for PrepareInsertNonSoo that is called when the table has deleted +// Slow path for PrepareInsertLarge that is called when the table has deleted // slots or need to be resized or rehashed. -size_t PrepareInsertNonSooSlow(CommonFields& common, - const PolicyFunctions& policy, size_t hash) { +size_t PrepareInsertLargeSlow(CommonFields& common, + const PolicyFunctions& __restrict policy, + size_t hash) { const GrowthInfo growth_info = common.growth_info(); ABSL_SWISSTABLE_ASSERT(!growth_info.HasNoDeletedAndGrowthLeft()); if (ABSL_PREDICT_TRUE(growth_info.HasNoGrowthLeftAndNoDeleted())) { @@ -1402,7 +1610,6 @@ size_t PrepareInsertNonSooSlow(CommonFields& common, return target.offset; } - // Resizes empty non-allocated SOO table to NextCapacity(SooCapacity()), // forces the table to be sampled and prepares the insert. // SOO tables need to switch from SOO to heap in order to store the infoz. @@ -1411,11 +1618,13 @@ size_t PrepareInsertNonSooSlow(CommonFields& common, // 2. `c.empty()`. ABSL_ATTRIBUTE_NOINLINE size_t GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert( - CommonFields& common, const PolicyFunctions& policy, size_t new_hash) { + CommonFields& common, const PolicyFunctions& __restrict policy, + absl::FunctionRef get_hash) { ResizeEmptyNonAllocatedTableImpl(common, policy, NextCapacity(SooCapacity()), /*force_infoz=*/true); PrepareInsertCommon(common); common.growth_info().OverwriteEmptyAsFull(); + const size_t new_hash = get_hash(common.seed().seed()); SetCtrlInSingleGroupTable(common, SooSlotIndex(), H2(new_hash), policy.slot_size); common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0); @@ -1428,9 +1637,9 @@ GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert( // 2. `c.empty()`. // 3. `new_size > policy.soo_capacity()`. // The table will be attempted to be sampled. 
-void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common, - const PolicyFunctions& policy, - size_t new_size) { +void ReserveEmptyNonAllocatedTableToFitNewSize( + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_size) { ValidateMaxSize(new_size, policy.slot_size); ABSL_ASSUME(new_size > 0); ResizeEmptyNonAllocatedTableImpl(common, policy, SizeToCapacity(new_size), @@ -1447,7 +1656,8 @@ void ReserveEmptyNonAllocatedTableToFitNewSize(CommonFields& common, // 1. `c.capacity() > policy.soo_capacity()` OR `!c.empty()`. // Reserving already allocated tables is considered to be a rare case. ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable( - CommonFields& common, const PolicyFunctions& policy, size_t new_size) { + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_size) { const size_t cap = common.capacity(); ValidateMaxSize(new_size, policy.slot_size); ABSL_ASSUME(new_size > 0); @@ -1464,6 +1674,17 @@ ABSL_ATTRIBUTE_NOINLINE void ReserveAllocatedTable( common.infoz().RecordReservation(new_size); } +// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO +// table to be sampled. SOO tables need to switch from SOO to heap in order to +// store the infoz. No-op if sampling is disabled or not possible. +void GrowFullSooTableToNextCapacityForceSampling( + CommonFields& common, const PolicyFunctions& __restrict policy) { + AssertFullSoo(common, policy); + ResizeFullSooTable( + common, policy, NextCapacity(SooCapacity()), + ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled); +} + } // namespace void* GetRefForEmptyClass(CommonFields& common) { @@ -1474,15 +1695,16 @@ void* GetRefForEmptyClass(CommonFields& common) { return &common; } -void ResizeAllocatedTableWithSeedChange(CommonFields& common, - const PolicyFunctions& policy, - size_t new_capacity) { +void ResizeAllocatedTableWithSeedChange( + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t new_capacity) { ResizeNonSooImpl( common, policy, new_capacity, common.infoz()); } void ReserveEmptyNonAllocatedTableToFitBucketCount( - CommonFields& common, const PolicyFunctions& policy, size_t bucket_count) { + CommonFields& common, const PolicyFunctions& __restrict policy, + size_t bucket_count) { size_t new_capacity = NormalizeCapacity(bucket_count); ValidateMaxSize(CapacityToGrowth(new_capacity), policy.slot_size); ResizeEmptyNonAllocatedTableImpl(common, policy, new_capacity, @@ -1491,49 +1713,40 @@ void ReserveEmptyNonAllocatedTableToFitBucketCount( // Resizes a full SOO table to the NextCapacity(SooCapacity()). template -size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common, - const PolicyFunctions& policy, - size_t new_hash, - ctrl_t soo_slot_ctrl) { +size_t GrowSooTableToNextCapacityAndPrepareInsert( + CommonFields& common, const PolicyFunctions& __restrict policy, + absl::FunctionRef get_hash, bool force_sampling) { AssertSoo(common, policy); - if (ABSL_PREDICT_FALSE(soo_slot_ctrl == ctrl_t::kEmpty)) { + if (ABSL_PREDICT_FALSE(force_sampling)) { // The table is empty, it is only used for forced sampling of SOO tables. 
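ReserveEmptyNonAllocatedTableToFitNewSize above turns a requested size into a capacity via SizeToCapacity. A hedged sketch of the underlying arithmetic, assuming 2^k - 1 capacities and the 7/8 maximum load factor cited in the header comments (the real CapacityToGrowth/SizeToCapacity differ in details such as the small-table cutoff):

#include <cstddef>

constexpr size_t CapacityToGrowthSketch(size_t cap) {
  return cap <= 7 ? cap : cap - cap / 8;  // small tables use load factor 1
}
constexpr size_t SizeToCapacitySketch(size_t size) {
  size_t cap = 1;
  while (CapacityToGrowthSketch(cap) < size) cap = cap * 2 + 1;
  return cap;
}
static_assert(SizeToCapacitySketch(8) == 15 && SizeToCapacitySketch(14) == 15);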
return GrowEmptySooTableToNextCapacityForceSamplingAndPrepareInsert( - common, policy, new_hash); + common, policy, get_hash); } ABSL_SWISSTABLE_ASSERT(common.size() == policy.soo_capacity()); static constexpr size_t kNewCapacity = NextCapacity(SooCapacity()); const size_t slot_size = policy.slot_size; - const size_t slot_align = policy.slot_align; + void* alloc = policy.get_char_alloc(common); common.set_capacity(kNewCapacity); // Since the table is not empty, it will not be sampled. // The decision to sample was already made during the first insertion. - RawHashSetLayout layout(kNewCapacity, slot_size, slot_align, - /*has_infoz=*/false); - void* alloc = policy.get_char_alloc(common); - char* mem = static_cast(policy.alloc(alloc, layout.alloc_size())); - const GenerationType old_generation = common.generation(); - common.set_generation_ptr( - reinterpret_cast(mem + layout.generation_offset())); - common.set_generation(NextGeneration(old_generation)); - + // // We do not set control and slots in CommonFields yet to avoid overriding // SOO data. - ctrl_t* new_ctrl = reinterpret_cast(mem + layout.control_offset()); - void* new_slots = mem + layout.slot_offset(); + const auto [new_ctrl, new_slots] = AllocBackingArray( + common, policy, kNewCapacity, /*has_infoz=*/false, alloc); PrepareInsertCommon(common); ABSL_SWISSTABLE_ASSERT(common.size() == 2); GetGrowthInfoFromControl(new_ctrl).InitGrowthLeftNoDeleted(kNewCapacity - 2); - common.generate_new_seed(); + common.generate_new_seed(/*has_infoz=*/false); + const h2_t soo_slot_h2 = H2(policy.hash_slot( + policy.hash_fn(common), common.soo_data(), common.seed().seed())); + const size_t new_hash = get_hash(common.seed().seed()); - // After resize from capacity 1 to 3, we always have exactly the slot with - // index 1 occupied, so we need to insert either at index 0 or index 2. - static_assert(SooSlotIndex() == 1); - const size_t offset = SingleGroupTableH1(new_hash, common.seed()) & 2; - InitializeThreeElementsControlBytesAfterSoo(soo_slot_ctrl, new_hash, offset, - new_ctrl); + const size_t offset = Resize1To3NewOffset(new_hash, common.seed()); + InitializeThreeElementsControlBytes(soo_slot_h2, H2(new_hash), offset, + new_ctrl); SanitizerPoisonMemoryRegion(new_slots, slot_size * kNewCapacity); void* target_slot = SlotAddress(new_slots, SooSlotIndex(), slot_size); @@ -1555,25 +1768,19 @@ size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common, static_assert(SooSlotMemcpySize == 0); policy.transfer_n(&common, target_slot, common.soo_data(), 1); } - // Seed was already generated above. - common.set_control(new_ctrl); + common.set_control(new_ctrl); common.set_slots(new_slots); - common.infoz().RecordInsert(new_hash, /*distance_from_desired=*/0); + // Full SOO table couldn't be sampled. If SOO table is sampled, it would + // have been resized to the next capacity. 
+ ABSL_SWISSTABLE_ASSERT(!common.infoz().IsSampled()); SanitizerUnpoisonMemoryRegion(SlotAddress(new_slots, offset, slot_size), slot_size); return offset; } -void GrowFullSooTableToNextCapacityForceSampling( - CommonFields& common, const PolicyFunctions& policy) { - AssertFullSoo(common, policy); - ResizeFullSooTable( - common, policy, NextCapacity(SooCapacity()), - ResizeFullSooTableSamplingMode::kForceSampleNoResizeIfUnsampled); -} - -void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n) { +void Rehash(CommonFields& common, const PolicyFunctions& __restrict policy, + size_t n) { const size_t cap = common.capacity(); auto clear_backing_array = [&]() { @@ -1604,7 +1811,7 @@ void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n) { } ABSL_SWISSTABLE_ASSERT(slot_size <= sizeof(HeapOrSoo)); ABSL_SWISSTABLE_ASSERT(policy.slot_align <= alignof(HeapOrSoo)); - HeapOrSoo tmp_slot(uninitialized_tag_t{}); + HeapOrSoo tmp_slot; size_t begin_offset = FindFirstFullSlot(0, cap, common.control()); policy.transfer_n( &common, &tmp_slot, @@ -1640,26 +1847,29 @@ void Rehash(CommonFields& common, const PolicyFunctions& policy, size_t n) { } } -void Copy(CommonFields& common, const PolicyFunctions& policy, +void Copy(CommonFields& common, const PolicyFunctions& __restrict policy, const CommonFields& other, absl::FunctionRef copy_fn) { const size_t size = other.size(); ABSL_SWISSTABLE_ASSERT(size > 0); const size_t soo_capacity = policy.soo_capacity(); const size_t slot_size = policy.slot_size; - if (size <= soo_capacity) { - ABSL_SWISSTABLE_ASSERT(size == 1); - common.set_full_soo(); + const bool soo_enabled = policy.soo_enabled; + if (size == 1) { + if (!soo_enabled) ReserveTableToFitNewSize(common, policy, 1); + IncrementSmallSize(common, policy); + const size_t other_capacity = other.capacity(); const void* other_slot = - other.capacity() <= soo_capacity - ? other.soo_data() - : SlotAddress( - other.slot_array(), - FindFirstFullSlot(0, other.capacity(), other.control()), - slot_size); - copy_fn(common.soo_data(), other_slot); - - if (policy.is_hashtablez_eligible && ShouldSampleNextTable()) { + other_capacity <= soo_capacity ? other.soo_data() + : other.is_small() + ? other.slot_array() + : SlotAddress(other.slot_array(), + FindFirstFullSlot(0, other_capacity, other.control()), + slot_size); + copy_fn(soo_enabled ? common.soo_data() : common.slot_array(), other_slot); + + if (soo_enabled && policy.is_hashtablez_eligible && + ShouldSampleNextTable()) { GrowFullSooTableToNextCapacityForceSampling(common, policy); } return; @@ -1670,53 +1880,29 @@ void Copy(CommonFields& common, const PolicyFunctions& policy, ABSL_SWISSTABLE_ASSERT(other.capacity() > soo_capacity); const size_t cap = common.capacity(); ABSL_SWISSTABLE_ASSERT(cap > soo_capacity); - // Note about single group tables: - // 1. It is correct to have any order of elements. - // 2. Order has to be non deterministic. - // 3. We are assigning elements with arbitrary `shift` starting from - // `capacity + shift` position. - // 4. `shift` must be coprime with `capacity + 1` in order to be able to use - // modular arithmetic to traverse all positions, instead of cycling - // through a subset of positions. Odd numbers are coprime with any - // `capacity + 1` (2^N). size_t offset = cap; - const size_t shift = is_single_group(cap) ? 
(common.seed().seed() | 1) : 0; const void* hash_fn = policy.hash_fn(common); auto hasher = policy.hash_slot; + const size_t seed = common.seed().seed(); IterateOverFullSlotsImpl( - other, slot_size, [&](const ctrl_t* that_ctrl, void* that_slot) { - if (shift == 0) { - // Big tables case. Position must be searched via probing. - // The table is guaranteed to be empty, so we can do faster than - // a full `insert`. - const size_t hash = (*hasher)(hash_fn, that_slot); - FindInfo target = find_first_non_full(common, hash); - infoz.RecordInsert(hash, target.probe_length); - offset = target.offset; - } else { - // Small tables case. Next position is computed via shift. - offset = (offset + shift) & cap; - } - const h2_t h2 = static_cast(*that_ctrl); - // We rely on the hash not changing for small tables. - ABSL_SWISSTABLE_ASSERT( - H2((*hasher)(hash_fn, that_slot)) == h2 && - "hash function value changed unexpectedly during the copy"); - SetCtrl(common, offset, h2, slot_size); + other, slot_size, [&](const ctrl_t*, void* that_slot) { + // The table is guaranteed to be empty, so we can do faster than + // a full `insert`. + const size_t hash = (*hasher)(hash_fn, that_slot, seed); + FindInfo target = find_first_non_full(common, hash); + infoz.RecordInsert(hash, target.probe_length); + offset = target.offset; + SetCtrl(common, offset, H2(hash), slot_size); copy_fn(SlotAddress(common.slot_array(), offset, slot_size), that_slot); common.maybe_increment_generation_on_insert(); }); - if (shift != 0) { - // On small table copy we do not record individual inserts. - // RecordInsert requires hash, but it is unknown for small tables. - infoz.RecordStorageChanged(size, cap); - } common.increment_size(size); common.growth_info().OverwriteManyEmptyAsFull(size); } void ReserveTableToFitNewSize(CommonFields& common, - const PolicyFunctions& policy, size_t new_size) { + const PolicyFunctions& __restrict policy, + size_t new_size) { common.reset_reserved_growth(new_size); common.set_reservation_size(new_size); ABSL_SWISSTABLE_ASSERT(new_size > policy.soo_capacity()); @@ -1728,34 +1914,24 @@ void ReserveTableToFitNewSize(CommonFields& common, ABSL_SWISSTABLE_ASSERT(!common.empty() || cap > policy.soo_capacity()); ABSL_SWISSTABLE_ASSERT(cap > 0); const size_t max_size_before_growth = - cap <= policy.soo_capacity() ? policy.soo_capacity() - : common.size() + common.growth_left(); + IsSmallCapacity(cap) ? cap : common.size() + common.growth_left(); if (new_size <= max_size_before_growth) { return; } ReserveAllocatedTable(common, policy, new_size); } -size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy, - size_t hash, FindInfo target) { - const bool rehash_for_bug_detection = - common.should_rehash_for_bug_detection_on_insert() && - // Required to allow use of ResizeAllocatedTable. - common.capacity() > 0; - if (rehash_for_bug_detection) { - // Move to a different heap allocation in order to detect bugs. - const size_t cap = common.capacity(); - ResizeAllocatedTableWithSeedChange( - common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap)); - target = find_first_non_full(common, hash); - } - +namespace { +size_t PrepareInsertLargeImpl(CommonFields& common, + const PolicyFunctions& __restrict policy, + size_t hash, FindInfo target) { + ABSL_SWISSTABLE_ASSERT(!common.is_small()); const GrowthInfo growth_info = common.growth_info(); // When there are no deleted slots in the table // and growth_left is positive, we can insert at the first // empty slot in the probe sequence (target). 
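The deleted single-group copy path relied on the fact that any odd shift is coprime with 2^N, so repeatedly stepping by it modulo the table size visits every position exactly once. A runnable check of that claim (bounded to capacities up to 63 by the bitset):

#include <bitset>
#include <cstddef>

bool OddShiftVisitsAll(size_t shift, size_t cap /* 2^N - 1, <= 63 */) {
  std::bitset<64> seen;
  size_t pos = 0;
  for (size_t i = 0; i <= cap; ++i) {
    pos = (pos + shift) & cap;
    seen.set(pos);
  }
  return seen.count() == cap + 1;
}
// OddShiftVisitsAll(3, 7) is true; an even shift such as 2 reaches only half
// of the positions.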
if (ABSL_PREDICT_FALSE(!growth_info.HasNoDeletedAndGrowthLeft())) { - return PrepareInsertNonSooSlow(common, policy, hash); + return PrepareInsertLargeSlow(common, policy, hash); } PrepareInsertCommon(common); common.growth_info().OverwriteEmptyAsFull(); @@ -1763,6 +1939,31 @@ size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy, common.infoz().RecordInsert(hash, target.probe_length); return target.offset; } +} // namespace + +size_t PrepareInsertLarge(CommonFields& common, + const PolicyFunctions& __restrict policy, size_t hash, + FindInfo target) { + // NOLINTNEXTLINE(misc-static-assert) + ABSL_SWISSTABLE_ASSERT(!SwisstableGenerationsEnabled()); + return PrepareInsertLargeImpl(common, policy, hash, target); +} + +size_t PrepareInsertLargeGenerationsEnabled( + CommonFields& common, const PolicyFunctions& policy, size_t hash, + FindInfo target, absl::FunctionRef recompute_hash) { + // NOLINTNEXTLINE(misc-static-assert) + ABSL_SWISSTABLE_ASSERT(SwisstableGenerationsEnabled()); + if (common.should_rehash_for_bug_detection_on_insert()) { + // Move to a different heap allocation in order to detect bugs. + const size_t cap = common.capacity(); + ResizeAllocatedTableWithSeedChange( + common, policy, common.growth_left() > 0 ? cap : NextCapacity(cap)); + hash = recompute_hash(common.seed().seed()); + target = find_first_non_full(common, hash); + } + return PrepareInsertLargeImpl(common, policy, hash, target); +} namespace { // Returns true if the following is true @@ -1798,32 +1999,33 @@ template size_t TryFindNewIndexWithoutProbing(size_t h1, size_t old_index, // We need to instantiate ALL possible template combinations because we define // the function in the cc file. template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); template size_t GrowSooTableToNextCapacityAndPrepareInsert< - OptimalMemcpySizeForSooSlotTransfer(1), true>(CommonFields&, - const PolicyFunctions&, - size_t, ctrl_t); + OptimalMemcpySizeForSooSlotTransfer(1), true>( + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(2, 3)); template size_t GrowSooTableToNextCapacityAndPrepareInsert< - OptimalMemcpySizeForSooSlotTransfer(3), true>(CommonFields&, - const PolicyFunctions&, - size_t, ctrl_t); + OptimalMemcpySizeForSooSlotTransfer(3), true>( + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(4, 8)); template size_t GrowSooTableToNextCapacityAndPrepareInsert< - OptimalMemcpySizeForSooSlotTransfer(8), true>(CommonFields&, - const PolicyFunctions&, - size_t, ctrl_t); + OptimalMemcpySizeForSooSlotTransfer(8), true>( + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); #if UINTPTR_MAX == UINT32_MAX static_assert(MaxSooSlotSize() == 8); #else static_assert(VerifyOptimalMemcpySizeForSooSlotTransferRange(9, 16)); template size_t GrowSooTableToNextCapacityAndPrepareInsert< - OptimalMemcpySizeForSooSlotTransfer(16), true>(CommonFields&, - const PolicyFunctions&, - size_t, ctrl_t); + OptimalMemcpySizeForSooSlotTransfer(16), true>( + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); static_assert(MaxSooSlotSize() == 16); #endif diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index 512c94624bc..9f62700785a 100644 --- 
a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h @@ -150,11 +150,11 @@ // To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and // perform a `find` to see if it's already present; if it is, we're done. If // it's not, we may decide the table is getting overcrowded (i.e. the load -// factor is greater than 7/8 for big tables; `is_small()` tables use a max load -// factor of 1); in this case, we allocate a bigger array, `unchecked_insert` -// each element of the table into the new array (we know that no insertion here -// will insert an already-present value), and discard the old backing array. At -// this point, we may `unchecked_insert` the value `x`. +// factor is greater than 7/8 for big tables; tables smaller than one probing +// group use a max load factor of 1); in this case, we allocate a bigger array, +// `unchecked_insert` each element of the table into the new array (we know that +// no insertion here will insert an already-present value), and discard the old +// backing array. At this point, we may `unchecked_insert` the value `x`. // // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which // presents a viable, initialized slot pointee to the caller. @@ -194,6 +194,7 @@ #include #include "absl/base/attributes.h" +#include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/iterator_traits.h" @@ -282,10 +283,8 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs, swap(lhs, rhs); } template -void SwapAlloc(AllocType& lhs, AllocType& rhs, +void SwapAlloc([[maybe_unused]] AllocType& lhs, [[maybe_unused]] AllocType& rhs, std::false_type /* propagate_on_container_swap */) { - (void)lhs; - (void)rhs; assert(lhs == rhs && "It's UB to call swap with unequal non-propagating allocators."); } @@ -369,19 +368,7 @@ struct IsDecomposable< std::declval()...))>, Policy, Hash, Eq, Ts...> : std::true_type {}; -// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. -template -constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { - using std::swap; - return noexcept(swap(std::declval(), std::declval())); -} -template -constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { - return false; -} - -// See definition comment for why this is size 32. -ABSL_DLL extern const ctrl_t kEmptyGroup[32]; +ABSL_DLL extern ctrl_t kDefaultIterControl; // We use these sentinel capacity values in debug mode to indicate different // classes of bugs. @@ -395,17 +382,14 @@ enum InvalidCapacity : size_t { kSelfMovedFrom, }; -// Returns a pointer to a control byte group that can be used by empty tables. -inline ctrl_t* EmptyGroup() { - // Const must be cast away here; no uses of this function will actually write - // to it because it is only used for empty tables. - return const_cast(kEmptyGroup + 16); -} +// Returns a pointer to a control byte that can be used by default-constructed +// iterators. We don't expect this pointer to be dereferenced. +inline ctrl_t* DefaultIterControl() { return &kDefaultIterControl; } // For use in SOO iterators. // TODO(b/289225379): we could potentially get rid of this by adding an is_soo // bit in iterators. This would add branches but reduce cache misses. -ABSL_DLL extern const ctrl_t kSooControl[17]; +ABSL_DLL extern const ctrl_t kSooControl[2]; // Returns a pointer to a full byte followed by a sentinel byte. 
inline ctrl_t* SooControl() { @@ -452,18 +436,29 @@ class PerTableSeed { // The number of bits in the seed. // It is big enough to ensure non-determinism of iteration order. // We store the seed inside a uint64_t together with size and other metadata. - // Using 16 bits allows us to save one `and` instruction in H1 (we use movzwl - // instead of movq+and). + // Using 16 bits allows us to save one `and` instruction in H1 (we use + // sign-extended move instead of mov+and). static constexpr size_t kBitCount = 16; + static constexpr size_t kSignBit = uint64_t{1} << (kBitCount - 1); - // Returns the seed for the table. Only the lowest kBitCount are non zero. - size_t seed() const { return seed_; } + // Returns the seed for the table. + size_t seed() const { + // We use a sign-extended load to ensure high bits are non-zero. + int16_t seed_signed = absl::bit_cast(seed_); + auto seed_sign_extended = + static_cast>(seed_signed); + return absl::bit_cast(seed_sign_extended); + } private: friend class HashtableSize; - explicit PerTableSeed(size_t seed) : seed_(seed) {} + explicit PerTableSeed(uint16_t seed) : seed_(seed) { + ABSL_SWISSTABLE_ASSERT((seed & kSignBit) != 0 || seed == 0); + } - const size_t seed_; + // The most significant bit of the seed is always 1 when there is a non-zero + // seed. This way, when sign-extended the seed has non-zero high bits. + const uint16_t seed_; }; // Returns next per-table seed. @@ -502,8 +497,14 @@ class HashtableSize { return PerTableSeed(static_cast(data_) & kSeedMask); } - void generate_new_seed() { - data_ = (data_ & ~kSeedMask) ^ uint64_t{NextSeed()}; + void generate_new_seed() { set_seed(NextSeed()); } + + // We need to use a constant seed when the table is sampled so that sampled + // hashes use the same seed and can e.g. identify stuck bits accurately. + void set_sampled_seed() { set_seed(PerTableSeed::kSignBit); } + + bool is_sampled_seed() const { + return (data_ & kSeedMask) == PerTableSeed::kSignBit; } // Returns true if the table has infoz. @@ -517,6 +518,9 @@ class HashtableSize { void set_no_seed_for_testing() { data_ &= ~kSeedMask; } private: + void set_seed(uint16_t seed) { + data_ = (data_ & ~kSeedMask) | (seed | PerTableSeed::kSignBit); + } static constexpr size_t kSizeShift = 64 - kSizeBitCount; static constexpr uint64_t kSizeOneNoMetadata = uint64_t{1} << kSizeShift; static constexpr uint64_t kMetadataMask = kSizeOneNoMetadata - 1; @@ -527,15 +531,13 @@ class HashtableSize { uint64_t data_; }; -// Extracts the H1 portion of a hash: 57 bits mixed with a per-table seed. -inline size_t H1(size_t hash, PerTableSeed seed) { - return (hash >> 7) ^ seed.seed(); -} +// H1 is just the low bits of the hash. +inline size_t H1(size_t hash) { return hash; } -// Extracts the H2 portion of a hash: the 7 bits not used for H1. +// Extracts the H2 portion of a hash: the 7 most significant bits. // // These are used as an occupied control byte. -inline h2_t H2(size_t hash) { return hash & 0x7F; } +inline h2_t H2(size_t hash) { return hash >> (sizeof(size_t) * 8 - 7); } // When there is an insertion with no reserved growth, we rehash with // probability `min(1, RehashProbabilityConstant() / capacity())`. Using a @@ -571,11 +573,9 @@ class CommonFieldsGenerationInfoEnabled { // references. We rehash on the first insertion after reserved_growth_ reaches // 0 after a call to reserve. We also do a rehash with low probability // whenever reserved_growth_ is zero. 
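Both bit tricks above are easy to verify in isolation. The stored seed is 16 bits whose top bit is forced to 1, so a sign-extending load yields a value whose upper 48 bits are all ones, i.e. non-zero high bits with no extra mask instruction; and H2 is now the 7 most significant bits of the hash, leaving every low bit to H1. A worked sketch, assuming a 64-bit size_t:

#include <cstdint>
#include <cstring>

uint64_t seed_sketch(uint16_t stored) {  // stored is 0 or has bit 15 set
  int16_t s;
  std::memcpy(&s, &stored, sizeof(s));   // bit_cast to the signed type
  return static_cast<uint64_t>(static_cast<int64_t>(s));  // sign-extend
}

uint8_t h2_sketch(uint64_t hash) {       // top 7 bits become the control byte
  return static_cast<uint8_t>(hash >> 57);
}

// seed_sketch(0x8001) == 0xFFFFFFFFFFFF8001, so mixing the seed into the
// hash perturbs far more than the low 16 bits.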
- bool should_rehash_for_bug_detection_on_insert(PerTableSeed seed, - size_t capacity) const; + bool should_rehash_for_bug_detection_on_insert(size_t capacity) const; // Similar to above, except that we don't depend on reserved_growth_. - bool should_rehash_for_bug_detection_on_move(PerTableSeed seed, - size_t capacity) const; + bool should_rehash_for_bug_detection_on_move(size_t capacity) const; void maybe_increment_generation_on_insert() { if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0; @@ -628,12 +628,8 @@ class CommonFieldsGenerationInfoDisabled { CommonFieldsGenerationInfoDisabled& operator=( CommonFieldsGenerationInfoDisabled&&) = default; - bool should_rehash_for_bug_detection_on_insert(PerTableSeed, size_t) const { - return false; - } - bool should_rehash_for_bug_detection_on_move(PerTableSeed, size_t) const { - return false; - } + bool should_rehash_for_bug_detection_on_insert(size_t) const { return false; } + bool should_rehash_for_bug_detection_on_move(size_t) const { return false; } void maybe_increment_generation_on_insert() {} void increment_generation() {} void reset_reserved_growth(size_t, size_t) {} @@ -781,6 +777,9 @@ static_assert(alignof(GrowthInfo) == alignof(size_t), ""); // A valid capacity is a non-zero integer `2^m - 1`. constexpr bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } +// Whether a table is small enough that we don't need to hash any keys. +constexpr bool IsSmallCapacity(size_t capacity) { return capacity <= 1; } + // Returns the number of "cloned control bytes". // // This is the number of control bytes that are present both at the beginning @@ -790,7 +789,7 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } // Returns the number of control bytes including cloned. constexpr size_t NumControlBytes(size_t capacity) { - return capacity + 1 + NumClonedBytes(); + return IsSmallCapacity(capacity) ? 0 : capacity + 1 + NumClonedBytes(); } // Computes the offset from the start of the backing allocation of control. @@ -808,6 +807,9 @@ constexpr size_t AlignUpTo(size_t offset, size_t align) { // Helper class for computing offsets and allocation size of hash set fields. class RawHashSetLayout { public: + // TODO(b/413062340): maybe don't allocate growth info for capacity 1 tables. + // Doing so may require additional branches/complexity so it might not be + // worth it. explicit RawHashSetLayout(size_t capacity, size_t slot_size, size_t slot_align, bool has_infoz) : control_offset_(ControlOffset(has_infoz)), @@ -846,50 +848,30 @@ class RawHashSetLayout { struct HashtableFreeFunctionsAccess; -// Suppress erroneous uninitialized memory errors on GCC. For example, GCC -// thinks that the call to slot_array() in find_or_prepare_insert() is reading -// uninitialized memory, but slot_array is only called there when the table is -// non-empty and this memory is initialized when the table is non-empty. 
-#if !defined(__clang__) && defined(__GNUC__) -#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") \ - _Pragma("GCC diagnostic ignored \"-Wuninitialized\"") x; \ - _Pragma("GCC diagnostic pop") -#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) \ - ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(return x) -#else -#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(x) x -#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(x) return x -#endif - // This allows us to work around an uninitialized memory warning when // constructing begin() iterators in empty hashtables. +template union MaybeInitializedPtr { - void* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); } - void set(void* ptr) { p = ptr; } + T* get() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(p); } + void set(T* ptr) { p = ptr; } - void* p; + T* p; }; struct HeapPtrs { - explicit HeapPtrs(uninitialized_tag_t) {} - explicit HeapPtrs(ctrl_t* c) : control(c) {} - // The control bytes (and, also, a pointer near to the base of the backing // array). // - // This contains `capacity + 1 + NumClonedBytes()` entries, even - // when the table is empty (hence EmptyGroup). + // This contains `capacity + 1 + NumClonedBytes()` entries. // // Note that growth_info is stored immediately before this pointer. - // May be uninitialized for SOO tables. - ctrl_t* control; + // May be uninitialized for small tables. + MaybeInitializedPtr control; // The beginning of the slots, located at `SlotOffset()` bytes after // `control`. May be uninitialized for empty tables. // Note: we can't use `slots` because Qt defines "slots" as a macro. - MaybeInitializedPtr slot_array; + MaybeInitializedPtr slot_array; }; // Returns the maximum size of the SOO slot. @@ -898,19 +880,16 @@ constexpr size_t MaxSooSlotSize() { return sizeof(HeapPtrs); } // Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo // is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`. union HeapOrSoo { - explicit HeapOrSoo(uninitialized_tag_t) : heap(uninitialized_tag_t{}) {} - explicit HeapOrSoo(ctrl_t* c) : heap(c) {} - - ctrl_t*& control() { + MaybeInitializedPtr& control() { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control); } - ctrl_t* control() const { + MaybeInitializedPtr control() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.control); } - MaybeInitializedPtr& slot_array() { + MaybeInitializedPtr& slot_array() { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array); } - MaybeInitializedPtr slot_array() const { + MaybeInitializedPtr slot_array() const { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(heap.slot_array); } void* get_soo_data() { @@ -939,20 +918,13 @@ inline GrowthInfo& GetGrowthInfoFromControl(ctrl_t* control) { class CommonFields : public CommonFieldsGenerationInfo { public: explicit CommonFields(soo_tag_t) - : capacity_(SooCapacity()), - size_(no_seed_empty_tag_t{}), - heap_or_soo_(uninitialized_tag_t{}) {} + : capacity_(SooCapacity()), size_(no_seed_empty_tag_t{}) {} explicit CommonFields(full_soo_tag_t) - : capacity_(SooCapacity()), - size_(full_soo_tag_t{}), - heap_or_soo_(uninitialized_tag_t{}) {} + : capacity_(SooCapacity()), size_(full_soo_tag_t{}) {} explicit CommonFields(non_soo_tag_t) - : capacity_(0), - size_(no_seed_empty_tag_t{}), - heap_or_soo_(EmptyGroup()) {} + : capacity_(0), size_(no_seed_empty_tag_t{}) {} // For use in swapping. 
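The templated MaybeInitializedPtr is the interesting part of this hunk: routing the read through a union member (plus the suppression macro, which this hunk relocates out of this spot while its uses remain below) gives one audited site where a possibly-uninitialized pointer may be loaded, instead of pragma push/pop pairs at every caller. Its shape in isolation, with the macro assumed to reduce to a plain return:

template <class T>
union MaybeInitPtrSketch {
  T* get() const { return p; }   // callers only read after a matching set()
  void set(T* ptr) { p = ptr; }
  T* p;
};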
- explicit CommonFields(uninitialized_tag_t) - : size_(uninitialized_tag_t{}), heap_or_soo_(uninitialized_tag_t{}) {} + explicit CommonFields(uninitialized_tag_t) : size_(uninitialized_tag_t{}) {} // Not copyable CommonFields(const CommonFields&) = delete; @@ -979,31 +951,25 @@ class CommonFields : public CommonFieldsGenerationInfo { const void* soo_data() const { return heap_or_soo_.get_soo_data(); } void* soo_data() { return heap_or_soo_.get_soo_data(); } - ctrl_t* control() const { return heap_or_soo_.control(); } - - // When we set the control bytes, we also often want to generate a new seed. - // So we bundle these two operations together to make sure we don't forget to - // generate a new seed. - // The table will be invalidated if - // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is - // being changed. In such cases, we will need to rehash the table. - template <bool kGenerateSeed = true> - void set_control(ctrl_t* c) { - heap_or_soo_.control() = c; - if constexpr (kGenerateSeed) { - generate_new_seed(); - } - } - void* backing_array_start() const { - // growth_info (and maybe infoz) is stored before control bytes. - ABSL_SWISSTABLE_ASSERT( - reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0); - return control() - ControlOffset(has_infoz()); + ctrl_t* control() const { + ABSL_SWISSTABLE_ASSERT(capacity() > 0); + // Assume that the control bytes don't alias `this`. + ctrl_t* ctrl = heap_or_soo_.control().get(); + [[maybe_unused]] size_t num_control_bytes = NumControlBytes(capacity()); + ABSL_ASSUME(reinterpret_cast<uintptr_t>(ctrl + num_control_bytes) <= + reinterpret_cast<uintptr_t>(this) || + reinterpret_cast<uintptr_t>(this + 1) <= + reinterpret_cast<uintptr_t>(ctrl)); + ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(ctrl); } + void set_control(ctrl_t* c) { heap_or_soo_.control().set(c); } + // Note: we can't use slots() because Qt defines "slots" as a macro. void* slot_array() const { return heap_or_soo_.slot_array().get(); } - MaybeInitializedPtr slots_union() const { return heap_or_soo_.slot_array(); } + MaybeInitializedPtr<void> slots_union() const { + return heap_or_soo_.slot_array(); + } void set_slots(void* s) { heap_or_soo_.slot_array().set(s); } // The number of filled slots. @@ -1032,13 +998,20 @@ } bool empty() const { return size_.empty(); } - // The seed used for the H1 part of the hash function. + // The seed used for the hash function. PerTableSeed seed() const { return size_.seed(); } - // Generates a new seed for the H1 part of the hash function. - // The table will be invalidated if - // `kGenerateSeed && !empty() && !is_single_group(capacity())` because H1 is - // being changed. In such cases, we will need to rehash the table. - void generate_new_seed() { size_.generate_new_seed(); } + // Generates a new seed for the hash function. + // The table will be invalidated if `!empty()` because the hash is being + // changed. In such cases, we will need to rehash the table. + void generate_new_seed(bool has_infoz) { + // Note: we can't use has_infoz() here because we set has_infoz after + // we generate the seed. + if (ABSL_PREDICT_FALSE(has_infoz)) { + size_.set_sampled_seed(); + return; + } + size_.generate_new_seed(); + } void set_no_seed_for_testing() { size_.set_no_seed_for_testing(); } // The total number of available slots.
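One consequence of generate_new_seed(bool has_infoz) above deserves a callout: every sampled table uses the same fixed seed, just the forced sign bit, so hashtablez can compare hashes across tables (for example, to identify stuck bits), while unsampled tables still draw random seeds. The decision in miniature, under the 16-bit layout described earlier:

constexpr uint16_t kSeedSignBit16 = uint16_t{1} << 15;

uint16_t next_seed_sketch(bool sampled, uint16_t random_bits) {
  return sampled ? kSeedSignBit16
                 : static_cast<uint16_t>(random_bits | kSeedSignBit16);
}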
@@ -1049,6 +1022,7 @@ class CommonFields : public CommonFieldsGenerationInfo { c > kAboveMaxValidCapacity); capacity_ = c; } + bool is_small() const { return IsSmallCapacity(capacity_); } // The number of slots we can still fill without needing to rehash. // This is stored in the heap allocation before the control bytes. @@ -1057,6 +1031,7 @@ size_t growth_left() const { return growth_info().GetGrowthLeft(); } GrowthInfo& growth_info() { + ABSL_SWISSTABLE_ASSERT(!is_small()); return GetGrowthInfoFromControl(control()); } GrowthInfo growth_info() const { @@ -1064,16 +1039,26 @@ } bool has_infoz() const { return size_.has_infoz(); } - void set_has_infoz() { size_.set_has_infoz(); } + void set_has_infoz() { + ABSL_SWISSTABLE_ASSERT(size_.is_sampled_seed()); + size_.set_has_infoz(); + } + + HashtablezInfoHandle* infoz_ptr() const { + // growth_info is stored before control bytes. + ABSL_SWISSTABLE_ASSERT( + reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0); + ABSL_SWISSTABLE_ASSERT(has_infoz()); + return reinterpret_cast<HashtablezInfoHandle*>( + control() - ControlOffset(/*has_infoz=*/true)); + } HashtablezInfoHandle infoz() { - return has_infoz() - ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) - : HashtablezInfoHandle(); + return has_infoz() ? *infoz_ptr() : HashtablezInfoHandle(); } void set_infoz(HashtablezInfoHandle infoz) { ABSL_SWISSTABLE_ASSERT(has_infoz()); - *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz; + *infoz_ptr() = infoz; } bool should_rehash_for_bug_detection_on_insert() const { @@ -1084,11 +1069,11 @@ // will end up rehashing anyways. if (growth_left() == 0) return false; return CommonFieldsGenerationInfo:: - should_rehash_for_bug_detection_on_insert(seed(), capacity()); + should_rehash_for_bug_detection_on_insert(capacity()); } bool should_rehash_for_bug_detection_on_move() const { return CommonFieldsGenerationInfo::should_rehash_for_bug_detection_on_move( - seed(), capacity()); + capacity()); } void reset_reserved_growth(size_t reservation) { CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size()); @@ -1190,30 +1175,32 @@ constexpr size_t NormalizeCapacity(size_t n) { } // General notes on capacity/growth methods below: -// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an -// average of two empty slots per group. -// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. +// - We use 27/32 as the maximum load factor. For 16-wide groups, that gives an +// average of 2.5 empty slots per group. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we // never need to probe (the whole table fits in one group) so we don't need a // load factor less than 1. +// - For (capacity+1) == Group::kWidth, growth is capacity - 1 since we need +// at least one empty slot for the probing algorithm. +// - For (capacity+1) > Group::kWidth, growth is 27/32*capacity. // Given `capacity`, applies the load factor; i.e., it returns the maximum // number of values we should put into the table before a resizing rehash. constexpr size_t CapacityToGrowth(size_t capacity) { ABSL_SWISSTABLE_ASSERT(IsValidCapacity(capacity)); - // `capacity*7/8` + // `capacity*27/32` if (Group::kWidth == 8 && capacity == 7) { - // x-x/8 does not work when x==7. + // The formula does not work when capacity == 7.
return 6; } - return capacity - capacity / 8; + return capacity - capacity / 8 - capacity / 32; } // Given `size`, "unapplies" the load factor to find how large the capacity // should be to stay within the load factor. // // For size == 0, returns 0. -// For other values, returns the same as `NormalizeCapacity(size*8/7)`. +// For other values, returns the same as `NormalizeCapacity(size*32/27)`. constexpr size_t SizeToCapacity(size_t size) { if (size == 0) { return 0; @@ -1222,14 +1209,10 @@ constexpr size_t SizeToCapacity(size_t size) { // Shifting right `~size_t{}` by `leading_zeros` yields // NormalizeCapacity(size). int leading_zeros = absl::countl_zero(size); - constexpr size_t kLast3Bits = size_t{7} << (sizeof(size_t) * 8 - 3); - size_t max_size_for_next_capacity = kLast3Bits >> leading_zeros; + size_t next_capacity = ~size_t{} >> leading_zeros; + size_t max_size_for_next_capacity = CapacityToGrowth(next_capacity); // Decrease shift if size is too big for the minimum capacity. leading_zeros -= static_cast(size > max_size_for_next_capacity); - if constexpr (Group::kWidth == 8) { - // Formula doesn't work when size==7 for 8-wide groups. - leading_zeros -= (size == 7); - } return (~size_t{}) >> leading_zeros; } @@ -1267,7 +1250,7 @@ inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation, if (ABSL_PREDICT_FALSE(ctrl == nullptr)) { ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation); } - if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) { + if (ABSL_PREDICT_FALSE(ctrl == DefaultIterControl())) { ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.", operation); } @@ -1302,7 +1285,7 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl, const GenerationType* generation_ptr) { if (!SwisstableDebugEnabled()) return; const bool ctrl_is_valid_for_comparison = - ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl); + ctrl == nullptr || ctrl == DefaultIterControl() || IsFull(*ctrl); if (SwisstableGenerationsEnabled()) { if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) { ABSL_RAW_LOG(FATAL, @@ -1368,8 +1351,8 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, } }; - const bool a_is_default = ctrl_a == EmptyGroup(); - const bool b_is_default = ctrl_b == EmptyGroup(); + const bool a_is_default = ctrl_a == DefaultIterControl(); + const bool b_is_default = ctrl_b == DefaultIterControl(); if (a_is_default && b_is_default) return; fail_if(a_is_default != b_is_default, "Comparing default-constructed hashtable iterator with a " @@ -1377,13 +1360,6 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, if (SwisstableGenerationsEnabled()) { if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return; - // Users don't need to know whether the tables are SOO so don't mention SOO - // in the debug message. - const bool a_is_soo = IsSooControl(ctrl_a); - const bool b_is_soo = IsSooControl(ctrl_b); - fail_if(a_is_soo != b_is_soo || (a_is_soo && b_is_soo), - "Comparing iterators from different hashtables."); - const bool a_is_empty = IsEmptyGeneration(generation_ptr_a); const bool b_is_empty = IsEmptyGeneration(generation_ptr_b); fail_if(a_is_empty != b_is_empty, @@ -1419,26 +1395,16 @@ constexpr bool is_single_group(size_t capacity) { } // Begins a probing operation on `common.control`, using `hash`. 
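Before the probing hunks that follow, the 27/32 arithmetic above is worth checking numerically. The two subtracted terms are floored separately, so the result lands at or slightly above an exact 27/32 of capacity; a quick standalone check:

#include <cstddef>

constexpr size_t growth_sketch(size_t capacity) {
  return capacity - capacity / 8 - capacity / 32;
}

static_assert(growth_sketch(7) == 7);     // fits in one 16-wide group
static_assert(growth_sketch(15) == 14);   // capacity+1 == kWidth: one slot kept empty
static_assert(growth_sketch(63) == 55);
static_assert(growth_sketch(255) == 217); // exact 27/32 of 255 is ~215.2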
-inline probe_seq probe(size_t h1, size_t capacity) { +inline probe_seq probe_h1(size_t capacity, size_t h1) { return probe_seq(h1, capacity); } -inline probe_seq probe(PerTableSeed seed, size_t capacity, - size_t hash) { - return probe(H1(hash, seed), capacity); +inline probe_seq probe(size_t capacity, size_t hash) { + return probe_h1(capacity, H1(hash)); } inline probe_seq probe(const CommonFields& common, size_t hash) { - return probe(common.seed(), common.capacity(), hash); + return probe(common.capacity(), hash); } -// Probes an array of control bits using a probe sequence derived from `hash`, -// and returns the offset corresponding to the first deleted or empty slot. -// -// Behavior when the entire table is full is undefined. -// -// NOTE: this function must work with tables having both empty and deleted -// slots in the same group. Such tables appear during `erase()`. -FindInfo find_first_non_full(const CommonFields& common, size_t hash); - constexpr size_t kProbedElementIndexSentinel = ~size_t{}; // Implementation detail of transfer_unprobed_elements_to_next_capacity_fn. @@ -1502,79 +1468,11 @@ extern template size_t TryFindNewIndexWithoutProbing(size_t h1, ctrl_t* new_ctrl, size_t new_capacity); -// Sets sanitizer poisoning for slot corresponding to control byte being set. -inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h, - size_t slot_size) { - ABSL_SWISSTABLE_ASSERT(i < c.capacity()); - auto* slot_i = static_cast(c.slot_array()) + i * slot_size; - if (IsFull(h)) { - SanitizerUnpoisonMemoryRegion(slot_i, slot_size); - } else { - SanitizerPoisonMemoryRegion(slot_i, slot_size); - } -} - -// Sets `ctrl[i]` to `h`. -// -// Unlike setting it directly, this function will perform bounds checks and -// mirror the value to the cloned tail if necessary. -inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h, - size_t slot_size) { - DoSanitizeOnSetCtrl(c, i, h, slot_size); - ctrl_t* ctrl = c.control(); - ctrl[i] = h; - ctrl[((i - NumClonedBytes()) & c.capacity()) + - (NumClonedBytes() & c.capacity())] = h; -} -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. -inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) { - SetCtrl(c, i, static_cast(h), slot_size); -} - -// Like SetCtrl, but in a single group table, we can save some operations when -// setting the cloned control byte. -inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h, - size_t slot_size) { - ABSL_SWISSTABLE_ASSERT(is_single_group(c.capacity())); - DoSanitizeOnSetCtrl(c, i, h, slot_size); - ctrl_t* ctrl = c.control(); - ctrl[i] = h; - ctrl[i + c.capacity() + 1] = h; -} -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. -inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h, - size_t slot_size) { - SetCtrlInSingleGroupTable(c, i, static_cast(h), slot_size); -} - -// Like SetCtrl, but in a table with capacity >= Group::kWidth - 1, -// we can save some operations when setting the cloned control byte. -inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, ctrl_t h, - size_t slot_size) { - ABSL_SWISSTABLE_ASSERT(c.capacity() >= Group::kWidth - 1); - DoSanitizeOnSetCtrl(c, i, h, slot_size); - ctrl_t* ctrl = c.control(); - ctrl[i] = h; - ctrl[((i - NumClonedBytes()) & c.capacity()) + NumClonedBytes()] = h; -} -// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. 
-inline void SetCtrlInLargeTable(const CommonFields& c, size_t i, h2_t h, - size_t slot_size) { - SetCtrlInLargeTable(c, i, static_cast(h), slot_size); -} - // growth_info (which is a size_t) is stored with the backing array. constexpr size_t BackingArrayAlignment(size_t align_of_slot) { return (std::max)(align_of_slot, alignof(GrowthInfo)); } -// Returns the address of the ith slot in slots where each slot occupies -// slot_size. -inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) { - return static_cast(static_cast(slot_array) + - (slot * slot_size)); -} - // Iterates over all full slots and calls `cb(const ctrl_t*, void*)`. // No insertion to the table is allowed during `cb` call. // Erasure is allowed only for the element passed to the callback. @@ -1592,27 +1490,6 @@ constexpr bool ShouldSampleHashtablezInfoForAlloc() { return std::is_same_v>; } -template -bool ShouldSampleHashtablezInfoOnResize(bool force_sampling, - bool is_hashtablez_eligible, - size_t old_capacity, CommonFields& c) { - if (!is_hashtablez_eligible) return false; - // Force sampling is only allowed for SOO tables. - ABSL_SWISSTABLE_ASSERT(kSooEnabled || !force_sampling); - if (kSooEnabled && force_sampling) { - return true; - } - // In SOO, we sample on the first insertion so if this is an empty SOO case - // (e.g. when reserve is called), then we still need to sample. - if (kSooEnabled && old_capacity == SooCapacity() && c.empty()) { - return ShouldSampleNextTable(); - } - if (!kSooEnabled && old_capacity == 0) { - return ShouldSampleNextTable(); - } - return false; -} - // Allocates `n` bytes for a backing array. template ABSL_ATTRIBUTE_NOINLINE void* AllocateBackingArray(void* alloc, size_t n) { @@ -1647,7 +1524,7 @@ struct PolicyFunctions { void* (*hash_fn)(CommonFields& common); // Returns the hash of the pointed-to slot. - size_t (*hash_slot)(const void* hash_fn, void* slot); + HashSlotFn hash_slot; // Transfers the contents of `count` slots from src_slot to dst_slot. // We use ability to transfer several slots in single group table growth. @@ -1802,23 +1679,20 @@ constexpr size_t OptimalMemcpySizeForSooSlotTransfer( // Resizes SOO table to the NextCapacity(SooCapacity()) and prepares insert for // the given new_hash. Returns the offset of the new element. -// `soo_slot_ctrl` is the control byte of the SOO slot. -// If soo_slot_ctrl is kEmpty -// 1. The table must be empty. -// 2. Table will be forced to be sampled. // All possible template combinations are defined in cc file to improve // compilation time. template -size_t GrowSooTableToNextCapacityAndPrepareInsert(CommonFields& common, - const PolicyFunctions& policy, - size_t new_hash, - ctrl_t soo_slot_ctrl); - -// As `ResizeFullSooTableToNextCapacity`, except that we also force the SOO -// table to be sampled. SOO tables need to switch from SOO to heap in order to -// store the infoz. No-op if sampling is disabled or not possible. -void GrowFullSooTableToNextCapacityForceSampling(CommonFields& common, - const PolicyFunctions& policy); +size_t GrowSooTableToNextCapacityAndPrepareInsert( + CommonFields& common, const PolicyFunctions& policy, + absl::FunctionRef get_hash, bool force_sampling); + +// PrepareInsert for small tables (is_small()==true). +// Returns the new control and the new slot. +// Hash is only computed if the table is sampled or grew to large size +// (is_small()==false). 
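The absl::FunctionRef<size_t(size_t)> parameters above are the lazy-hashing contract: a small table locates its single element without hashing, so the key's hash is computed only when the insert grows the table or the table is sampled. A sketch of that contract (the function and flag names here are illustrative, not Abseil's):

#include <cstddef>
#include "absl/functional/function_ref.h"

size_t small_insert_sketch(bool grows_or_sampled,
                           absl::FunctionRef<size_t(size_t)> get_hash,
                           size_t seed) {
  if (!grows_or_sampled) return 0;  // single-slot placement: no hash needed
  return get_hash(seed);            // computed on demand, seeded per table
}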
+std::pair PrepareInsertSmallNonSoo( + CommonFields& common, const PolicyFunctions& policy, + absl::FunctionRef get_hash); // Resizes table with allocated slots and change the table seed. // Tables with SOO enabled must have capacity > policy.soo_capacity. @@ -1827,19 +1701,15 @@ void ResizeAllocatedTableWithSeedChange(CommonFields& common, const PolicyFunctions& policy, size_t new_capacity); -inline void PrepareInsertCommon(CommonFields& common) { - common.increment_size(); - common.maybe_increment_generation_on_insert(); -} - // ClearBackingArray clears the backing array, either modifying it in place, // or creating a new one based on the value of "reuse". // REQUIRES: c.capacity > 0 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy, void* alloc, bool reuse, bool soo_enabled); -// Type-erased version of raw_hash_set::erase_meta_only. -void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size); +// Type-erased versions of raw_hash_set::erase_meta_only_{small,large}. +void EraseMetaOnlySmall(CommonFields& c, bool soo_enabled, size_t slot_size); +void EraseMetaOnlyLarge(CommonFields& c, const ctrl_t* ctrl, size_t slot_size); // For trivially relocatable types we use memcpy directly. This allows us to // share the same function body for raw_hash_set instantiations that have the @@ -1870,11 +1740,17 @@ void* GetRefForEmptyClass(CommonFields& common); // When the table has deleted slots (according to GrowthInfo), the target // position will be searched one more time using `find_first_non_full`. // -// REQUIRES: Table is not SOO. +// REQUIRES: `!common.is_small()`. // REQUIRES: At least one non-full slot available. // REQUIRES: `target` is a valid empty position to insert. -size_t PrepareInsertNonSoo(CommonFields& common, const PolicyFunctions& policy, - size_t hash, FindInfo target); +size_t PrepareInsertLarge(CommonFields& common, const PolicyFunctions& policy, + size_t hash, FindInfo target); + +// Same as above, but with generations enabled, we may end up changing the seed, +// which means we need to be able to recompute the hash. +size_t PrepareInsertLargeGenerationsEnabled( + CommonFields& common, const PolicyFunctions& policy, size_t hash, + FindInfo target, absl::FunctionRef recompute_hash); // A SwissTable. // @@ -1929,6 +1805,10 @@ class raw_hash_set { using slot_type = typename PolicyTraits::slot_type; + constexpr static bool kIsDefaultHash = + std::is_same_v> || + std::is_same_v; + // TODO(b/289225379): we could add extra SOO space inside raw_hash_set // after CommonFields to allow inlining larger slot_types (e.g. std::string), // but it's a bit complicated if we want to support incomplete mapped_type in @@ -1953,10 +1833,19 @@ class raw_hash_set { bool is_soo() const { return fits_in_soo(capacity()); } bool is_full_soo() const { return is_soo() && !empty(); } + bool is_small() const { return common().is_small(); } + // Give an early error when key_type is not hashable/eq. auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); + // Try to be helpful when the hasher returns an unreasonable type. 
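Concretely, the check that follows exists to reject hashers that silently truncate: an operator() returning something narrower than size_t throws away entropy before the table ever sees it. A hypothetical offender:

#include <cstdint>
#include <string>

struct TruncatingHash {
  uint8_t operator()(const std::string& s) const {  // too narrow
    return static_cast<uint8_t>(s.size());
  }
};
// Used as the Hash parameter, TruncatingHash now trips the
// "`Hash::operator()` should return a `size_t`" static_assert.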
+ using key_hash_result = + absl::remove_cvref_t()( + std::declval()))>; + static_assert(sizeof(key_hash_result) >= sizeof(size_t), + "`Hash::operator()` should return a `size_t`"); + using AllocTraits = absl::allocator_traits; using SlotAlloc = typename absl::allocator_traits< allocator_type>::template rebind_alloc; @@ -2023,19 +1912,19 @@ class raw_hash_set { // PRECONDITION: not an end() iterator. reference operator*() const { - AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()"); + assert_is_full("operator*()"); return unchecked_deref(); } // PRECONDITION: not an end() iterator. pointer operator->() const { - AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->"); + assert_is_full("operator->"); return &operator*(); } // PRECONDITION: not an end() iterator. iterator& operator++() { - AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++"); + assert_is_full("operator++"); ++ctrl_; ++slot_; skip_empty_or_deleted(); @@ -2073,7 +1962,7 @@ class raw_hash_set { // This constructor is used in begin() to avoid an MSan // use-of-uninitialized-value error. Delegating from this constructor to // the previous one doesn't avoid the error. - iterator(ctrl_t* ctrl, MaybeInitializedPtr slot, + iterator(ctrl_t* ctrl, MaybeInitializedPtr slot, const GenerationType* generation_ptr) : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(ctrl), @@ -2086,38 +1975,42 @@ class raw_hash_set { explicit iterator(const GenerationType* generation_ptr) : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {} + void assert_is_full(const char* operation) const { + AssertIsFull(ctrl_, generation(), generation_ptr(), operation); + } + // Fixes up `ctrl_` to point to a full or sentinel by advancing `ctrl_` and // `slot_` until they reach one. void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { - uint32_t shift = - GroupFullEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted(); - ctrl_ += shift; - slot_ += shift; + ++ctrl_; + ++slot_; } } - ctrl_t* control() const { return ctrl_; } - slot_type* slot() const { return slot_; } - - // We use EmptyGroup() for default-constructed iterators so that they can - // be distinguished from end iterators, which have nullptr ctrl_. - ctrl_t* ctrl_ = EmptyGroup(); - // To avoid uninitialized member warnings, put slot_ in an anonymous union. - // The member is not initialized on singleton and end iterators. - union { - slot_type* slot_; - }; - // An equality check which skips ABSL Hardening iterator invalidation // checks. // Should be used when the lifetimes of the iterators are well-enough // understood to prove that they cannot be invalid. - bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); } + bool unchecked_equals(const iterator& b) const { + return ctrl_ == b.control(); + } // Dereferences the iterator without ABSL Hardening iterator invalidation // checks. reference unchecked_deref() const { return PolicyTraits::element(slot_); } + + ctrl_t* control() const { return ctrl_; } + slot_type* slot() const { return slot_; } + + // We use DefaultIterControl() for default-constructed iterators so that + // they can be distinguished from end iterators, which have nullptr ctrl_. + ctrl_t* ctrl_ = DefaultIterControl(); + // To avoid uninitialized member warnings, put slot_ in an anonymous union. + // The member is not initialized on singleton and end iterators. 
+ union { + slot_type* slot_; + }; }; class const_iterator { @@ -2158,14 +2051,13 @@ class raw_hash_set { const GenerationType* gen) : inner_(const_cast(ctrl), const_cast(slot), gen) { } + bool unchecked_equals(const const_iterator& b) const { + return inner_.unchecked_equals(b.inner_); + } ctrl_t* control() const { return inner_.control(); } slot_type* slot() const { return inner_.slot(); } iterator inner_; - - bool unchecked_equals(const const_iterator& b) { - return inner_.unchecked_equals(b.inner_); - } }; using node_type = node_handle, Alloc>; @@ -2350,7 +2242,7 @@ class raw_hash_set { } raw_hash_set& operator=(raw_hash_set&& that) noexcept( - absl::allocator_traits::is_always_equal::value && + AllocTraits::is_always_equal::value && std::is_nothrow_move_assignable::value && std::is_nothrow_move_assignable::value) { // TODO(sbenza): We should only use the operations from the noexcept clause @@ -2370,7 +2262,7 @@ class raw_hash_set { iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { if (ABSL_PREDICT_FALSE(empty())) return end(); - if (capacity() == 1) return single_iterator(); + if (is_small()) return single_iterator(); iterator it = {control(), common().slots_union(), common().generation_ptr()}; it.skip_empty_or_deleted(); @@ -2401,7 +2293,7 @@ class raw_hash_set { size_t capacity() const { const size_t cap = common().capacity(); // Compiler complains when using functions in ASSUME so use local variable. - ABSL_ATTRIBUTE_UNUSED static constexpr size_t kDefaultCapacity = + [[maybe_unused]] static constexpr size_t kDefaultCapacity = DefaultCapacity(); ABSL_ASSUME(cap >= kDefaultCapacity); return cap; @@ -2424,9 +2316,11 @@ class raw_hash_set { const size_t cap = capacity(); if (cap == 0) { // Already guaranteed to be empty; so nothing to do. - } else if (is_soo()) { - if (!empty()) destroy(soo_slot()); - common().set_empty_soo(); + } else if (is_small()) { + if (!empty()) { + destroy(single_slot()); + decrement_small_size(); + } } else { destroy_slots(); clear_backing_array(/*reuse=*/cap < 128); @@ -2496,13 +2390,13 @@ class raw_hash_set { // s.insert({"abc", 42}); std::pair insert(init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND -#if __cplusplus >= 202002L +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L requires(!IsLifetimeBoundAssignmentFrom::value) #endif { return emplace(std::move(value)); } -#if __cplusplus >= 202002L +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L std::pair insert( init_type&& value ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) ABSL_ATTRIBUTE_LIFETIME_BOUND @@ -2702,14 +2596,11 @@ class raw_hash_set { // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. void erase(iterator it) { + ABSL_SWISSTABLE_ASSERT(capacity() > 0); AssertNotDebugCapacity(); - AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()"); + it.assert_is_full("erase()"); destroy(it.slot()); - if (is_soo()) { - common().set_empty_soo(); - } else { - erase_meta_only(it); - } + erase_meta_only(it); } iterator erase(const_iterator first, @@ -2719,9 +2610,9 @@ class raw_hash_set { // capacity() > 0 as a precondition. 
if (empty()) return end(); if (first == last) return last.inner_; - if (is_soo()) { - destroy(soo_slot()); - common().set_empty_soo(); + if (is_small()) { + destroy(single_slot()); + erase_meta_only_small(); return end(); } if (first == begin() && last == end()) { @@ -2753,14 +2644,15 @@ class raw_hash_set { .second; }; - if (src.is_soo()) { + if (src.is_small()) { if (src.empty()) return; - if (insert_slot(src.soo_slot())) src.common().set_empty_soo(); + if (insert_slot(src.single_slot())) + src.erase_meta_only_small(); return; } for (auto it = src.begin(), e = src.end(); it != e;) { auto next = std::next(it); - if (insert_slot(it.slot())) src.erase_meta_only(it); + if (insert_slot(it.slot())) src.erase_meta_only_large(it); it = next; } } @@ -2772,15 +2664,10 @@ class raw_hash_set { node_type extract(const_iterator position) { AssertNotDebugCapacity(); - AssertIsFull(position.control(), position.inner_.generation(), - position.inner_.generation_ptr(), "extract()"); + position.inner_.assert_is_full("extract()"); allocator_type alloc(char_alloc_ref()); auto node = CommonAccess::Transfer(alloc, position.slot()); - if (is_soo()) { - common().set_empty_soo(); - } else { - erase_meta_only(position); - } + erase_meta_only(position); return node; } @@ -2792,9 +2679,9 @@ class raw_hash_set { } void swap(raw_hash_set& that) noexcept( - IsNoThrowSwappable() && IsNoThrowSwappable() && - IsNoThrowSwappable( - typename AllocTraits::propagate_on_container_swap{})) { + AllocTraits::is_always_equal::value && + std::is_nothrow_swappable::value && + std::is_nothrow_swappable::value) { AssertNotDebugCapacity(); that.AssertNotDebugCapacity(); using std::swap; @@ -2833,12 +2720,12 @@ class raw_hash_set { // NOTE: This is a very low level operation and should not be used without // specific benchmarks indicating its importance. template - void prefetch(const key_arg& key) const { + void prefetch([[maybe_unused]] const key_arg& key) const { if (capacity() == DefaultCapacity()) return; - (void)key; // Avoid probing if we won't be able to prefetch the addresses received. #ifdef ABSL_HAVE_PREFETCH prefetch_heap_block(); + if (is_small()) return; auto seq = probe(common(), hash_of(key)); PrefetchToLocalCache(control() + seq.offset()); PrefetchToLocalCache(slot_array() + seq.offset()); @@ -2856,7 +2743,7 @@ class raw_hash_set { template iterator find(const key_arg& key) ABSL_ATTRIBUTE_LIFETIME_BOUND { AssertOnFind(key); - if (capacity() <= 1) return find_small(key); + if (is_small()) return find_small(key); prefetch_heap_block(); return find_large(key, hash_of(key)); } @@ -2965,24 +2852,6 @@ class raw_hash_set { const raw_hash_set& s; }; - struct HashElement { - template - size_t operator()(const K& key, Args&&...) const { - return h(key); - } - const hasher& h; - }; - - template - struct EqualElement { - template - bool operator()(const K2& lhs, Args&&...) const { - ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN(eq(lhs, rhs)); - } - const K1& rhs; - const key_equal& eq; - }; - struct EmplaceDecomposable { template std::pair operator()(const K& key, Args&&... args) const { @@ -3036,18 +2905,13 @@ class raw_hash_set { // SOO functionality. template iterator find_small(const key_arg& key) { - ABSL_SWISSTABLE_ASSERT(capacity() <= 1); - return empty() || !PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(single_slot())) - ? end() - : single_iterator(); + ABSL_SWISSTABLE_ASSERT(is_small()); + return empty() || !equal_to(key, single_slot()) ? 
end() : single_iterator(); } template iterator find_large(const key_arg& key, size_t hash) { - ABSL_SWISSTABLE_ASSERT(capacity() > 1); - ABSL_SWISSTABLE_ASSERT(!is_soo()); + ABSL_SWISSTABLE_ASSERT(!is_small()); auto seq = probe(common(), hash); const h2_t h2 = H2(hash); const ctrl_t* ctrl = control(); @@ -3057,9 +2921,7 @@ class raw_hash_set { #endif Group g{ctrl + seq.offset()}; for (uint32_t i : g.Match(h2)) { - if (ABSL_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slot_array() + seq.offset(i))))) + if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i)))) return iterator_at(seq.offset(i)); } if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); @@ -3084,7 +2946,7 @@ class raw_hash_set { } void destroy_slots() { - ABSL_SWISSTABLE_ASSERT(!is_soo()); + ABSL_SWISSTABLE_ASSERT(!is_small()); if (PolicyTraits::template destroy_is_trivial()) return; auto destroy_slot = [&](const ctrl_t*, void* slot) { this->destroy(static_cast(slot)); @@ -3116,13 +2978,14 @@ class raw_hash_set { return; } if (capacity() == 0) return; - if (is_soo()) { + if (is_small()) { if (!empty()) { - ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(soo_slot())); + ABSL_SWISSTABLE_IGNORE_UNINITIALIZED(destroy(single_slot())); } - return; + if constexpr (SooEnabled()) return; + } else { + destroy_slots(); } - destroy_slots(); dealloc(); } @@ -3131,23 +2994,41 @@ class raw_hash_set { // This merely updates the pertinent control byte. This can be used in // conjunction with Policy::transfer to move the object to another place. void erase_meta_only(const_iterator it) { - ABSL_SWISSTABLE_ASSERT(!is_soo()); - EraseMetaOnly(common(), static_cast(it.control() - control()), - sizeof(slot_type)); + if (is_small()) { + erase_meta_only_small(); + return; + } + erase_meta_only_large(it); + } + void erase_meta_only_small() { + EraseMetaOnlySmall(common(), SooEnabled(), sizeof(slot_type)); + } + void erase_meta_only_large(const_iterator it) { + EraseMetaOnlyLarge(common(), it.control(), sizeof(slot_type)); } template - size_t hash_of(const K& key) const { - return hash_ref()(key); - } - size_t hash_of(slot_type* slot) const { - return PolicyTraits::apply(HashElement{hash_ref()}, + ABSL_ATTRIBUTE_ALWAYS_INLINE bool equal_to(const K& key, + slot_type* slot) const { + return PolicyTraits::apply(EqualElement{key, eq_ref()}, PolicyTraits::element(slot)); } + template + ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(const K& key) const { + return HashElement{hash_ref(), + common().seed().seed()}(key); + } + ABSL_ATTRIBUTE_ALWAYS_INLINE size_t hash_of(slot_type* slot) const { + return PolicyTraits::apply( + HashElement{hash_ref(), common().seed().seed()}, + PolicyTraits::element(slot)); + } // Casting directly from e.g. char* to slot_type* can cause compilation errors // on objective-C. This function converts to void* first, avoiding the issue. - static slot_type* to_slot(void* buf) { return static_cast(buf); } + static ABSL_ATTRIBUTE_ALWAYS_INLINE slot_type* to_slot(void* buf) { + return static_cast(buf); + } // Requires that lhs does not have a full SOO slot. 
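The find_small rewrite a few hunks above makes the is_small() contract plain: at most one element exists, so lookup needs neither a hash nor a probe, only one equality test against the single slot. The behavior in miniature (a hypothetical standalone analogue, not the real container):

struct TinyTableSketch {
  bool full = false;
  int slot = 0;
  int* find(int key) {  // no hashing, no probing: one comparison
    return full && slot == key ? &slot : nullptr;
  }
};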
static void move_common(bool rhs_is_full_soo, CharAlloc& rhs_alloc, @@ -3183,8 +3064,7 @@ class raw_hash_set { std::move(tmp)); } - void annotate_for_bug_detection_on_move( - ABSL_ATTRIBUTE_UNUSED raw_hash_set& that) { + void annotate_for_bug_detection_on_move([[maybe_unused]] raw_hash_set& that) { // We only enable moved-from validation when generations are enabled (rather // than using NDEBUG) to avoid issues in which NDEBUG is enabled in some // translation units but not in others. @@ -3255,64 +3135,96 @@ class raw_hash_set { template std::pair find_or_prepare_insert_soo(const K& key) { - ctrl_t soo_slot_ctrl; + ABSL_SWISSTABLE_ASSERT(is_soo()); + bool force_sampling; if (empty()) { if (!should_sample_soo()) { common().set_full_soo(); - return {soo_iterator(), true}; + return {single_iterator(), true}; } - soo_slot_ctrl = ctrl_t::kEmpty; - } else if (PolicyTraits::apply(EqualElement{key, eq_ref()}, - PolicyTraits::element(soo_slot()))) { - return {soo_iterator(), false}; + force_sampling = true; + } else if (equal_to(key, single_slot())) { + return {single_iterator(), false}; } else { - soo_slot_ctrl = static_cast(H2(hash_of(soo_slot()))); + force_sampling = false; } + ABSL_SWISSTABLE_ASSERT(capacity() == 1); constexpr bool kUseMemcpy = PolicyTraits::transfer_uses_memcpy() && SooEnabled(); size_t index = GrowSooTableToNextCapacityAndPrepareInsert< kUseMemcpy ? OptimalMemcpySizeForSooSlotTransfer(sizeof(slot_type)) : 0, - kUseMemcpy>(common(), GetPolicyFunctions(), hash_of(key), - soo_slot_ctrl); + kUseMemcpy>(common(), GetPolicyFunctions(), + HashKey{hash_ref(), key}, + force_sampling); return {iterator_at(index), true}; } template - std::pair find_or_prepare_insert_non_soo(const K& key) { + std::pair find_or_prepare_insert_small(const K& key) { + ABSL_SWISSTABLE_ASSERT(is_small()); + if constexpr (SooEnabled()) { + return find_or_prepare_insert_soo(key); + } + if (!empty()) { + if (equal_to(key, single_slot())) { + return {single_iterator(), false}; + } + } + return {iterator_at_ptr(PrepareInsertSmallNonSoo( + common(), GetPolicyFunctions(), + HashKey{hash_ref(), key})), + true}; + } + + template + std::pair find_or_prepare_insert_large(const K& key) { ABSL_SWISSTABLE_ASSERT(!is_soo()); prefetch_heap_block(); const size_t hash = hash_of(key); auto seq = probe(common(), hash); const h2_t h2 = H2(hash); const ctrl_t* ctrl = control(); - while (true) { + size_t index; + bool inserted; + // We use a lambda function to be able to exit from the nested loop without + // duplicating generated code for the return statement (e.g. iterator_at). 
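That lambda trick, isolated: an immediately invoked lambda turns return into a two-level break, so the nested probe loops keep several exits while the function's real return expression is generated exactly once, after the call. A sketch with a stand-in predicate:

bool matches(int group, int index);  // stand-in for the Group::Match logic

int find_index_sketch() {
  int index = -1;
  [&] {
    for (int g = 0; g < 4; ++g) {
      for (int i = 0; i < 16; ++i) {
        if (matches(g, i)) {
          index = g * 16 + i;
          return;  // exits both loops at once
        }
      }
    }
  }();
  return index;  // the single epilogue
}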
+ [&]() ABSL_ATTRIBUTE_ALWAYS_INLINE { + while (true) { #ifndef ABSL_HAVE_MEMORY_SANITIZER - absl::PrefetchToLocalCache(slot_array() + seq.offset()); + absl::PrefetchToLocalCache(slot_array() + seq.offset()); #endif - Group g{ctrl + seq.offset()}; - for (uint32_t i : g.Match(h2)) { - if (ABSL_PREDICT_TRUE(PolicyTraits::apply( - EqualElement{key, eq_ref()}, - PolicyTraits::element(slot_array() + seq.offset(i))))) - return {iterator_at(seq.offset(i)), false}; - } - auto mask_empty = g.MaskEmpty(); - if (ABSL_PREDICT_TRUE(mask_empty)) { - size_t target = seq.offset(mask_empty.LowestBitSet()); - return {iterator_at(PrepareInsertNonSoo(common(), GetPolicyFunctions(), - hash, - FindInfo{target, seq.index()})), - true}; + Group g{ctrl + seq.offset()}; + for (uint32_t i : g.Match(h2)) { + if (ABSL_PREDICT_TRUE(equal_to(key, slot_array() + seq.offset(i)))) { + index = seq.offset(i); + inserted = false; + return; + } + } + auto mask_empty = g.MaskEmpty(); + if (ABSL_PREDICT_TRUE(mask_empty)) { + size_t target = seq.offset(mask_empty.LowestBitSet()); + index = SwisstableGenerationsEnabled() + ? PrepareInsertLargeGenerationsEnabled( + common(), GetPolicyFunctions(), hash, + FindInfo{target, seq.index()}, + HashKey{hash_ref(), key}) + : PrepareInsertLarge(common(), GetPolicyFunctions(), hash, + FindInfo{target, seq.index()}); + inserted = true; + return; + } + seq.next(); + ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!"); } - seq.next(); - ABSL_SWISSTABLE_ASSERT(seq.index() <= capacity() && "full table!"); - } + }(); + return {iterator_at(index), inserted}; } protected: // Asserts for correctness that we run on find/find_or_prepare_insert. template - void AssertOnFind(ABSL_ATTRIBUTE_UNUSED const K& key) { + void AssertOnFind([[maybe_unused]] const K& key) { AssertHashEqConsistent(key); AssertNotDebugCapacity(); } @@ -3368,23 +3280,18 @@ class raw_hash_set { const size_t hash_of_arg = hash_of(key); const auto assert_consistent = [&](const ctrl_t*, void* slot) { - const value_type& element = - PolicyTraits::element(static_cast(slot)); - const bool is_key_equal = - PolicyTraits::apply(EqualElement{key, eq_ref()}, element); + const bool is_key_equal = equal_to(key, to_slot(slot)); if (!is_key_equal) return; - const size_t hash_of_slot = - PolicyTraits::apply(HashElement{hash_ref()}, element); - ABSL_ATTRIBUTE_UNUSED const bool is_hash_equal = - hash_of_arg == hash_of_slot; + [[maybe_unused]] const bool is_hash_equal = + hash_of_arg == hash_of(to_slot(slot)); assert((!is_key_equal || is_hash_equal) && "eq(k1, k2) must imply that hash(k1) == hash(k2). " "hash/eq functors are inconsistent."); }; - if (is_soo()) { - assert_consistent(/*unused*/ nullptr, soo_slot()); + if (is_small()) { + assert_consistent(/*unused*/ nullptr, single_slot()); return; } // We only do validation for small tables so that it's constant time. @@ -3398,8 +3305,8 @@ class raw_hash_set { template std::pair find_or_prepare_insert(const K& key) { AssertOnFind(key); - if (is_soo()) return find_or_prepare_insert_soo(key); - return find_or_prepare_insert_non_soo(key); + if (is_small()) return find_or_prepare_insert_small(key); + return find_or_prepare_insert_large(key); } // Constructs the value in the space pointed by the iterator. This only works @@ -3414,9 +3321,9 @@ class raw_hash_set { void emplace_at(iterator iter, Args&&... 
args) { construct(iter.slot(), std::forward(args)...); - // When capacity is 1, find calls find_small and if size is 0, then it will + // When is_small, find calls find_small and if size is 0, then it will // return an end iterator. This can happen in the raw_hash_set copy ctor. - assert((capacity() == 1 || + assert((is_small() || PolicyTraits::apply(FindElement{*this}, *iter) == iter) && "constructed value does not match the lookup key"); } @@ -3427,6 +3334,10 @@ class raw_hash_set { const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND { return const_cast(this)->iterator_at(i); } + iterator iterator_at_ptr(std::pair ptrs) + ABSL_ATTRIBUTE_LIFETIME_BOUND { + return {ptrs.first, to_slot(ptrs.second), common().generation_ptr()}; + } reference unchecked_deref(iterator it) { return it.unchecked_deref(); } @@ -3444,16 +3355,13 @@ class raw_hash_set { // // See `CapacityToGrowth()`. size_t growth_left() const { - ABSL_SWISSTABLE_ASSERT(!is_soo()); return common().growth_left(); } GrowthInfo& growth_info() { - ABSL_SWISSTABLE_ASSERT(!is_soo()); return common().growth_info(); } GrowthInfo growth_info() const { - ABSL_SWISSTABLE_ASSERT(!is_soo()); return common().growth_info(); } @@ -3487,22 +3395,22 @@ class raw_hash_set { ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN( const_cast(this)->soo_slot()); } - iterator soo_iterator() { - return {SooControl(), soo_slot(), common().generation_ptr()}; - } - const_iterator soo_iterator() const { - return const_cast(this)->soo_iterator(); - } slot_type* single_slot() { - ABSL_SWISSTABLE_ASSERT(capacity() <= 1); + ABSL_SWISSTABLE_ASSERT(is_small()); return SooEnabled() ? soo_slot() : slot_array(); } const slot_type* single_slot() const { return const_cast(this)->single_slot(); } + void decrement_small_size() { + ABSL_SWISSTABLE_ASSERT(is_small()); + SooEnabled() ? common().set_empty_soo() : common().decrement_size(); + if (!SooEnabled()) { + SanitizerPoisonObject(single_slot()); + } + } iterator single_iterator() { - return {SooEnabled() ? SooControl() : control(), single_slot(), - common().generation_ptr()}; + return {SooControl(), single_slot(), common().generation_ptr()}; } const_iterator single_iterator() const { return const_cast(this)->single_iterator(); @@ -3559,8 +3467,6 @@ class raw_hash_set { ctrl_t* new_ctrl = common.control(); slot_type* new_slots = set->slot_array(); - const PerTableSeed seed = common.seed(); - for (size_t group_index = 0; group_index < old_capacity; group_index += Group::kWidth) { GroupFullEmptyOrDeleted old_g(old_ctrl + group_index); @@ -3576,7 +3482,7 @@ class raw_hash_set { // TODO(b/382423690): try to avoid entire hash calculation since we need // only one new bit of h1. size_t hash = set->hash_of(old_slot); - size_t h1 = H1(hash, seed); + size_t h1 = H1(hash); h2_t h2 = H2(hash); size_t new_index = TryFindNewIndexWithoutProbing( h1, old_index, old_capacity, new_ctrl, new_capacity); @@ -3612,14 +3518,14 @@ class raw_hash_set { static constexpr PolicyFunctions value = { static_cast(sizeof(key_type)), static_cast(sizeof(value_type)), - static_cast(sizeof(slot_type)), + static_cast(sizeof(slot_type)), static_cast(alignof(slot_type)), SooEnabled(), ShouldSampleHashtablezInfoForAlloc(), // TODO(b/328722020): try to type erase // for standard layout and alignof(Hash) <= alignof(CommonFields). std::is_empty_v ? &GetRefForEmptyClass : &raw_hash_set::get_hash_ref_fn, - PolicyTraits::template get_hash_slot_fn(), + PolicyTraits::template get_hash_slot_fn(), PolicyTraits::transfer_uses_memcpy() ? 
TransferNRelocatable : &raw_hash_set::transfer_n_slots_fn, @@ -3647,18 +3553,18 @@ struct HashtableFreeFunctionsAccess { if (c->empty()) { return 0; } - if (c->is_soo()) { - auto it = c->soo_iterator(); + if (c->is_small()) { + auto it = c->single_iterator(); if (!pred(*it)) { ABSL_SWISSTABLE_ASSERT(c->size() == 1 && "hash table was modified unexpectedly"); return 0; } c->destroy(it.slot()); - c->common().set_empty_soo(); + c->erase_meta_only_small(); return 1; } - ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size(); + [[maybe_unused]] const size_t original_size_for_assert = c->size(); size_t num_deleted = 0; using SlotType = typename Set::slot_type; IterateOverFullSlots( @@ -3667,8 +3573,7 @@ struct HashtableFreeFunctionsAccess { auto* slot = static_cast(slot_void); if (pred(Set::PolicyTraits::element(slot))) { c->destroy(slot); - EraseMetaOnly(c->common(), static_cast(ctrl - c->control()), - sizeof(*slot)); + EraseMetaOnlyLarge(c->common(), ctrl, sizeof(*slot)); ++num_deleted; } }); @@ -3685,8 +3590,8 @@ struct HashtableFreeFunctionsAccess { if (c->empty()) { return; } - if (c->is_soo()) { - cb(*c->soo_iterator()); + if (c->is_small()) { + cb(*c->single_iterator()); return; } using SlotType = typename Set::slot_type; @@ -3723,9 +3628,11 @@ struct HashtableDebugAccess> { using Traits = typename Set::PolicyTraits; using Slot = typename Traits::slot_type; + constexpr static bool kIsDefaultHash = Set::kIsDefaultHash; + static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) { - if (set.is_soo()) return 0; + if (set.is_small()) return 0; size_t num_probes = 0; const size_t hash = set.hash_of(key); auto seq = probe(set.common(), hash); @@ -3734,10 +3641,7 @@ struct HashtableDebugAccess> { while (true) { container_internal::Group g{ctrl + seq.offset()}; for (uint32_t i : g.Match(h2)) { - if (Traits::apply( - typename Set::template EqualElement{ - key, set.eq_ref()}, - Traits::element(set.slot_array() + seq.offset(i)))) + if (set.equal_to(key, set.slot_array() + seq.offset(i))) return num_probes; ++num_probes; } @@ -3770,16 +3674,21 @@ struct HashtableDebugAccess> { // Extern template instantiations reduce binary size and linker input size. // Function definition is in raw_hash_set.cc. 
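The extern-template pattern referenced above, reduced to a skeleton: the header declares the instantiations and promises they exist elsewhere, exactly one translation unit provides them, and every other includer links against that single copy instead of re-instantiating (names here are hypothetical):

// in a header
template <int N> size_t grow_sketch(int& arg);
extern template size_t grow_sketch<1>(int&);

// in exactly one .cc file
template <int N> size_t grow_sketch(int& arg) { return arg + N; }
template size_t grow_sketch<1>(int&);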
extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<0, false>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<1, true>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<4, true>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<8, true>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); #if UINTPTR_MAX == UINT64_MAX extern template size_t GrowSooTableToNextCapacityAndPrepareInsert<16, true>( - CommonFields&, const PolicyFunctions&, size_t, ctrl_t); + CommonFields&, const PolicyFunctions&, absl::FunctionRef, + bool); #endif } // namespace container_internal diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc index 7e7a5063d59..b268d9e366c 100644 --- a/absl/container/internal/raw_hash_set_allocator_test.cc +++ b/absl/container/internal/raw_hash_set_allocator_test.cc @@ -180,7 +180,7 @@ struct Policy { static slot_type& element(slot_type* slot) { return *slot; } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -436,13 +436,14 @@ TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { } TEST_F(PropagateOnAll, Swap) { - auto it = t1.insert(0).first; + t1.insert(0); Table u(0, a2); u.swap(t1); EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(a2, t1.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, a2.num_allocs()); + auto it = u.begin(); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc index ac948779412..f3e32fd2ee8 100644 --- a/absl/container/internal/raw_hash_set_benchmark.cc +++ b/absl/container/internal/raw_hash_set_benchmark.cc @@ -64,7 +64,7 @@ struct IntPolicy { return std::forward(f)(x, x); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -127,7 +127,7 @@ class StringPolicy { PairArgs(std::forward(args)...)); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -519,17 +519,6 @@ void BM_Group_MaskNonFull(benchmark::State& state) { } BENCHMARK(BM_Group_MaskNonFull); -void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { - std::array group; - Iota(group.begin(), group.end(), -2); - Group g{group.data()}; - for (auto _ : state) { - ::benchmark::DoNotOptimize(g); - ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); - } -} -BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); - void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -2); diff --git a/absl/container/internal/raw_hash_set_probe_benchmark.cc b/absl/container/internal/raw_hash_set_probe_benchmark.cc index e56648f3bf9..458038e0b34 100644 --- a/absl/container/internal/raw_hash_set_probe_benchmark.cc +++ b/absl/container/internal/raw_hash_set_probe_benchmark.cc @@ -71,7 +71,7 @@ struct Policy { return std::forward(f)(arg, arg); } - template + template static constexpr auto get_hash_slot_fn() { return 
nullptr; } diff --git a/absl/container/internal/raw_hash_set_resize_impl.h b/absl/container/internal/raw_hash_set_resize_impl.h index 149d9e825e5..ed48d96bc67 100644 --- a/absl/container/internal/raw_hash_set_resize_impl.h +++ b/absl/container/internal/raw_hash_set_resize_impl.h @@ -52,7 +52,6 @@ struct ProbedItemImpl { static constexpr IntType kMaxNewBits = kMaxOldBits + 1; static constexpr IntType kMaxNewCapacity = (IntType{1} << kMaxNewBits) - 1; - static constexpr IntType kH2Shift = (kTotalBits - kH2Bits); static_assert(kMaxNewBits + kMaxOldBits + kH2Bits == kTotalBits); ProbedItemImpl() = default; diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index 9a323c41bec..d411709615c 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc @@ -54,8 +54,6 @@ #include "absl/container/internal/hash_function_defaults.h" #include "absl/container/internal/hash_policy_testing.h" #include "absl/random/random.h" -// TODO(b/382423690): Separate tests that depend only on -// hashtable_control_bytes. #include "absl/container/internal/hashtable_control_bytes.h" #include "absl/container/internal/hashtable_debug.h" #include "absl/container/internal/hashtablez_sampler.h" @@ -96,7 +94,6 @@ struct RawHashSetTestOnlyAccess { namespace { using ::testing::ElementsAre; -using ::testing::ElementsAreArray; using ::testing::Eq; using ::testing::Ge; using ::testing::Lt; @@ -279,28 +276,53 @@ TEST(Util, NormalizeCapacity) { EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); } -TEST(Util, GrowthAndCapacity) { - // Verify that GrowthToCapacity gives the minimum capacity that has enough - // growth. +TEST(Util, SizeToCapacitySmallValues) { EXPECT_EQ(SizeToCapacity(0), 0); EXPECT_EQ(SizeToCapacity(1), 1); EXPECT_EQ(SizeToCapacity(2), 3); EXPECT_EQ(SizeToCapacity(3), 3); + EXPECT_EQ(SizeToCapacity(4), 7); + EXPECT_EQ(SizeToCapacity(5), 7); + EXPECT_EQ(SizeToCapacity(6), 7); + if (Group::kWidth == 16) { + EXPECT_EQ(SizeToCapacity(7), 7); + EXPECT_EQ(SizeToCapacity(14), 15); + } else { + EXPECT_EQ(SizeToCapacity(7), 15); + } +} + +TEST(Util, CapacityToGrowthSmallValues) { + EXPECT_EQ(CapacityToGrowth(1), 1); + EXPECT_EQ(CapacityToGrowth(3), 3); + if (Group::kWidth == 16) { + EXPECT_EQ(CapacityToGrowth(7), 7); + } else { + EXPECT_EQ(CapacityToGrowth(7), 6); + } + EXPECT_EQ(CapacityToGrowth(15), 14); + EXPECT_EQ(CapacityToGrowth(31), 28); + EXPECT_EQ(CapacityToGrowth(63), 55); +} + +TEST(Util, GrowthAndCapacity) { + // Verify that GrowthToCapacity gives the minimum capacity that has enough + // growth. for (size_t growth = 1; growth < 10000; ++growth) { SCOPED_TRACE(growth); size_t capacity = SizeToCapacity(growth); ASSERT_TRUE(IsValidCapacity(capacity)); // The capacity is large enough for `growth`. - EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); + ASSERT_THAT(CapacityToGrowth(capacity), Ge(growth)); // For (capacity+1) < kWidth, growth should equal capacity. if (capacity + 1 < Group::kWidth) { - EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); + ASSERT_THAT(CapacityToGrowth(capacity), Eq(capacity)); } else { - EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); + ASSERT_THAT(CapacityToGrowth(capacity), Lt(capacity)); } if (growth != 0 && capacity > 1) { // There is no smaller capacity that works. 
- EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); + ASSERT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)) << capacity; } } @@ -308,9 +330,9 @@ TEST(Util, GrowthAndCapacity) { capacity = 2 * capacity + 1) { SCOPED_TRACE(capacity); size_t growth = CapacityToGrowth(capacity); - EXPECT_THAT(growth, Lt(capacity)); - EXPECT_EQ(SizeToCapacity(growth), capacity); - EXPECT_EQ(NormalizeCapacity(SizeToCapacity(growth)), capacity); + ASSERT_THAT(growth, Lt(capacity)); + ASSERT_EQ(SizeToCapacity(growth), capacity); + ASSERT_EQ(NormalizeCapacity(SizeToCapacity(growth)), capacity); } } @@ -329,203 +351,6 @@ TEST(Util, probe_seq) { EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); } -TEST(BitMask, Smoke) { - EXPECT_FALSE((BitMask(0))); - EXPECT_TRUE((BitMask(5))); - - EXPECT_THAT((BitMask(0)), ElementsAre()); - EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); - EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); - EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); - EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); - EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); - EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); - EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); -} - -TEST(BitMask, WithShift_MatchPortable) { - // See the non-SSE version of Group for details on what this math is for. - uint64_t ctrl = 0x1716151413121110; - uint64_t hash = 0x12; - constexpr uint64_t lsbs = 0x0101010101010101ULL; - auto x = ctrl ^ (lsbs * hash); - uint64_t mask = (x - lsbs) & ~x & kMsbs8Bytes; - EXPECT_EQ(0x0000000080800000, mask); - - BitMask b(mask); - EXPECT_EQ(*b, 2); -} - -constexpr uint64_t kSome8BytesMask = /* */ 0x8000808080008000ULL; -constexpr uint64_t kSome8BytesMaskAllOnes = 0xff00ffffff00ff00ULL; -constexpr auto kSome8BytesMaskBits = std::array{1, 3, 4, 5, 7}; - - -TEST(BitMask, WithShift_FullMask) { - EXPECT_THAT((BitMask(kMsbs8Bytes)), - ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); - EXPECT_THAT( - (BitMask(kMsbs8Bytes)), - ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); - EXPECT_THAT( - (BitMask(~uint64_t{0})), - ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); -} - -TEST(BitMask, WithShift_EmptyMask) { - EXPECT_THAT((BitMask(0)), ElementsAre()); - EXPECT_THAT((BitMask(0)), - ElementsAre()); -} - -TEST(BitMask, WithShift_SomeMask) { - EXPECT_THAT((BitMask(kSome8BytesMask)), - ElementsAreArray(kSome8BytesMaskBits)); - EXPECT_THAT((BitMask( - kSome8BytesMask)), - ElementsAreArray(kSome8BytesMaskBits)); - EXPECT_THAT((BitMask( - kSome8BytesMaskAllOnes)), - ElementsAreArray(kSome8BytesMaskBits)); -} - -TEST(BitMask, WithShift_SomeMaskExtraBitsForNullify) { - // Verify that adding extra bits into non zero bytes is fine. - uint64_t extra_bits = 77; - for (int i = 0; i < 100; ++i) { - // Add extra bits, but keep zero bytes untouched. 
- uint64_t extra_mask = extra_bits & kSome8BytesMaskAllOnes; - EXPECT_THAT((BitMask( - kSome8BytesMask | extra_mask)), - ElementsAreArray(kSome8BytesMaskBits)) - << i << " " << extra_mask; - extra_bits = (extra_bits + 1) * 3; - } -} - -TEST(BitMask, LeadingTrailing) { - EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); - EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); - - EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); - EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); - - EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); - EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); - - EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); - EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); - - EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); - EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); - - EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); - EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); -} - -TEST(Group, EmptyGroup) { - for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); -} - -TEST(Group, Match) { - if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.Match(0), ElementsAre()); - EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); - EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); - EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); - EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); - } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.Match(0), ElementsAre()); - EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); - EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); - } else { - FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; - } -} - -TEST(Group, MaskEmpty) { - if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4); - } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0); - } else { - FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; - } -} - -TEST(Group, MaskFull) { - if (Group::kWidth == 16) { - ctrl_t group[] = { - ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1), - CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskFull(), - ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15)); - } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, - ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7)); - } else { - FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; - } -} 
- -TEST(Group, MaskNonFull) { - if (Group::kWidth == 16) { - ctrl_t group[] = { - ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), - ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1), - CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskNonFull(), - ElementsAre(0, 2, 4, 6, 10, 13, 14)); - } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, - ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel, - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskNonFull(), ElementsAre(0, 2, 3, 5, 6)); - } else { - FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; - } -} - -TEST(Group, MaskEmptyOrDeleted) { - if (Group::kWidth == 16) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), - ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), - CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), - CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4); - } else if (Group::kWidth == 8) { - ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), - ctrl_t::kDeleted, CtrlT(2), CtrlT(1), - ctrl_t::kSentinel, CtrlT(1)}; - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); - EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3); - } else { - FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; - } -} - TEST(Batch, DropDeletes) { constexpr size_t kCapacity = 63; constexpr size_t kGroupWidth = container_internal::Group::kWidth; @@ -551,30 +376,6 @@ TEST(Batch, DropDeletes) { } } -TEST(Group, CountLeadingEmptyOrDeleted) { - const std::vector empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted}; - const std::vector full_examples = { - CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3), - CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel}; - - for (ctrl_t empty : empty_examples) { - std::vector e(Group::kWidth, empty); - EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); - for (ctrl_t full : full_examples) { - for (size_t i = 0; i != Group::kWidth; ++i) { - std::vector f(Group::kWidth, empty); - f[i] = full; - EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); - } - std::vector f(Group::kWidth, empty); - f[Group::kWidth * 2 / 3] = full; - f[Group::kWidth / 2] = full; - EXPECT_EQ(Group::kWidth / 2, - Group{f.data()}.CountLeadingEmptyOrDeleted()); - } - } -} - template struct ValuePolicy { using slot_type = T; @@ -610,7 +411,7 @@ struct ValuePolicy { std::forward(f), std::forward(args)...); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -758,7 +559,7 @@ class StringPolicy { PairArgs(std::forward(args)...)); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -1184,7 +985,7 @@ TYPED_TEST(SmallTableResizeTest, InsertIntoSmallTable) { t.insert(i); ASSERT_EQ(t.size(), i + 1); for (int j = 0; j < i + 1; ++j) { - EXPECT_TRUE(t.find(j) != t.end()); + ASSERT_TRUE(t.find(j) != t.end()); EXPECT_EQ(*t.find(j), j); } } @@ -1207,7 +1008,7 @@ TYPED_TEST(SmallTableResizeTest, ResizeGrowSmallTables) { t.reserve(target_size); } for (size_t i = 0; i < source_size; ++i) { - EXPECT_TRUE(t.find(static_cast(i)) != t.end()); + ASSERT_TRUE(t.find(static_cast(i)) != t.end()); EXPECT_EQ(*t.find(static_cast(i)), static_cast(i)); } } @@ -1232,7 +1033,7 @@ TYPED_TEST(SmallTableResizeTest, ResizeReduceSmallTables) { << 
"rehash(0) must resize to the minimum capacity"; } for (size_t i = 0; i < inserted_count; ++i) { - EXPECT_TRUE(t.find(static_cast(i)) != t.end()); + ASSERT_TRUE(t.find(static_cast(i)) != t.end()); EXPECT_EQ(*t.find(static_cast(i)), static_cast(i)); } } @@ -1283,6 +1084,9 @@ TYPED_TEST(SooTest, Contains2) { t.clear(); EXPECT_FALSE(t.contains(0)); + + EXPECT_TRUE(t.insert(0).second); + EXPECT_TRUE(t.contains(0)); } int decompose_constructed; @@ -1352,7 +1156,7 @@ struct DecomposePolicy { return std::forward(f)(x, x); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return nullptr; } @@ -2083,8 +1887,6 @@ TEST(Table, EraseInsertProbing) { TEST(Table, GrowthInfoDeletedBit) { BadTable t; - EXPECT_TRUE( - RawHashSetTestOnlyAccess::GetCommon(t).growth_info().HasNoDeleted()); int64_t init_count = static_cast( CapacityToGrowth(NormalizeCapacity(Group::kWidth + 1))); for (int64_t i = 0; i < init_count; ++i) { @@ -2470,12 +2272,10 @@ TEST(Table, NoThrowMoveAssign) { } TEST(Table, NoThrowSwappable) { - ASSERT_TRUE( - container_internal::IsNoThrowSwappable>()); - ASSERT_TRUE(container_internal::IsNoThrowSwappable< - std::equal_to>()); - ASSERT_TRUE(container_internal::IsNoThrowSwappable>()); - EXPECT_TRUE(container_internal::IsNoThrowSwappable()); + ASSERT_TRUE(std::is_nothrow_swappable>()); + ASSERT_TRUE(std::is_nothrow_swappable>()); + ASSERT_TRUE(std::is_nothrow_swappable>()); + EXPECT_TRUE(std::is_nothrow_swappable()); } TEST(Table, HeterogeneousLookup) { @@ -2604,6 +2404,19 @@ TEST(Table, Merge) { EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"))); } +TEST(Table, MergeSmall) { + StringTable t1, t2; + t1.emplace("1", "1"); + t2.emplace("2", "2"); + + EXPECT_THAT(t1, UnorderedElementsAre(Pair("1", "1"))); + EXPECT_THAT(t2, UnorderedElementsAre(Pair("2", "2"))); + + t2.merge(t1); + EXPECT_EQ(t1.size(), 0); + EXPECT_THAT(t2, UnorderedElementsAre(Pair("1", "1"), Pair("2", "2"))); +} + TEST(Table, IteratorEmplaceConstructibleRequirement) { struct Value { explicit Value(absl::string_view view) : value(view) {} @@ -2690,6 +2503,24 @@ TEST(Nodes, ExtractInsert) { EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move) } +TEST(Nodes, ExtractInsertSmall) { + constexpr char k0[] = "Very long string zero."; + StringTable t = {{k0, ""}}; + EXPECT_THAT(t, UnorderedElementsAre(Pair(k0, ""))); + + auto node = t.extract(k0); + EXPECT_EQ(t.size(), 0); + EXPECT_TRUE(node); + EXPECT_FALSE(node.empty()); + + StringTable t2; + StringTable::insert_return_type res = t2.insert(std::move(node)); + EXPECT_TRUE(res.inserted); + EXPECT_THAT(*res.position, Pair(k0, "")); + EXPECT_FALSE(res.node); + EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, ""))); +} + TYPED_TEST(SooTest, HintInsert) { TypeParam t = {1, 2, 3}; auto node = t.extract(1); @@ -2828,12 +2659,12 @@ TEST(TableDeathTest, InvalidIteratorAsserts) { NonSooIntTable t; // Extra simple "regexp" as regexp support is highly varied across platforms. - EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), - "erase.* called on end.. iterator."); + EXPECT_DEATH_IF_SUPPORTED(++t.end(), "operator.* called on end.. iterator."); typename NonSooIntTable::iterator iter; EXPECT_DEATH_IF_SUPPORTED( ++iter, "operator.* called on default-constructed iterator."); t.insert(0); + t.insert(1); iter = t.begin(); t.erase(iter); const char* const kErasedDeathMessage = @@ -3006,12 +2837,12 @@ TYPED_TEST(RawHashSamplerTest, Sample) { // Expect that we sampled at the requested sampling rate of ~1%. 
EXPECT_NEAR((end_size - start_size) / static_cast(tables.size()), 0.01, 0.005); - EXPECT_EQ(observed_checksums.size(), 5); + ASSERT_EQ(observed_checksums.size(), 5); for (const auto& [_, count] : observed_checksums) { EXPECT_NEAR((100 * count) / static_cast(tables.size()), 0.2, 0.05); } - EXPECT_EQ(reservations.size(), 10); + ASSERT_EQ(reservations.size(), 10); for (const auto& [reservation, count] : reservations) { EXPECT_GE(reservation, 0); EXPECT_LT(reservation, 100); @@ -3644,11 +3475,13 @@ TEST(Iterator, InvalidComparisonDifferentTables) { EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter), "Invalid iterator comparison.*default-constructed"); t1.insert(0); + t1.insert(1); EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()), "Invalid iterator comparison.*empty hashtable"); EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter), "Invalid iterator comparison.*default-constructed"); t2.insert(0); + t2.insert(1); EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()), "Invalid iterator comparison.*end.. iterator"); EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()), @@ -3687,40 +3520,47 @@ TEST(Table, CountedHash) { GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause " "redundant hashing."; } + // When the table is sampled, we need to hash on the first insertion. + DisableSampling(); using Table = CountedHashIntTable; auto HashCount = [](const Table& t) { return t.hash_function().count; }; { Table t; + t.find(0); EXPECT_EQ(HashCount(t), 0); } { Table t; t.insert(1); - EXPECT_EQ(HashCount(t), 1); + t.find(1); + EXPECT_EQ(HashCount(t), 0); t.erase(1); - EXPECT_LE(HashCount(t), 2); + EXPECT_EQ(HashCount(t), 0); + t.insert(1); + t.insert(2); + EXPECT_EQ(HashCount(t), 2); } { Table t; t.insert(3); - EXPECT_EQ(HashCount(t), 1); + EXPECT_EQ(HashCount(t), 0); auto node = t.extract(3); - EXPECT_LE(HashCount(t), 2); + EXPECT_EQ(HashCount(t), 0); t.insert(std::move(node)); - EXPECT_LE(HashCount(t), 3); + EXPECT_EQ(HashCount(t), 0); } { Table t; t.emplace(5); - EXPECT_EQ(HashCount(t), 1); + EXPECT_EQ(HashCount(t), 0); } { Table src; src.insert(7); Table dst; dst.merge(src); - EXPECT_EQ(HashCount(dst), 1); + EXPECT_EQ(HashCount(dst), 0); } } @@ -3731,9 +3571,7 @@ TEST(Table, IterateOverFullSlotsEmpty) { auto fail_if_any = [](const ctrl_t*, void* i) { FAIL() << "expected no slots " << **static_cast(i); }; - container_internal::IterateOverFullSlots( - RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType), fail_if_any); - for (size_t i = 0; i < 256; ++i) { + for (size_t i = 2; i < 256; ++i) { t.reserve(i); container_internal::IterateOverFullSlots( RawHashSetTestOnlyAccess::GetCommon(t), sizeof(SlotType), fail_if_any); @@ -3745,7 +3583,9 @@ TEST(Table, IterateOverFullSlotsFull) { using SlotType = NonSooIntTableSlotType; std::vector expected_slots; - for (int64_t idx = 0; idx < 128; ++idx) { + t.insert(0); + expected_slots.push_back(0); + for (int64_t idx = 1; idx < 128; ++idx) { t.insert(idx); expected_slots.push_back(idx); @@ -4226,8 +4066,8 @@ struct ConstUint8Hash { // 5. Finally we will catch up and go to overflow codepath. TEST(Table, GrowExtremelyLargeTable) { constexpr size_t kTargetCapacity = -#if defined(__wasm__) || defined(__asmjs__) - NextCapacity(ProbedItem4Bytes::kMaxNewCapacity); // OOMs on WASM. +#if defined(__wasm__) || defined(__asmjs__) || defined(__i386__) + NextCapacity(ProbedItem4Bytes::kMaxNewCapacity); // OOMs on WASM, 32-bit. 
#else NextCapacity(ProbedItem8Bytes::kMaxNewCapacity); #endif @@ -4240,7 +4080,7 @@ TEST(Table, GrowExtremelyLargeTable) { CommonFields& common = RawHashSetTestOnlyAccess::GetCommon(t); // Set 0 seed so that H1 is always 0. common.set_no_seed_for_testing(); - ASSERT_EQ(H1(t.hash_function()(75), common.seed()), 0); + ASSERT_EQ(H1(t.hash_function()(75)), 0); uint8_t inserted_till = 210; for (uint8_t i = 0; i < inserted_till; ++i) { t.insert(i); @@ -4264,6 +4104,16 @@ TEST(Table, GrowExtremelyLargeTable) { EXPECT_EQ(t.capacity(), kTargetCapacity); } +// Test that after calling generate_new_seed(), the high bits of the returned +// seed are non-zero. +TEST(PerTableSeed, HighBitsAreNonZero) { + HashtableSize hs(no_seed_empty_tag_t{}); + for (int i = 0; i < 100; ++i) { + hs.generate_new_seed(); + ASSERT_GT(hs.seed().seed() >> 16, 0); + } +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h index 8aed18b2e61..46faa8951c6 100644 --- a/absl/container/node_hash_map.h +++ b/absl/container/node_hash_map.h @@ -110,18 +110,18 @@ class NodeHashMapPolicy; // absl::node_hash_map ducks = // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; // -// // Insert a new element into the node hash map -// ducks.insert({"d", "donald"}}; +// // Insert a new element into the node hash map +// ducks.insert({"d", "donald"}); // -// // Force a rehash of the node hash map -// ducks.rehash(0); +// // Force a rehash of the node hash map +// ducks.rehash(0); // -// // Find the element with the key "b" -// std::string search_key = "b"; -// auto result = ducks.find(search_key); -// if (result != ducks.end()) { -// std::cout << "Result: " << result->second << std::endl; -// } +// // Find the element with the key "b" +// std::string search_key = "b"; +// auto result = ducks.find(search_key); +// if (result != ducks.end()) { +// std::cout << "Result: " << result->second << std::endl; +// } template , class Eq = DefaultHashContainerEq, class Alloc = std::allocator>> class ABSL_ATTRIBUTE_OWNER node_hash_map @@ -153,9 +153,9 @@ class ABSL_ATTRIBUTE_OWNER node_hash_map // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::node_hash_map map4; - // map4 = map3; + // // Hash functor and Comparator are copied as well + // absl::node_hash_map map4; + // map4 = map3; // // * Move constructor // @@ -663,10 +663,10 @@ class NodeHashMapPolicy static Value& value(value_type* elem) { return elem->second; } static const Value& value(const value_type* elem) { return elem->second; } - template + template static constexpr HashSlotFn get_hash_slot_fn() { return memory_internal::IsLayoutCompatible::value - ? &TypeErasedDerefAndApplyToSlotFn + ? &TypeErasedDerefAndApplyToSlotFn : nullptr; } }; diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h index 6240e2d9154..9eef870ff81 100644 --- a/absl/container/node_hash_set.h +++ b/absl/container/node_hash_set.h @@ -108,16 +108,16 @@ struct NodeHashSetPolicy; // absl::node_hash_set ducks = // {"huey", "dewey", "louie"}; // -// // Insert a new element into the node hash set -// ducks.insert("donald"); +// // Insert a new element into the node hash set +// ducks.insert("donald"); // -// // Force a rehash of the node hash set -// ducks.rehash(0); +// // Force a rehash of the node hash set +// ducks.rehash(0); // -// // See if "dewey" is present -// if (ducks.contains("dewey")) { -// std::cout << "We found dewey!"
<< std::endl; -// } +// // See if "dewey" is present +// if (ducks.contains("dewey")) { +// std::cout << "We found dewey!" << std::endl; +// } template , class Eq = DefaultHashContainerEq, class Alloc = std::allocator> class ABSL_ATTRIBUTE_OWNER node_hash_set @@ -147,9 +147,9 @@ class ABSL_ATTRIBUTE_OWNER node_hash_set // // * Copy assignment operator // - // // Hash functor and Comparator are copied as well - // absl::node_hash_set set4; - // set4 = set3; + // // Hash functor and Comparator are copied as well + // absl::node_hash_set set4; + // set4 = set3; // // * Move constructor // @@ -557,9 +557,9 @@ struct NodeHashSetPolicy static size_t element_space_used(const T*) { return sizeof(T); } - template + template static constexpr HashSlotFn get_hash_slot_fn() { - return &TypeErasedDerefAndApplyToSlotFn; + return &TypeErasedDerefAndApplyToSlotFn; } }; } // namespace container_internal diff --git a/absl/copts/GENERATED_AbseilCopts.cmake b/absl/copts/GENERATED_AbseilCopts.cmake index cc0f4bb2d93..32b97fcb307 100644 --- a/absl/copts/GENERATED_AbseilCopts.cmake +++ b/absl/copts/GENERATED_AbseilCopts.cmake @@ -23,6 +23,7 @@ list(APPEND ABSL_CLANG_CL_TEST_FLAGS "-Wno-implicit-int-conversion" "-Wno-missing-prototypes" "-Wno-missing-variable-declarations" + "-Wno-nullability-completeness" "-Wno-shadow" "-Wno-shorten-64-to-32" "-Wno-sign-compare" @@ -120,6 +121,7 @@ list(APPEND ABSL_LLVM_FLAGS "-Wno-implicit-float-conversion" "-Wno-implicit-int-float-conversion" "-Wno-unknown-warning-option" + "-Wno-unused-command-line-argument" "-DNOMINMAX" ) @@ -139,7 +141,6 @@ list(APPEND ABSL_LLVM_TEST_FLAGS "-Winvalid-constexpr" "-Wliteral-conversion" "-Wmissing-declarations" - "-Wnullability-completeness" "-Woverlength-strings" "-Wpointer-arith" "-Wself-assign" @@ -160,11 +161,13 @@ list(APPEND ABSL_LLVM_TEST_FLAGS "-Wno-implicit-float-conversion" "-Wno-implicit-int-float-conversion" "-Wno-unknown-warning-option" + "-Wno-unused-command-line-argument" "-DNOMINMAX" "-Wno-deprecated-declarations" "-Wno-implicit-int-conversion" "-Wno-missing-prototypes" "-Wno-missing-variable-declarations" + "-Wno-nullability-completeness" "-Wno-shadow" "-Wno-shorten-64-to-32" "-Wno-sign-compare" diff --git a/absl/copts/GENERATED_copts.bzl b/absl/copts/GENERATED_copts.bzl index 35319f08afe..8d7219044ee 100644 --- a/absl/copts/GENERATED_copts.bzl +++ b/absl/copts/GENERATED_copts.bzl @@ -24,6 +24,7 @@ ABSL_CLANG_CL_TEST_FLAGS = [ "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", + "-Wno-nullability-completeness", "-Wno-shadow", "-Wno-shorten-64-to-32", "-Wno-sign-compare", @@ -121,6 +122,7 @@ ABSL_LLVM_FLAGS = [ "-Wno-implicit-float-conversion", "-Wno-implicit-int-float-conversion", "-Wno-unknown-warning-option", + "-Wno-unused-command-line-argument", "-DNOMINMAX", ] @@ -140,7 +142,6 @@ ABSL_LLVM_TEST_FLAGS = [ "-Winvalid-constexpr", "-Wliteral-conversion", "-Wmissing-declarations", - "-Wnullability-completeness", "-Woverlength-strings", "-Wpointer-arith", "-Wself-assign", @@ -161,11 +162,13 @@ ABSL_LLVM_TEST_FLAGS = [ "-Wno-implicit-float-conversion", "-Wno-implicit-int-float-conversion", "-Wno-unknown-warning-option", + "-Wno-unused-command-line-argument", "-DNOMINMAX", "-Wno-deprecated-declarations", "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", + "-Wno-nullability-completeness", "-Wno-shadow", "-Wno-shorten-64-to-32", "-Wno-sign-compare", diff --git a/absl/copts/copts.py b/absl/copts/copts.py index 941528e0ba4..e6c4385d9f9 100644 
--- a/absl/copts/copts.py +++ b/absl/copts/copts.py @@ -84,6 +84,7 @@ # Disable warnings on unknown warning flags (when warning flags are # unknown on older compiler versions) "-Wno-unknown-warning-option", + "-Wno-unused-command-line-argument", # Don't define min and max macros (Build on Windows using clang) "-DNOMINMAX", ] @@ -93,6 +94,7 @@ "-Wno-implicit-int-conversion", "-Wno-missing-prototypes", "-Wno-missing-variable-declarations", + "-Wno-nullability-completeness", "-Wno-shadow", "-Wno-shorten-64-to-32", "-Wno-sign-compare", diff --git a/absl/crc/BUILD.bazel b/absl/crc/BUILD.bazel index b659a7e24ed..22c3cbf0fc6 100644 --- a/absl/crc/BUILD.bazel +++ b/absl/crc/BUILD.bazel @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/crc/crc32c_test.cc b/absl/crc/crc32c_test.cc index df0afb3e329..eed5a6c8947 100644 --- a/absl/crc/crc32c_test.cc +++ b/absl/crc/crc32c_test.cc @@ -15,11 +15,14 @@ #include "absl/crc/crc32c.h" #include +#include #include #include #include +#include #include #include +#include #include "gtest/gtest.h" #include "absl/crc/internal/crc32c.h" @@ -101,6 +104,33 @@ TEST(CRC32C, ExtendByZeroes) { } } +// Test ExtendCrc32cByZeroes() for the full range of the size_t length, +// including every bit. This is important because ExtendCrc32cByZeroes() is +// implemented using an array of constants, where each entry in the array is +// used only when a particular bit in the size_t length is set. This test +// verifies that every entry in that array is correct. +TEST(CRC32C, ExtendByZeroesAllLengthBits) { + absl::crc32c_t base_crc = absl::crc32c_t{0xc99465aa}; + const std::array, 5> kTestCases = {{ + {0, absl::crc32c_t(0xc99465aa)}, + {std::numeric_limits::max(), absl::crc32c_t(0x9b1d5aaa)}, + {0x12345678, absl::crc32c_t(0xcf0e9553)}, + {std::numeric_limits::max(), absl::crc32c_t(0xf5bff489)}, + {0x12345678abcdefff, absl::crc32c_t(0xaa1ffb0b)}, + }}; + for (const auto &test_case : kTestCases) { + uint64_t length = std::get<0>(test_case); + absl::crc32c_t expected_value = std::get<1>(test_case); + SCOPED_TRACE(length); + if (length > std::numeric_limits::max()) { + // On 32-bit platforms, 64-bit lengths cannot be used or tested. 
+ continue; + } + EXPECT_EQ(absl::ExtendCrc32cByZeroes(base_crc, static_cast(length)), + expected_value); + } +} + TEST(CRC32C, UnextendByZeroes) { constexpr size_t kExtendByValues[] = {2, 200, 20000, 200000, 20000000}; constexpr size_t kUnextendByValues[] = {0, 100, 10000, 100000, 10000000}; diff --git a/absl/crc/internal/cpu_detect.cc b/absl/crc/internal/cpu_detect.cc index c59f773ea30..a6976017f31 100644 --- a/absl/crc/internal/cpu_detect.cc +++ b/absl/crc/internal/cpu_detect.cc @@ -145,6 +145,14 @@ CpuType GetIntelCpuType() { } case 0x5e: // Skylake (client) return CpuType::kIntelSkylake; + case 0x6a: // Ice Lake + return CpuType::kIntelIcelake; + case 0x8f: // Sapphire Rapids + return CpuType::kIntelSapphirerapids; + case 0xcf: // Emerald Rapids + return CpuType::kIntelEmeraldrapids; + case 0xad: // Granite Rapids + return CpuType::kIntelGraniterapidsap; default: return CpuType::kUnknown; } @@ -210,6 +218,14 @@ CpuType GetAmdCpuType() { return CpuType::kUnknown; } break; + case 0x1A: + switch (model_num) { + case 0x2: + return CpuType::kAmdTurin; + default: + return CpuType::kUnknown; + } + break; default: return CpuType::kUnknown; } @@ -259,6 +275,7 @@ CpuType GetCpuType() { case 0xd40: return CpuType::kArmNeoverseV1; case 0xd49: return CpuType::kArmNeoverseN2; case 0xd4f: return CpuType::kArmNeoverseV2; + case 0xd8e: return CpuType::kArmNeoverseN3; default: return CpuType::kUnknown; } diff --git a/absl/crc/internal/cpu_detect.h b/absl/crc/internal/cpu_detect.h index 01e19590cae..e76a8027ca1 100644 --- a/absl/crc/internal/cpu_detect.h +++ b/absl/crc/internal/cpu_detect.h @@ -30,10 +30,15 @@ enum class CpuType { kAmdNaples, kAmdMilan, kAmdGenoa, + kAmdTurin, kAmdRyzenV3000, kIntelCascadelakeXeon, kIntelSkylakeXeon, kIntelBroadwell, + kIntelIcelake, + kIntelSapphirerapids, + kIntelEmeraldrapids, + kIntelGraniterapidsap, kIntelSkylake, kIntelIvybridge, kIntelSandybridge, @@ -42,7 +47,8 @@ enum class CpuType { kArmNeoverseV1, kAmpereSiryn, kArmNeoverseN2, - kArmNeoverseV2 + kArmNeoverseV2, + kArmNeoverseN3, }; // Returns the type of host CPU this code is running on. Returns kUnknown if diff --git a/absl/crc/internal/crc_memcpy_x86_arm_combined.cc b/absl/crc/internal/crc_memcpy_x86_arm_combined.cc index 38f61e9b1fd..247b3aa9d1f 100644 --- a/absl/crc/internal/crc_memcpy_x86_arm_combined.cc +++ b/absl/crc/internal/crc_memcpy_x86_arm_combined.cc @@ -422,6 +422,11 @@ CrcMemcpy::ArchSpecificEngines CrcMemcpy::GetArchSpecificEngines() { }; // INTEL_SANDYBRIDGE performs better with SSE than AVX. case CpuType::kIntelSandybridge: + // Use SIMD memcpy on ARM cores. + case CpuType::kArmNeoverseN1: + case CpuType::kArmNeoverseN2: + case CpuType::kArmNeoverseV1: + case CpuType::kArmNeoverseV2: return { /*.temporal=*/new AcceleratedCrcMemcpyEngine<3, 0>(), /*.non_temporal=*/new CrcNonTemporalMemcpyEngine(), diff --git a/absl/crc/internal/crc_x86_arm_combined.cc b/absl/crc/internal/crc_x86_arm_combined.cc index 3194bec468e..ebd9c3fef2e 100644 --- a/absl/crc/internal/crc_x86_arm_combined.cc +++ b/absl/crc/internal/crc_x86_arm_combined.cc @@ -100,47 +100,67 @@ constexpr size_t kMediumCutoff = 2048; namespace { -uint32_t multiply(uint32_t a, uint32_t b) { - V128 power = V128_From64WithZeroFill(a); - V128 crc = V128_From64WithZeroFill(b); - V128 res = V128_PMulLow(power, crc); - - // Combine crc values. - // - // Adding res to itself is equivalent to multiplying by 2, - // or shifting left by 1. Addition is used as not all compilers - // are able to generate optimal code without this hint. 
- // https://godbolt.org/z/rr3fMnf39 - res = V128_Add64(res, res); - return static_cast(V128_Extract32<1>(res)) ^ - CRC32_u32(0, static_cast(V128_Low64(res))); +// Does polynomial multiplication a * b * x^33 mod G. +// +// One of the multiplicands needs to have an extra factor of x^-33 to cancel out +// the extra factor of x^33. The extra factor of x^33 comes from: +// +// - x^1 from the carry-less multiplication, due to the +// "least-significant-bit-first" convention of CRC-32C. +// +// - x^32 from using CRC32_u64() to reduce the carry-less product to 32 bits. +// +// Both could be avoided, but at the cost of extra instructions. It's more +// efficient to just drop a factor of x^33 from one of the multiplicands. +uint32_t MultiplyWithExtraX33(uint32_t a, uint32_t b) { + V128 a_vec = V128_From64WithZeroFill(a); + V128 b_vec = V128_From64WithZeroFill(b); + V128 res = V128_PMulLow(a_vec, b_vec); + + return CRC32_u64(0, static_cast(V128_Low64(res))); } -// Powers of crc32c polynomial, for faster ExtendByZeros. -// Verified against folly: -// folly/hash/detail/Crc32CombineDetail.cpp +// The number of low-order bits that ComputeZeroConstant() drops from the +// length, i.e. treats as zeroes +constexpr int kNumDroppedBits = 4; + +// Precomputed constants for faster ExtendByZeroes(). This was generated by +// gen_crc32c_consts.py. The entry at index i is x^(2^(i + 3 + kNumDroppedBits) +// - 33) mod G. That is x^-33 times the polynomial by which the CRC value needs +// to be multiplied to extend it by 2^(i + 3 + kNumDroppedBits) zero bits, or +// equivalently 2^(i + kNumDroppedBits) zero bytes. The extra factor of x^-33 +// cancels out the extra factor of x^33 that MultiplyWithExtraX33() introduces. constexpr uint32_t kCRC32CPowers[] = { - 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, 0xb8fdb1e7, - 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, 0x28461564, - 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, 0x538586e3, - 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, 0xe94ca9bc, - 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, 0x00800000, - 0x00008000, 0x82f63b78, 0x6ea2d55c, 0x18b8ea18, 0x510ac59a, 0xb82be955, - 0xb8fdb1e7, 0x88e56f72, 0x74c360a4, 0xe4172b16, 0x0d65762a, 0x35d73a62, - 0x28461564, 0xbf455269, 0xe2ea32dc, 0xfe7740e6, 0xf946610b, 0x3c204f8f, - 0x538586e3, 0x59726915, 0x734d5309, 0xbc1ac763, 0x7d0722cc, 0xd289cabe, - 0xe94ca9bc, 0x05b74f3f, 0xa51e1f42, 0x40000000, 0x20000000, 0x08000000, - 0x00800000, 0x00008000, + 0x493c7d27, 0xba4fc28e, 0x9e4addf8, 0x0d3b6092, 0xb9e02b86, 0xdd7e3b0c, + 0x170076fa, 0xa51b6135, 0x82f89c77, 0x54a86326, 0x1dc403cc, 0x5ae703ab, + 0xc5013a36, 0xac2ac6dd, 0x9b4615a9, 0x688d1c61, 0xf6af14e6, 0xb6ffe386, + 0xb717425b, 0x478b0d30, 0x54cc62e5, 0x7b2102ee, 0x8a99adef, 0xa7568c8f, + 0xd610d67e, 0x6b086b3f, 0xd94f3c0b, 0xbf818109, 0x780d5a4d, 0x05ec76f1, + 0x00000001, 0x493c7d27, 0xba4fc28e, 0x9e4addf8, 0x0d3b6092, 0xb9e02b86, + 0xdd7e3b0c, 0x170076fa, 0xa51b6135, 0x82f89c77, 0x54a86326, 0x1dc403cc, + 0x5ae703ab, 0xc5013a36, 0xac2ac6dd, 0x9b4615a9, 0x688d1c61, 0xf6af14e6, + 0xb6ffe386, 0xb717425b, 0x478b0d30, 0x54cc62e5, 0x7b2102ee, 0x8a99adef, + 0xa7568c8f, 0xd610d67e, 0x6b086b3f, 0xd94f3c0b, 0xbf818109, 0x780d5a4d, }; +// There must be an entry for each non-dropped bit in the size_t length. +static_assert(std::size(kCRC32CPowers) >= sizeof(size_t) * 8 - kNumDroppedBits); } // namespace -// Compute a magic constant, so that multiplying by it is the same as -// extending crc by length zeros. 
+// Compute a magic constant, so that multiplying by it is the same as extending +// crc by length zeros. The lowest kNumDroppedBits of the length are ignored and +// treated as zeroes; the caller is assumed to handle any nonzero bits there. +#if defined(NDEBUG) && ABSL_HAVE_CPP_ATTRIBUTE(clang::no_sanitize) +// The array accesses in this are safe: `length >= size_t{1} << +// kNumDroppedBits`, so `countr_zero(length >> kNumDroppedBits) < sizeof(size_t) +// * 8 - kNumDroppedBits`, and `length & (length - 1)` cannot introduce bits +// `>= sizeof(size_t) * 8 - kNumDroppedBits`. The compiler cannot prove this, so +// manually disable bounds checking. +[[clang::no_sanitize("array-bounds")]] +#endif uint32_t CRC32AcceleratedX86ARMCombined::ComputeZeroConstant( size_t length) const { - // Lowest 2 bits are handled separately in ExtendByZeroes - length >>= 2; + length >>= kNumDroppedBits; int index = absl::countr_zero(length); uint32_t prev = kCRC32CPowers[index]; @@ -149,7 +169,7 @@ uint32_t CRC32AcceleratedX86ARMCombined::ComputeZeroConstant( while (length) { // For each bit of length, extend by 2**n zeros. index = absl::countr_zero(length); - prev = multiply(prev, kCRC32CPowers[index]); + prev = MultiplyWithExtraX33(prev, kCRC32CPowers[index]); length &= length - 1; } return prev; @@ -159,22 +179,13 @@ void CRC32AcceleratedX86ARMCombined::ExtendByZeroes(uint32_t* crc, size_t length) const { uint32_t val = *crc; // Don't bother with multiplication for small length. - switch (length & 3) { - case 0: - break; - case 1: - val = CRC32_u8(val, 0); - break; - case 2: - val = CRC32_u16(val, 0); - break; - case 3: - val = CRC32_u8(val, 0); - val = CRC32_u16(val, 0); - break; - } - if (length > 3) { - val = multiply(val, ComputeZeroConstant(length)); + if (length & 1) val = CRC32_u8(val, 0); + if (length & 2) val = CRC32_u16(val, 0); + if (length & 4) val = CRC32_u32(val, 0); + if (length & 8) val = CRC32_u64(val, 0); + static_assert(kNumDroppedBits == 4); + if (length >= size_t{1} << kNumDroppedBits) { + val = MultiplyWithExtraX33(val, ComputeZeroConstant(length)); } *crc = val; } @@ -306,6 +317,46 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreamsBase return crc; } + // Same as Process64BytesCRC, but just interleaved for 2 streams. + ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesCRC2Streams( + const uint8_t* p0, const uint8_t* p1, uint64_t* crc) const { + uint64_t crc0 = crc[0]; + uint64_t crc1 = crc[1]; + for (int i = 0; i < 8; i++) { + crc0 = CRC32_u64(static_cast(crc0), + absl::little_endian::Load64(p0)); + crc1 = CRC32_u64(static_cast(crc1), + absl::little_endian::Load64(p1)); + p0 += 8; + p1 += 8; + } + crc[0] = crc0; + crc[1] = crc1; + } + + // Same as Process64BytesCRC, but just interleaved for 3 streams. + ABSL_ATTRIBUTE_ALWAYS_INLINE void Process64BytesCRC3Streams( + const uint8_t* p0, const uint8_t* p1, const uint8_t* p2, + uint64_t* crc) const { + uint64_t crc0 = crc[0]; + uint64_t crc1 = crc[1]; + uint64_t crc2 = crc[2]; + for (int i = 0; i < 8; i++) { + crc0 = CRC32_u64(static_cast(crc0), + absl::little_endian::Load64(p0)); + crc1 = CRC32_u64(static_cast(crc1), + absl::little_endian::Load64(p1)); + crc2 = CRC32_u64(static_cast(crc2), + absl::little_endian::Load64(p2)); + p0 += 8; + p1 += 8; + p2 += 8; + } + crc[0] = crc0; + crc[1] = crc1; + crc[2] = crc2; + } + // Constants generated by './scripts/gen-crc-consts.py x86_pclmul // crc32_lsb_0x82f63b78' from the Linux kernel. 
alignas(16) static constexpr uint64_t kFoldAcross512Bits[2] = { @@ -339,7 +390,8 @@ template = 1 && num_crc_streams <= kMaxStreams, "Invalid number of crc streams"); static_assert(num_pclmul_streams >= 0 && num_pclmul_streams <= kMaxStreams, @@ -349,47 +401,15 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams uint32_t l = *crc; uint64_t l64; - // We have dedicated instruction for 1,2,4 and 8 bytes. - if (length & 8) { - ABSL_INTERNAL_STEP8(l, p); - length &= ~size_t{8}; - } - if (length & 4) { - ABSL_INTERNAL_STEP4(l, p); - length &= ~size_t{4}; - } - if (length & 2) { - ABSL_INTERNAL_STEP2(l, p); - length &= ~size_t{2}; - } - if (length & 1) { - ABSL_INTERNAL_STEP1(l, p); - length &= ~size_t{1}; - } - if (length == 0) { - *crc = l; - return; - } - // length is now multiple of 16. - // For small blocks just run simple loop, because cost of combining multiple // streams is significant. - if (strategy != CutoffStrategy::Unroll64CRC) { - if (length < kSmallCutoff) { - while (length >= 16) { - ABSL_INTERNAL_STEP8(l, p); - ABSL_INTERNAL_STEP8(l, p); - length -= 16; - } - *crc = l; - return; - } - } - - // For medium blocks we run 3 crc streams and combine them as described in - // Intel paper above. Running 4th stream doesn't help, because crc - // instruction has latency 3 and throughput 1. - if (length < kMediumCutoff) { + if (strategy != CutoffStrategy::Unroll64CRC && (length < kSmallCutoff)) { + // fallthrough; Use the same strategy as we do for processing the + // remaining bytes after any other strategy. + } else if (length < kMediumCutoff) { + // For medium blocks we run 3 crc streams and combine them as described in + // Intel paper above. Running 4th stream doesn't help, because crc + // instruction has latency 3 and throughput 1. l64 = l; if (strategy == CutoffStrategy::Fold3) { uint64_t l641 = 0; @@ -438,6 +458,7 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams p += 64; } } + l = static_cast(l64); } else { // There is a lot of data, we can ignore combine costs and run all // requested streams (num_crc_streams + num_pclmul_streams), @@ -471,9 +492,19 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams uint64_t l64_pclmul[kMaxStreams] = {0}; // Peel first iteration, because PCLMULQDQ stream, needs setup. - for (size_t i = 0; i < num_crc_streams; i++) { - l64_crc[i] = Process64BytesCRC(crc_streams[i], l64_crc[i]); - crc_streams[i] += 16 * 4; + if (num_crc_streams == 1) { + l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]); + crc_streams[0] += 16 * 4; + } else if (num_crc_streams == 2) { + Process64BytesCRC2Streams(crc_streams[0], crc_streams[1], l64_crc); + crc_streams[0] += 16 * 4; + crc_streams[1] += 16 * 4; + } else { + Process64BytesCRC3Streams(crc_streams[0], crc_streams[1], + crc_streams[2], l64_crc); + crc_streams[0] += 16 * 4; + crc_streams[1] += 16 * 4; + crc_streams[2] += 16 * 4; } V128 partialCRC[kMaxStreams][4]; @@ -511,24 +542,28 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams // } // But unrolling and interleaving PCLMULQDQ and CRC blocks manually // gives ~2% performance boost. 
- l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]); - crc_streams[0] += 16 * 4; + if (num_crc_streams == 1) { + l64_crc[0] = Process64BytesCRC(crc_streams[0], l64_crc[0]); + crc_streams[0] += 16 * 4; + } else if (num_crc_streams == 2) { + Process64BytesCRC2Streams(crc_streams[0], crc_streams[1], l64_crc); + crc_streams[0] += 16 * 4; + crc_streams[1] += 16 * 4; + } else { + Process64BytesCRC3Streams(crc_streams[0], crc_streams[1], + crc_streams[2], l64_crc); + crc_streams[0] += 16 * 4; + crc_streams[1] += 16 * 4; + crc_streams[2] += 16 * 4; + } if (num_pclmul_streams > 0) { Process64BytesPclmul(pclmul_streams[0], partialCRC[0]); pclmul_streams[0] += 16 * 4; } - if (num_crc_streams > 1) { - l64_crc[1] = Process64BytesCRC(crc_streams[1], l64_crc[1]); - crc_streams[1] += 16 * 4; - } if (num_pclmul_streams > 1) { Process64BytesPclmul(pclmul_streams[1], partialCRC[1]); pclmul_streams[1] += 16 * 4; } - if (num_crc_streams > 2) { - l64_crc[2] = Process64BytesCRC(crc_streams[2], l64_crc[2]); - crc_streams[2] += 16 * 4; - } if (num_pclmul_streams > 2) { Process64BytesPclmul(pclmul_streams[2], partialCRC[2]); pclmul_streams[2] += 16 * 4; @@ -542,14 +577,15 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams } // Combine all streams into single result. + static_assert(64 % (1 << kNumDroppedBits) == 0); uint32_t magic = ComputeZeroConstant(bs * 64); l64 = l64_crc[0]; for (size_t i = 1; i < num_crc_streams; i++) { - l64 = multiply(static_cast(l64), magic); + l64 = MultiplyWithExtraX33(static_cast(l64), magic); l64 ^= l64_crc[i]; } for (size_t i = 0; i < num_pclmul_streams; i++) { - l64 = multiply(static_cast(l64), magic); + l64 = MultiplyWithExtraX33(static_cast(l64), magic); l64 ^= l64_pclmul[i]; } @@ -559,15 +595,26 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams } else { p = crc_streams[num_crc_streams - 1]; } + l = static_cast(l64); } - l = static_cast(l64); + uint64_t remaining_bytes = static_cast(e - p); + // Process the remaining bytes. while ((e - p) >= 16) { ABSL_INTERNAL_STEP8(l, p); ABSL_INTERNAL_STEP8(l, p); } - // Process the last few bytes - while (p != e) { + + if (remaining_bytes & 8) { + ABSL_INTERNAL_STEP8(l, p); + } + if (remaining_bytes & 4) { + ABSL_INTERNAL_STEP4(l, p); + } + if (remaining_bytes & 2) { + ABSL_INTERNAL_STEP2(l, p); + } + if (remaining_bytes & 1) { ABSL_INTERNAL_STEP1(l, p); } @@ -593,6 +640,8 @@ CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { case CpuType::kAmdRome: case CpuType::kAmdNaples: case CpuType::kAmdMilan: + case CpuType::kAmdGenoa: + case CpuType::kAmdTurin: return new CRC32AcceleratedX86ARMCombinedMultipleStreams< 3, 1, CutoffStrategy::Fold3>(); // PCLMULQDQ is fast, use combined PCLMULQDQ + CRC implementation. @@ -600,6 +649,10 @@ CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { case CpuType::kIntelSkylakeXeon: case CpuType::kIntelBroadwell: case CpuType::kIntelSkylake: + case CpuType::kIntelIcelake: + case CpuType::kIntelSapphirerapids: + case CpuType::kIntelEmeraldrapids: + case CpuType::kIntelGraniterapidsap: return new CRC32AcceleratedX86ARMCombinedMultipleStreams< 3, 2, CutoffStrategy::Fold3>(); // PCLMULQDQ is slow, don't use it. 
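The zero-extension changes above rest on the identity that appending n zero bytes to a message multiplies its CRC polynomial by x^(8n) mod G, so the precomputed-constant table turns the work into one carry-less multiply per set bit of n instead of n byte updates. A small consistency sketch against the public absl::crc32c API (written for this note to check the identity, not part of the patch):

#include <cstddef>
#include <string>
#include "absl/crc/crc32c.h"

// Extending a CRC by n zero bytes the slow way (hashing literal zero
// bytes) and the fast way (the constant-table path) must always agree.
bool ZeroExtensionAgrees(const std::string& msg, size_t n) {
  absl::crc32c_t direct = absl::ComputeCrc32c(msg + std::string(n, '\0'));
  absl::crc32c_t fast =
      absl::ExtendCrc32cByZeroes(absl::ComputeCrc32c(msg), n);
  return direct == fast;
}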
@@ -611,6 +664,7 @@ CRCImpl* TryNewCRC32AcceleratedX86ARMCombined() { case CpuType::kArmNeoverseN1: case CpuType::kArmNeoverseN2: case CpuType::kArmNeoverseV1: + case CpuType::kArmNeoverseN3: return new CRC32AcceleratedX86ARMCombinedMultipleStreams< 1, 1, CutoffStrategy::Unroll64CRC>(); case CpuType::kAmpereSiryn: diff --git a/absl/crc/internal/gen_crc32c_consts.py b/absl/crc/internal/gen_crc32c_consts.py new file mode 100755 index 00000000000..f78ae302d5c --- /dev/null +++ b/absl/crc/internal/gen_crc32c_consts.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# +# Copyright 2025 The Abseil Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This script generates kCRC32CPowers[].""" + + +def poly_mul(a, b): + """Polynomial multiplication: a * b.""" + product = 0 + for i in range(b.bit_length()): + if (b & (1 << i)) != 0: + product ^= a << i + return product + + +def poly_div(a, b): + """Polynomial division: floor(a / b).""" + q = 0 + while a.bit_length() >= b.bit_length(): + q ^= 1 << (a.bit_length() - b.bit_length()) + a ^= b << (a.bit_length() - b.bit_length()) + return q + + +def poly_reduce(a, b): + """Polynomial reduction: a mod b.""" + return a ^ poly_mul(poly_div(a, b), b) + + +def poly_exp(a, b, g): + """Polynomial exponentiation: a^b mod g.""" + if b == 1: + return poly_reduce(a, g) + c = poly_exp(a, b // 2, g) + c = poly_mul(c, c) + if b % 2 != 0: + c = poly_mul(c, a) + return poly_reduce(c, g) + + +def bitreflect(a, num_bits): + """Reflects the bits of the given integer.""" + if a.bit_length() > num_bits: + raise ValueError(f'Integer has more than {num_bits} bits') + return sum(((a >> i) & 1) << (num_bits - 1 - i) for i in range(num_bits)) + + +G = 0x11EDC6F41 # The CRC-32C reducing polynomial, in the "natural" bit order +CRC_BITS = 32 # The degree of G, i.e. the 32 in "CRC-32C" +LSB_FIRST = True # CRC-32C is a least-significant-bit-first CRC +NUM_SIZE_BITS = 64 # The maximum number of bits in the length (size_t) +NUM_DROPPED_BITS = 4 # The number of bits dropped from the length +LOG2_BITS_PER_BYTE = 3 # log2 of the number of bits in a byte, i.e. log2(8) +X = 2 # The polynomial 'x', in the "natural" bit order + + +def print_crc32c_powers(): + """Generates kCRC32CPowers[]. + + kCRC32CPowers[] is an array of length NUM_SIZE_BITS - NUM_DROPPED_BITS, + whose i'th entry is x^(2^(i + LOG2_BITS_PER_BYTE + NUM_DROPPED_BITS) - + CRC_BITS - 1) mod G. See kCRC32CPowers[] in the C++ source for more info. 
+ """ + for i in range(NUM_SIZE_BITS - NUM_DROPPED_BITS): + poly = poly_exp( + X, + 2 ** (i + LOG2_BITS_PER_BYTE + NUM_DROPPED_BITS) + - CRC_BITS + - (1 if LSB_FIRST else 0), + G, + ) + poly = bitreflect(poly, CRC_BITS) + print(f'0x{poly:0{2*CRC_BITS//8}x}, ', end='') + + +if __name__ == '__main__': + print_crc32c_powers() diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel index cd0f1dea8db..ed7cc493e90 100644 --- a/absl/debugging/BUILD.bazel +++ b/absl/debugging/BUILD.bazel @@ -55,6 +55,7 @@ cc_library( "//absl/base:config", "//absl/base:core_headers", "//absl/base:dynamic_annotations", + "//absl/base:malloc_internal", "//absl/base:raw_logging_internal", ], ) @@ -68,6 +69,7 @@ cc_test( ":stacktrace", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:errno_saver", "//absl/types:span", "@googletest//:gtest", "@googletest//:gtest_main", diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt index 60b138a5399..d8249fed502 100644 --- a/absl/debugging/CMakeLists.txt +++ b/absl/debugging/CMakeLists.txt @@ -42,6 +42,7 @@ absl_cc_library( absl::config absl::core_headers absl::dynamic_annotations + absl::malloc_internal absl::raw_logging_internal PUBLIC ) @@ -57,6 +58,7 @@ absl_cc_test( absl::stacktrace absl::config absl::core_headers + absl::errno_saver absl::span GTest::gmock_main ) diff --git a/absl/debugging/internal/decode_rust_punycode.h b/absl/debugging/internal/decode_rust_punycode.h index b1b1c97feea..44aad8adb2b 100644 --- a/absl/debugging/internal/decode_rust_punycode.h +++ b/absl/debugging/internal/decode_rust_punycode.h @@ -23,10 +23,10 @@ ABSL_NAMESPACE_BEGIN namespace debugging_internal { struct DecodeRustPunycodeOptions { - const char* punycode_begin; - const char* punycode_end; - char* out_begin; - char* out_end; + const char* absl_nonnull punycode_begin; + const char* absl_nonnull punycode_end; + char* absl_nonnull out_begin; + char* absl_nonnull out_end; }; // Given Rust Punycode in `punycode_begin .. punycode_end`, writes the diff --git a/absl/debugging/internal/demangle.cc b/absl/debugging/internal/demangle.cc index dc15b8e5849..5f62ebb8978 100644 --- a/absl/debugging/internal/demangle.cc +++ b/absl/debugging/internal/demangle.cc @@ -484,36 +484,6 @@ static bool IsAlpha(char c) { static bool IsDigit(char c) { return c >= '0' && c <= '9'; } -// Returns true if "str" is a function clone suffix. These suffixes are used -// by GCC 4.5.x and later versions (and our locally-modified version of GCC -// 4.4.x) to indicate functions which have been cloned during optimization. -// We treat any sequence (.+.+)+ as a function clone suffix. -// Additionally, '_' is allowed along with the alphanumeric sequence. -static bool IsFunctionCloneSuffix(const char *str) { - size_t i = 0; - while (str[i] != '\0') { - bool parsed = false; - // Consume a single [. | _]*[.]* sequence. - if (str[i] == '.' && (IsAlpha(str[i + 1]) || str[i + 1] == '_')) { - parsed = true; - i += 2; - while (IsAlpha(str[i]) || str[i] == '_') { - ++i; - } - } - if (str[i] == '.' && IsDigit(str[i + 1])) { - parsed = true; - i += 2; - while (IsDigit(str[i])) { - ++i; - } - } - if (!parsed) - return false; - } - return true; // Consumed everything in "str". 
-} - static bool EndsWith(State *state, const char chr) { return state->parse_state.out_cur_idx > 0 && state->parse_state.out_cur_idx < state->out_end_idx && @@ -2932,7 +2902,7 @@ static bool ParseTopLevelMangledName(State *state) { if (ParseMangledName(state)) { if (RemainingInput(state)[0] != '\0') { // Drop trailing function clone suffix, if any. - if (IsFunctionCloneSuffix(RemainingInput(state))) { + if (RemainingInput(state)[0] == '.') { return true; } // Append trailing version suffix if any. diff --git a/absl/debugging/internal/demangle_test.cc b/absl/debugging/internal/demangle_test.cc index 9c8225a7599..1731197e57e 100644 --- a/absl/debugging/internal/demangle_test.cc +++ b/absl/debugging/internal/demangle_test.cc @@ -556,14 +556,15 @@ TEST(Demangle, Clones) { EXPECT_TRUE(Demangle("_ZL3Foov.part.9.165493.constprop.775.31805", tmp, sizeof(tmp))); EXPECT_STREQ("Foo()", tmp); - // Invalid (. without anything else), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.", tmp, sizeof(tmp))); - // Invalid (. with mix of alpha and digits), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.abc123", tmp, sizeof(tmp))); - // Invalid (.clone. not followed by number), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp))); - // Invalid (.constprop. not followed by number), should not demangle. - EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp))); + // Other suffixes should demangle too. + EXPECT_TRUE(Demangle("_ZL3Foov.", tmp, sizeof(tmp))); + EXPECT_STREQ("Foo()", tmp); + EXPECT_TRUE(Demangle("_ZL3Foov.abc123", tmp, sizeof(tmp))); + EXPECT_STREQ("Foo()", tmp); + EXPECT_TRUE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp))); + EXPECT_STREQ("Foo()", tmp); + EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp))); + EXPECT_STREQ("Foo()", tmp); } TEST(Demangle, Discriminators) { @@ -1935,11 +1936,11 @@ static const char *DemangleStackConsumption(const char *mangled, return g_demangle_result; } -// Demangle stack consumption should be within 8kB for simple mangled names +// Demangle stack consumption should be within 9kB for simple mangled names // with some level of nesting. With alternate signal stack we have 64K, // but some signal handlers run on thread stack, and could have arbitrarily // little space left (so we don't want to make this number too large). -const int kStackConsumptionUpperLimit = 8192; +const int kStackConsumptionUpperLimit = 9670; // Returns a mangled name nested to the given depth. 
static std::string NestedMangledName(int depth) {
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index 1fac29c52b0..c8a103669ca 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -32,10 +32,10 @@
#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
#endif
-#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
-    !defined(__native_client__) && !defined(__asmjs__) && \
-    !defined(__wasm__) && !defined(__HAIKU__) && !defined(__sun) && \
-    !defined(__VXWORKS__) && !defined(__hexagon__) && !defined(__XTENSA__)
+#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
+    !defined(__asmjs__) && !defined(__wasm__) && !defined(__HAIKU__) && \
+    !defined(__sun) && !defined(__VXWORKS__) && !defined(__hexagon__) && \
+    !defined(__XTENSA__)
#define ABSL_HAVE_ELF_MEM_IMAGE 1
#endif
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
index 1746b5d4c34..bbdce77f851 100644
--- a/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -123,7 +123,7 @@ static void **NextStackFrame(void **old_frame_pointer, const void *uc,
// earlier in the stack than the old_frame_pointer, then use it. If it is
// later, then we have already unwound through it and it needs no special
// handling.
-    if (pre_signal_frame_pointer >= old_frame_pointer) {
+    if (pre_signal_frame_pointer > old_frame_pointer) {
      new_frame_pointer = pre_signal_frame_pointer;
    }
  }
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
index 88949fe9740..c82d4a3694d 100644
--- a/absl/debugging/internal/stacktrace_config.h
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -43,11 +43,12 @@
"absl/debugging/internal/stacktrace_emscripten-inl.inc"
#elif defined(__ANDROID__) && __ANDROID_API__ >= 33
-
+#ifdef ABSL_HAVE_THREAD_LOCAL
// Use the generic implementation for Android 33+ (Android T+). This is the
// first version of Android that implements backtrace().
#define ABSL_STACKTRACE_INL_HEADER \
  "absl/debugging/internal/stacktrace_generic-inl.inc"
+#endif  // defined(ABSL_HAVE_THREAD_LOCAL)
#elif defined(__linux__) && !defined(__ANDROID__)
@@ -59,11 +60,11 @@
  "absl/debugging/internal/stacktrace_libunwind-inl.inc"
#define STACKTRACE_USES_LIBUNWIND 1
#elif defined(NO_FRAME_POINTER) && defined(__has_include)
-#if __has_include(<execinfo.h>)
+#if __has_include(<execinfo.h>) && defined(ABSL_HAVE_THREAD_LOCAL)
// Note: When using glibc this may require -funwind-tables to function properly.
#define ABSL_STACKTRACE_INL_HEADER \
  "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif  // __has_include(<execinfo.h>)
+#endif  // __has_include(<execinfo.h>) && defined(ABSL_HAVE_THREAD_LOCAL)
#elif defined(__i386__) || defined(__x86_64__)
#define ABSL_STACKTRACE_INL_HEADER \
  "absl/debugging/internal/stacktrace_x86-inl.inc"
@@ -77,11 +78,11 @@
#define ABSL_STACKTRACE_INL_HEADER \
  "absl/debugging/internal/stacktrace_riscv-inl.inc"
#elif defined(__has_include)
-#if __has_include(<execinfo.h>)
+#if __has_include(<execinfo.h>) && defined(ABSL_HAVE_THREAD_LOCAL)
// Note: When using glibc this may require -funwind-tables to function properly.
#define ABSL_STACKTRACE_INL_HEADER \
  "absl/debugging/internal/stacktrace_generic-inl.inc"
-#endif  // __has_include(<execinfo.h>)
+#endif  // __has_include(<execinfo.h>) && defined(ABSL_HAVE_THREAD_LOCAL)
#endif  // defined(__has_include)
#endif  // defined(__linux__) && !defined(__ANDROID__)
diff --git a/absl/debugging/internal/stacktrace_riscv-inl.inc b/absl/debugging/internal/stacktrace_riscv-inl.inc
index f9919c6156b..7ae7fefad85 100644
--- a/absl/debugging/internal/stacktrace_riscv-inl.inc
+++ b/absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -162,7 +162,8 @@ static int UnwindImpl(void **result, uintptr_t *frames, int *sizes,
          absl::debugging_internal::StripPointerMetadata(frame_pointer);
    }
    if (sizes != nullptr) {
-      sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
+      sizes[n] = static_cast<int>(
+          ComputeStackFrameSize(frame_pointer, next_frame_pointer));
    }
  }
  n++;
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
index 96b128e04ea..bf6e5abaa6b 100644
--- a/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -261,17 +261,18 @@ static void **NextStackFrame(void **old_fp, const void *uc,
  // it's supposed to.
  if (STRICT_UNWINDING &&
      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
-    // With the stack growing downwards, older stack frame must be
-    // at a greater address that the current one.
-    if (new_fp_u <= old_fp_u) return nullptr;
-
+    // With the stack growing downwards, the older stack frame should be
+    // at a greater address than the current one. However, if multiple
+    // signals are handled on the altstack, the new frame pointer might
+    // return to the main stack but differ from the value in the most
+    // recent ucontext.
    // If we get a very large frame size, it may be an indication that we
    // guessed frame pointers incorrectly and now risk a paging fault
    // dereferencing a wrong frame pointer. Or maybe not because large frames
    // are possible as well. The main stack is assumed to be readable,
    // so we assume the large frame is legit if we know the real stack bounds
    // and are within the stack.
-    if (new_fp_u - old_fp_u > kMaxFrameBytes) {
+    if (new_fp_u <= old_fp_u || new_fp_u - old_fp_u > kMaxFrameBytes) {
      if (stack_high < kUnknownStackEnd &&
          static_cast<size_t>(getpagesize()) < stack_low) {
        // Stack bounds are known.
diff --git a/absl/debugging/internal/symbolize.h b/absl/debugging/internal/symbolize.h
index 5593fde6b4c..509f4267c8d 100644
--- a/absl/debugging/internal/symbolize.h
+++ b/absl/debugging/internal/symbolize.h
@@ -28,8 +28,8 @@
#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
-#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \
-      && !defined(__asmjs__) && !defined(__wasm__)
+#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__asmjs__) \
+      && !defined(__wasm__)
#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
#include <elf.h>
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
index 8a588eaffee..f7e2a443500 100644
--- a/absl/debugging/internal/vdso_support.cc
+++ b/absl/debugging/internal/vdso_support.cc
@@ -17,6 +17,7 @@
// VDSOSupport -- a class representing kernel VDSO (if present).
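The x86 unwinder hunk above merges the backwards-step rejection into the large-frame branch, so a frame that legitimately hops from an alternate signal stack back to the main stack can still be accepted when known stack bounds vouch for it. A condensed paraphrase of the merged test (not the library's exact code; the constant and the bounds handling are simplified for illustration):

    #include <cstdint>

    constexpr uintptr_t kMaxFrameBytes = 100000;  // illustrative value

    // True if new_fp is a believable older frame pointer than old_fp.
    bool PlausibleNextFrame(uintptr_t old_fp, uintptr_t new_fp,
                            uintptr_t stack_low, uintptr_t stack_high) {
      if (new_fp <= old_fp || new_fp - old_fp > kMaxFrameBytes) {
        // Suspicious step: accept it only if known stack bounds contain it.
        return stack_low < new_fp && new_fp < stack_high;
      }
      return true;
    }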
#include "absl/debugging/internal/vdso_support.h" +#include "absl/base/attributes.h" #ifdef ABSL_HAVE_VDSO_SUPPORT // defined in vdso_support.h @@ -190,6 +191,9 @@ long VDSOSupport::InitAndGetCPU(unsigned *cpu, // NOLINT(runtime/int) // This function must be very fast, and may be called from very // low level (e.g. tcmalloc). Hence I avoid things like // GoogleOnceInit() and ::operator new. +// The destination in VDSO is unknown to CFI and VDSO does not set MSAN +// shadow for the return value. +ABSL_ATTRIBUTE_NO_SANITIZE_CFI ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int GetCPU() { unsigned cpu; diff --git a/absl/debugging/stacktrace.cc b/absl/debugging/stacktrace.cc index f71e80cfef0..67df814c5e3 100644 --- a/absl/debugging/stacktrace.cc +++ b/absl/debugging/stacktrace.cc @@ -38,40 +38,19 @@ #include #include +#include #include #include +#include #include "absl/base/attributes.h" #include "absl/base/config.h" +#include "absl/base/internal/low_level_alloc.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/debugging/internal/stacktrace_config.h" -#ifdef ABSL_INTERNAL_HAVE_ALLOCA -#error ABSL_INTERNAL_HAVE_ALLOCA cannot be directly set -#endif - -#ifdef _WIN32 -#include -#define ABSL_INTERNAL_HAVE_ALLOCA 1 -#else -#ifdef __has_include -#if __has_include() -#include -#define ABSL_INTERNAL_HAVE_ALLOCA 1 -#elif !defined(alloca) -static void* alloca(size_t) noexcept { return nullptr; } -#endif -#endif -#endif - -#ifdef ABSL_INTERNAL_HAVE_ALLOCA -static constexpr bool kHaveAlloca = true; -#else -static constexpr bool kHaveAlloca = false; -#endif - #if defined(ABSL_STACKTRACE_INL_HEADER) #include ABSL_STACKTRACE_INL_HEADER #else @@ -97,25 +76,102 @@ std::atomic custom; template ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames, - int* sizes, int max_depth, + int* sizes, size_t max_depth, int skip_count, const void* uc, int* min_dropped_frames) { + static constexpr size_t kMinPageSize = 4096; + + // Allow up to ~half a page, leaving some slack space for local variables etc. + static constexpr size_t kMaxStackElements = + (kMinPageSize / 2) / (sizeof(*frames) + sizeof(*sizes)); + + // Allocate a buffer dynamically, using the signal-safe allocator. + static constexpr auto allocate = [](size_t num_bytes) -> void* { + base_internal::InitSigSafeArena(); + return base_internal::LowLevelAlloc::AllocWithArena( + num_bytes, base_internal::SigSafeArena()); + }; + + uintptr_t frames_stackbuf[kMaxStackElements]; + int sizes_stackbuf[kMaxStackElements]; + + // We only need to free the buffers if we allocated them with the signal-safe + // allocator. + bool must_free_frames = false; + bool must_free_sizes = false; + + bool unwind_with_fixup = internal_stacktrace::ShouldFixUpStack(); + +#ifdef _WIN32 + if (unwind_with_fixup) { + // TODO(b/434184677): Fixups are flaky and not supported on Windows + unwind_with_fixup = false; +#ifndef NDEBUG + abort(); +#endif + } +#endif + + if (unwind_with_fixup) { + // Some implementations of FixUpStack may need to be passed frame + // information from Unwind, even if the caller doesn't need that + // information. We allocate the necessary buffers for such implementations + // here. 
+ + if (frames == nullptr) { + if (max_depth <= std::size(frames_stackbuf)) { + frames = frames_stackbuf; + } else { + frames = static_cast(allocate(max_depth * sizeof(*frames))); + must_free_frames = true; + } + } + + if (sizes == nullptr) { + if (max_depth <= std::size(sizes_stackbuf)) { + sizes = sizes_stackbuf; + } else { + sizes = static_cast(allocate(max_depth * sizeof(*sizes))); + must_free_sizes = true; + } + } + } + Unwinder g = custom.load(std::memory_order_acquire); - int size; + size_t size; // Add 1 to skip count for the unwinder function itself ++skip_count; if (g != nullptr) { - size = (*g)(result, sizes, max_depth, skip_count, uc, min_dropped_frames); + size = static_cast((*g)(result, sizes, static_cast(max_depth), + skip_count, uc, min_dropped_frames)); // Frame pointers aren't returned by existing hooks, so clear them. if (frames != nullptr) { std::fill(frames, frames + size, uintptr_t()); } } else { - size = UnwindImpl( - result, frames, sizes, max_depth, skip_count, uc, min_dropped_frames); + size = static_cast( + unwind_with_fixup + ? UnwindImpl( + result, frames, sizes, static_cast(max_depth), + skip_count, uc, min_dropped_frames) + : UnwindImpl( + result, frames, sizes, static_cast(max_depth), + skip_count, uc, min_dropped_frames)); + } + if (unwind_with_fixup) { + internal_stacktrace::FixUpStack(result, frames, sizes, max_depth, size); + } + + if (must_free_sizes) { + base_internal::LowLevelAlloc::Free(sizes); + } + + if (must_free_frames) { + base_internal::LowLevelAlloc::Free(frames); } + ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); - return size; + return static_cast(size); } } // anonymous namespace @@ -123,15 +179,8 @@ ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, uintptr_t* frames, ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int internal_stacktrace::GetStackFrames(void** result, uintptr_t* frames, int* sizes, int max_depth, int skip_count) { - if (internal_stacktrace::ShouldFixUpStack()) { - size_t depth = static_cast(Unwind( - result, frames, sizes, max_depth, skip_count, nullptr, nullptr)); - internal_stacktrace::FixUpStack(result, frames, sizes, - static_cast(max_depth), depth); - return static_cast(depth); - } - - return Unwind(result, frames, sizes, max_depth, skip_count, + return Unwind(result, frames, sizes, + static_cast(max_depth), skip_count, nullptr, nullptr); } @@ -140,56 +189,24 @@ internal_stacktrace::GetStackFramesWithContext(void** result, uintptr_t* frames, int* sizes, int max_depth, int skip_count, const void* uc, int* min_dropped_frames) { - if (internal_stacktrace::ShouldFixUpStack()) { - size_t depth = static_cast(Unwind( - result, frames, sizes, max_depth, skip_count, uc, min_dropped_frames)); - internal_stacktrace::FixUpStack(result, frames, sizes, - static_cast(max_depth), depth); - return static_cast(depth); - } - - return Unwind(result, frames, sizes, max_depth, skip_count, uc, + return Unwind(result, frames, sizes, + static_cast(max_depth), skip_count, uc, min_dropped_frames); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace( void** result, int max_depth, int skip_count) { - if (internal_stacktrace::ShouldFixUpStack()) { - if constexpr (kHaveAlloca) { - const size_t nmax = static_cast(max_depth); - uintptr_t* frames = - static_cast(alloca(nmax * sizeof(*frames))); - int* sizes = static_cast(alloca(nmax * sizeof(*sizes))); - size_t depth = static_cast(Unwind( - result, frames, sizes, max_depth, skip_count, nullptr, nullptr)); - internal_stacktrace::FixUpStack(result, frames, sizes, nmax, depth); 
- return static_cast(depth); - } - } - - return Unwind(result, nullptr, nullptr, max_depth, skip_count, + return Unwind(result, nullptr, nullptr, + static_cast(max_depth), skip_count, nullptr, nullptr); } ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void* uc, int* min_dropped_frames) { - if (internal_stacktrace::ShouldFixUpStack()) { - if constexpr (kHaveAlloca) { - const size_t nmax = static_cast(max_depth); - uintptr_t* frames = - static_cast(alloca(nmax * sizeof(*frames))); - int* sizes = static_cast(alloca(nmax * sizeof(*sizes))); - size_t depth = static_cast( - Unwind(result, frames, sizes, max_depth, skip_count, uc, - min_dropped_frames)); - internal_stacktrace::FixUpStack(result, frames, sizes, nmax, depth); - return static_cast(depth); - } - } - - return Unwind(result, nullptr, nullptr, max_depth, skip_count, - uc, min_dropped_frames); + return Unwind(result, nullptr, nullptr, + static_cast(max_depth), skip_count, uc, + min_dropped_frames); } void SetStackUnwinder(Unwinder w) { diff --git a/absl/debugging/stacktrace_test.cc b/absl/debugging/stacktrace_test.cc index 4477d84c1df..c1d3d845eec 100644 --- a/absl/debugging/stacktrace_test.cc +++ b/absl/debugging/stacktrace_test.cc @@ -18,17 +18,23 @@ #include #include +#include +#include +#include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" +#include "absl/base/internal/errno_saver.h" #include "absl/base/optimization.h" #include "absl/types/span.h" static int g_should_fixup_calls = 0; static int g_fixup_calls = 0; static bool g_enable_fixup = false; +static uintptr_t g_last_fixup_frame_address = 0; #if ABSL_HAVE_ATTRIBUTE_WEAK bool absl::internal_stacktrace::ShouldFixUpStack() { @@ -38,6 +44,11 @@ bool absl::internal_stacktrace::ShouldFixUpStack() { void absl::internal_stacktrace::FixUpStack(void**, uintptr_t*, int*, size_t, size_t&) { + const void* frame_address = nullptr; +#if ABSL_HAVE_BUILTIN(__builtin_frame_address) + frame_address = __builtin_frame_address(0); +#endif + g_last_fixup_frame_address = reinterpret_cast(frame_address); ++g_fixup_calls; } #endif @@ -82,11 +93,21 @@ TEST(StackTrace, HugeFrame) { // This is a separate function to avoid inlining. 
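With the alloca paths above removed, all four public entry points now funnel into the single Unwind helper, which draws any scratch buffers from a small fixed stack array or the signal-safe arena. For orientation, a minimal caller of the public API from absl/debugging/stacktrace.h, whose signature is unchanged by this diff:

    #include <cstdio>
    #include "absl/debugging/stacktrace.h"

    void DumpCurrentStack() {
      void* pcs[64];
      // Skip one frame so DumpCurrentStack itself is not reported.
      const int depth = absl::GetStackTrace(pcs, 64, /*skip_count=*/1);
      for (int i = 0; i < depth; ++i) {
        std::printf("%2d: %p\n", i, pcs[i]);
      }
    }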
ABSL_ATTRIBUTE_NOINLINE static void FixupNoFixupEquivalenceNoInline() { +#if !ABSL_HAVE_ATTRIBUTE_WEAK + GTEST_SKIP() << "Need weak symbol support"; +#endif #if defined(__riscv) GTEST_SKIP() << "Skipping test on RISC-V due to pre-existing failure"; #endif +#if defined(_WIN32) + // TODO(b/434184677): Add support for fixups on Windows if needed + GTEST_SKIP() << "Skipping test on Windows due to lack of support for fixups"; +#endif + bool can_rely_on_frame_pointers = false; + if (!can_rely_on_frame_pointers) { + GTEST_SKIP() << "Frame pointers are required, but not guaranteed in OSS"; + } -#if ABSL_HAVE_ATTRIBUTE_WEAK // This test is known not to pass on MSVC (due to weak symbols) const Cleanup restore_state([enable_fixup = g_enable_fixup, @@ -206,14 +227,107 @@ ABSL_ATTRIBUTE_NOINLINE static void FixupNoFixupEquivalenceNoInline() { ContainerEq(absl::MakeSpan(b.frames, static_cast(b.depth)))); EXPECT_GT(g_should_fixup_calls, 0); EXPECT_GE(g_should_fixup_calls, g_fixup_calls); +} - // ========================================================================== -#else - GTEST_SKIP() << "Need weak symbol support"; +TEST(StackTrace, FixupNoFixupEquivalence) { FixupNoFixupEquivalenceNoInline(); } + +TEST(StackTrace, FixupLowStackUsage) { +#if !ABSL_HAVE_ATTRIBUTE_WEAK + GTEST_SKIP() << "Skipping test on MSVC due to weak symbols"; +#endif +#if defined(_WIN32) + // TODO(b/434184677): Add support for fixups on Windows if needed + GTEST_SKIP() << "Skipping test on Windows due to lack of support for fixups"; +#endif + + const Cleanup restore_state([enable_fixup = g_enable_fixup, + fixup_calls = g_fixup_calls, + should_fixup_calls = g_should_fixup_calls]() { + g_enable_fixup = enable_fixup; + g_fixup_calls = fixup_calls; + g_should_fixup_calls = should_fixup_calls; + }); + + g_enable_fixup = true; + + // Request a ton of stack frames, regardless of how many are actually used. + // It's fine to request more frames than we have, since functions preallocate + // memory before discovering how high the stack really is, and we're really + // just trying to make sure the preallocations don't overflow the stack. + // + // Note that we loop in order to cover all sides of any branches in the + // implementation that switch allocation behavior (e.g., from stack to heap) + // and to ensure that no sides allocate too much stack space. 
+ constexpr size_t kPageSize = 4096; + for (size_t depth = 2; depth < (1 << 20); depth += depth / 2) { + const auto stack = std::make_unique(depth); + const auto frames = std::make_unique(depth); + + absl::GetStackFrames(stack.get(), frames.get(), static_cast(depth), 0); + const void* frame_address = nullptr; +#if ABSL_HAVE_BUILTIN(__builtin_frame_address) + frame_address = __builtin_frame_address(0); #endif + size_t stack_usage = + reinterpret_cast(frame_address) - g_last_fixup_frame_address; + EXPECT_LT(stack_usage, kPageSize); + } } -TEST(StackTrace, FixupNoFixupEquivalence) { FixupNoFixupEquivalenceNoInline(); } +TEST(StackTrace, CustomUnwinderPerformsFixup) { +#if !ABSL_HAVE_ATTRIBUTE_WEAK + GTEST_SKIP() << "Need weak symbol support"; +#endif +#if defined(_WIN32) + // TODO(b/434184677): Add support for fixups on Windows if needed + GTEST_SKIP() << "Skipping test on Windows due to lack of support for fixups"; +#endif + + constexpr int kSkip = 1; // Skip our own frame, whose return PCs won't match + constexpr auto kStackCount = 1; + + absl::SetStackUnwinder(absl::DefaultStackUnwinder); + const Cleanup restore_state([enable_fixup = g_enable_fixup, + fixup_calls = g_fixup_calls, + should_fixup_calls = g_should_fixup_calls]() { + absl::SetStackUnwinder(nullptr); + g_enable_fixup = enable_fixup; + g_fixup_calls = fixup_calls; + g_should_fixup_calls = should_fixup_calls; + }); + + StackTrace trace; + + g_enable_fixup = true; + g_should_fixup_calls = 0; + g_fixup_calls = 0; + absl::GetStackTrace(trace.result, kSkip, kStackCount); + EXPECT_GT(g_should_fixup_calls, 0); + EXPECT_GT(g_fixup_calls, 0); + + g_enable_fixup = true; + g_should_fixup_calls = 0; + g_fixup_calls = 0; + absl::GetStackFrames(trace.result, trace.sizes, kSkip, kStackCount); + EXPECT_GT(g_should_fixup_calls, 0); + EXPECT_GT(g_fixup_calls, 0); + + g_enable_fixup = true; + g_should_fixup_calls = 0; + g_fixup_calls = 0; + absl::GetStackTraceWithContext(trace.result, kSkip, kStackCount, nullptr, + nullptr); + EXPECT_GT(g_should_fixup_calls, 0); + EXPECT_GT(g_fixup_calls, 0); + + g_enable_fixup = true; + g_should_fixup_calls = 0; + g_fixup_calls = 0; + absl::GetStackFramesWithContext(trace.result, trace.sizes, kSkip, kStackCount, + nullptr, nullptr); + EXPECT_GT(g_should_fixup_calls, 0); + EXPECT_GT(g_fixup_calls, 0); +} #if ABSL_HAVE_BUILTIN(__builtin_frame_address) struct FrameInfo { @@ -295,4 +409,75 @@ TEST(StackTrace, CanonicalFrameAddresses) { } #endif +// This test is Linux specific. +#if defined(__linux__) +const void* g_return_address = nullptr; +bool g_sigusr2_raised = false; + +void SigUsr2Handler(int, siginfo_t*, void* uc) { + absl::base_internal::ErrnoSaver errno_saver; + // Many platforms don't support this by default. + bool support_is_expected = false; + constexpr int kMaxStackDepth = 64; + void* result[kMaxStackDepth]; + int depth = + absl::GetStackTraceWithContext(result, kMaxStackDepth, 0, uc, nullptr); + // Verify we can unwind past the nested signal handlers. 
+  if (support_is_expected) {
+    EXPECT_THAT(absl::MakeSpan(result, static_cast<size_t>(depth)),
+                Contains(g_return_address).Times(1));
+  }
+  depth = absl::GetStackTrace(result, kMaxStackDepth, 0);
+  if (support_is_expected) {
+    EXPECT_THAT(absl::MakeSpan(result, static_cast<size_t>(depth)),
+                Contains(g_return_address).Times(1));
+  }
+  g_sigusr2_raised = true;
+}
+
+void SigUsr1Handler(int, siginfo_t*, void*) {
+  raise(SIGUSR2);
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+ABSL_ATTRIBUTE_NOINLINE void RaiseSignal() {
+  g_return_address = __builtin_return_address(0);
+  raise(SIGUSR1);
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+ABSL_ATTRIBUTE_NOINLINE void TestNestedSignal() {
+  constexpr size_t kAltstackSize = 1 << 14;
+  // Allocate the altstack on the regular stack to make sure it'll have a
+  // higher address than some of the regular stack frames.
+  char space[kAltstackSize];
+  stack_t altstack;
+  stack_t old_stack;
+  altstack.ss_sp = space;
+  altstack.ss_size = kAltstackSize;
+  altstack.ss_flags = 0;
+  ASSERT_EQ(sigaltstack(&altstack, &old_stack), 0) << strerror(errno);
+  struct sigaction act;
+  struct sigaction oldusr1act;
+  struct sigaction oldusr2act;
+  act.sa_sigaction = SigUsr1Handler;
+  act.sa_flags = SA_SIGINFO | SA_ONSTACK;
+  sigemptyset(&act.sa_mask);
+  ASSERT_EQ(sigaction(SIGUSR1, &act, &oldusr1act), 0) << strerror(errno);
+  act.sa_sigaction = SigUsr2Handler;
+  ASSERT_EQ(sigaction(SIGUSR2, &act, &oldusr2act), 0) << strerror(errno);
+  RaiseSignal();
+  ASSERT_EQ(sigaltstack(&old_stack, nullptr), 0) << strerror(errno);
+  ASSERT_EQ(sigaction(SIGUSR1, &oldusr1act, nullptr), 0) << strerror(errno);
+  ASSERT_EQ(sigaction(SIGUSR2, &oldusr2act, nullptr), 0) << strerror(errno);
+  ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+}
+
+TEST(StackTrace, NestedSignal) {
+  // Verify we can unwind past the nested signal handlers.
+  TestNestedSignal();
+  EXPECT_TRUE(g_sigusr2_raised);
+}
+#endif
+
} // namespace
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
index 9836c93295c..0317bbc316a 100644
--- a/absl/debugging/symbolize_elf.inc
+++ b/absl/debugging/symbolize_elf.inc
@@ -167,22 +167,22 @@ struct FileMappingHint {
// We are using SpinLock and not a Mutex here, because we may be called
// from inside Mutex::Lock itself, and it prohibits recursive calls.
// This happens in e.g. base/stacktrace_syscall_unittest.
-// Moreover, we are using only TryLock(), if the decorator list
+// Moreover, we are using only try_lock(); if the decorator list
// is being modified (is busy), we skip all decorators, and possibly
// lose some info. Sorry, that's the best we could do.
ABSL_CONST_INIT absl::base_internal::SpinLock g_decorators_mu(
-    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
+    absl::base_internal::SCHEDULE_KERNEL_ONLY);
const int kMaxFileMappingHints = 8;
int g_num_file_mapping_hints;
FileMappingHint g_file_mapping_hints[kMaxFileMappingHints];
// Protects g_file_mapping_hints.
ABSL_CONST_INIT absl::base_internal::SpinLock g_file_mapping_mu(
-    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
+    absl::base_internal::SCHEDULE_KERNEL_ONLY);
// Async-signal-safe function to zero a buffer.
// memset() is not guaranteed to be async-signal-safe.
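The symbolizer keeps its long-standing best-effort locking idiom here, now spelled with the std-style names try_lock()/unlock(): under contention it simply skips the decorators rather than block on a path that must stay async-signal-safe. The shape of the idiom, sketched with std::mutex as a stand-in for the internal SpinLock:

    #include <mutex>

    std::mutex decorators_mu;  // stand-in for g_decorators_mu above

    void MaybeRunDecorators() {
      // Best-effort: if the decorator list is busy, drop the extra
      // symbol info for this lookup instead of waiting for the lock.
      if (decorators_mu.try_lock()) {
        // ... invoke the registered decorators ...
        decorators_mu.unlock();
      }
    }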
-static void SafeMemZero(void* p, size_t size) { +static void SafeMemZero(void *p, size_t size) { unsigned char *c = static_cast(p); while (size--) { *c++ = 0; @@ -232,29 +232,6 @@ struct SymbolCacheLine { uint32_t age[ASSOCIATIVITY]; }; -// --------------------------------------------------------------- -// An async-signal-safe arena for LowLevelAlloc -static std::atomic g_sig_safe_arena; - -static base_internal::LowLevelAlloc::Arena *SigSafeArena() { - return g_sig_safe_arena.load(std::memory_order_acquire); -} - -static void InitSigSafeArena() { - if (SigSafeArena() == nullptr) { - base_internal::LowLevelAlloc::Arena *new_arena = - base_internal::LowLevelAlloc::NewArena( - base_internal::LowLevelAlloc::kAsyncSignalSafe); - base_internal::LowLevelAlloc::Arena *old_value = nullptr; - if (!g_sig_safe_arena.compare_exchange_strong(old_value, new_arena, - std::memory_order_release, - std::memory_order_relaxed)) { - // We lost a race to allocate an arena; deallocate. - base_internal::LowLevelAlloc::DeleteArena(new_arena); - } - } -} - // --------------------------------------------------------------- // An AddrMap is a vector of ObjFile, using SigSafeArena() for allocation. @@ -287,7 +264,7 @@ ObjFile *AddrMap::Add() { size_t new_allocated = allocated_ * 2 + 50; ObjFile *new_obj_ = static_cast(base_internal::LowLevelAlloc::AllocWithArena( - new_allocated * sizeof(*new_obj_), SigSafeArena())); + new_allocated * sizeof(*new_obj_), base_internal::SigSafeArena())); if (obj_) { memcpy(new_obj_, obj_, allocated_ * sizeof(*new_obj_)); base_internal::LowLevelAlloc::Free(obj_); @@ -335,8 +312,9 @@ class Symbolizer { private: char *CopyString(const char *s) { size_t len = strlen(s); - char *dst = static_cast( - base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + char *dst = + static_cast(base_internal::LowLevelAlloc::AllocWithArena( + len + 1, base_internal::SigSafeArena())); ABSL_RAW_CHECK(dst != nullptr, "out of memory"); memcpy(dst, s, len + 1); return dst; @@ -441,14 +419,14 @@ static size_t SymbolizerSize() { // Return (and set null) g_cached_symbolized_state if it is not null. // Otherwise return a new symbolizer. static Symbolizer *AllocateSymbolizer() { - InitSigSafeArena(); + base_internal::InitSigSafeArena(); Symbolizer *symbolizer = g_cached_symbolizer.exchange(nullptr, std::memory_order_acquire); if (symbolizer != nullptr) { return symbolizer; } return new (base_internal::LowLevelAlloc::AllocWithArena( - SymbolizerSize(), SigSafeArena())) Symbolizer(); + SymbolizerSize(), base_internal::SigSafeArena())) Symbolizer(); } // Set g_cached_symbolize_state to s if it is null, otherwise @@ -1469,14 +1447,15 @@ static bool MaybeInitializeObjFile(ObjFile *obj) { constexpr int interesting = PF_X | PF_R; #endif - if (phdr.p_type != PT_LOAD - || (phdr.p_flags & interesting) != interesting) { + if (phdr.p_type != PT_LOAD || + (phdr.p_flags & interesting) != interesting) { // Not a LOAD segment, not executable code, and not a function // descriptor. 
continue; } if (num_interesting_load_segments < obj->phdr.size()) { - memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, sizeof(phdr)); + memcpy(&obj->phdr[num_interesting_load_segments++], &phdr, + sizeof(phdr)); } else { ABSL_RAW_LOG( WARNING, "%s: too many interesting LOAD segments: %zu >= %zu", @@ -1525,7 +1504,8 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) { ABSL_RAW_CHECK(p.p_type == PT_NULL, "unexpected p_type"); break; } - if (pc < reinterpret_cast(start_addr + p.p_vaddr + p.p_memsz)) { + if (pc < + reinterpret_cast(start_addr + p.p_vaddr + p.p_memsz)) { phdr = &p; break; } @@ -1569,7 +1549,7 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) { #endif } - if (g_decorators_mu.TryLock()) { + if (g_decorators_mu.try_lock()) { if (g_num_decorators > 0) { SymbolDecoratorArgs decorator_args = { pc, relocation, fd, symbol_buf_, sizeof(symbol_buf_), @@ -1579,7 +1559,7 @@ const char *Symbolizer::GetUncachedSymbol(const void *pc) { g_decorators[i].fn(&decorator_args); } } - g_decorators_mu.Unlock(); + g_decorators_mu.unlock(); } if (symbol_buf_[0] == '\0') { return nullptr; @@ -1625,17 +1605,17 @@ const char *Symbolizer::GetSymbol(const void *pc) { } bool RemoveAllSymbolDecorators(void) { - if (!g_decorators_mu.TryLock()) { + if (!g_decorators_mu.try_lock()) { // Someone else is using decorators. Get out. return false; } g_num_decorators = 0; - g_decorators_mu.Unlock(); + g_decorators_mu.unlock(); return true; } bool RemoveSymbolDecorator(int ticket) { - if (!g_decorators_mu.TryLock()) { + if (!g_decorators_mu.try_lock()) { // Someone else is using decorators. Get out. return false; } @@ -1649,14 +1629,14 @@ bool RemoveSymbolDecorator(int ticket) { break; } } - g_decorators_mu.Unlock(); + g_decorators_mu.unlock(); return true; // Decorator is known to be removed. } int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) { static int ticket = 0; - if (!g_decorators_mu.TryLock()) { + if (!g_decorators_mu.try_lock()) { // Someone else is using decorators. Get out. return -2; } @@ -1667,18 +1647,18 @@ int InstallSymbolDecorator(SymbolDecorator decorator, void *arg) { g_decorators[g_num_decorators] = {decorator, arg, ticket++}; ++g_num_decorators; } - g_decorators_mu.Unlock(); + g_decorators_mu.unlock(); return ret; } -bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset, - const char *filename) { +bool RegisterFileMappingHint(const void *start, const void *end, + uint64_t offset, const char *filename) { SAFE_ASSERT(start <= end); SAFE_ASSERT(filename != nullptr); - InitSigSafeArena(); + base_internal::InitSigSafeArena(); - if (!g_file_mapping_mu.TryLock()) { + if (!g_file_mapping_mu.try_lock()) { return false; } @@ -1688,8 +1668,9 @@ bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset } else { // TODO(ckennelly): Move this into a string copy routine. 
size_t len = strlen(filename); - char *dst = static_cast( - base_internal::LowLevelAlloc::AllocWithArena(len + 1, SigSafeArena())); + char *dst = + static_cast(base_internal::LowLevelAlloc::AllocWithArena( + len + 1, base_internal::SigSafeArena())); ABSL_RAW_CHECK(dst != nullptr, "out of memory"); memcpy(dst, filename, len + 1); @@ -1700,13 +1681,13 @@ bool RegisterFileMappingHint(const void *start, const void *end, uint64_t offset hint.filename = dst; } - g_file_mapping_mu.Unlock(); + g_file_mapping_mu.unlock(); return ret; } bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset, const char **filename) { - if (!g_file_mapping_mu.TryLock()) { + if (!g_file_mapping_mu.try_lock()) { return false; } bool found = false; @@ -1729,7 +1710,7 @@ bool GetFileMappingHint(const void **start, const void **end, uint64_t *offset, break; } } - g_file_mapping_mu.Unlock(); + g_file_mapping_mu.unlock(); return found; } @@ -1765,7 +1746,8 @@ ABSL_NAMESPACE_END } // namespace absl extern "C" bool AbslInternalGetFileMappingHint(const void **start, - const void **end, uint64_t *offset, + const void **end, + uint64_t *offset, const char **filename) { return absl::debugging_internal::GetFileMappingHint(start, end, offset, filename); diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel index 620af2b5aae..532721d23f0 100644 --- a/absl/flags/BUILD.bazel +++ b/absl/flags/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. # +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc index 37f6ef1e9d6..b05e1bdc1cb 100644 --- a/absl/flags/internal/flag.cc +++ b/absl/flags/internal/flag.cc @@ -73,8 +73,8 @@ bool ShouldValidateFlagValue(FlagFastTypeId flag_type_id) { // need to acquire these locks themselves. class MutexRelock { public: - explicit MutexRelock(absl::Mutex& mu) : mu_(mu) { mu_.Unlock(); } - ~MutexRelock() { mu_.Lock(); } + explicit MutexRelock(absl::Mutex& mu) : mu_(mu) { mu_.unlock(); } + ~MutexRelock() { mu_.lock(); } MutexRelock(const MutexRelock&) = delete; MutexRelock& operator=(const MutexRelock&) = delete; @@ -88,9 +88,9 @@ class MutexRelock { // we move the memory to the freelist where it lives indefinitely, so it can // still be safely accessed. This also prevents leak checkers from complaining // about the leaked memory that can no longer be accessed through any pointer. -absl::Mutex* FreelistMutex() { +absl::Mutex& FreelistMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT std::vector* s_freelist ABSL_GUARDED_BY(FreelistMutex()) ABSL_PT_GUARDED_BY(FreelistMutex()) = nullptr; @@ -248,12 +248,12 @@ void FlagImpl::Init() { seq_lock_.MarkInitialized(); } -absl::Mutex* FlagImpl::DataGuard() const { +absl::Mutex& FlagImpl::DataGuard() const { absl::call_once(const_cast(this)->init_control_, &FlagImpl::Init, const_cast(this)); // data_guard_ is initialized inside Init. 
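A pattern that recurs throughout the flags changes above: accessors such as FreelistMutex() and DataGuard() now return absl::Mutex& rather than absl::Mutex*, pairing with the reference-taking absl::MutexLock constructor used elsewhere in this diff. A minimal sketch of the pattern (RegistryMutex is a hypothetical name, not an Abseil API):

    #include "absl/base/no_destructor.h"
    #include "absl/synchronization/mutex.h"

    absl::Mutex& RegistryMutex() {
      // Never destroyed, so it stays usable during program shutdown.
      static absl::NoDestructor<absl::Mutex> mutex;
      return *mutex;
    }

    void WithRegistryLocked() {
      absl::MutexLock lock(RegistryMutex());  // reference overload, as above
      // ... guarded work ...
    }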
- return reinterpret_cast(&data_guard_); + return *reinterpret_cast(&data_guard_); } void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id, @@ -375,7 +375,7 @@ std::string FlagImpl::DefaultValue() const { } std::string FlagImpl::CurrentValue() const { - auto* guard = DataGuard(); // Make sure flag initialized + auto& guard = DataGuard(); // Make sure flag initialized switch (ValueStorageKind()) { case FlagValueStorageKind::kValueAndInitBit: case FlagValueStorageKind::kOneWordAtomic: { @@ -429,8 +429,8 @@ void FlagImpl::InvokeCallback() const { // and it also can be different by the time the callback invocation is // completed. Requires that *primary_lock be held in exclusive mode; it may be // released and reacquired by the implementation. - MutexRelock relock(*DataGuard()); - absl::MutexLock lock(&callback_->guard); + MutexRelock relock(DataGuard()); + absl::MutexLock lock(callback_->guard); cb(); } @@ -535,7 +535,7 @@ std::unique_ptr FlagImpl::TryParse( } void FlagImpl::Read(void* dst) const { - auto* guard = DataGuard(); // Make sure flag initialized + auto& guard = DataGuard(); // Make sure flag initialized switch (ValueStorageKind()) { case FlagValueStorageKind::kValueAndInitBit: case FlagValueStorageKind::kOneWordAtomic: { @@ -567,14 +567,14 @@ void FlagImpl::Read(void* dst) const { int64_t FlagImpl::ReadOneWord() const { assert(ValueStorageKind() == FlagValueStorageKind::kOneWordAtomic || ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); - auto* guard = DataGuard(); // Make sure flag initialized + auto& guard = DataGuard(); // Make sure flag initialized (void)guard; return OneWordValue().load(std::memory_order_acquire); } bool FlagImpl::ReadOneBool() const { assert(ValueStorageKind() == FlagValueStorageKind::kValueAndInitBit); - auto* guard = DataGuard(); // Make sure flag initialized + auto& guard = DataGuard(); // Make sure flag initialized (void)guard; return absl::bit_cast>( OneWordValue().load(std::memory_order_acquire)) diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h index b61a24737fd..cab9d16990a 100644 --- a/absl/flags/internal/flag.h +++ b/absl/flags/internal/flag.h @@ -601,17 +601,17 @@ class FlagImpl final : public CommandLineFlag { data_guard_{} {} // Constant access methods - int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(*DataGuard()); - bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(*DataGuard()); - void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - void Read(bool* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + int64_t ReadOneWord() const ABSL_LOCKS_EXCLUDED(DataGuard()); + bool ReadOneBool() const ABSL_LOCKS_EXCLUDED(DataGuard()); + void Read(void* dst) const override ABSL_LOCKS_EXCLUDED(DataGuard()); + void Read(bool* value) const ABSL_LOCKS_EXCLUDED(DataGuard()) { *value = ReadOneBool(); } template () == FlagValueStorageKind::kOneWordAtomic, int> = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + void Read(T* value) const ABSL_LOCKS_EXCLUDED(DataGuard()) { int64_t v = ReadOneWord(); std::memcpy(value, static_cast(&v), sizeof(T)); } @@ -619,17 +619,17 @@ class FlagImpl final : public CommandLineFlag { typename std::enable_if() == FlagValueStorageKind::kValueAndInitBit, int>::type = 0> - void Read(T* value) const ABSL_LOCKS_EXCLUDED(*DataGuard()) { + void Read(T* value) const ABSL_LOCKS_EXCLUDED(DataGuard()) { *value = absl::bit_cast>(ReadOneWord()).value; } // Mutating access methods - void Write(const void* src) ABSL_LOCKS_EXCLUDED(*DataGuard()); + void Write(const void* src) 
ABSL_LOCKS_EXCLUDED(DataGuard()); // Interfaces to operate on callbacks. void SetCallback(const FlagCallbackFunc mutation_callback) - ABSL_LOCKS_EXCLUDED(*DataGuard()); - void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); + void InvokeCallback() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()); // Used in read/write operations to validate source/target has correct type. // For example if flag is declared as absl::Flag FLAGS_foo, a call to @@ -646,11 +646,11 @@ class FlagImpl final : public CommandLineFlag { friend class FlagState; // Ensures that `data_guard_` is initialized and returns it. - absl::Mutex* DataGuard() const + absl::Mutex& DataGuard() const ABSL_LOCK_RETURNED(reinterpret_cast(data_guard_)); // Returns heap allocated value of type T initialized with default value. std::unique_ptr MakeInitValue() const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()); // Flag initialization called via absl::call_once. void Init(); @@ -676,16 +676,15 @@ class FlagImpl final : public CommandLineFlag { // returns new value. Otherwise returns nullptr. std::unique_ptr TryParse(absl::string_view value, std::string& err) const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()); // Stores the flag value based on the pointer to the source. void StoreValue(const void* src, ValueSource source) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()); // Copy the flag data, protected by `seq_lock_` into `dst`. // // REQUIRES: ValueStorageKind() == kSequenceLocked. - void ReadSequenceLockedData(void* dst) const - ABSL_LOCKS_EXCLUDED(*DataGuard()); + void ReadSequenceLockedData(void* dst) const ABSL_LOCKS_EXCLUDED(DataGuard()); FlagHelpKind HelpSourceKind() const { return static_cast(help_source_kind_); @@ -694,7 +693,7 @@ class FlagImpl final : public CommandLineFlag { return static_cast(value_storage_kind_); } FlagDefaultKind DefaultKind() const - ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()) { + ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()) { return static_cast(def_kind_); } @@ -705,30 +704,30 @@ class FlagImpl final : public CommandLineFlag { std::string Help() const override; FlagFastTypeId TypeId() const override; bool IsSpecifiedOnCommandLine() const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); - std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); - std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); + std::string DefaultValue() const override ABSL_LOCKS_EXCLUDED(DataGuard()); + std::string CurrentValue() const override ABSL_LOCKS_EXCLUDED(DataGuard()); bool ValidateInputValue(absl::string_view value) const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); void CheckDefaultValueParsingRoundtrip() const override - ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); - int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(*DataGuard()); + int64_t ModificationCount() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(DataGuard()); // Interfaces to save and restore flags to/from persistent state. // Returns current flag state or nullptr if flag does not support // saving and restoring a state. std::unique_ptr SaveState() override - ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); // Restores the flag state to the supplied state object. If there is // nothing to restore returns false. 
Otherwise returns true. bool RestoreState(const FlagState& flag_state) - ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); bool ParseFrom(absl::string_view value, FlagSettingMode set_mode, ValueSource source, std::string& error) override - ABSL_LOCKS_EXCLUDED(*DataGuard()); + ABSL_LOCKS_EXCLUDED(DataGuard()); // Immutable flag's state. @@ -758,9 +757,9 @@ class FlagImpl final : public CommandLineFlag { // locks. uint8_t def_kind_ : 2; // Has this flag's value been modified? - bool modified_ : 1 ABSL_GUARDED_BY(*DataGuard()); + bool modified_ : 1 ABSL_GUARDED_BY(DataGuard()); // Has this flag been specified on command line. - bool on_command_line_ : 1 ABSL_GUARDED_BY(*DataGuard()); + bool on_command_line_ : 1 ABSL_GUARDED_BY(DataGuard()); // Unique tag for absl::call_once call to initialize this flag. absl::once_flag init_control_; @@ -769,7 +768,7 @@ class FlagImpl final : public CommandLineFlag { flags_internal::SequenceLock seq_lock_; // Optional flag's callback and absl::Mutex to guard the invocations. - FlagCallback* callback_ ABSL_GUARDED_BY(*DataGuard()); + FlagCallback* callback_ ABSL_GUARDED_BY(DataGuard()); // Either a pointer to the function generating the default value based on the // value specified in ABSL_FLAG or pointer to the dynamically set default // value via SetCommandLineOptionWithMode. def_kind_ is used to distinguish diff --git a/absl/flags/internal/program_name.cc b/absl/flags/internal/program_name.cc index fb06643df5e..23185c626a0 100644 --- a/absl/flags/internal/program_name.cc +++ b/absl/flags/internal/program_name.cc @@ -29,9 +29,9 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace flags_internal { -static absl::Mutex* ProgramNameMutex() { +static absl::Mutex& ProgramNameMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT static std::string* program_name ABSL_GUARDED_BY( ProgramNameMutex()) ABSL_PT_GUARDED_BY(ProgramNameMutex()) = nullptr; diff --git a/absl/flags/internal/usage.cc b/absl/flags/internal/usage.cc index fc68b03db29..3c44271a70c 100644 --- a/absl/flags/internal/usage.cc +++ b/absl/flags/internal/usage.cc @@ -434,9 +434,9 @@ HelpMode HandleUsageFlags(std::ostream& out, namespace { -absl::Mutex* HelpAttributesMutex() { +absl::Mutex& HelpAttributesMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT std::string* match_substr ABSL_GUARDED_BY(HelpAttributesMutex()) ABSL_PT_GUARDED_BY(HelpAttributesMutex()) = nullptr; diff --git a/absl/flags/parse.cc b/absl/flags/parse.cc index c87cacdc7c5..df2a179e2ae 100644 --- a/absl/flags/parse.cc +++ b/absl/flags/parse.cc @@ -64,9 +64,9 @@ ABSL_NAMESPACE_BEGIN namespace flags_internal { namespace { -absl::Mutex* ProcessingChecksMutex() { +absl::Mutex& ProcessingChecksMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT bool flagfile_needs_processing @@ -76,9 +76,9 @@ ABSL_CONST_INIT bool fromenv_needs_processing ABSL_CONST_INIT bool tryfromenv_needs_processing ABSL_GUARDED_BY(ProcessingChecksMutex()) = false; -absl::Mutex* SpecifiedFlagsMutex() { +absl::Mutex& SpecifiedFlagsMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT std::vector* specified_flags diff --git a/absl/flags/reflection.cc b/absl/flags/reflection.cc index b8b4a2ea703..845099e2b05 100644 --- a/absl/flags/reflection.cc +++ b/absl/flags/reflection.cc @@ -53,8 +53,11 @@ class FlagRegistry { // Store a flag in this registry. 
Takes ownership of *flag. void RegisterFlag(CommandLineFlag& flag, const char* filename); - void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); } - void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); } + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.lock(); } + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock(); } + + void unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.unlock(); } + inline void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { unlock(); } // Returns the flag object for the specified name, or nullptr if not found. // Will emit a warning if a 'retired' flag is specified. @@ -87,8 +90,8 @@ namespace { class FlagRegistryLock { public: - explicit FlagRegistryLock(FlagRegistry& fr) : fr_(fr) { fr_.Lock(); } - ~FlagRegistryLock() { fr_.Unlock(); } + explicit FlagRegistryLock(FlagRegistry& fr) : fr_(fr) { fr_.lock(); } + ~FlagRegistryLock() { fr_.unlock(); } private: FlagRegistry& fr_; diff --git a/absl/flags/usage.cc b/absl/flags/usage.cc index 267a5039f64..e42b4540228 100644 --- a/absl/flags/usage.cc +++ b/absl/flags/usage.cc @@ -40,7 +40,7 @@ ABSL_CONST_INIT std::string* program_usage_message // -------------------------------------------------------------------- // Sets the "usage" message to be used by help reporting routines. void SetProgramUsageMessage(absl::string_view new_usage_message) { - absl::MutexLock l(&flags_internal::usage_message_guard); + absl::MutexLock l(flags_internal::usage_message_guard); if (flags_internal::program_usage_message != nullptr) { ABSL_INTERNAL_LOG(FATAL, "SetProgramUsageMessage() called twice."); @@ -55,7 +55,7 @@ void SetProgramUsageMessage(absl::string_view new_usage_message) { // Note: We able to return string_view here only because calling // SetProgramUsageMessage twice is prohibited. absl::string_view ProgramUsageMessage() { - absl::MutexLock l(&flags_internal::usage_message_guard); + absl::MutexLock l(flags_internal::usage_message_guard); return flags_internal::program_usage_message != nullptr ? absl::string_view(*flags_internal::program_usage_message) diff --git a/absl/flags/usage_config.cc b/absl/flags/usage_config.cc index 5922c5e20f6..bbc020d1f44 100644 --- a/absl/flags/usage_config.cc +++ b/absl/flags/usage_config.cc @@ -105,9 +105,9 @@ std::string NormalizeFilename(absl::string_view filename) { // -------------------------------------------------------------------- -absl::Mutex* CustomUsageConfigMutex() { +absl::Mutex& CustomUsageConfigMutex() { static absl::NoDestructor mutex; - return mutex.get(); + return *mutex; } ABSL_CONST_INIT FlagsUsageConfig* custom_usage_config ABSL_GUARDED_BY(CustomUsageConfigMutex()) diff --git a/absl/functional/BUILD.bazel b/absl/functional/BUILD.bazel index aeed3b668e4..b7aa31f5b03 100644 --- a/absl/functional/BUILD.bazel +++ b/absl/functional/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. 
# +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -41,6 +44,7 @@ cc_library( deps = [ "//absl/base:config", "//absl/base:core_headers", + "//absl/base:nullability", "//absl/meta:type_traits", "//absl/utility", ], @@ -58,6 +62,7 @@ cc_test( ":any_invocable", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:nullability", "//absl/meta:type_traits", "//absl/utility", "@googletest//:gtest", @@ -99,8 +104,10 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":any_invocable", + "//absl/base:config", "//absl/base:core_headers", "//absl/meta:type_traits", + "//absl/utility", ], ) @@ -112,8 +119,10 @@ cc_test( deps = [ ":any_invocable", ":function_ref", + "//absl/base:config", "//absl/container:test_instance_tracker", "//absl/memory", + "//absl/utility", "@googletest//:gtest", "@googletest//:gtest_main", ], diff --git a/absl/functional/CMakeLists.txt b/absl/functional/CMakeLists.txt index 91939db5013..07f3dc0b914 100644 --- a/absl/functional/CMakeLists.txt +++ b/absl/functional/CMakeLists.txt @@ -24,6 +24,7 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::base absl::config absl::core_headers absl::type_traits @@ -41,6 +42,7 @@ absl_cc_test( ${ABSL_TEST_COPTS} DEPS absl::any_invocable + absl::base absl::config absl::core_headers absl::type_traits @@ -85,9 +87,11 @@ absl_cc_library( COPTS ${ABSL_DEFAULT_COPTS} DEPS + absl::config absl::core_headers absl::any_invocable absl::meta + absl::utility PUBLIC ) @@ -99,9 +103,11 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::config absl::function_ref absl::memory absl::test_instance_tracker + absl::utility GTest::gmock_main ) diff --git a/absl/functional/any_invocable.h b/absl/functional/any_invocable.h index 43ea9af8824..ade6cffc45b 100644 --- a/absl/functional/any_invocable.h +++ b/absl/functional/any_invocable.h @@ -40,6 +40,7 @@ #include #include "absl/base/config.h" +#include "absl/base/nullability.h" #include "absl/functional/internal/any_invocable.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" @@ -158,7 +159,8 @@ ABSL_NAMESPACE_BEGIN // AnyInvocable empty; // empty(); // WARNING: Undefined behavior! template -class AnyInvocable : private internal_any_invocable::Impl { +class ABSL_NULLABILITY_COMPATIBLE AnyInvocable + : private internal_any_invocable::Impl { private: static_assert( std::is_function::value, @@ -295,22 +297,22 @@ class AnyInvocable : private internal_any_invocable::Impl { // Equality operators - // Returns `true` if `*this` is empty. + // Returns `true` if `f` is empty. friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept { return !f.HasValue(); } - // Returns `true` if `*this` is empty. + // Returns `true` if `f` is empty. friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept { return !f.HasValue(); } - // Returns `false` if `*this` is empty. + // Returns `false` if `f` is empty. friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept { return f.HasValue(); } - // Returns `false` if `*this` is empty. + // Returns `false` if `f` is empty. 
friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept { return f.HasValue(); } diff --git a/absl/functional/any_invocable_test.cc b/absl/functional/any_invocable_test.cc index 6ad6323a353..7ddfcaba143 100644 --- a/absl/functional/any_invocable_test.cc +++ b/absl/functional/any_invocable_test.cc @@ -15,13 +15,17 @@ #include "absl/functional/any_invocable.h" #include +#include +#include #include +#include #include #include #include #include "gtest/gtest.h" #include "absl/base/config.h" +#include "absl/base/nullability.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" @@ -652,8 +656,8 @@ TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) { TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) { using AnyInvType = typename TypeParam::AnyInvType; - AnyInvType source_fun; - AnyInvType fun(std::move(source_fun)); + absl_nullable AnyInvType source_fun; + absl_nullable AnyInvType fun(std::move(source_fun)); EXPECT_FALSE(static_cast(fun)); diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h index f1d087a77a2..edf61de7f03 100644 --- a/absl/functional/function_ref.h +++ b/absl/functional/function_ref.h @@ -47,12 +47,13 @@ #define ABSL_FUNCTIONAL_FUNCTION_REF_H_ #include -#include #include #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/functional/internal/function_ref.h" #include "absl/meta/type_traits.h" +#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN @@ -89,15 +90,17 @@ class FunctionRef { // signature of this FunctionRef. template > using EnableIfCompatible = - typename std::enable_if::value || - std::is_convertible::value>::type; + std::enable_if_t, std::true_type, + std::is_invocable_r>::value>; public: // Constructs a FunctionRef from any invocable type. - template > - // NOLINTNEXTLINE(runtime/explicit) - FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) - : invoker_(&absl::functional_internal::InvokeObject) { + template >, F&>>> + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(F&& f ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept + : invoker_(&absl::functional_internal::InvokeObject) { absl::functional_internal::AssertNonNull(f); ptr_.obj = &f; } @@ -111,14 +114,39 @@ class FunctionRef { template < typename F, typename = EnableIfCompatible, absl::functional_internal::EnableIf::value> = 0> - FunctionRef(F* f) // NOLINT(runtime/explicit) + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(F* f ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept : invoker_(&absl::functional_internal::InvokeFunction) { assert(f != nullptr); ptr_.fun = reinterpret_cast(f); } - FunctionRef& operator=(const FunctionRef& rhs) = default; - FunctionRef(const FunctionRef& rhs) = default; +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + // Similar to the other overloads, but passes the address of a known callable + // `F` at compile time. This allows calling arbitrary functions while avoiding + // an indirection. + // Needs C++20 as `nontype_t` needs C++20 for `auto` template parameters. 
+ template + FunctionRef(nontype_t) noexcept // NOLINT(google-explicit-constructor) + : invoker_(&absl::functional_internal::InvokeFunction) {} + + template + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(nontype_t, Obj&& obj) noexcept + : invoker_(&absl::functional_internal::InvokeObject) { + ptr_.obj = std::addressof(obj); + } + + template + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(nontype_t, Obj* obj) noexcept + : invoker_(&absl::functional_internal::InvokePtr) { + ptr_.obj = obj; + } +#endif // Call the underlying object. R operator()(Args... args) const { @@ -134,8 +162,39 @@ class FunctionRef { // constness anyway we can just make this a no-op. template class FunctionRef : public FunctionRef { + using Base = FunctionRef; + + template + using EnableIfCallable = + std::enable_if_t> && + std::is_invocable_r_v && + std::is_constructible_v, + T>; + public: - using FunctionRef::FunctionRef; + template > + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(const F& f ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept : Base(f) {} + + template >> + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(F* f ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept : Base(f) {} + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + template > + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(nontype_t arg) noexcept : Base(arg) {} + + template > + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(nontype_t arg, Obj&& obj) noexcept + : Base(arg, std::forward(obj)) {} + + template > + // NOLINTNEXTLINE(google-explicit-constructor) + FunctionRef(nontype_t arg, Obj* obj) noexcept : Base(arg, obj) {} +#endif }; ABSL_NAMESPACE_END diff --git a/absl/functional/function_ref_test.cc b/absl/functional/function_ref_test.cc index 98d11f72666..c8ff080440b 100644 --- a/absl/functional/function_ref_test.cc +++ b/absl/functional/function_ref_test.cc @@ -16,26 +16,31 @@ #include #include +#include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/internal/test_instance_tracker.h" #include "absl/functional/any_invocable.h" #include "absl/memory/memory.h" +#include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { -void RunFun(FunctionRef f) { f(); } +int Function() { return 1337; } -TEST(FunctionRefTest, Lambda) { - bool ran = false; - RunFun([&] { ran = true; }); - EXPECT_TRUE(ran); +template +T Dereference(const T* v) { + return *v; } -int Function() { return 1337; } +template +T Copy(const T& v) { + return v; +} TEST(FunctionRefTest, Function1) { FunctionRef ref(&Function); @@ -251,11 +256,11 @@ TEST(FunctionRef, PassByValueTypes) { std::is_same, void (*)(VoidPtr, Trivial)>::value, "Small trivial types should be passed by value"); static_assert(std::is_same, - void (*)(VoidPtr, LargeTrivial &&)>::value, + void (*)(VoidPtr, LargeTrivial&&)>::value, "Large trivial types should be passed by rvalue reference"); static_assert( std::is_same, - void (*)(VoidPtr, CopyableMovableInstance &&)>::value, + void (*)(VoidPtr, CopyableMovableInstance&&)>::value, "Types with copy/move ctor should be passed by rvalue reference"); // References are passed as references. 
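The PassByValueTypes assertions around this hunk pin down FunctionRef's argument-passing convention. Roughly, and only as a paraphrase of the internal ForwardT trait in absl/functional/internal/function_ref.h (not its exact definition): small trivially copyable types travel by value, everything else by rvalue reference, and reference parameters are preserved as-is:

    #include <type_traits>

    // Illustrative approximation of the forwarding rule.
    template <typename T>
    using PassBy = std::conditional_t<
        !std::is_reference_v<T> && std::is_trivially_copyable_v<T> &&
            sizeof(T) <= 2 * sizeof(void*),
        T, T&&>;

    static_assert(std::is_same_v<PassBy<int>, int>);
    static_assert(std::is_same_v<PassBy<int&>, int&>);  // reference collapsing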
@@ -268,7 +273,7 @@ TEST(FunctionRef, PassByValueTypes) { "Reference types should be preserved"); static_assert( std::is_same, - void (*)(VoidPtr, CopyableMovableInstance &&)>::value, + void (*)(VoidPtr, CopyableMovableInstance&&)>::value, "Reference types should be preserved"); // Make sure the address of an object received by reference is the same as the @@ -298,6 +303,61 @@ TEST(FunctionRef, ReferenceToIncompleteType) { ref(obj); } +TEST(FunctionRefTest, CorrectConstQualifiers) { + struct S { + int operator()() { return 42; } + int operator()() const { return 1337; } + }; + S s; + EXPECT_EQ(42, FunctionRef(s)()); + EXPECT_EQ(1337, FunctionRef(s)()); + EXPECT_EQ(1337, FunctionRef(std::as_const(s))()); +} + +TEST(FunctionRefTest, Lambdas) { + // Stateless lambdas implicitly convert to function pointers, so their + // mutability is irrelevant. + EXPECT_TRUE(FunctionRef([]() /*const*/ { return true; })()); + EXPECT_TRUE(FunctionRef([]() mutable { return true; })()); + EXPECT_TRUE(FunctionRef([]() /*const*/ { return true; })()); +#if defined(__clang__) || (ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L && \ + defined(_MSC_VER) && !defined(__EDG__)) + // MSVC has problems compiling the following code pre-C++20: + // const auto f = []() mutable {}; + // f(); + // EDG's MSVC-compatible mode (which Visual C++ uses for Intellisense) + // exhibits the bug in C++20 as well. So we don't support them. + EXPECT_TRUE(FunctionRef([]() mutable { return true; })()); +#endif + + // Stateful lambdas are not implicitly convertible to function pointers, so + // a const stateful lambda is not mutably callable. + EXPECT_TRUE(FunctionRef([v = true]() /*const*/ { return v; })()); + EXPECT_TRUE(FunctionRef([v = true]() mutable { return v; })()); + EXPECT_TRUE( + FunctionRef([v = true]() /*const*/ { return v; })()); + const auto func = [v = true]() mutable { return v; }; + static_assert( + !std::is_convertible_v>); +} + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L +TEST(FunctionRefTest, NonTypeParameter) { + EXPECT_EQ(1337, FunctionRef(nontype<&Function>)()); + EXPECT_EQ(42, FunctionRef(nontype<&Copy>, 42)()); + EXPECT_EQ(42, FunctionRef(nontype<&Dereference>, + &std::integral_constant::value)()); +} +#endif + +TEST(FunctionRefTest, OptionalArguments) { + struct S { + int operator()(int = 0) const { return 1337; } + }; + S s; + EXPECT_EQ(1337, FunctionRef(s)()); +} + } // namespace ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/functional/internal/any_invocable.h b/absl/functional/internal/any_invocable.h index 167d947d4c5..a696fddd86c 100644 --- a/absl/functional/internal/any_invocable.h +++ b/absl/functional/internal/any_invocable.h @@ -66,6 +66,7 @@ #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/macros.h" +#include "absl/base/nullability.h" #include "absl/base/optimization.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" @@ -75,7 +76,7 @@ ABSL_NAMESPACE_BEGIN // Defined in functional/any_invocable.h template -class AnyInvocable; +class ABSL_NULLABILITY_COMPATIBLE AnyInvocable; namespace internal_any_invocable { diff --git a/absl/functional/internal/function_ref.h b/absl/functional/internal/function_ref.h index 27d45b886de..0796364aab5 100644 --- a/absl/functional/internal/function_ref.h +++ b/absl/functional/internal/function_ref.h @@ -72,8 +72,25 @@ using Invoker = R (*)(VoidPtr, typename ForwardT::type...); // static_cast handles the case the return type is void. template R InvokeObject(VoidPtr ptr, typename ForwardT::type... 
args) { - auto o = static_cast(ptr.obj); - return static_cast(std::invoke(*o, std::forward(args)...)); + using T = std::remove_reference_t; + return static_cast(std::invoke( + std::forward(*const_cast(static_cast(ptr.obj))), + std::forward::type>(args)...)); +} + +template +R InvokeObject(VoidPtr ptr, typename ForwardT::type... args) { + using T = std::remove_reference_t; + return static_cast( + F(std::forward(*const_cast(static_cast(ptr.obj))), + std::forward::type>(args)...)); +} + +template +R InvokePtr(VoidPtr ptr, typename ForwardT::type... args) { + return static_cast( + F(const_cast(static_cast(ptr.obj)), + std::forward::type>(args)...)); } template @@ -82,6 +99,12 @@ R InvokeFunction(VoidPtr ptr, typename ForwardT::type... args) { return static_cast(std::invoke(f, std::forward(args)...)); } +template +R InvokeFunction(VoidPtr, typename ForwardT::type... args) { + return static_cast( + F(std::forward::type>(args)...)); +} + template void AssertNonNull(const std::function& f) { assert(f != nullptr); @@ -98,7 +121,7 @@ template void AssertNonNull(const F&) {} template -void AssertNonNull(F C::*f) { +void AssertNonNull(F C::* f) { assert(f != nullptr); (void)f; } diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel index b2ffcd08d5d..187689f880e 100644 --- a/absl/hash/BUILD.bazel +++ b/absl/hash/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. # +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -43,11 +46,11 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":city", - ":low_level_hash", ":weakly_mixed_integer", "//absl/base:config", "//absl/base:core_headers", "//absl/base:endian", + "//absl/base:prefetch", "//absl/container:fixed_array", "//absl/functional:function_ref", "//absl/meta:type_traits", @@ -76,6 +79,7 @@ cc_library( cc_test( name = "hash_test", + size = "large", srcs = [ "hash_test.cc", "internal/hash_test.h", @@ -185,22 +189,6 @@ cc_test( ], ) -cc_library( - name = "low_level_hash", - srcs = ["internal/low_level_hash.cc"], - hdrs = ["internal/low_level_hash.h"], - copts = ABSL_DEFAULT_COPTS, - linkopts = ABSL_DEFAULT_LINKOPTS, - visibility = ["//visibility:private"], - deps = [ - "//absl/base:config", - "//absl/base:core_headers", - "//absl/base:endian", - "//absl/base:prefetch", - "//absl/numeric:int128", - ], -) - cc_library( name = "weakly_mixed_integer", hdrs = ["internal/weakly_mixed_integer.h"], @@ -223,7 +211,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], deps = [ - ":low_level_hash", + ":hash", "//absl/strings", "@googletest//:gtest", "@googletest//:gtest_main", diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt index 6996d930e72..b439e4ce2ee 100644 --- a/absl/hash/CMakeLists.txt +++ b/absl/hash/CMakeLists.txt @@ -38,7 +38,6 @@ absl_cc_library( absl::optional absl::variant absl::utility - absl::low_level_hash absl::weakly_mixed_integer PUBLIC ) @@ -153,24 +152,6 @@ absl_cc_test( GTest::gmock_main ) -# Internal-only target, do not depend on directly. -absl_cc_library( - NAME - low_level_hash - HDRS - "internal/low_level_hash.h" - SRCS - "internal/low_level_hash.cc" - COPTS - ${ABSL_DEFAULT_COPTS} - DEPS - absl::config - absl::core_headers - absl::endian - absl::int128 - absl::prefetch -) - # Internal-only target, do not depend on directly. 
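Stepping back from the build-file churn for a moment: the `FunctionRef<R(Args...) const>` specialization reworked earlier no longer inherits its constructors; it re-declares them behind an `EnableIfCallable` constraint so that a const-qualified signature only accepts callables invocable through a const reference. A minimal sketch of the observable behavior, matching the `CorrectConstQualifiers` test above:

```cpp
#include "absl/functional/function_ref.h"

struct Overloaded {
  int operator()() { return 42; }          // non-const overload
  int operator()() const { return 1337; }  // const overload
};

int Demo() {
  Overloaded s;
  // A non-const signature binds `s` mutably and selects operator()().
  int mutable_result = absl::FunctionRef<int()>(s)();      // 42
  // A const signature invokes through a const reference and selects
  // operator()() const, even when constructed from a non-const object.
  int const_result = absl::FunctionRef<int() const>(s)();  // 1337
  return mutable_result + const_result;
}
```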
absl_cc_library(
  NAME
@@ -191,7 +172,7 @@ absl_cc_test(
  COPTS
    ${ABSL_TEST_COPTS}
  DEPS
-    absl::low_level_hash
+    absl::hash
    absl::strings
    GTest::gmock_main
)
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index c3182f1ff6a..a9282961965 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <string_view>
 #include
 #include
 #include
@@ -55,23 +56,19 @@
 #include  // NOLINT
 #endif

-#ifdef ABSL_HAVE_STD_STRING_VIEW
-#include <string_view>
-#endif
-
 namespace {

+using ::absl::Hash;
+using ::absl::container_internal::hashtable_debug_internal::
+    HashtableDebugAccess;
+using ::absl::hash_internal::SpyHashState;
 using ::absl::hash_test_internal::is_hashable;
 using ::absl::hash_test_internal::TypeErasedContainer;
 using ::absl::hash_test_internal::TypeErasedValue;
-using ::testing::SizeIs;

 template <typename T>
 using TypeErasedVector = TypeErasedContainer<std::vector<T>>;

-using absl::Hash;
-using absl::hash_internal::SpyHashState;
-
 template <typename T>
 class HashValueIntTest : public testing::Test {
 };
@@ -174,6 +171,9 @@ TEST(HashValueTest, PointerAlignment) {
   constexpr size_t kLog2NumValues = 5;
   constexpr size_t kNumValues = 1 << kLog2NumValues;

+  int64_t test_count = 0;
+  int64_t total_stuck_bit_count = 0;
+
   for (size_t align = 1; align < kTotalSize / kNumValues;
        align < 8 ? align += 1 : align < 1024 ? align += 8 : align += 32) {
     SCOPED_TRACE(align);
@@ -191,9 +191,17 @@
     // Limit the scope to the bits we would be using for Swisstable.
     constexpr size_t kMask = (1 << (kLog2NumValues + 7)) - 1;
     size_t stuck_bits = (~bits_or | bits_and) & kMask;
-    // Test that there are at most 3 stuck bits.
-    EXPECT_LE(absl::popcount(stuck_bits), 3) << "0x" << std::hex << stuck_bits;
+    int stuck_bit_count = absl::popcount(stuck_bits);
+    size_t max_stuck_bits = 5;
+    EXPECT_LE(stuck_bit_count, max_stuck_bits)
+        << "0x" << std::hex << stuck_bits;
+
+    total_stuck_bit_count += stuck_bit_count;
+    ++test_count;
   }
+  // Test that the average across alignments is at most 0.2 stuck bits.
+  // As of 2025-05-30 the test also passes with 0.07 stuck bits.
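For readers unfamiliar with the metric in `PointerAlignment` above: a bit position is "stuck" when it is identical across every hash in the sample (never set, or always set) and therefore contributes no entropy to Swisstable bucket selection. A self-contained restatement of the computation; the helper name is mine, not the test's:

```cpp
#include <cstddef>
#include <vector>

#include "absl/numeric/bits.h"

// Counts bits inside `mask` that are 0 in every hash or 1 in every hash.
int StuckBits(const std::vector<size_t>& hashes, size_t mask) {
  size_t bits_or = 0;           // a bit is 0 here iff it was never set
  size_t bits_and = ~size_t{};  // a bit is 1 here iff it was always set
  for (size_t h : hashes) {
    bits_or |= h;
    bits_and &= h;
  }
  return absl::popcount((~bits_or | bits_and) & mask);
}
```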
+ EXPECT_LE(total_stuck_bit_count, 0.2 * test_count); } TEST(HashValueTest, PointerToMember) { @@ -495,22 +503,15 @@ TEST(HashValueTest, U32String) { } TEST(HashValueTest, WStringView) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else EXPECT_TRUE((is_hashable::value)); EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple( std::wstring_view(), std::wstring_view(L"ABC"), std::wstring_view(L"ABC"), std::wstring_view(L"Some other different string_view"), std::wstring_view(L"Iñtërnâtiônàlizætiøn")))); -#endif } TEST(HashValueTest, U16StringView) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else EXPECT_TRUE((is_hashable::value)); EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( @@ -518,13 +519,9 @@ TEST(HashValueTest, U16StringView) { std::u16string_view(u"ABC"), std::u16string_view(u"Some other different string_view"), std::u16string_view(u"Iñtërnâtiônàlizætiøn")))); -#endif } TEST(HashValueTest, U32StringView) { -#ifndef ABSL_HAVE_STD_STRING_VIEW - GTEST_SKIP(); -#else EXPECT_TRUE((is_hashable::value)); EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly( @@ -532,7 +529,6 @@ TEST(HashValueTest, U32StringView) { std::u32string_view(U"ABC"), std::u32string_view(U"Some other different string_view"), std::u32string_view(U"Iñtërnâtiônàlizætiøn")))); -#endif } TEST(HashValueTest, StdFilesystemPath) { @@ -1239,4 +1235,83 @@ TEST(HashOf, AutoReturnTypeUser) { absl::Hash{}(AutoReturnTypeUser{1, s})); } +TEST(HashOf, DoubleSignCollision) { + // These values differ only in their most significant bit. + EXPECT_NE(absl::HashOf(-1.0), absl::HashOf(1.0)); +} + +// Test for collisions in short strings if PrecombineLengthMix is low quality. +TEST(PrecombineLengthMix, ShortStringCollision) { + std::string s1 = "00"; + std::string s2 = "000"; + constexpr char kMinChar = 0; + constexpr char kMaxChar = 32; + for (s1[0] = kMinChar; s1[0] < kMaxChar; ++s1[0]) { + for (s1[1] = kMinChar; s1[1] < kMaxChar; ++s1[1]) { + for (s2[0] = kMinChar; s2[0] < kMaxChar; ++s2[0]) { + for (s2[1] = kMinChar; s2[1] < kMaxChar; ++s2[1]) { + for (s2[2] = kMinChar; s2[2] < kMaxChar; ++s2[2]) { + ASSERT_NE(absl::HashOf(s1), absl::HashOf(s2)) + << "s1[0]: " << static_cast(s1[0]) + << "; s1[1]: " << static_cast(s1[1]) + << "; s2[0]: " << static_cast(s2[0]) + << "; s2[1]: " << static_cast(s2[1]) + << "; s2[2]: " << static_cast(s2[2]); + } + } + } + } + } +} + +// Test that we don't cause excessive collisions on the hash table for +// doubles in the range [-1024, 1024]. See cl/773069881 for more information. +TEST(SwisstableCollisions, DoubleRange) { + absl::flat_hash_set set; + for (double t = -1024.0; t < 1024.0; t += 1.0) { + set.insert(t); + ASSERT_LT(HashtableDebugAccess::GetNumProbes(set, t), 64) + << t; + } +} + +// Test that for each pair of adjacent bytes in a string, if there's only +// entropy in those two bytes, then we don't have excessive collisions. +TEST(SwisstableCollisions, LowEntropyStrings) { + constexpr char kMinChar = 0; + constexpr char kMaxChar = 64; + // These sizes cover the different hashing cases. 
+ for (size_t size : {8u, 16u, 32u, 64u}) { + for (size_t b = 0; b < size - 1; ++b) { + absl::flat_hash_set set; + std::string s(size, '\0'); + for (char c1 = kMinChar; c1 < kMaxChar; ++c1) { + for (char c2 = kMinChar; c2 < kMaxChar; ++c2) { + s[b] = c1; + s[b + 1] = c2; + set.insert(s); + ASSERT_LT(HashtableDebugAccess::GetNumProbes(set, s), + 64) + << "size: " << size << "; bit: " << b; + } + } + } + } +} + +// Test that we don't have excessive collisions when keys are consecutive +// integers rotated by N bits. +TEST(SwisstableCollisions, LowEntropyInts) { + constexpr int kSizeTBits = sizeof(size_t) * 8; + for (int bit = 0; bit < kSizeTBits; ++bit) { + absl::flat_hash_set set; + for (size_t i = 0; i < 128 * 1024; ++i) { + size_t v = absl::rotl(i, bit); + set.insert(v); + ASSERT_LT(HashtableDebugAccess::GetNumProbes(set, v), 32) + << bit << " " << i; + } + } +} + } // namespace diff --git a/absl/hash/internal/hash.cc b/absl/hash/internal/hash.cc index 9abace5e2b7..9e43e94cc6e 100644 --- a/absl/hash/internal/hash.cc +++ b/absl/hash/internal/hash.cc @@ -14,52 +14,167 @@ #include "absl/hash/internal/hash.h" +#include #include #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" -#include "absl/hash/internal/low_level_hash.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/optimization.h" +#include "absl/base/prefetch.h" +#include "absl/hash/internal/city.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace hash_internal { -uint64_t MixingHashState::CombineLargeContiguousImpl32( - uint64_t state, const unsigned char* first, size_t len) { +namespace { + +uint64_t Mix32Bytes(const uint8_t* ptr, uint64_t current_state) { + uint64_t a = absl::base_internal::UnalignedLoad64(ptr); + uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8); + uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16); + uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24); + + uint64_t cs0 = Mix(a ^ kStaticRandomData[1], b ^ current_state); + uint64_t cs1 = Mix(c ^ kStaticRandomData[2], d ^ current_state); + return cs0 ^ cs1; +} + +[[maybe_unused]] uint64_t LowLevelHashLenGt32(const void* data, size_t len, + uint64_t seed) { + assert(len > 32); + const uint8_t* ptr = static_cast(data); + uint64_t current_state = seed ^ kStaticRandomData[0] ^ len; + const uint8_t* last_32_ptr = ptr + len - 32; + + if (len > 64) { + // If we have more than 64 bytes, we're going to handle chunks of 64 + // bytes at a time. We're going to build up four separate hash states + // which we will then hash together. This avoids short dependency chains. 
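An aside on the loop that follows: it carries four independent mixing chains for latency, not quality. Each `Mix` is a 64x64-to-128-bit multiply whose result feeds the next iteration, so a single chain would serialize on multiply latency, while four chains let the CPU overlap the multiplies before the final xor/add folds them back together. The mixer itself, restated standalone with the same shape as `Mix` used throughout this patch:

```cpp
#include <cstdint>

#include "absl/numeric/int128.h"

// Multiply-then-fold: the full 128-bit product diffuses both inputs, and
// xoring the two halves folds the result back down to 64 bits.
uint64_t MixSketch(uint64_t lhs, uint64_t rhs) {
  absl::uint128 m = lhs;
  m *= rhs;
  return absl::Uint128High64(m) ^ absl::Uint128Low64(m);
}
```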
+ uint64_t duplicated_state0 = current_state; + uint64_t duplicated_state1 = current_state; + uint64_t duplicated_state2 = current_state; + + do { + PrefetchToLocalCache(ptr + 5 * ABSL_CACHELINE_SIZE); + + uint64_t a = absl::base_internal::UnalignedLoad64(ptr); + uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8); + uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16); + uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24); + uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32); + uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40); + uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48); + uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56); + + current_state = Mix(a ^ kStaticRandomData[1], b ^ current_state); + duplicated_state0 = Mix(c ^ kStaticRandomData[2], d ^ duplicated_state0); + + duplicated_state1 = Mix(e ^ kStaticRandomData[3], f ^ duplicated_state1); + duplicated_state2 = Mix(g ^ kStaticRandomData[4], h ^ duplicated_state2); + + ptr += 64; + len -= 64; + } while (len > 64); + + current_state = (current_state ^ duplicated_state0) ^ + (duplicated_state1 + duplicated_state2); + } + + // We now have a data `ptr` with at most 64 bytes and the current state + // of the hashing state machine stored in current_state. + if (len > 32) { + current_state = Mix32Bytes(ptr, current_state); + } + + // We now have a data `ptr` with at most 32 bytes and the current state + // of the hashing state machine stored in current_state. But we can + // safely read from `ptr + len - 32`. + return Mix32Bytes(last_32_ptr, current_state); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t HashBlockOn32Bit( + const unsigned char* data, size_t len, uint64_t state) { + // TODO(b/417141985): expose and use CityHash32WithSeed. + // Note: we can't use PrecombineLengthMix here because len can be up to 1024. + return CombineRawImpl( + state + len, + hash_internal::CityHash32(reinterpret_cast(data), len)); +} + +ABSL_ATTRIBUTE_NOINLINE uint64_t +SplitAndCombineOn32Bit(const unsigned char* first, size_t len, uint64_t state) { while (len >= PiecewiseChunkSize()) { - state = Mix( - state ^ hash_internal::CityHash32(reinterpret_cast(first), - PiecewiseChunkSize()), - kMul); + state = HashBlockOn32Bit(first, PiecewiseChunkSize(), state); len -= PiecewiseChunkSize(); first += PiecewiseChunkSize(); } + // Do not call CombineContiguousImpl for empty range since it is modifying + // state. + if (len == 0) { + return state; + } // Handle the remainder. return CombineContiguousImpl(state, first, len, std::integral_constant{}); } -uint64_t MixingHashState::CombineLargeContiguousImpl64( - uint64_t state, const unsigned char* first, size_t len) { +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t HashBlockOn64Bit( + const unsigned char* data, size_t len, uint64_t state) { +#ifdef ABSL_HAVE_INTRINSIC_INT128 + return LowLevelHashLenGt32(data, len, state); +#else + return hash_internal::CityHash64WithSeed(reinterpret_cast(data), + len, state); +#endif +} + +ABSL_ATTRIBUTE_NOINLINE uint64_t +SplitAndCombineOn64Bit(const unsigned char* first, size_t len, uint64_t state) { while (len >= PiecewiseChunkSize()) { - state = Mix(state ^ Hash64(first, PiecewiseChunkSize()), kMul); + state = HashBlockOn64Bit(first, PiecewiseChunkSize(), state); len -= PiecewiseChunkSize(); first += PiecewiseChunkSize(); } + // Do not call CombineContiguousImpl for empty range since it is modifying + // state. + if (len == 0) { + return state; + } // Handle the remainder. 
return CombineContiguousImpl(state, first, len, std::integral_constant{}); } -ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed; +} // namespace -uint64_t MixingHashState::LowLevelHashImpl(const unsigned char* data, - size_t len) { - return LowLevelHashLenGt32(data, len, Seed(), &kStaticRandomData[0]); +uint64_t CombineLargeContiguousImplOn32BitLengthGt8(const unsigned char* first, + size_t len, + uint64_t state) { + assert(len > 8); + assert(sizeof(size_t) == 4); // NOLINT(misc-static-assert) + if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) { + return HashBlockOn32Bit(first, len, state); + } + return SplitAndCombineOn32Bit(first, len, state); } +uint64_t CombineLargeContiguousImplOn64BitLengthGt32(const unsigned char* first, + size_t len, + uint64_t state) { + assert(len > 32); + assert(sizeof(size_t) == 8); // NOLINT(misc-static-assert) + if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) { + return HashBlockOn64Bit(first, len, state); + } + return SplitAndCombineOn64Bit(first, len, state); +} + +ABSL_CONST_INIT const void* const MixingHashState::kSeed = &kSeed; + } // namespace hash_internal ABSL_NAMESPACE_END } // namespace absl diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h index c7916b50936..2f91a8beacd 100644 --- a/absl/hash/internal/hash.h +++ b/absl/hash/internal/hash.h @@ -39,7 +39,7 @@ // For feature testing and determining which headers can be included. #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L || \ - ABSL_INTERNAL_VERSION_HEADER_AVAILABLE + defined(ABSL_INTERNAL_VERSION_HEADER_AVAILABLE) #include #else #include @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -73,7 +74,6 @@ #include #include "absl/base/attributes.h" -#include "absl/base/internal/endian.h" #include "absl/base/internal/unaligned_access.h" #include "absl/base/optimization.h" #include "absl/base/port.h" @@ -88,14 +88,11 @@ #include "absl/types/variant.h" #include "absl/utility/utility.h" -#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L +#if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \ + !defined(__XTENSA__) #include // NOLINT #endif -#ifdef ABSL_HAVE_STD_STRING_VIEW -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN @@ -107,8 +104,6 @@ namespace hash_internal { // returns the size of these chunks. constexpr size_t PiecewiseChunkSize() { return 1024; } -// PiecewiseCombiner -// // PiecewiseCombiner is an internal-only helper class for hashing a piecewise // buffer of `char` or `unsigned char` as though it were contiguous. This class // provides two methods: @@ -129,12 +124,10 @@ constexpr size_t PiecewiseChunkSize() { return 1024; } // return combiner.finalize(std::move(state)); class PiecewiseCombiner { public: - PiecewiseCombiner() : position_(0) {} + PiecewiseCombiner() = default; PiecewiseCombiner(const PiecewiseCombiner&) = delete; PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete; - // PiecewiseCombiner::add_buffer() - // // Appends the given range of bytes to the sequence to be hashed, which may // modify the provided hash state. template @@ -145,8 +138,6 @@ class PiecewiseCombiner { reinterpret_cast(data), size); } - // PiecewiseCombiner::finalize() - // // Finishes combining the hash sequence, which may may modify the provided // hash state. 
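An aside on the `PiecewiseCombiner` contract documented above: feeding a buffer through `add_buffer` in arbitrary pieces and then calling `finalize` must produce the same result as a single `combine_contiguous` call over the whole buffer; the empty-remainder bookkeeping added later in this patch (`added_something_`) exists to keep that equivalence. A usage sketch against this internal API, shown for illustration only:

```cpp
#include <cstddef>
#include <string>

#include "absl/hash/hash.h"

// Hashes `s` in two pieces; the result must match hashing it in one piece.
template <typename H>
H HashInTwoPieces(H state, const std::string& s) {
  absl::hash_internal::PiecewiseCombiner combiner;
  const unsigned char* data = reinterpret_cast<const unsigned char*>(s.data());
  const size_t half = s.size() / 2;
  state = combiner.add_buffer(std::move(state), data, half);
  state = combiner.add_buffer(std::move(state), data + half, s.size() - half);
  return combiner.finalize(std::move(state));
}
```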
// @@ -159,21 +150,19 @@ class PiecewiseCombiner { private: unsigned char buf_[PiecewiseChunkSize()]; - size_t position_; + size_t position_ = 0; + bool added_something_ = false; }; -// is_hashable() -// // Trait class which returns true if T is hashable by the absl::Hash framework. // Used for the AbslHashValue implementations for composite types below. template struct is_hashable; -// HashStateBase -// -// An internal implementation detail that contains common implementation details -// for all of the "hash state objects" objects generated by Abseil. This is not -// a public API; users should not create classes that inherit from this. +// HashStateBase is an internal implementation detail that contains common +// implementation details for all of the "hash state objects" objects generated +// by Abseil. This is not a public API; users should not create classes that +// inherit from this. // // A hash state object is the template argument `H` passed to `AbslHashValue`. // It represents an intermediate state in the computation of an unspecified hash @@ -238,8 +227,6 @@ struct is_hashable; template class HashStateBase { public: - // HashStateBase::combine() - // // Combines an arbitrary number of values into a hash state, returning the // updated state. // @@ -259,8 +246,6 @@ class HashStateBase { static H combine(H state, const T& value, const Ts&... values); static H combine(H state) { return state; } - // HashStateBase::combine_contiguous() - // // Combines a contiguous array of `size` elements into a hash state, returning // the updated state. // @@ -300,8 +285,6 @@ class HashStateBase { }; }; -// is_uniquely_represented -// // `is_uniquely_represented` is a trait class that indicates whether `T` // is uniquely represented. // @@ -336,8 +319,6 @@ class HashStateBase { template struct is_uniquely_represented : std::false_type {}; -// is_uniquely_represented -// // unsigned char is a synonym for "byte", so it is guaranteed to be // uniquely represented. template <> @@ -352,9 +333,6 @@ struct is_uniquely_represented< Integral, typename std::enable_if::value>::type> : std::true_type {}; -// is_uniquely_represented -// -// template <> struct is_uniquely_represented : std::false_type {}; @@ -376,8 +354,15 @@ struct CombineRaw { } }; -// hash_bytes() -// +// For use in `raw_hash_set` to pass a seed to the hash function. +struct HashWithSeed { + template + size_t hash(const Hasher& hasher, const T& value, size_t seed) const { + // NOLINTNEXTLINE(clang-diagnostic-sign-conversion) + return hasher.hash_with_seed(value, seed); + } +}; + // Convenience function that combines `hash_state` with the byte representation // of `value`. template typename std::enable_if::value, H>::type AbslHashValue( H hash_state, B value) { + // We use ~size_t{} instead of 1 so that all bits are different between + // true/false instead of only 1. return H::combine(std::move(hash_state), - static_cast(value ? 1 : 0)); + static_cast(value ? ~size_t{} : 0)); } // AbslHashValue() for hashing enum values @@ -505,10 +492,10 @@ std::enable_if_t::value, H> AbslHashValue(H hash_state, T ptr) { auto v = reinterpret_cast(ptr); // Due to alignment, pointers tend to have low bits as zero, and the next few - // bits follow a pattern since they are also multiples of some base value. The - // byte swap in WeakMix helps ensure we still have good entropy in low bits. - // Mix pointers twice to ensure we have good entropy in low bits. 
- return H::combine(std::move(hash_state), v, v); + // bits follow a pattern since they are also multiples of some base value. + // The PointerAlignment test verifies that our mixing is good enough to handle + // these cases. + return H::combine(std::move(hash_state), v); } // AbslHashValue() for hashing nullptr_t @@ -562,8 +549,6 @@ AbslHashValue(H hash_state, const std::pair& p) { return H::combine(std::move(hash_state), p.first, p.second); } -// hash_tuple() -// // Helper function for hashing a tuple. The third argument should // be an index_sequence running from 0 to tuple_size - 1. template @@ -622,9 +607,7 @@ H AbslHashValue(H hash_state, const std::shared_ptr& ptr) { // `eq()` member isn't equivalent to `==` on the underlying character type. template H AbslHashValue(H hash_state, absl::string_view str) { - return H::combine( - H::combine_contiguous(std::move(hash_state), str.data(), str.size()), - WeaklyMixedInteger{str.size()}); + return H::combine_contiguous(std::move(hash_state), str.data(), str.size()); } // Support std::wstring, std::u16string and std::u32string. @@ -635,31 +618,24 @@ template , Alloc>& str) { - return H::combine( - H::combine_contiguous(std::move(hash_state), str.data(), str.size()), - WeaklyMixedInteger{str.size()}); + return H::combine_contiguous(std::move(hash_state), str.data(), str.size()); } -#ifdef ABSL_HAVE_STD_STRING_VIEW - // Support std::wstring_view, std::u16string_view and std::u32string_view. template ::value || std::is_same::value || std::is_same::value>> H AbslHashValue(H hash_state, std::basic_string_view str) { - return H::combine( - H::combine_contiguous(std::move(hash_state), str.data(), str.size()), - WeaklyMixedInteger{str.size()}); + return H::combine_contiguous(std::move(hash_state), str.data(), str.size()); } -#endif // ABSL_HAVE_STD_STRING_VIEW - #if defined(__cpp_lib_filesystem) && __cpp_lib_filesystem >= 201703L && \ (!defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) || \ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= 130000) && \ (!defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) || \ - __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500) + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 101500) && \ + (!defined(__XTENSA__)) #define ABSL_INTERNAL_STD_FILESYSTEM_PATH_HASH_AVAILABLE 1 @@ -734,9 +710,8 @@ template typename std::enable_if::value && !std::is_same::value, H>::type AbslHashValue(H hash_state, const std::vector& vector) { - return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(), - vector.size()), - WeaklyMixedInteger{vector.size()}); + return H::combine_contiguous(std::move(hash_state), vector.data(), + vector.size()); } // AbslHashValue special cases for hashing std::vector @@ -895,7 +870,6 @@ typename std::enable_if::value, H>::type AbslHashValue( return H::combine(std::move(hash_state), opt.has_value()); } -// VariantVisitor template struct VariantVisitor { H&& hash_state; @@ -944,8 +918,6 @@ H AbslHashValue(H hash_state, const std::bitset& set) { // ----------------------------------------------------------------------------- -// hash_range_or_bytes() -// // Mixes all values in the range [data, data+size) into the hash state. // This overload accepts only uniquely-represented types, and hashes them by // hashing the entire range of bytes. 
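The string overloads above can drop the trailing `WeaklyMixedInteger{size}` because, after this patch, the length is folded into the state up front: `PrecombineLengthMix` (defined in the next hunk) handles short inputs, and the seeded block hashes carry the length for long ones. The property being preserved, expressed as a hedged check (the inequality is expected in practice, not formally guaranteed):

```cpp
#include <string>

#include "absl/hash/hash.h"

bool LengthStillMatters() {
  // Same bytes, different lengths: with the length pre-mixed into the
  // state, these should still hash differently.
  std::string two(2, '\0');
  std::string three(3, '\0');
  return absl::HashOf(two) != absl::HashOf(three);
}
```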
@@ -956,14 +928,194 @@ hash_range_or_bytes(H hash_state, const T* data, size_t size) { return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size); } -// hash_range_or_bytes() template typename std::enable_if::value, H>::type hash_range_or_bytes(H hash_state, const T* data, size_t size) { for (const auto end = data + size; data < end; ++data) { hash_state = H::combine(std::move(hash_state), *data); } - return hash_state; + return H::combine(std::move(hash_state), + hash_internal::WeaklyMixedInteger{size}); +} + +inline constexpr uint64_t kMul = uint64_t{0x79d5f9e0de1e8cf5}; + +// Random data taken from the hexadecimal digits of Pi's fractional component. +// https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number +ABSL_CACHELINE_ALIGNED inline constexpr uint64_t kStaticRandomData[] = { + 0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0, + 0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377, +}; + +// Extremely weak mixture of length that is mixed into the state before +// combining the data. It is used only for small strings. This also ensures that +// we have high entropy in all bits of the state. +inline uint64_t PrecombineLengthMix(uint64_t state, size_t len) { + ABSL_ASSUME(len + sizeof(uint64_t) <= sizeof(kStaticRandomData)); + uint64_t data = absl::base_internal::UnalignedLoad64( + reinterpret_cast(&kStaticRandomData[0]) + len); + return state ^ data; +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t Mix(uint64_t lhs, uint64_t rhs) { + // Though the 128-bit product needs multiple instructions on non-x86-64 + // platforms, it is still a good balance between speed and hash quality. + absl::uint128 m = lhs; + m *= rhs; + return Uint128High64(m) ^ Uint128Low64(m); +} + +// Reads 8 bytes from p. +inline uint64_t Read8(const unsigned char* p) { +// Suppress erroneous array bounds errors on GCC. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" +#endif + return absl::base_internal::UnalignedLoad64(p); +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif +} + +// Reads 9 to 16 bytes from p. +// The first 8 bytes are in .first, and the rest of the bytes are in .second +// along with duplicated bytes from .first if len<16. +inline std::pair Read9To16(const unsigned char* p, + size_t len) { + return {Read8(p), Read8(p + len - 8)}; +} + +// Reads 4 to 8 bytes from p. +// Bytes are permuted and some input bytes may be duplicated in output. +inline uint64_t Read4To8(const unsigned char* p, size_t len) { + // If `len < 8`, we duplicate bytes. We always put low memory at the end. + // E.g., on little endian platforms: + // `ABCD` will be read as `ABCDABCD`. + // `ABCDE` will be read as `BCDEABCD`. + // `ABCDEF` will be read as `CDEFABCD`. + // `ABCDEFG` will be read as `DEFGABCD`. + // `ABCDEFGH` will be read as `EFGHABCD`. + // We also do not care about endianness. On big-endian platforms, bytes will + // be permuted differently. We always shift low memory by 32, because that + // can be pipelined earlier. Reading high memory requires computing + // `p + len - 4`. + uint64_t most_significant = + static_cast(absl::base_internal::UnalignedLoad32(p)) << 32; + uint64_t least_significant = + absl::base_internal::UnalignedLoad32(p + len - 4); + return most_significant | least_significant; +} + +// Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output. 
+inline uint32_t Read1To3(const unsigned char* p, size_t len) { + // The trick used by this implementation is to avoid branches. + // We always read three bytes by duplicating. + // E.g., + // `A` is read as `AAA`. + // `AB` is read as `ABB`. + // `ABC` is read as `ABC`. + // We always shift `p[0]` so that it can be pipelined better. + // Other bytes require extra computation to find indices. + uint32_t mem0 = (static_cast(p[0]) << 16) | p[len - 1]; + uint32_t mem1 = static_cast(p[len / 2]) << 8; + return mem0 | mem1; +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineRawImpl(uint64_t state, + uint64_t value) { + return Mix(state ^ value, kMul); +} + +// Slow dispatch path for calls to CombineContiguousImpl with a size argument +// larger than inlined size. Has the same effect as calling +// CombineContiguousImpl() repeatedly with the chunk stride size. +uint64_t CombineLargeContiguousImplOn32BitLengthGt8(const unsigned char* first, + size_t len, uint64_t state); +uint64_t CombineLargeContiguousImplOn64BitLengthGt32(const unsigned char* first, + size_t len, + uint64_t state); + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineSmallContiguousImpl( + uint64_t state, const unsigned char* first, size_t len) { + ABSL_ASSUME(len <= 8); + uint64_t v; + if (len >= 4) { + v = Read4To8(first, len); + } else if (len > 0) { + v = Read1To3(first, len); + } else { + // Empty string must modify the state. + v = 0x57; + } + return CombineRawImpl(state, v); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl9to16( + uint64_t state, const unsigned char* first, size_t len) { + ABSL_ASSUME(len >= 9); + ABSL_ASSUME(len <= 16); + // Note: any time one half of the mix function becomes zero it will fail to + // incorporate any bits from the other half. However, there is exactly 1 in + // 2^64 values for each side that achieve this, and only when the size is + // exactly 16 -- for smaller sizes there is an overlapping byte that makes + // this impossible unless the seed is *also* incredibly unlucky. + auto p = Read9To16(first, len); + return Mix(state ^ p.first, kMul ^ p.second); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline uint64_t CombineContiguousImpl17to32( + uint64_t state, const unsigned char* first, size_t len) { + ABSL_ASSUME(len >= 17); + ABSL_ASSUME(len <= 32); + // Do two mixes of overlapping 16-byte ranges in parallel to minimize + // latency. + const uint64_t m0 = + Mix(Read8(first) ^ kStaticRandomData[1], Read8(first + 8) ^ state); + + const unsigned char* tail_16b_ptr = first + (len - 16); + const uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ kStaticRandomData[3], + Read8(tail_16b_ptr + 8) ^ state); + return m0 ^ m1; +} + +// Implementation of the base case for combine_contiguous where we actually +// mix the bytes into the state. +// Dispatch to different implementations of combine_contiguous depending +// on the value of `sizeof(size_t)`. +inline uint64_t CombineContiguousImpl( + uint64_t state, const unsigned char* first, size_t len, + std::integral_constant /* sizeof_size_t */) { + // For large values we use CityHash, for small ones we use custom low latency + // hash. 
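On the `Read4To8`/`Read1To3` helpers above: they avoid branching on the exact length by letting two fixed-width loads overlap, duplicating some middle bytes. Every byte position is still covered by at least one window, so for a fixed length the packing is injective. A standalone restatement of the 4-to-8-byte case, using `memcpy` in place of `UnalignedLoad32`:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Requires 4 <= len <= 8. The two 32-bit windows overlap when len < 8.
uint64_t Read4To8Sketch(const unsigned char* p, size_t len) {
  uint32_t head, tail;
  std::memcpy(&head, p, sizeof(head));            // bytes [0, 4)
  std::memcpy(&tail, p + len - 4, sizeof(tail));  // bytes [len - 4, len)
  // Low memory goes in the high half, matching the comment above.
  return (static_cast<uint64_t>(head) << 32) | tail;
}
```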
+ if (len <= 8) { + return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first, + len); + } + return CombineLargeContiguousImplOn32BitLengthGt8(first, len, state); +} + +inline uint64_t CombineContiguousImpl( + uint64_t state, const unsigned char* first, size_t len, + std::integral_constant /* sizeof_size_t */) { + // For large values we use LowLevelHash or CityHash depending on the platform, + // for small ones we use custom low latency hash. + if (len <= 8) { + return CombineSmallContiguousImpl(PrecombineLengthMix(state, len), first, + len); + } + if (len <= 16) { + return CombineContiguousImpl9to16(PrecombineLengthMix(state, len), first, + len); + } + if (len <= 32) { + return CombineContiguousImpl17to32(PrecombineLengthMix(state, len), first, + len); + } + // We must not mix length into the state here because calling + // CombineContiguousImpl twice with PiecewiseChunkSize() must be equivalent + // to calling CombineLargeContiguousImpl once with 2 * PiecewiseChunkSize(). + return CombineLargeContiguousImplOn64BitLengthGt32(first, len, state); } #if defined(ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \ @@ -973,8 +1125,6 @@ hash_range_or_bytes(H hash_state, const T* data, size_t size) { #define ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0 #endif -// HashSelect -// // Type trait to select the appropriate hash implementation to use. // HashSelect::type will give the proper hash implementation, to be invoked // as: @@ -1071,26 +1221,7 @@ template struct is_hashable : std::integral_constant::value> {}; -// MixingHashState class ABSL_DLL MixingHashState : public HashStateBase { - // absl::uint128 is not an alias or a thin wrapper around the intrinsic. - // We use the intrinsic when available to improve performance. -#ifdef ABSL_HAVE_INTRINSIC_INT128 - using uint128 = __uint128_t; -#else // ABSL_HAVE_INTRINSIC_INT128 - using uint128 = absl::uint128; -#endif // ABSL_HAVE_INTRINSIC_INT128 - - // Random data taken from the hexadecimal digits of Pi's fractional component. - // https://en.wikipedia.org/wiki/Nothing-up-my-sleeve_number - ABSL_CACHELINE_ALIGNED static constexpr uint64_t kStaticRandomData[] = { - 0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0, - 0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377, - }; - - static constexpr uint64_t kMul = - uint64_t{0xdcb22ca68cb134ed}; - template using IntegralFastPath = conjunction, is_uniquely_represented, @@ -1101,8 +1232,6 @@ class ABSL_DLL MixingHashState : public HashStateBase { MixingHashState(MixingHashState&&) = default; MixingHashState& operator=(MixingHashState&&) = default; - // MixingHashState::combine_contiguous() - // // Fundamental base case for hash recursion: mixes the given range of bytes // into the hash state. static MixingHashState combine_contiguous(MixingHashState hash_state, @@ -1114,34 +1243,59 @@ class ABSL_DLL MixingHashState : public HashStateBase { } using MixingHashState::HashStateBase::combine_contiguous; - // MixingHashState::hash() - // + template + static size_t hash(const T& value) { + return hash_with_seed(value, Seed()); + } + // For performance reasons in non-opt mode, we specialize this for // integral types. // Otherwise we would be instantiating and calling dozens of functions for // something that is just one multiplication and a couple xor's. // The result should be the same as running the whole algorithm, but faster. 
template ::value, int> = 0> - static size_t hash(T value) { + static size_t hash_with_seed(T value, size_t seed) { return static_cast( - WeakMix(Seed(), static_cast>(value))); + CombineRawImpl(seed, static_cast>(value))); } - // Overload of MixingHashState::hash() template ::value, int> = 0> - static size_t hash(const T& value) { - return static_cast(combine(MixingHashState{}, value).state_); + static size_t hash_with_seed(const T& value, size_t seed) { + return static_cast(combine(MixingHashState{seed}, value).state_); } private: - // Invoked only once for a given argument; that plus the fact that this is - // move-only ensures that there is only one non-moved-from object. - MixingHashState() : state_(Seed()) {} - friend class MixingHashState::HashStateBase; template friend H absl::hash_internal::hash_weakly_mixed_integer(H, WeaklyMixedInteger); + // Allow the HashState type-erasure implementation to invoke + // RunCombinedUnordered() directly. + friend class absl::HashState; + friend struct CombineRaw; + + // For use in Seed(). + static const void* const kSeed; + + // Invoked only once for a given argument; that plus the fact that this is + // move-only ensures that there is only one non-moved-from object. + MixingHashState() : state_(Seed()) {} + + // Workaround for MSVC bug. + // We make the type copyable to fix the calling convention, even though we + // never actually copy it. Keep it private to not affect the public API of the + // type. + MixingHashState(const MixingHashState&) = default; + + explicit MixingHashState(uint64_t state) : state_(state) {} + + // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows + // us to be consistent with IntegralFastPath when combining raw types, but + // optimize Read1To3 and Read4To8 differently for the string case. + static MixingHashState combine_raw(MixingHashState hash_state, + uint64_t value) { + return MixingHashState(CombineRawImpl(hash_state.state_, value)); + } static MixingHashState combine_weakly_mixed_integer( MixingHashState hash_state, WeaklyMixedInteger value) { @@ -1171,195 +1325,6 @@ class ABSL_DLL MixingHashState : public HashStateBase { return MixingHashState::combine(std::move(state), unordered_state); } - // Allow the HashState type-erasure implementation to invoke - // RunCombinedUnordered() directly. - friend class absl::HashState; - friend struct CombineRaw; - - // Workaround for MSVC bug. - // We make the type copyable to fix the calling convention, even though we - // never actually copy it. Keep it private to not affect the public API of the - // type. - MixingHashState(const MixingHashState&) = default; - - explicit MixingHashState(uint64_t state) : state_(state) {} - - // Combines a raw value from e.g. integrals/floats/pointers/etc. This allows - // us to be consistent with IntegralFastPath when combining raw types, but - // optimize Read1To3 and Read4To8 differently for the string case. - static MixingHashState combine_raw(MixingHashState hash_state, - uint64_t value) { - return MixingHashState(WeakMix(hash_state.state_, value)); - } - - // Implementation of the base case for combine_contiguous where we actually - // mix the bytes into the state. - // Dispatch to different implementations of the combine_contiguous depending - // on the value of `sizeof(size_t)`. 
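The `hash_with_seed` entry points above exist so that `raw_hash_set` can thread a per-table seed through `absl::Hash` without a second mixing pass. How a container-side caller reaches them through the `HashWithSeed` shim is my reconstruction from the declarations in this patch (internal API, not code from the commit):

```cpp
#include <cstddef>
#include <string>

#include "absl/hash/hash.h"

size_t SeededHash(const std::string& value, size_t per_table_seed) {
  // HashWithSeed::hash() forwards to the hasher's private hash_with_seed(),
  // which starts MixingHashState from `per_table_seed` instead of Seed().
  return absl::hash_internal::HashWithSeed().hash(absl::Hash<std::string>{},
                                                  value, per_table_seed);
}
```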
- static uint64_t CombineContiguousImpl(uint64_t state, - const unsigned char* first, size_t len, - std::integral_constant - /* sizeof_size_t */); - static uint64_t CombineContiguousImpl(uint64_t state, - const unsigned char* first, size_t len, - std::integral_constant - /* sizeof_size_t */); - - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t CombineSmallContiguousImpl( - uint64_t state, const unsigned char* first, size_t len) { - ABSL_ASSUME(len <= 8); - uint64_t v; - if (len >= 4) { - v = Read4To8(first, len); - } else if (len > 0) { - v = Read1To3(first, len); - } else { - // Empty ranges have no effect. - return state; - } - return WeakMix(state, v); - } - - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t CombineContiguousImpl9to16( - uint64_t state, const unsigned char* first, size_t len) { - ABSL_ASSUME(len >= 9); - ABSL_ASSUME(len <= 16); - // Note: any time one half of the mix function becomes zero it will fail to - // incorporate any bits from the other half. However, there is exactly 1 in - // 2^64 values for each side that achieve this, and only when the size is - // exactly 16 -- for smaller sizes there is an overlapping byte that makes - // this impossible unless the seed is *also* incredibly unlucky. - auto p = Read9To16(first, len); - return Mix(state ^ p.first, kMul ^ p.second); - } - - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t CombineContiguousImpl17to32( - uint64_t state, const unsigned char* first, size_t len) { - ABSL_ASSUME(len >= 17); - ABSL_ASSUME(len <= 32); - // Do two mixes of overlapping 16-byte ranges in parallel to minimize - // latency. - const uint64_t m0 = - Mix(Read8(first) ^ kStaticRandomData[1], Read8(first + 8) ^ state); - - const unsigned char* tail_16b_ptr = first + (len - 16); - const uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ kStaticRandomData[3], - Read8(tail_16b_ptr + 8) ^ state); - return m0 ^ m1; - } - - // Slow dispatch path for calls to CombineContiguousImpl with a size argument - // larger than PiecewiseChunkSize(). Has the same effect as calling - // CombineContiguousImpl() repeatedly with the chunk stride size. - static uint64_t CombineLargeContiguousImpl32(uint64_t state, - const unsigned char* first, - size_t len); - static uint64_t CombineLargeContiguousImpl64(uint64_t state, - const unsigned char* first, - size_t len); - - // Reads 9 to 16 bytes from p. - // The least significant 8 bytes are in .first, and the rest of the bytes are - // in .second along with duplicated bytes from .first if len<16. - static std::pair Read9To16(const unsigned char* p, - size_t len) { - uint64_t low_mem = Read8(p); - uint64_t high_mem = Read8(p + len - 8); -#ifdef ABSL_IS_LITTLE_ENDIAN - uint64_t most_significant = high_mem; - uint64_t least_significant = low_mem; -#else - uint64_t most_significant = low_mem; - uint64_t least_significant = high_mem; -#endif - return {least_significant, most_significant}; - } - - // Reads 8 bytes from p. - static uint64_t Read8(const unsigned char* p) { - // Suppress erroneous array bounds errors on GCC. -#if defined(__GNUC__) && !defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Warray-bounds" -#endif - return absl::base_internal::UnalignedLoad64(p); -#if defined(__GNUC__) && !defined(__clang__) -#pragma GCC diagnostic pop -#endif - } - - // Reads 4 to 8 bytes from p. Some input bytes may be duplicated in output. - static uint64_t Read4To8(const unsigned char* p, size_t len) { - // If `len < 8`, we duplicate bytes in the middle. - // E.g.: - // `ABCD` will be read as `ABCDABCD`. 
- // `ABCDE` will be read as `ABCDBCDE`. - // `ABCDEF` will be read as `ABCDCDEF`. - // `ABCDEFG` will be read as `ABCDDEFG`. - // We also do not care about endianness. On big-endian platforms, bytes will - // be shuffled (it's fine). We always shift low memory by 32, because that - // can be pipelined earlier. Reading high memory requires computing - // `p + len - 4`. - uint64_t most_significant = - static_cast(absl::base_internal::UnalignedLoad32(p)) << 32; - uint64_t least_significant = - absl::base_internal::UnalignedLoad32(p + len - 4); - return most_significant | least_significant; - } - - // Reads 1 to 3 bytes from p. Some input bytes may be duplicated in output. - static uint32_t Read1To3(const unsigned char* p, size_t len) { - // The trick used by this implementation is to avoid branches. - // We always read three bytes by duplicating. - // E.g., - // `A` is read as `AAA`. - // `AB` is read as `ABB`. - // `ABC` is read as `ABC`. - // We always shift `p[0]` so that it can be pipelined better. - // Other bytes require extra computation to find indices. - uint32_t mem0 = (static_cast(p[0]) << 16) | p[len - 1]; - uint32_t mem1 = static_cast(p[len / 2]) << 8; - return mem0 | mem1; - } - - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t lhs, uint64_t rhs) { - // For 32 bit platforms we are trying to use all 64 lower bits. - if constexpr (sizeof(size_t) < 8) { - uint64_t m = lhs * rhs; - return m ^ (m >> 32); - } - // Though the 128-bit product on AArch64 needs two instructions, it is - // still a good balance between speed and hash quality. - uint128 m = lhs; - m *= rhs; - return Uint128High64(m) ^ Uint128Low64(m); - } - - // Slightly lower latency than Mix, but with lower quality. The byte swap - // helps ensure that low bits still have high quality. - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t WeakMix(uint64_t lhs, - uint64_t rhs) { - const uint64_t n = lhs ^ rhs; - // WeakMix doesn't work well on 32-bit platforms so just use Mix. - if constexpr (sizeof(size_t) < 8) return Mix(n, kMul); - return absl::gbswap_64(n * kMul); - } - - // An extern to avoid bloat on a direct call to LowLevelHash() with fixed - // values for both the seed and salt parameters. - static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len); - - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data, - size_t len) { -#ifdef ABSL_HAVE_INTRINSIC_INT128 - return LowLevelHashImpl(data, len); -#else - return hash_internal::CityHash64(reinterpret_cast(data), len); -#endif - } - - // Seed() - // // A non-deterministic seed. // // The current purpose of this seed is to generate non-deterministic results @@ -1374,64 +1339,23 @@ class ABSL_DLL MixingHashState : public HashStateBase { // // On other platforms this is still going to be non-deterministic but most // probably per-build and not per-process. - ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() { + ABSL_ATTRIBUTE_ALWAYS_INLINE static size_t Seed() { #if (!defined(__clang__) || __clang_major__ > 11) && \ (!defined(__apple_build_version__) || \ __apple_build_version__ >= 19558921) // Xcode 12 - return static_cast(reinterpret_cast(&kSeed)); + return static_cast(reinterpret_cast(&kSeed)); #else // Workaround the absence of // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021. 
- return static_cast(reinterpret_cast(kSeed)); + return static_cast(reinterpret_cast(kSeed)); #endif } - static const void* const kSeed; uint64_t state_; }; -// MixingHashState::CombineContiguousImpl() -inline uint64_t MixingHashState::CombineContiguousImpl( - uint64_t state, const unsigned char* first, size_t len, - std::integral_constant /* sizeof_size_t */) { - // For large values we use CityHash, for small ones we just use a - // multiplicative hash. - if (len <= 8) { - return CombineSmallContiguousImpl(state, first, len); - } - if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) { - return Mix(state ^ hash_internal::CityHash32( - reinterpret_cast(first), len), - kMul); - } - return CombineLargeContiguousImpl32(state, first, len); -} - -// Overload of MixingHashState::CombineContiguousImpl() -inline uint64_t MixingHashState::CombineContiguousImpl( - uint64_t state, const unsigned char* first, size_t len, - std::integral_constant /* sizeof_size_t */) { - // For large values we use LowLevelHash or CityHash depending on the platform, - // for small ones we just use a multiplicative hash. - if (len <= 8) { - return CombineSmallContiguousImpl(state, first, len); - } - if (len <= 16) { - return CombineContiguousImpl9to16(state, first, len); - } - if (len <= 32) { - return CombineContiguousImpl17to32(state, first, len); - } - if (ABSL_PREDICT_TRUE(len <= PiecewiseChunkSize())) { - return Mix(state ^ Hash64(first, len), kMul); - } - return CombineLargeContiguousImpl64(state, first, len); -} - struct AggregateBarrier {}; -// HashImpl - // Add a private base class to make sure this type is not an aggregate. // Aggregates can be aggregate initialized even if the default constructor is // deleted. @@ -1446,6 +1370,13 @@ struct HashImpl { size_t operator()(const T& value) const { return MixingHashState::hash(value); } + + private: + friend struct HashWithSeed; + + size_t hash_with_seed(const T& value, size_t seed) const { + return MixingHashState::hash_with_seed(value, seed); + } }; template @@ -1460,14 +1391,12 @@ H HashStateBase::combine(H state, const T& value, const Ts&... values) { values...); } -// HashStateBase::combine_contiguous() template template H HashStateBase::combine_contiguous(H state, const T* data, size_t size) { return hash_internal::hash_range_or_bytes(std::move(state), data, size); } -// HashStateBase::combine_unordered() template template H HashStateBase::combine_unordered(H state, I begin, I end) { @@ -1475,7 +1404,6 @@ H HashStateBase::combine_unordered(H state, I begin, I end) { CombineUnorderedCallback{begin, end}); } -// HashStateBase::PiecewiseCombiner::add_buffer() template H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, size_t size) { @@ -1485,7 +1413,7 @@ H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, position_ += size; return state; } - + added_something_ = true; // If the buffer is partially filled we need to complete the buffer // and hash it. if (position_ != 0) { @@ -1508,10 +1436,14 @@ H PiecewiseCombiner::add_buffer(H state, const unsigned char* data, return state; } -// HashStateBase::PiecewiseCombiner::finalize() template H PiecewiseCombiner::finalize(H state) { - // Hash the remainder left in the buffer, which may be empty + // Do not call combine_contiguous with empty remainder since it is modifying + // state. + if (added_something_ && position_ == 0) { + return state; + } + // We still call combine_contiguous for the entirely empty buffer. 
return H::combine_contiguous(std::move(state), buf_, position_); } diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc deleted file mode 100644 index 1a107ec674c..00000000000 --- a/absl/hash/internal/low_level_hash.cc +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2020 The Abseil Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "absl/hash/internal/low_level_hash.h" - -#include -#include -#include - -#include "absl/base/config.h" -#include "absl/base/internal/unaligned_access.h" -#include "absl/base/optimization.h" -#include "absl/base/prefetch.h" -#include "absl/numeric/int128.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace hash_internal { -namespace { -uint64_t Mix(uint64_t v0, uint64_t v1) { - absl::uint128 p = v0; - p *= v1; - return absl::Uint128Low64(p) ^ absl::Uint128High64(p); -} -uint64_t Mix32Bytes(const uint8_t* ptr, uint64_t current_state, - const uint64_t salt[5]) { - uint64_t a = absl::base_internal::UnalignedLoad64(ptr); - uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8); - uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16); - uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24); - - uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state); - uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state); - return cs0 ^ cs1; -} -} // namespace - -uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed, - const uint64_t salt[5]) { - assert(len > 32); - const uint8_t* ptr = static_cast(data); - uint64_t current_state = seed ^ salt[0] ^ len; - const uint8_t* last_32_ptr = ptr + len - 32; - - if (len > 64) { - // If we have more than 64 bytes, we're going to handle chunks of 64 - // bytes at a time. We're going to build up four separate hash states - // which we will then hash together. This avoids short dependency chains. - uint64_t duplicated_state0 = current_state; - uint64_t duplicated_state1 = current_state; - uint64_t duplicated_state2 = current_state; - - do { - // Always prefetch the next cacheline. 
- PrefetchToLocalCache(ptr + ABSL_CACHELINE_SIZE); - - uint64_t a = absl::base_internal::UnalignedLoad64(ptr); - uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8); - uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16); - uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24); - uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32); - uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40); - uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48); - uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56); - - current_state = Mix(a ^ salt[1], b ^ current_state); - duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0); - - duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1); - duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2); - - ptr += 64; - len -= 64; - } while (len > 64); - - current_state = (current_state ^ duplicated_state0) ^ - (duplicated_state1 + duplicated_state2); - } - - // We now have a data `ptr` with at most 64 bytes and the current state - // of the hashing state machine stored in current_state. - if (len > 32) { - current_state = Mix32Bytes(ptr, current_state, salt); - } - - // We now have a data `ptr` with at most 32 bytes and the current state - // of the hashing state machine stored in current_state. But we can - // safely read from `ptr + len - 32`. - return Mix32Bytes(last_32_ptr, current_state, salt); -} - -} // namespace hash_internal -ABSL_NAMESPACE_END -} // namespace absl diff --git a/absl/hash/internal/low_level_hash.h b/absl/hash/internal/low_level_hash.h deleted file mode 100644 index 49e9ec46bad..00000000000 --- a/absl/hash/internal/low_level_hash.h +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020 The Abseil Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// This file provides the Google-internal implementation of LowLevelHash. -// -// LowLevelHash is a fast hash function for hash tables, the fastest we've -// currently (late 2020) found that passes the SMHasher tests. The algorithm -// relies on intrinsic 128-bit multiplication for speed. This is not meant to be -// secure - just fast. -// -// It is closely based on a version of wyhash, but does not maintain or -// guarantee future compatibility with it. - -#ifndef ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ -#define ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ - -#include -#include - -#include "absl/base/config.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN -namespace hash_internal { - -// Hash function for a byte array. A 64-bit seed and a set of five 64-bit -// integers are hashed into the result. The length must be greater than 32. -// -// To allow all hashable types (including string_view and Span) to depend on -// this algorithm, we keep the API low-level, with as few dependencies as -// possible. 
-uint64_t LowLevelHashLenGt32(const void* data, size_t len, uint64_t seed, - const uint64_t salt[5]); - -} // namespace hash_internal -ABSL_NAMESPACE_END -} // namespace absl - -#endif // ABSL_HASH_INTERNAL_LOW_LEVEL_HASH_H_ diff --git a/absl/hash/internal/low_level_hash_test.cc b/absl/hash/internal/low_level_hash_test.cc index d370dc7b38b..9b7868c97b5 100644 --- a/absl/hash/internal/low_level_hash_test.cc +++ b/absl/hash/internal/low_level_hash_test.cc @@ -12,29 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "absl/hash/internal/low_level_hash.h" - -#include +#include #include +#include #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/hash/hash.h" #include "absl/strings/escaping.h" +#include "absl/strings/string_view.h" #define UPDATE_GOLDEN 0 namespace { -static const uint64_t kSalt[5] = {0xa0761d6478bd642f, 0xe7037ed1a0b428dbl, - 0x8ebc6af09c88c6e3, 0x589965cc75374cc3l, - 0x1d8e4e27c47d124f}; - TEST(LowLevelHashTest, VerifyGolden) { - constexpr size_t kNumGoldenOutputs = 94; + constexpr size_t kNumGoldenOutputs = 95; static struct { absl::string_view base64_data; uint64_t seed; - } cases[] = { + } cases[kNumGoldenOutputs] = { {"VprUGNH+5NnNRaORxgH/ySrZFQFDL+4VAodhfBNinmn8cg==", uint64_t{0x531858a40bfa7ea1}}, {"gc1xZaY+q0nPcUvOOnWnT3bqfmT/geth/f7Dm2e/DemMfk4=", @@ -361,54 +358,61 @@ TEST(LowLevelHashTest, VerifyGolden) { uint64_t{0xc9ae5c8759b4877a}}, }; -#if defined(ABSL_IS_BIG_ENDIAN) +#if defined(ABSL_IS_BIG_ENDIAN) || !defined(ABSL_HAVE_INTRINSIC_INT128) || \ + UINTPTR_MAX != UINT64_MAX constexpr uint64_t kGolden[kNumGoldenOutputs] = {}; - GTEST_SKIP() << "We only maintain golden data for little endian systems."; + GTEST_SKIP() + << "We only maintain golden data for little endian 64 bit systems with " + "128 bit intristics."; #else constexpr uint64_t kGolden[kNumGoldenOutputs] = { - 0x59b1542b0ff6b7b8, 0x3fb979d297096db9, 0xb391802c536343a9, - 0x94e0f7e4331081c4, 0x234d95e49e3ce30e, 0xca6351a3e568ed17, - 0xa62fcf7fa334293d, 0xb03111035f546067, 0x97b8c861e013d558, - 0xb6683803d9387949, 0xce5d907e0b3cb6a1, 0xab7466fae53ed201, - 0x8f13ca3f1cac3edd, 0xa2684a99cd909a2a, 0x03194f86b9440843, - 0xab3a745d96f75a66, 0xef2448606760ec3d, 0xd999e03247d5d5c5, - 0x4a25ab345d53f926, 0xa511b829ce9fc919, 0x4b76517f8e806cbf, - 0x006efd7ee09ff8d4, 0x790a4978bd0170a1, 0xc14f6e4b2dff057e, - 0xe0d2f4ae7c836d09, 0x4e2038a491ed939d, 0x23fd6f408e9598e0, - 0xa91cf8f1d92bcb08, 0x555cdec06df49d58, 0xe7d3e14bd6a8f3bd, - 0x4fdd25c1e75c009a, 0x3dffb8acf1ffbd17, 0x56946f33ed73a705, - 0x154c633d7690f3b0, 0x3e96f8e9a58a04e0, 0xb0279b244d3ccf9c, - 0x8571e87c882b2142, 0x9d9ada45132e7b41, 0xd5667655533f1dec, - 0x70607ace4ec36463, 0x691418d2eb63116c, 0xa70179d8e7142980, - 0xf8388d756bea25a7, 0xe5127c736d9826de, 0x7f1c95f9b6b656b6, - 0x66ab835b7bf4c7b3, 0xc03423b9a6db9728, 0xe88415a2b416b76d, - 0x8afd8c14d0b56c36, 0xe9a252b3ba217dad, 0x710150f5cd87a9ff, - 0xd66b147837fad9ae, 0x1af5f8ffbaa717a7, 0xe01f88d7a9a8ac17, - 0xd67870a7251fde72, 0xf32b837f845a676b, 0x0827717b1ffe59f7, - 0x80307212ca7645fb, 0xf0d22af71ea57c80, 0x459373765f2c114b, - 0x54d26109fab9cbaf, 0xc603da4e257b93db, 0x57fa334b5689d7d5, - 0x41cd1b2a8a91f620, 0xe1d6e7cd0fb015af, 0x8608e9035eb9d795, - 0x45c7b9fae739fee1, 0x9f5ae4f7a6b597ee, 0xfb771b6e0017757d, - 0x8dac6d29cfd8d027, 0x3c9ba4fb62ce6508, 0xa971fad8243844a7, - 0xd2126f49b2ea3b64, 0x5dd78fe7ac436861, 0xfe4004a6bb3494a8, - 0xe7c01cc63d770d7c, 0xa117075b8c801d37, 0xdf1dfe75f0e73069, - 0x7285b39700cefb98, 
0x5e97ea1aa9a670eb, 0xe21872db2b9137a3, - 0x12630b02c6ca405e, 0xfe1f2d802151f97a, 0xb53b0ed3dea4fb02, - 0xc6d5ed56d1dbf9fd, 0xe5b92b558a5c70cb, 0xccd6eedf97277d08, - 0x08582fff2e1494ed, 0xa41f2b3d17f1c4c7, 0x29ec07e5ef950f3d, - 0x96aba32565a97084, 0xf26870eca10cebcd, 0xbe1432feb4d33361, - 0x21993a779845e6eb, + 0x669da02f8d009e0f, 0xceb19bf2255445cd, 0x0e746992d6d43a7c, + 0x41ed623b9dcc5fde, 0x187a5a30d7c72edc, 0x949ae2a9c1eb925a, + 0x7e9c76a7b7c35e68, 0x4f96bf15b8309ff6, 0x26c0c1fde233732e, + 0xb0453f72aa151615, 0xf24b621a9ce9fece, 0x99ed798408687b5f, + 0x3b13ec1221423b66, 0xc67cf148a28afe59, 0x22f7e0173f92e3fa, + 0x14186c5fda6683a0, 0x97d608caa2603b2c, 0xfde3b0bbba24ffa9, + 0xb7068eb48c472c77, 0x9e34d72866b9fda0, 0xbbb99c884cdef88e, + 0x81d3e01f472a8a1a, 0xf84f506b3b60366d, 0xfe3f42f01300db37, + 0xe385712a51c1f836, 0x41dfd5e394245c79, 0x60855dbedadb900a, + 0xbdb4c0aa38567476, 0x9748802e8eec02cc, 0x5ced256d257f88de, + 0x55acccdf9a80f155, 0xa64b55b071afbbea, 0xa205bfe6c724ce4d, + 0x69dd26ca8ac21744, 0xef80e2ff2f6a9bc0, 0xde266c0baa202c20, + 0xfa3463080ac74c50, 0x379d968a40125c2b, 0x4cbbd0a7b3c7d648, + 0xc92afd93f4c665d2, 0x6e28f5adb7ae38dc, 0x7c689c9c237be35e, + 0xaea41b29bd9d0f73, 0x832cef631d77e59f, 0x70cac8e87bc37dd3, + 0x8e8c98bbde68e764, 0xd6117aeb3ddedded, 0xd796ab808e766240, + 0x8953d0ea1a7d9814, 0xa212eba4281b391c, 0x21a555a8939ce597, + 0x809d31660f6d81a8, 0x2356524b20ab400f, 0x5bc611e1e49d0478, + 0xba9c065e2f385ce2, 0xb0a0fd12f4e83899, 0x14d076a35b1ff2ca, + 0x8acd0bb8cf9a93c0, 0xe62e8ec094039ee4, 0x38a536a7072bdc61, + 0xca256297602524f8, 0xfc62ebfb3530caeb, 0x8d8b0c05520569f6, + 0xbbaca65cf154c59d, 0x3739b5ada7e338d3, 0xdb9ea31f47365340, + 0x410b5c9c1da56755, 0x7e0abc03dbd10283, 0x136f87be70ed442e, + 0x6b727d4feddbe1e9, 0x074ebb21183b01df, 0x3fe92185b1985484, + 0xc5d8efd3c68305ca, 0xd9bada21b17e272e, 0x64d73133e1360f83, + 0xeb8563aa993e21f9, 0xe5e8da50cceab28f, 0x7a6f92eb3223d2f3, + 0xbdaf98370ea9b31b, 0x1682a84457f077bc, 0x4abd2d33b6e3be37, + 0xb35bc81a7c9d4c04, 0x3e5bde3fb7cfe63d, 0xff3abe6e2ffec974, + 0xb8116dd26cf6feec, 0x7a77a6e4ed0cf081, 0xb71eec2d5a184316, + 0x6fa932f77b4da817, 0x795f79b33909b2c4, 0x1b8755ef6b5eb34e, + 0x2255b72d7d6b2d79, 0xf2bdafafa90bd50a, 0x442a578f02cb1fc8, + 0xc25aefe55ecf83db, 0x3114c056f9c5a676, }; #endif + auto hash_fn = [](absl::string_view s, uint64_t state) { + return absl::hash_internal::CombineLargeContiguousImplOn64BitLengthGt32( + reinterpret_cast(s.data()), s.size(), state); + }; + #if UPDATE_GOLDEN (void)kGolden; // Silence warning. 
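A note on the golden-data pattern here: `kGolden` pins all 95 expected outputs, and `UPDATE_GOLDEN` is the regeneration switch. Since this diff deliberately changes the hash that `hash_fn` routes through (`CombineLargeContiguousImplOn64BitLengthGt32`), the table was replaced wholesale rather than patched: with the macro set to 1, the loop below prints a fresh, paste-ready table, three values per row, instead of asserting; resetting it to 0 restores verification against `kGolden`.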
for (size_t i = 0; i < kNumGoldenOutputs; ++i) { std::string str; ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str)); ASSERT_GT(str.size(), 32); - uint64_t h = absl::hash_internal::LowLevelHashLenGt32( - str.data(), str.size(), cases[i].seed, kSalt); + uint64_t h = hash_fn(str, cases[i].seed); printf("0x%016" PRIx64 ", ", h); if (i % 3 == 2) { printf("\n"); @@ -423,9 +427,7 @@ TEST(LowLevelHashTest, VerifyGolden) { std::string str; ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str)); ASSERT_GT(str.size(), 32); - EXPECT_EQ(absl::hash_internal::LowLevelHashLenGt32(str.data(), str.size(), - cases[i].seed, kSalt), - kGolden[i]); + EXPECT_EQ(hash_fn(str, cases[i].seed), kGolden[i]); } #endif } diff --git a/absl/hash/internal/spy_hash_state.h b/absl/hash/internal/spy_hash_state.h index e403113b0ea..823e1e90fcd 100644 --- a/absl/hash/internal/spy_hash_state.h +++ b/absl/hash/internal/spy_hash_state.h @@ -151,6 +151,9 @@ class SpyHashStateImpl : public HashStateBase> { static SpyHashStateImpl combine_contiguous(SpyHashStateImpl hash_state, const unsigned char* begin, size_t size) { + if (size == 0) { + return SpyHashStateImpl::combine_raw(std::move(hash_state), 0); + } const size_t large_chunk_stride = PiecewiseChunkSize(); // Combining a large contiguous buffer must have the same effect as // doing it piecewise by the stride length, followed by the (possibly @@ -165,6 +168,7 @@ class SpyHashStateImpl : public HashStateBase> { if (size > 0) { hash_state.hash_representation_.emplace_back( reinterpret_cast<const char*>(begin), size); + hash_state = SpyHashStateImpl::combine_raw(std::move(hash_state), size); } return hash_state; } @@ -224,8 +228,9 @@ class SpyHashStateImpl : public HashStateBase> { // Combines raw data from e.g. integrals/floats/pointers/etc. static SpyHashStateImpl combine_raw(SpyHashStateImpl state, uint64_t value) { - const unsigned char* data = reinterpret_cast<const unsigned char*>(&value); - return SpyHashStateImpl::combine_contiguous(std::move(state), data, 8); + state.hash_representation_.emplace_back( + reinterpret_cast<const char*>(&value), 8); + return state; } // This is true if SpyHashStateImpl has been passed to a call of diff --git a/absl/log/BUILD.bazel b/absl/log/BUILD.bazel index 62ece451292..3e965abd26c 100644 --- a/absl/log/BUILD.bazel +++ b/absl/log/BUILD.bazel @@ -14,6 +14,8 @@ # limitations under the License.
# +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -152,6 +154,7 @@ cc_library( cc_library( name = "log_entry", + srcs = ["log_entry.cc"], hdrs = ["log_entry.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, @@ -160,6 +163,7 @@ cc_library( "//absl/base:core_headers", "//absl/base:log_severity", "//absl/log/internal:config", + "//absl/log/internal:proto", "//absl/strings", "//absl/time", "//absl/types:span", diff --git a/absl/log/CMakeLists.txt b/absl/log/CMakeLists.txt index 6aae05d1c30..c972c173a37 100644 --- a/absl/log/CMakeLists.txt +++ b/absl/log/CMakeLists.txt @@ -28,7 +28,6 @@ absl_cc_library( absl::core_headers absl::log_internal_check_op absl::log_internal_conditions - absl::log_internal_message absl::log_internal_strip ) @@ -47,6 +46,7 @@ absl_cc_library( absl::base absl::config absl::core_headers + absl::has_ostream_operator absl::leak_check absl::log_internal_nullguard absl::log_internal_nullstream @@ -218,6 +218,7 @@ absl_cc_library( absl::span absl::strerror absl::strings + absl::strings_internal absl::time ) @@ -395,6 +396,7 @@ absl_cc_library( DEPS absl::config absl::strings + absl::strings_internal absl::span ) @@ -558,6 +560,8 @@ absl_cc_library( absl_cc_library( NAME log_entry + SRCS + "log_entry.cc" HDRS "log_entry.h" COPTS @@ -568,6 +572,7 @@ absl_cc_library( absl::config absl::core_headers absl::log_internal_config + absl::log_internal_proto absl::log_severity absl::span absl::strings diff --git a/absl/log/check_test_impl.inc b/absl/log/check_test_impl.inc index 7a0000e1354..495f85a267a 100644 --- a/absl/log/check_test_impl.inc +++ b/absl/log/check_test_impl.inc @@ -13,6 +13,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// SKIP_ABSL_INLINE_NAMESPACE_CHECK + #ifndef ABSL_LOG_CHECK_TEST_IMPL_H_ #define ABSL_LOG_CHECK_TEST_IMPL_H_ @@ -22,6 +24,8 @@ #error ABSL_TEST_CHECK must be defined for these tests to work. #endif +#include +#include #include #include @@ -40,6 +44,7 @@ namespace absl_log_internal { using ::testing::AllOf; using ::testing::AnyOf; +using ::testing::ContainsRegex; using ::testing::HasSubstr; using ::testing::Not; @@ -238,6 +243,18 @@ TEST(CHECKTest, TestBinaryChecksWithPrimitives) { ABSL_TEST_CHECK_LT(1, 2); } +TEST(CHECKTest, TestBinaryChecksWithStringComparison) { + const std::string a = "a"; + ABSL_TEST_CHECK_EQ(a, "a"); + ABSL_TEST_CHECK_NE(a, "b"); + ABSL_TEST_CHECK_GE(a, a); + ABSL_TEST_CHECK_GE("b", a); + ABSL_TEST_CHECK_LE(a, "a"); + ABSL_TEST_CHECK_LE(a, "b"); + ABSL_TEST_CHECK_GT("b", a); + ABSL_TEST_CHECK_LT(a, "b"); +} + // For testing using CHECK*() on anonymous enums. enum { CASE_A, CASE_B }; @@ -262,6 +279,25 @@ TEST(CHECKTest, TestBinaryChecksWithNullptr) { ABSL_TEST_CHECK_NE(nullptr, p_not_null); } +struct ExampleTypeThatHasNoStreamOperator { + bool x; + + bool operator==(const ExampleTypeThatHasNoStreamOperator& other) const { + return x == other.x; + } + bool operator==(const bool& other) const { return x == other; } +}; + +TEST(CHECKDeathTest, TestBinaryChecksWithUnprintable) { + ExampleTypeThatHasNoStreamOperator a{true}; + ExampleTypeThatHasNoStreamOperator b{false}; + ABSL_TEST_CHECK_EQ(a, a); + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, b), "Check failed: a == b"); + ABSL_TEST_CHECK_EQ(a, true); + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(a, false), + "Check failed: a == false \\(UNPRINTABLE vs. 
0\\)"); +} + #if GTEST_HAS_DEATH_TEST // Test logging of various char-typed values by failing CHECK*(). @@ -638,9 +674,8 @@ TEST(CHECKDeathTest, TestPointerPrintedAsNumberDespiteAbslStringify) { EXPECT_DEATH( ABSL_TEST_CHECK_EQ(p, nullptr), AnyOf( - HasSubstr("Check failed: p == nullptr (0000000000001234 vs. (null))"), - HasSubstr("Check failed: p == nullptr (0x1234 vs. (null))") - )); + HasSubstr("Check failed: p == nullptr (0000000000001234 vs. (null))"), + HasSubstr("Check failed: p == nullptr (0x1234 vs. (null))"))); } // An uncopyable object with operator<<. @@ -670,6 +705,273 @@ TEST(CHECKDeathTest, TestUncopyable) { HasSubstr("Check failed: v1 == v2 (Uncopyable{1} vs. Uncopyable{2})")); } +enum class ScopedEnum { kValue1 = 1, kValue2 = 2 }; + +TEST(CHECKTest, TestScopedEnumComparisonChecks) { + ABSL_TEST_CHECK_EQ(ScopedEnum::kValue1, ScopedEnum::kValue1); + ABSL_TEST_CHECK_NE(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_CHECK_LT(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_CHECK_LE(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_CHECK_GT(ScopedEnum::kValue2, ScopedEnum::kValue1); + ABSL_TEST_CHECK_GE(ScopedEnum::kValue2, ScopedEnum::kValue2); + ABSL_TEST_DCHECK_EQ(ScopedEnum::kValue1, ScopedEnum::kValue1); + ABSL_TEST_DCHECK_NE(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_DCHECK_LT(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_DCHECK_LE(ScopedEnum::kValue1, ScopedEnum::kValue2); + ABSL_TEST_DCHECK_GT(ScopedEnum::kValue2, ScopedEnum::kValue1); + ABSL_TEST_DCHECK_GE(ScopedEnum::kValue2, ScopedEnum::kValue2); + + // Check that overloads work correctly with references as well. + const ScopedEnum x = ScopedEnum::kValue1; + const ScopedEnum& x_ref = x; + ABSL_TEST_CHECK_EQ(x, x_ref); + ABSL_TEST_CHECK_EQ(x_ref, x_ref); +} + +#if GTEST_HAS_DEATH_TEST +TEST(CHECKDeathTest, TestScopedEnumCheckFailureMessagePrintsIntegerValues) { + const auto e1 = ScopedEnum::kValue1; + const auto e2 = ScopedEnum::kValue2; + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(e1, e2), + ContainsRegex(R"re(Check failed:.*\(1 vs. 2\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_NE(e1, e1), + ContainsRegex(R"re(Check failed:.*\(1 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GT(e1, e1), + ContainsRegex(R"re(Check failed:.*\(1 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GE(e1, e2), + ContainsRegex(R"re(Check failed:.*\(1 vs. 2\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LT(e2, e2), + ContainsRegex(R"re(Check failed:.*\(2 vs. 2\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LE(e2, e1), + ContainsRegex(R"re(Check failed:.*\(2 vs. 1\))re")); + + const auto& e1_ref = e1; + EXPECT_DEATH(ABSL_TEST_CHECK_NE(e1_ref, e1), + ContainsRegex(R"re(Check failed:.*\(1 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_NE(e1_ref, e1_ref), + ContainsRegex(R"re(Check failed:.*\(1 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(e2, e1_ref), + ContainsRegex(R"re(Check failed:.*\(2 vs. 1\))re")); + +#ifndef NDEBUG + EXPECT_DEATH(ABSL_TEST_DCHECK_EQ(e2, e1), + ContainsRegex(R"re(Check failed:.*\(2 vs. 1\))re")); +#else + // DHECK_EQ is not evaluated in non-debug mode. + ABSL_TEST_DCHECK_EQ(e2, e1); +#endif // NDEBUG +} +#endif // GTEST_HAS_DEATH_TEST + +enum class ScopedInt8Enum : int8_t { + kValue1 = 1, + kValue2 = 66 // Printable ascii value 'B'. +}; + +TEST(CHECKDeathTest, TestScopedInt8EnumCheckFailureMessagePrintsCharValues) { + const auto e1 = ScopedInt8Enum::kValue1; + const auto e2 = ScopedInt8Enum::kValue2; + EXPECT_DEATH( + ABSL_TEST_CHECK_EQ(e1, e2), + ContainsRegex(R"re(Check failed:.*\(signed char value 1 vs. 
'B'\))re")); + EXPECT_DEATH( + ABSL_TEST_CHECK_NE(e1, e1), + ContainsRegex( + R"re(Check failed:.*\(signed char value 1 vs. signed char value 1\))re")); + EXPECT_DEATH( + ABSL_TEST_CHECK_GT(e1, e1), + ContainsRegex( + R"re(Check failed:.*\(signed char value 1 vs. signed char value 1\))re")); + EXPECT_DEATH( + ABSL_TEST_CHECK_GE(e1, e2), + ContainsRegex(R"re(Check failed:.*\(signed char value 1 vs. 'B'\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LT(e2, e2), + ContainsRegex(R"re(Check failed:.*\('B' vs. 'B'\))re")); + EXPECT_DEATH( + ABSL_TEST_CHECK_LE(e2, e1), + ContainsRegex(R"re(Check failed:.*\('B' vs. signed char value 1\))re")); +} + +enum class ScopedUnsignedEnum : uint16_t { + kValue1 = std::numeric_limits<uint16_t>::min(), + kValue2 = std::numeric_limits<uint16_t>::max() +}; + +TEST(CHECKDeathTest, + TestScopedUnsignedEnumCheckFailureMessagePrintsCorrectValues) { + const auto e1 = ScopedUnsignedEnum::kValue1; + const auto e2 = ScopedUnsignedEnum::kValue2; + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(e1, e2), + ContainsRegex(R"re(Check failed:.*\(0 vs. 65535\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_NE(e1, e1), + ContainsRegex(R"re(Check failed:.*\(0 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GT(e1, e1), + ContainsRegex(R"re(Check failed:.*\(0 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GE(e1, e2), + ContainsRegex(R"re(Check failed:.*\(0 vs. 65535\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LT(e1, e1), + ContainsRegex(R"re(Check failed:.*\(0 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LE(e2, e1), + ContainsRegex(R"re(Check failed:.*\(65535 vs. 0\))re")); +} + +enum class ScopedInt64Enum : int64_t { + kMin = std::numeric_limits<int64_t>::min(), + kMax = std::numeric_limits<int64_t>::max(), +}; + +// Tests that int64-backed enums are printed correctly even for very large and +// very small values. +TEST(CHECKDeathTest, TestScopedInt64EnumCheckFailureMessage) { + const auto min = ScopedInt64Enum::kMin; + const auto max = ScopedInt64Enum::kMax; + EXPECT_DEATH( + ABSL_TEST_CHECK_EQ(max, min), + ContainsRegex( + "Check failed:.*9223372036854775807 vs. -9223372036854775808")); + EXPECT_DEATH( + ABSL_TEST_CHECK_NE(max, max), + ContainsRegex( + "Check failed:.*9223372036854775807 vs. 9223372036854775807")); + EXPECT_DEATH( + ABSL_TEST_CHECK_GT(min, min), + ContainsRegex( + "Check failed:.*-9223372036854775808 vs. -9223372036854775808")); + EXPECT_DEATH( + ABSL_TEST_CHECK_GE(min, max), + ContainsRegex( + R"(Check failed:.*-9223372036854775808 vs. 9223372036854775807)")); + EXPECT_DEATH( + ABSL_TEST_CHECK_LT(max, max), + ContainsRegex( + R"(Check failed:.*9223372036854775807 vs. 9223372036854775807)")); + EXPECT_DEATH( + ABSL_TEST_CHECK_LE(max, min), + ContainsRegex( + R"(Check failed:.*9223372036854775807 vs. -9223372036854775808)")); +} + +enum class ScopedBoolEnum : bool { + kFalse, + kTrue, +}; + +TEST(CHECKDeathTest, TestScopedBoolEnumCheckFailureMessagePrintsCorrectValues) { + const auto t = ScopedBoolEnum::kTrue; + const auto f = ScopedBoolEnum::kFalse; + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(t, f), + ContainsRegex(R"re(Check failed:.*\(1 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_NE(f, f), + ContainsRegex(R"re(Check failed:.*\(0 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GT(f, f), + ContainsRegex(R"re(Check failed:.*\(0 vs. 0\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_GE(f, t), + ContainsRegex(R"re(Check failed:.*\(0 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LT(t, t), + ContainsRegex(R"re(Check failed:.*\(1 vs. 1\))re")); + EXPECT_DEATH(ABSL_TEST_CHECK_LE(t, f), + ContainsRegex(R"re(Check failed:.*\(1 vs. 
0\))re")); +} + +enum class ScopedEnumWithAbslStringify { + kValue1 = 1, + kValue2 = 2, + kValue3 = 3 +}; + +template +void AbslStringify(Sink& sink, ScopedEnumWithAbslStringify v) { + switch (v) { + case ScopedEnumWithAbslStringify::kValue1: + sink.Append("AbslStringify: kValue1"); + break; + case ScopedEnumWithAbslStringify::kValue2: + sink.Append("AbslStringify: kValue2"); + break; + case ScopedEnumWithAbslStringify::kValue3: + sink.Append("AbslStringify: kValue3"); + break; + } +} + +#if GTEST_HAS_DEATH_TEST +TEST(CHECKDeathTest, TestScopedEnumUsesAbslStringify) { + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(ScopedEnumWithAbslStringify::kValue1, + ScopedEnumWithAbslStringify::kValue2), + ContainsRegex("Check failed:.*AbslStringify: kValue1 vs. " + "AbslStringify: kValue2")); +} +#endif // GTEST_HAS_DEATH_TEST + +enum class ScopedEnumWithOutputOperator { + kValue1 = 1, + kValue2 = 2, +}; + +std::ostream& operator<<(std::ostream& os, ScopedEnumWithOutputOperator v) { + switch (v) { + case ScopedEnumWithOutputOperator::kValue1: + os << "OutputOperator: kValue1"; + break; + case ScopedEnumWithOutputOperator::kValue2: + os << "OutputOperator: kValue2"; + break; + } + return os; +} + +#if GTEST_HAS_DEATH_TEST +TEST(CHECKDeathTest, TestOutputOperatorIsUsedForScopedEnum) { + EXPECT_DEATH(ABSL_TEST_CHECK_EQ(ScopedEnumWithOutputOperator::kValue1, + ScopedEnumWithOutputOperator::kValue2), + ContainsRegex("Check failed:.*OutputOperator: kValue1 vs. " + "OutputOperator: kValue2")); +} +#endif // GTEST_HAS_DEATH_TEST + +enum class ScopedEnumWithAbslStringifyAndOutputOperator { + kValue1 = 1, + kValue2 = 2, +}; + +template +void AbslStringify(Sink& sink, ScopedEnumWithAbslStringifyAndOutputOperator v) { + switch (v) { + case ScopedEnumWithAbslStringifyAndOutputOperator::kValue1: + sink.Append("AbslStringify: kValue1"); + break; + case ScopedEnumWithAbslStringifyAndOutputOperator::kValue2: + sink.Append("AbslStringify: kValue2"); + break; + } +} + +std::ostream& operator<<(std::ostream& os, + ScopedEnumWithAbslStringifyAndOutputOperator v) { + switch (v) { + case ScopedEnumWithAbslStringifyAndOutputOperator::kValue1: + os << "OutputOperator: kValue1"; + break; + case ScopedEnumWithAbslStringifyAndOutputOperator::kValue2: + os << "OutputOperator: kValue2"; + break; + } + return os; +} + +#if GTEST_HAS_DEATH_TEST + +// Test that, if operator<< and AbslStringify are both defined for a scoped +// enum, streaming takes precedence over AbslStringify. +TEST(CHECKDeathTest, TestScopedEnumPrefersOutputOperatorOverAbslStringify) { + EXPECT_DEATH( + ABSL_TEST_CHECK_EQ(ScopedEnumWithAbslStringifyAndOutputOperator::kValue1, + ScopedEnumWithAbslStringifyAndOutputOperator::kValue2), + ContainsRegex("Check failed:.*OutputOperator: kValue1 vs. 
" + "OutputOperator: kValue2")); +} +#endif // GTEST_HAS_DEATH_TEST + } // namespace absl_log_internal // NOLINTEND(misc-definitions-in-headers) diff --git a/absl/log/flags_test.cc b/absl/log/flags_test.cc index 1080ea11966..f5a2b5189c7 100644 --- a/absl/log/flags_test.cc +++ b/absl/log/flags_test.cc @@ -93,6 +93,7 @@ TEST_F(LogFlagsTest, PrependLogPrefix) { TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) { absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -104,6 +105,7 @@ TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) { TEST_F(LogFlagsTest, BacktraceAtNonsense) { absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -117,6 +119,7 @@ TEST_F(LogFlagsTest, BacktraceAtWrongFile) { const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO) << "hello world"; }; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -131,6 +134,7 @@ TEST_F(LogFlagsTest, BacktraceAtWrongLine) { const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO) << "hello world"; }; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -145,6 +149,7 @@ TEST_F(LogFlagsTest, BacktraceAtWholeFilename) { const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO) << "hello world"; }; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -158,6 +163,7 @@ TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) { const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO) << "hello world"; }; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:"))))); @@ -172,6 +178,7 @@ TEST_F(LogFlagsTest, LogsBacktrace) { const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO) << "hello world"; }; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); testing::InSequence seq; EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:")))); diff --git a/absl/log/internal/BUILD.bazel b/absl/log/internal/BUILD.bazel index 44ec71bb437..bb20a95fb1d 100644 --- a/absl/log/internal/BUILD.bazel +++ b/absl/log/internal/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. 
# +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -56,7 +59,6 @@ cc_library( deps = [ ":check_op", ":conditions", - ":log_message", ":strip", "//absl/base:core_headers", ], @@ -79,6 +81,7 @@ cc_library( "//absl/base:nullability", "//absl/debugging:leak_check", "//absl/strings", + "//absl/strings:has_ostream_operator", ], ) @@ -205,6 +208,7 @@ cc_library( "//absl/log:log_sink_registry", "//absl/memory", "//absl/strings", + "//absl/strings:internal", "//absl/time", "//absl/types:span", ], @@ -218,6 +222,7 @@ cc_library( deps = [ "//absl/base:config", "//absl/strings", + "//absl/strings:internal", "//absl/types:span", ], ) diff --git a/absl/log/internal/append_truncated.h b/absl/log/internal/append_truncated.h index f0e7912c2ac..d420a8b5c6a 100644 --- a/absl/log/internal/append_truncated.h +++ b/absl/log/internal/append_truncated.h @@ -17,8 +17,10 @@ #include #include +#include #include "absl/base/config.h" +#include "absl/strings/internal/utf8.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" @@ -33,6 +35,32 @@ inline size_t AppendTruncated(absl::string_view src, absl::Span &dst) { dst.remove_prefix(src.size()); return src.size(); } +// Likewise, but it also takes a wide character string and transforms it into a +// UTF-8 encoded byte string regardless of the current locale. +// - On platforms where `wchar_t` is 2 bytes (e.g., Windows), the input is +// treated as UTF-16. +// - On platforms where `wchar_t` is 4 bytes (e.g., Linux, macOS), the input +// is treated as UTF-32. +inline size_t AppendTruncated(std::wstring_view src, absl::Span &dst) { + absl::strings_internal::ShiftState state; + size_t total_bytes_written = 0; + for (const wchar_t wc : src) { + // If the destination buffer might not be large enough to write the next + // character, stop. + if (dst.size() < absl::strings_internal::kMaxEncodedUTF8Size) break; + size_t bytes_written = + absl::strings_internal::WideToUtf8(wc, dst.data(), state); + if (bytes_written == static_cast(-1)) { + // Invalid character. Encode REPLACEMENT CHARACTER (U+FFFD) instead. + constexpr wchar_t kReplacementCharacter = L'\uFFFD'; + bytes_written = absl::strings_internal::WideToUtf8(kReplacementCharacter, + dst.data(), state); + } + dst.remove_prefix(bytes_written); + total_bytes_written += bytes_written; + } + return total_bytes_written; +} // Likewise, but `n` copies of `c`. inline size_t AppendTruncated(char c, size_t n, absl::Span &dst) { if (n > dst.size()) n = dst.size(); diff --git a/absl/log/internal/check_impl.h b/absl/log/internal/check_impl.h index 00f25f80ba2..dc2e214de74 100644 --- a/absl/log/internal/check_impl.h +++ b/absl/log/internal/check_impl.h @@ -12,13 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
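One more note on the wide-character `AppendTruncated` overload added above in append_truncated.h: it encodes one `wchar_t` at a time, stops as soon as fewer than `kMaxEncodedUTF8Size` bytes remain so that truncation can never split a multi-byte UTF-8 sequence, substitutes U+FFFD for invalid input, and threads a `ShiftState` through the calls so that a UTF-16 surrogate pair split across two `wchar_t`s still decodes as one code point on 2-byte-`wchar_t` platforms. A usage sketch (the caller, buffer size, and function name here are hypothetical):

#include <cstddef>
#include <string_view>

#include "absl/log/internal/append_truncated.h"
#include "absl/types/span.h"

// Transcode a wide message into a bounded byte buffer, truncating cleanly.
size_t EncodeForLog(std::wstring_view msg, char (&buf)[64]) {
  absl::Span<char> remaining(buf, sizeof(buf));
  // On return, `remaining` has been advanced past the bytes written.
  return absl::log_internal::AppendTruncated(msg, remaining);
}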
+// SKIP_ABSL_INLINE_NAMESPACE_CHECK + #ifndef ABSL_LOG_INTERNAL_CHECK_IMPL_H_ #define ABSL_LOG_INTERNAL_CHECK_IMPL_H_ #include "absl/base/optimization.h" #include "absl/log/internal/check_op.h" #include "absl/log/internal/conditions.h" -#include "absl/log/internal/log_message.h" #include "absl/log/internal/strip.h" // CHECK diff --git a/absl/log/internal/check_op.cc b/absl/log/internal/check_op.cc index 23db63bf101..be8ceaf4caa 100644 --- a/absl/log/internal/check_op.cc +++ b/absl/log/internal/check_op.cc @@ -101,6 +101,10 @@ void MakeCheckOpValueString(std::ostream& os, const void* p) { } } +std::ostream& operator<<(std::ostream& os, UnprintableWrapper) { + return os << "UNPRINTABLE"; +} + // Helper functions for string comparisons. #define DEFINE_CHECK_STROP_IMPL(name, func, expected) \ const char* absl_nullable Check##func##expected##Impl( \ diff --git a/absl/log/internal/check_op.h b/absl/log/internal/check_op.h index dc7d19e9a13..532b37cae10 100644 --- a/absl/log/internal/check_op.h +++ b/absl/log/internal/check_op.h @@ -40,15 +40,16 @@ #include "absl/log/internal/nullstream.h" #include "absl/log/internal/strip.h" #include "absl/strings/has_absl_stringify.h" +#include "absl/strings/has_ostream_operator.h" #include "absl/strings/string_view.h" // `ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL` wraps string literals that // should be stripped when `ABSL_MIN_LOG_LEVEL` exceeds `kFatal`. #ifdef ABSL_MIN_LOG_LEVEL -#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) \ - (::absl::LogSeverity::kFatal >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ - ? (literal) \ +#define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) \ + (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL) \ + ? (literal) \ : "") #else #define ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(literal) (literal) @@ -133,41 +134,39 @@ // string literal and abort without doing any streaming. We don't need to // strip the call to stringify the non-ok `Status` as long as we don't log it; // dropping the `Status`'s message text is out of scope. -#define ABSL_LOG_INTERNAL_CHECK_OK(val, val_text) \ - for (::std::pair \ - absl_log_internal_check_ok_goo; \ - absl_log_internal_check_ok_goo.first = \ - ::absl::log_internal::AsStatus(val), \ - absl_log_internal_check_ok_goo.second = \ - ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \ - ? nullptr \ - : ::absl::status_internal::MakeCheckFailString( \ - absl_log_internal_check_ok_goo.first, \ - ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text \ - " is OK")), \ - !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());) \ - ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \ - ABSL_LOG_INTERNAL_CHECK(::absl::implicit_cast( \ - absl_log_internal_check_ok_goo.second)) \ +#define ABSL_LOG_INTERNAL_CHECK_OK(val, val_text) \ + for (::std::pair \ + absl_log_internal_check_ok_goo; \ + absl_log_internal_check_ok_goo.first = \ + ::absl::log_internal::AsStatus(val), \ + absl_log_internal_check_ok_goo.second = \ + ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok()) \ + ? 
"" /* Don't use nullptr, to keep the annotation happy */ \ + : ::absl::status_internal::MakeCheckFailString( \ + absl_log_internal_check_ok_goo.first, \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text \ + " is OK")), \ + !ABSL_PREDICT_TRUE(absl_log_internal_check_ok_goo.first->ok());) \ + ABSL_LOG_INTERNAL_CONDITION_FATAL(STATELESS, true) \ + ABSL_LOG_INTERNAL_CHECK(absl_log_internal_check_ok_goo.second) \ .InternalStream() -#define ABSL_LOG_INTERNAL_QCHECK_OK(val, val_text) \ - for (::std::pair \ - absl_log_internal_qcheck_ok_goo; \ - absl_log_internal_qcheck_ok_goo.first = \ - ::absl::log_internal::AsStatus(val), \ - absl_log_internal_qcheck_ok_goo.second = \ - ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok()) \ - ? nullptr \ - : ::absl::status_internal::MakeCheckFailString( \ - absl_log_internal_qcheck_ok_goo.first, \ - ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text \ - " is OK")), \ - !ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok());) \ - ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true) \ - ABSL_LOG_INTERNAL_QCHECK(::absl::implicit_cast( \ - absl_log_internal_qcheck_ok_goo.second)) \ +#define ABSL_LOG_INTERNAL_QCHECK_OK(val, val_text) \ + for (::std::pair \ + absl_log_internal_qcheck_ok_goo; \ + absl_log_internal_qcheck_ok_goo.first = \ + ::absl::log_internal::AsStatus(val), \ + absl_log_internal_qcheck_ok_goo.second = \ + ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok()) \ + ? "" /* Don't use nullptr, to keep the annotation happy */ \ + : ::absl::status_internal::MakeCheckFailString( \ + absl_log_internal_qcheck_ok_goo.first, \ + ABSL_LOG_INTERNAL_STRIP_STRING_LITERAL(val_text \ + " is OK")), \ + !ABSL_PREDICT_TRUE(absl_log_internal_qcheck_ok_goo.first->ok());) \ + ABSL_LOG_INTERNAL_CONDITION_QFATAL(STATELESS, true) \ + ABSL_LOG_INTERNAL_QCHECK(absl_log_internal_qcheck_ok_goo.second) \ .InternalStream() namespace absl { @@ -224,7 +223,15 @@ inline void MakeCheckOpValueString(std::ostream& os, const T& v) { void MakeCheckOpValueString(std::ostream& os, char v); void MakeCheckOpValueString(std::ostream& os, signed char v); void MakeCheckOpValueString(std::ostream& os, unsigned char v); -void MakeCheckOpValueString(std::ostream& os, const void* p); +void MakeCheckOpValueString(std::ostream& os, const void* absl_nullable p); + +// A wrapper for types that have no operator<<. +struct UnprintableWrapper { + template + explicit UnprintableWrapper(const T&) {} + + friend std::ostream& operator<<(std::ostream& os, UnprintableWrapper); +}; namespace detect_specialization { @@ -266,8 +273,9 @@ float operator<<(std::ostream&, float value); double operator<<(std::ostream&, double value); long double operator<<(std::ostream&, long double value); bool operator<<(std::ostream&, bool value); -const void* operator<<(std::ostream&, const void* value); -const void* operator<<(std::ostream&, std::nullptr_t); +const void* absl_nullable operator<<(std::ostream&, + const void* absl_nullable value); +const void* absl_nullable operator<<(std::ostream&, std::nullptr_t); // These `char` overloads are specified like this in the standard, so we have to // write them exactly the same to ensure the call is ambiguous. 
@@ -281,13 +289,14 @@ signed char operator<<(std::basic_ostream&, signed char); template unsigned char operator<<(std::basic_ostream&, unsigned char); template -const char* operator<<(std::basic_ostream&, const char*); +const char* absl_nonnull operator<<(std::basic_ostream&, + const char* absl_nonnull); template -const signed char* operator<<(std::basic_ostream&, - const signed char*); +const signed char* absl_nonnull operator<<(std::basic_ostream&, + const signed char* absl_nonnull); template -const unsigned char* operator<<(std::basic_ostream&, - const unsigned char*); +const unsigned char* absl_nonnull operator<<(std::basic_ostream&, + const unsigned char* absl_nonnull); // This overload triggers when the call is not ambiguous. // It means that T is being printed with some overload not on this list. @@ -298,12 +307,11 @@ const T& Detect(int); // This overload triggers when the call is ambiguous. // It means that T is either one from this list or printed as one from this -// list. Eg an enum that decays to `int` for printing. +// list. Eg an unscoped enum that decays to `int` for printing. // We ask the overload set to give us the type we want to convert it to. template -decltype(detect_specialization::operator<<(std::declval(), - std::declval())) -Detect(char); +decltype(detect_specialization::operator<<( + std::declval(), std::declval())) Detect(char); // A sink for AbslStringify which redirects everything to a std::ostream. class StringifySink { @@ -312,7 +320,8 @@ class StringifySink { void Append(absl::string_view text); void Append(size_t length, char ch); - friend void AbslFormatFlush(StringifySink* sink, absl::string_view text); + friend void AbslFormatFlush(StringifySink* absl_nonnull sink, + absl::string_view text); private: std::ostream& os_; @@ -343,6 +352,47 @@ template std::enable_if_t::value, StringifyToStreamWrapper> Detect(...); // Ellipsis has lowest preference when int passed. + +// This overload triggers when T is neither possible to print nor an enum. +template +std::enable_if_t, std::is_enum, + std::is_pointer, std::is_same, + HasOstreamOperator, HasAbslStringify>>, + UnprintableWrapper> +Detect(...); + +// Equivalent to the updated std::underlying_type from C++20, which is no +// longer undefined behavior for non-enum types. +template +struct UnderlyingType {}; + +template +struct UnderlyingType>> { + using type = std::underlying_type_t; +}; +template +using UnderlyingTypeT = typename UnderlyingType::type; + +// This overload triggers when T is a scoped enum that has not defined an output +// stream operator (operator<<) or AbslStringify. It causes the enum value to be +// converted to a type that can be streamed. For consistency with other enums, a +// scoped enum backed by a bool or char is converted to its underlying type, and +// one backed by another integer is converted to (u)int64_t. 
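Concretely, with the conversion described in the comment above, a failing check on a plain scoped enum reports underlying values instead of nothing. A small example (`Color` is illustrative; the behavior mirrors the `ScopedEnum` death tests added to check_test_impl.inc earlier in this diff):

#include "absl/log/check.h"

enum class Color { kRed = 1, kBlue = 2 };  // underlying type int, printed via int64_t

void Example() {
  // Aborts with a message like:
  //   Check failed: Color::kRed == Color::kBlue (1 vs. 2)
  CHECK_EQ(Color::kRed, Color::kBlue);
}

A `bool`- or character-backed scoped enum keeps its underlying type instead, which is why the `int8_t`-backed tests above expect output such as "signed char value 1 vs. 'B'".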
+template +std::enable_if_t< + std::conjunction_v, + std::negation>, + std::negation>, + std::negation>>, + std::conditional_t, bool> || + std::is_same_v, char> || + std::is_same_v, signed char> || + std::is_same_v, unsigned char>, + UnderlyingTypeT, + std::conditional_t>, + int64_t, uint64_t>>> +Detect(...); } // namespace detect_specialization template @@ -356,10 +406,16 @@ ABSL_ATTRIBUTE_RETURNS_NONNULL const char* absl_nonnull MakeCheckOpString( template const char* absl_nonnull MakeCheckOpString(T1 v1, T2 v2, const char* absl_nonnull exprtext) { - CheckOpMessageBuilder comb(exprtext); - MakeCheckOpValueString(comb.ForVar1(), v1); - MakeCheckOpValueString(comb.ForVar2(), v2); - return comb.NewString(); + if constexpr (std::is_same_v, UnprintableWrapper> && + std::is_same_v, UnprintableWrapper>) { + // No sense printing " (UNPRINTABLE vs. UNPRINTABLE)" + return exprtext; + } else { + CheckOpMessageBuilder comb(exprtext); + MakeCheckOpValueString(comb.ForVar1(), v1); + MakeCheckOpValueString(comb.ForVar2(), v2); + return comb.NewString(); + } } // Add a few commonly used instantiations as extern to reduce size of objects @@ -376,10 +432,12 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(char); ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(unsigned char); ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const std::string&); ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const absl::string_view&); -ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char*); -ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const signed char*); -ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const unsigned char*); -ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*); +ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const char* absl_nonnull); +ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN( + const signed char* absl_nonnull); +ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN( + const unsigned char* absl_nonnull); +ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void* absl_nonnull); #undef ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN // `ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT` skips formatting the Check_OP result @@ -388,7 +446,7 @@ ABSL_LOG_INTERNAL_DEFINE_MAKE_CHECK_OP_STRING_EXTERN(const void*); #ifdef ABSL_MIN_LOG_LEVEL #define ABSL_LOG_INTERNAL_CHECK_OP_IMPL_RESULT(U1, U2, v1, v2, exprtext) \ ((::absl::LogSeverity::kFatal >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL)) \ ? 
MakeCheckOpString(v1, v2, exprtext) \ : "") #else diff --git a/absl/log/internal/conditions.h b/absl/log/internal/conditions.h index 6fb74b142bc..3325a318b17 100644 --- a/absl/log/internal/conditions.h +++ b/absl/log/internal/conditions.h @@ -108,46 +108,49 @@ #ifdef ABSL_MIN_LOG_LEVEL #define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \ ABSL_LOG_INTERNAL_##type##_CONDITION( \ - (condition) && ::absl::LogSeverity::kInfo >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) + (condition) && \ + ::absl::LogSeverity::kInfo >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL)) #define ABSL_LOG_INTERNAL_CONDITION_WARNING(type, condition) \ ABSL_LOG_INTERNAL_##type##_CONDITION( \ - (condition) && ::absl::LogSeverity::kWarning >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) + (condition) && \ + ::absl::LogSeverity::kWarning >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL)) #define ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) \ ABSL_LOG_INTERNAL_##type##_CONDITION( \ - (condition) && ::absl::LogSeverity::kError >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL)) + (condition) && \ + ::absl::LogSeverity::kError >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL)) #define ABSL_LOG_INTERNAL_CONDITION_DO_NOT_SUBMIT(type, condition) \ ABSL_LOG_INTERNAL_CONDITION_ERROR(type, condition) // NOTE: Use ternary operators instead of short-circuiting to mitigate // https://bugs.llvm.org/show_bug.cgi?id=51928. #define ABSL_LOG_INTERNAL_CONDITION_FATAL(type, condition) \ ABSL_LOG_INTERNAL_##type##_CONDITION( \ - ((condition) \ - ? (::absl::LogSeverity::kFatal >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ - ? true \ - : (::absl::log_internal::AbortQuietly(), false)) \ - : false)) + ((condition) ? (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverityAtLeast>( \ + ABSL_MIN_LOG_LEVEL) \ + ? true \ + : (::absl::log_internal::AbortQuietly(), false)) \ + : false)) // NOTE: Use ternary operators instead of short-circuiting to mitigate // https://bugs.llvm.org/show_bug.cgi?id=51928. -#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \ - ABSL_LOG_INTERNAL_##type##_CONDITION( \ - ((condition) \ - ? (::absl::LogSeverity::kFatal >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) \ - ? true \ - : (::absl::log_internal::ExitQuietly(), false)) \ - : false)) -#define ABSL_LOG_INTERNAL_CONDITION_DFATAL(type, condition) \ - ABSL_LOG_INTERNAL_##type##_CONDITION( \ - (ABSL_ASSUME(absl::kLogDebugFatal == absl::LogSeverity::kError || \ - absl::kLogDebugFatal == absl::LogSeverity::kFatal), \ - (condition) && \ - (::absl::kLogDebugFatal >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) || \ - (::absl::kLogDebugFatal == ::absl::LogSeverity::kFatal && \ +#define ABSL_LOG_INTERNAL_CONDITION_QFATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + ((condition) ? (::absl::LogSeverity::kFatal >= \ + static_cast<::absl::LogSeverityAtLeast>( \ + ABSL_MIN_LOG_LEVEL) \ + ? 
true \ + : (::absl::log_internal::ExitQuietly(), false)) \ + : false)) +#define ABSL_LOG_INTERNAL_CONDITION_DFATAL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + (ABSL_ASSUME(absl::kLogDebugFatal == absl::LogSeverity::kError || \ + absl::kLogDebugFatal == absl::LogSeverity::kFatal), \ + (condition) && \ + (::absl::kLogDebugFatal >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL) || \ + (::absl::kLogDebugFatal == ::absl::LogSeverity::kFatal && \ (::absl::log_internal::AbortQuietly(), false))))) #define ABSL_LOG_INTERNAL_CONDITION_LEVEL(severity) \ @@ -157,13 +160,13 @@ ::absl::NormalizeLogSeverity(severity); \ absl_log_internal_severity_loop; absl_log_internal_severity_loop = 0) \ ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL -#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition) \ - ABSL_LOG_INTERNAL_##type##_CONDITION(( \ - (condition) && \ - (absl_log_internal_severity >= \ - static_cast<::absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) || \ - (absl_log_internal_severity == ::absl::LogSeverity::kFatal && \ - (::absl::log_internal::AbortQuietly(), false))))) +#define ABSL_LOG_INTERNAL_CONDITION_LEVEL_IMPL(type, condition) \ + ABSL_LOG_INTERNAL_##type##_CONDITION( \ + ((condition) && \ + (absl_log_internal_severity >= \ + static_cast<::absl::LogSeverityAtLeast>(ABSL_MIN_LOG_LEVEL) || \ + (absl_log_internal_severity == ::absl::LogSeverity::kFatal && \ + (::absl::log_internal::AbortQuietly(), false))))) #else // ndef ABSL_MIN_LOG_LEVEL #define ABSL_LOG_INTERNAL_CONDITION_INFO(type, condition) \ ABSL_LOG_INTERNAL_##type##_CONDITION(condition) diff --git a/absl/log/internal/log_message.cc b/absl/log/internal/log_message.cc index aaaaf0357b1..3aed3a2fdfd 100644 --- a/absl/log/internal/log_message.cc +++ b/absl/log/internal/log_message.cc @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "absl/base/attributes.h" @@ -47,12 +48,14 @@ #include "absl/log/internal/globals.h" #include "absl/log/internal/log_format.h" #include "absl/log/internal/log_sink_set.h" +#include "absl/log/internal/nullguard.h" #include "absl/log/internal/proto.h" #include "absl/log/internal/structured_proto.h" #include "absl/log/log_entry.h" #include "absl/log/log_sink.h" #include "absl/log/log_sink_registry.h" #include "absl/memory/memory.h" +#include "absl/strings/internal/utf8.h" #include "absl/strings/string_view.h" #include "absl/time/clock.h" #include "absl/time/time.h" @@ -403,6 +406,34 @@ LogMessage& LogMessage::operator<<(absl::string_view v) { CopyToEncodedBuffer(v); return *this; } + +LogMessage& LogMessage::operator<<(const std::wstring& v) { + CopyToEncodedBuffer(v); + return *this; +} + +LogMessage& LogMessage::operator<<(std::wstring_view v) { + CopyToEncodedBuffer(v); + return *this; +} + +template <> +LogMessage& LogMessage::operator<< ( + const wchar_t* absl_nullable const& v) { + if (v == nullptr) { + CopyToEncodedBuffer( + absl::string_view(kCharNull.data(), kCharNull.size() - 1)); + } else { + CopyToEncodedBuffer(v); + } + return *this; +} + +LogMessage& LogMessage::operator<<(wchar_t v) { + CopyToEncodedBuffer(std::wstring_view(&v, 1)); + return *this; +} + LogMessage& LogMessage::operator<<(std::ostream& (*m)(std::ostream& os)) { OstreamView view(*data_); data_->manipulated << m; @@ -625,6 +656,37 @@ template void LogMessage::CopyToEncodedBuffer( template void LogMessage::CopyToEncodedBuffer< LogMessage::StringType::kNotLiteral>(char ch, size_t num); +template +void LogMessage::CopyToEncodedBuffer(std::wstring_view str) { + auto encoded_remaining_copy = 
data_->encoded_remaining(); + constexpr uint8_t tag_value = str_type == StringType::kLiteral + ? ValueTag::kStringLiteral + : ValueTag::kString; + size_t max_str_byte_length = + absl::strings_internal::kMaxEncodedUTF8Size * str.length(); + auto value_start = + EncodeMessageStart(EventTag::kValue, + BufferSizeFor(tag_value, WireType::kLengthDelimited) + + max_str_byte_length, + &encoded_remaining_copy); + auto str_start = EncodeMessageStart(tag_value, max_str_byte_length, + &encoded_remaining_copy); + if (str_start.data()) { + log_internal::AppendTruncated(str, encoded_remaining_copy); + EncodeMessageLength(str_start, &encoded_remaining_copy); + EncodeMessageLength(value_start, &encoded_remaining_copy); + data_->encoded_remaining() = encoded_remaining_copy; + } else { + // The field header(s) did not fit; zero `encoded_remaining()` so we don't + // write anything else later. + data_->encoded_remaining().remove_suffix(data_->encoded_remaining().size()); + } +} +template void LogMessage::CopyToEncodedBuffer( + std::wstring_view str); +template void LogMessage::CopyToEncodedBuffer< + LogMessage::StringType::kNotLiteral>(std::wstring_view str); + template void LogMessage::CopyToEncodedBufferWithStructuredProtoField< LogMessage::StringType::kLiteral>(StructuredProtoField field, absl::string_view str); @@ -682,17 +744,13 @@ LogMessageFatal::LogMessageFatal(const char* absl_nonnull file, int line, *this << "Check failed: " << failure_msg << " "; } -LogMessageFatal::~LogMessageFatal() { - FailWithoutStackTrace(); -} +LogMessageFatal::~LogMessageFatal() { FailWithoutStackTrace(); } LogMessageDebugFatal::LogMessageDebugFatal(const char* absl_nonnull file, int line) : LogMessage(file, line, absl::LogSeverity::kFatal) {} -LogMessageDebugFatal::~LogMessageDebugFatal() { - FailWithoutStackTrace(); -} +LogMessageDebugFatal::~LogMessageDebugFatal() { FailWithoutStackTrace(); } LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal( const char* absl_nonnull file, int line) @@ -700,9 +758,7 @@ LogMessageQuietlyDebugFatal::LogMessageQuietlyDebugFatal( SetFailQuietly(); } -LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() { - FailQuietly(); -} +LogMessageQuietlyDebugFatal::~LogMessageQuietlyDebugFatal() { FailQuietly(); } LogMessageQuietlyFatal::LogMessageQuietlyFatal(const char* absl_nonnull file, int line) @@ -717,9 +773,7 @@ LogMessageQuietlyFatal::LogMessageQuietlyFatal( *this << "Check failed: " << failure_msg << " "; } -LogMessageQuietlyFatal::~LogMessageQuietlyFatal() { - FailQuietly(); -} +LogMessageQuietlyFatal::~LogMessageQuietlyFatal() { FailQuietly(); } #if defined(_MSC_VER) && !defined(__clang__) #pragma warning(pop) #endif diff --git a/absl/log/internal/log_message.h b/absl/log/internal/log_message.h index e7eff47b406..1aaf05e31f4 100644 --- a/absl/log/internal/log_message.h +++ b/absl/log/internal/log_message.h @@ -27,12 +27,15 @@ #ifndef ABSL_LOG_INTERNAL_LOG_MESSAGE_H_ #define ABSL_LOG_INTERNAL_LOG_MESSAGE_H_ +#include + #include #include #include #include #include #include +#include #include #include "absl/base/attributes.h" @@ -158,6 +161,13 @@ class LogMessage { LogMessage& operator<<(const std::string& v); LogMessage& operator<<(absl::string_view v); + // Wide string overloads (since std::ostream does not provide them). + LogMessage& operator<<(const std::wstring& v); + LogMessage& operator<<(std::wstring_view v); + // `const wchar_t*` is handled by `operator<< `. 
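Taken together, these overloads let wide strings stream straight into the logging macros, with UTF-8 transcoding performed by the `CopyToEncodedBuffer<std::wstring_view>` instantiations defined in log_message.cc above. An illustrative caller, assuming this patched Abseil:

#include <string>

#include "absl/log/log.h"

void LogWide(const std::wstring& name) {
  LOG(INFO) << L"user: " << name;  // both pieces are logged as UTF-8 text
  LOG(INFO) << L'!';               // a single wchar_t works too
}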
+ LogMessage& operator<<(wchar_t* absl_nullable v); + LogMessage& operator<<(wchar_t v); + // Handle stream manipulators e.g. std::endl. LogMessage& operator<<(std::ostream& (*absl_nonnull m)(std::ostream& os)); LogMessage& operator<<(std::ios_base& (*absl_nonnull m)(std::ios_base& os)); @@ -169,17 +179,20 @@ class LogMessage { // this template for every value of `SIZE` encountered in each source code // file. That significantly increases linker input sizes. Inlining is cheap // because the argument to this overload is almost always a string literal so - // the call to `strlen` can be replaced at compile time. The overload for - // `char[]` below should not be inlined. The compiler typically does not have - // the string at compile time and cannot replace the call to `strlen` so - // inlining it increases the binary size. See the discussion on + // the call to `strlen` can be replaced at compile time. The overloads for + // `char[]`/`wchar_t[]` below should not be inlined. The compiler typically + // does not have the string at compile time and cannot replace the call to + // `strlen` so inlining it increases the binary size. See the discussion on // cl/107527369. template <int SIZE> LogMessage& operator<<(const char (&buf)[SIZE]); + template <int SIZE> + LogMessage& operator<<(const wchar_t (&buf)[SIZE]); // This prevents non-const `char[]` arrays from looking like literals. template <int SIZE> LogMessage& operator<<(char (&buf)[SIZE]) ABSL_ATTRIBUTE_NOINLINE; + // `wchar_t[SIZE]` is handled by `operator<< <const wchar_t*>`. // Types that support `AbslStringify()` are serialized that way. // Types that don't support `AbslStringify()` but do support streaming into a @@ -243,6 +256,8 @@ class LogMessage { void CopyToEncodedBuffer(absl::string_view str) ABSL_ATTRIBUTE_NOINLINE; template <StringType str_type> void CopyToEncodedBuffer(char ch, size_t num) ABSL_ATTRIBUTE_NOINLINE; + template <StringType str_type> + void CopyToEncodedBuffer(std::wstring_view str) ABSL_ATTRIBUTE_NOINLINE; // Copies `field` to the encoded buffer, then appends `str` after it // (truncating `str` if necessary to fit). @@ -273,6 +288,22 @@ class LogMessage { absl_nonnull std::unique_ptr<LogMessageData> data_; }; +// Explicitly specializes the generic operator<< for `const wchar_t*` +// arguments. +// +// This method is used instead of a non-template `const wchar_t*` overload, +// as the latter was found to take precedence over the array template +// (`operator<<(const wchar_t(&)[SIZE])`) when handling string literals. +// This specialization ensures the array template now correctly processes +// literals. +template <> +LogMessage& LogMessage::operator<< <const wchar_t*>( + const wchar_t* absl_nullable const& v); + +inline LogMessage& LogMessage::operator<<(wchar_t* absl_nullable v) { + return operator<<(const_cast<const wchar_t*>(v)); +} + // Helper class so that `AbslStringify()` can modify the LogMessage.
class StringifySink final { public: @@ -317,6 +348,12 @@ LogMessage& LogMessage::operator<<(const char (&buf)[SIZE]) { return *this; } +template <int SIZE> +LogMessage& LogMessage::operator<<(const wchar_t (&buf)[SIZE]) { + CopyToEncodedBuffer<StringType::kLiteral>(buf); + return *this; +} + // Note: the following is declared `ABSL_ATTRIBUTE_NOINLINE` template <int SIZE> LogMessage& LogMessage::operator<<(char (&buf)[SIZE]) { @@ -358,6 +395,10 @@ LogMessage::CopyToEncodedBuffer(char ch, size_t num); extern template void LogMessage::CopyToEncodedBuffer< LogMessage::StringType::kNotLiteral>(char ch, size_t num); +extern template void LogMessage::CopyToEncodedBuffer< + LogMessage::StringType::kLiteral>(std::wstring_view str); +extern template void LogMessage::CopyToEncodedBuffer< + LogMessage::StringType::kNotLiteral>(std::wstring_view str); // `LogMessageFatal` ensures the process will exit in failure after logging this // message. diff --git a/absl/log/internal/log_sink_set.cc b/absl/log/internal/log_sink_set.cc index 3d5c69952c2..c4c7e5ff8d0 100644 --- a/absl/log/internal/log_sink_set.cc +++ b/absl/log/internal/log_sink_set.cc @@ -192,7 +192,7 @@ class GlobalLogSinkSet final { absl::log_internal::WriteToStderr( entry.text_message_with_prefix_and_newline(), entry.log_severity()); } else { - absl::ReaderMutexLock global_sinks_lock(&guard_); + absl::ReaderMutexLock global_sinks_lock(guard_); ThreadIsLoggingStatus() = true; // Ensure the "thread is logging" status is reverted upon leaving the // scope even in case of exceptions. @@ -205,7 +205,7 @@ void AddLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) { { - absl::WriterMutexLock global_sinks_lock(&guard_); + absl::WriterMutexLock global_sinks_lock(guard_); auto pos = std::find(sinks_.begin(), sinks_.end(), sink); if (pos == sinks_.end()) { sinks_.push_back(sink); @@ -217,7 +217,7 @@ void RemoveLogSink(absl::LogSink* sink) ABSL_LOCKS_EXCLUDED(guard_) { { - absl::WriterMutexLock global_sinks_lock(&guard_); + absl::WriterMutexLock global_sinks_lock(guard_); auto pos = std::find(sinks_.begin(), sinks_.end(), sink); if (pos != sinks_.end()) { sinks_.erase(pos); @@ -235,7 +235,7 @@ guard_.AssertReaderHeld(); FlushLogSinksLocked(); } else { - absl::ReaderMutexLock global_sinks_lock(&guard_); + absl::ReaderMutexLock global_sinks_lock(guard_); // In case the LogSink::Flush overload decides to log ThreadIsLoggingStatus() = true; // Ensure the "thread is logging" status is reverted upon leaving the diff --git a/absl/log/internal/vlog_config.cc b/absl/log/internal/vlog_config.cc index f7c61bed52a..f70069f5f04 100644 --- a/absl/log/internal/vlog_config.cc +++ b/absl/log/internal/vlog_config.cc @@ -90,16 +90,16 @@ struct VModuleInfo final { // To avoid problems with the heap checker which calls into `VLOG`, `mutex` must // be a `SpinLock` that prevents fiber scheduling instead of a `Mutex`. ABSL_CONST_INIT absl::base_internal::SpinLock mutex( - absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY); + absl::base_internal::SCHEDULE_KERNEL_ONLY); // `GetUpdateSitesMutex()` serializes updates to all of the sites (i.e. those in // `site_list_head`) themselves. -absl::Mutex* GetUpdateSitesMutex() { +absl::Mutex& GetUpdateSitesMutex() { // Chromium requires no global destructors, so we can't use the // absl::kConstInit idiom since absl::Mutex has a non-trivial destructor.
static absl::NoDestructor<absl::Mutex> update_sites_mutex ABSL_ACQUIRED_AFTER(mutex); - return update_sites_mutex.get(); + return *update_sites_mutex; } ABSL_CONST_INIT int global_v ABSL_GUARDED_BY(mutex) = 0; @@ -222,7 +222,7 @@ int PrependVModuleLocked(absl::string_view module_pattern, int log_level) } // namespace int VLogLevel(absl::string_view file) ABSL_LOCKS_EXCLUDED(mutex) { - absl::base_internal::SpinLockHolder l(&mutex); + absl::base_internal::SpinLockHolder l(mutex); return VLogLevel(file, vmodule_info, global_v); } @@ -267,7 +267,7 @@ void UpdateVLogSites() ABSL_UNLOCK_FUNCTION(mutex) // have to wait on all updates in order to acquire `mutex` and initialize // themselves. absl::MutexLock ul(GetUpdateSitesMutex()); - mutex.Unlock(); + mutex.unlock(); VLogSite* n = site_list_head.load(std::memory_order_seq_cst); // Because sites are added to the list in the order they are executed, there // tend to be clusters of entries with the same file. @@ -299,7 +299,7 @@ void UpdateVModule(absl::string_view vmodule) if (!absl::SimpleAtoi(glob_level.substr(eq + 1), &level)) continue; glob_levels.emplace_back(glob, level); } - mutex.Lock(); // Unlocked by UpdateVLogSites(). + mutex.lock(); // unlocked by UpdateVLogSites(). get_vmodule_info().clear(); for (const auto& it : glob_levels) { const absl::string_view glob = it.first; @@ -311,10 +311,10 @@ int UpdateGlobalVLogLevel(int v) ABSL_LOCKS_EXCLUDED(mutex, GetUpdateSitesMutex()) { - mutex.Lock(); // Unlocked by UpdateVLogSites(). + mutex.lock(); // unlocked by UpdateVLogSites(). const int old_global_v = global_v; if (v == global_v) { - mutex.Unlock(); + mutex.unlock(); return old_global_v; } global_v = v; @@ -324,7 +324,7 @@ int PrependVModule(absl::string_view module_pattern, int log_level) ABSL_LOCKS_EXCLUDED(mutex, GetUpdateSitesMutex()) { - mutex.Lock(); // Unlocked by UpdateVLogSites(). + mutex.lock(); // unlocked by UpdateVLogSites(). 
int old_v = PrependVModuleLocked(module_pattern, log_level); UpdateVLogSites(); return old_v; diff --git a/absl/log/log_basic_test_impl.inc b/absl/log/log_basic_test_impl.inc index c4b4e2429e2..3f007dc9cb3 100644 --- a/absl/log/log_basic_test_impl.inc +++ b/absl/log/log_basic_test_impl.inc @@ -94,6 +94,7 @@ TEST_P(BasicLogTest, Info) { absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { ABSL_TEST_LOG(INFO) << "hello world"; }; @@ -125,6 +126,7 @@ TEST_P(BasicLogTest, Warning) { absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { ABSL_TEST_LOG(WARNING) << "hello world"; }; @@ -156,6 +158,7 @@ TEST_P(BasicLogTest, Error) { absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { ABSL_TEST_LOG(ERROR) << "hello world"; }; @@ -187,6 +190,7 @@ TEST_P(BasicLogTest, DoNotSubmit) { absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { ABSL_TEST_LOG(DO_NOT_SUBMIT) << "hello world"; }; @@ -233,6 +237,7 @@ TEST_P(BasicLogDeathTest, Fatal) { { absl::ScopedMockLog test_sink( absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -299,6 +304,7 @@ TEST_P(BasicLogDeathTest, QFatal) { { absl::ScopedMockLog test_sink( absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -336,6 +342,7 @@ TEST_P(BasicLogTest, DFatal) { absl::log_internal::ScopedMinLogLevel scoped_min_log_level(GetParam()); absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { ABSL_TEST_LOG(DFATAL) << "hello world"; }; @@ -375,6 +382,7 @@ TEST_P(BasicLogDeathTest, DFatal) { { absl::ScopedMockLog test_sink( absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -456,6 +464,7 @@ TEST_P(BasicLogTest, Level) { for (auto severity : {absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, absl::LogSeverity::kError}) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 2; auto do_log = [severity] { @@ -506,6 +515,7 @@ TEST_P(BasicLogDeathTest, Level) { { absl::ScopedMockLog test_sink( absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -567,6 +577,7 @@ TEST_P(BasicLogTest, LevelClampsNegativeValues) { } absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kInfo)))); @@ -583,6 +594,7 @@ TEST_P(BasicLogTest, LevelClampsLargeValues) { } absl::ScopedMockLog 
test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(LogSeverity(Eq(absl::LogSeverity::kError)))); diff --git a/absl/log/log_entry.cc b/absl/log/log_entry.cc new file mode 100644 index 00000000000..358b8f5a7ce --- /dev/null +++ b/absl/log/log_entry.cc @@ -0,0 +1,263 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/log/log_entry.h" + +#include &lt;cstddef&gt; +#include &lt;cstdint&gt; +#include &lt;iomanip&gt; +#include &lt;ios&gt; +#include &lt;ostream&gt; + +#include "absl/base/config.h" +#include "absl/log/internal/proto.h" +#include "absl/strings/escaping.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace { +// message `logging.proto.Event` +enum EventTag : uint8_t { + kFileName = 2, + kFileLine = 3, + kTimeNsecs = 4, + kSeverity = 5, + kThreadId = 6, + kValue = 7, + kSequenceNumber = 9, + kThreadName = 10, +}; + +// message `logging.proto.Value` +enum ValueTag : uint8_t { + kString = 1, + kStringLiteral = 6, +}; + +// enum `logging.proto.Severity` +enum Severity : int { + FINEST = 300, + FINER = 400, + FINE = 500, + VERBOSE_0 = 600, + CONFIG = 700, + INFO = 800, + NOTICE = 850, + WARNING = 900, + ERROR = 950, + SEVERE = 1000, + FATAL = 1100, +}; + +void PrintEscapedRangeTo(const absl::string_view str, + const absl::string_view substr, std::ostream* os) { + const absl::string_view head = + str.substr(0, static_cast&lt;size_t&gt;(substr.data() - str.data())); + const char old_fill = os-&gt;fill(); + const auto old_flags = os-&gt;flags(); + *os &lt;&lt; std::right + &lt;&lt; std::setw(static_cast&lt;int&gt;(absl::CHexEscape(head).size())) &lt;&lt; ""; + switch (substr.size()) { + case 0: + *os &lt;&lt; "\\"; + break; + case 1: + *os &lt;&lt; "^"; + break; + default: + *os &lt;&lt; "[" &lt;&lt; std::setw(static_cast&lt;int&gt;(absl::CHexEscape(substr).size())) + &lt;&lt; std::setfill('-') &lt;&lt; ")"; + break; + } + os-&gt;fill(old_fill); + os-&gt;flags(old_flags); +} +} // namespace +void PrintTo(const LogEntry& entry, std::ostream* os) { + auto text_message_with_prefix_and_newline_and_nul = absl::string_view( + entry.text_message_with_prefix_and_newline_and_nul_.data(), + entry.text_message_with_prefix_and_newline_and_nul_.size()); + *os &lt;&lt; "LogEntry {\n" + &lt;&lt; " source_filename: \"" &lt;&lt; absl::CHexEscape(entry.source_filename()) + &lt;&lt; "\"\n" + &lt;&lt; " source_basename: \"" &lt;&lt; absl::CHexEscape(entry.source_basename()) + &lt;&lt; "\"\n" + &lt;&lt; " source_line: " &lt;&lt; entry.source_line() &lt;&lt; "\n" + &lt;&lt; " prefix: " &lt;&lt; std::boolalpha &lt;&lt; entry.prefix() &lt;&lt; "\n" + &lt;&lt; " log_severity: " &lt;&lt; entry.log_severity() &lt;&lt; "\n" + &lt;&lt; " verbosity: " &lt;&lt; entry.verbosity(); + if (entry.verbosity() == absl::LogEntry::kNoVerbosityLevel) { + *os &lt;&lt; " (kNoVerbosityLevel)"; + } + *os &lt;&lt; "\n" + &lt;&lt; " timestamp: " &lt;&lt; entry.timestamp() &lt;&lt; "\n" + &lt;&lt; " tid: " &lt;&lt; entry.tid() &lt;&lt; "\n" + &lt;&lt; " text_message_with_prefix_and_newline_and_nul_: \"" + &lt;&lt; absl::CHexEscape(text_message_with_prefix_and_newline_and_nul) + &lt;&lt; "\"\n" + &lt;&lt; " text_message_with_prefix_and_newline: ";
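+  // Each PrintEscapedRangeTo call below draws a marker line aligned under the
+  // escaped string printed above: "^" marks a one-character range, "[---...)"
+  // spans a longer substring, and "\" marks an empty range (see the helper's
+  // switch on substr.size() earlier in this file).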
PrintEscapedRangeTo(text_message_with_prefix_and_newline_and_nul, + entry.text_message_with_prefix_and_newline(), os); + *os &lt;&lt; "\n" + &lt;&lt; " text_message_with_prefix: "; + PrintEscapedRangeTo(text_message_with_prefix_and_newline_and_nul, + entry.text_message_with_prefix(), os); + *os &lt;&lt; "\n" + &lt;&lt; " text_message_with_newline: "; + PrintEscapedRangeTo(text_message_with_prefix_and_newline_and_nul, + entry.text_message_with_newline(), os); + *os &lt;&lt; "\n" + &lt;&lt; " text_message: "; + PrintEscapedRangeTo(text_message_with_prefix_and_newline_and_nul, + entry.text_message(), os); + *os &lt;&lt; "\n" + &lt;&lt; " text_message_with_prefix_and_newline_c_str: "; + PrintEscapedRangeTo( + text_message_with_prefix_and_newline_and_nul, + // NOLINTNEXTLINE(bugprone-string-constructor) + absl::string_view(entry.text_message_with_prefix_and_newline_c_str(), 0), + os); + *os &lt;&lt; "\n" + &lt;&lt; " encoded_message (raw): \"" + &lt;&lt; absl::CHexEscape(entry.encoded_message()) &lt;&lt; "\"\n" + &lt;&lt; " encoded_message {\n"; + absl::Span&lt;const char&gt; event = entry.encoded_message(); + log_internal::ProtoField field; + while (field.DecodeFrom(&event)) { + switch (field.tag()) { + case EventTag::kFileName: + *os &lt;&lt; " file_name: \"" &lt;&lt; absl::CHexEscape(field.string_value()) + &lt;&lt; "\"\n"; + break; + case EventTag::kFileLine: + *os &lt;&lt; " file_line: " &lt;&lt; field.int32_value() &lt;&lt; "\n"; + break; + case EventTag::kTimeNsecs: + *os &lt;&lt; " time_nsecs: " &lt;&lt; field.int64_value() &lt;&lt; " (" + &lt;&lt; absl::FromUnixNanos(field.int64_value()) &lt;&lt; ")\n"; + break; + case EventTag::kSeverity: + *os &lt;&lt; " severity: " &lt;&lt; field.int32_value(); + switch (field.int32_value()) { + case Severity::FINEST: + *os &lt;&lt; " (FINEST)"; + break; + case Severity::FINER: + *os &lt;&lt; " (FINER)"; + break; + case Severity::FINE: + *os &lt;&lt; " (FINE)"; + break; + case Severity::VERBOSE_0: + *os &lt;&lt; " (VERBOSE_0)"; + break; + case Severity::CONFIG: + *os &lt;&lt; " (CONFIG)"; + break; + case Severity::INFO: + *os &lt;&lt; " (INFO)"; + break; + case Severity::NOTICE: + *os &lt;&lt; " (NOTICE)"; + break; + case Severity::WARNING: + *os &lt;&lt; " (WARNING)"; + break; + case Severity::ERROR: + *os &lt;&lt; " (ERROR)"; + break; + case Severity::SEVERE: + *os &lt;&lt; " (SEVERE)"; + break; + case Severity::FATAL: + *os &lt;&lt; " (FATAL)"; + break; + } + *os &lt;&lt; "\n"; + break; + case EventTag::kThreadId: + *os &lt;&lt; " thread_id: " &lt;&lt; field.int64_value() &lt;&lt; "\n"; + break; + case EventTag::kValue: { + *os &lt;&lt; " value {\n"; + auto value = field.bytes_value(); + while (field.DecodeFrom(&value)) { + switch (field.tag()) { + case ValueTag::kString: + *os &lt;&lt; " str: \"" &lt;&lt; absl::CHexEscape(field.string_value()) + &lt;&lt; "\"\n"; + break; + case ValueTag::kStringLiteral: + *os &lt;&lt; " literal: \"" + &lt;&lt; absl::CHexEscape(field.string_value()) &lt;&lt; "\"\n"; + break; + default: + *os &lt;&lt; " unknown field " &lt;&lt; field.tag(); + switch (field.type()) { + case log_internal::WireType::kVarint: + *os &lt;&lt; " (VARINT): " &lt;&lt; std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint64_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + case log_internal::WireType::k64Bit: + *os &lt;&lt; " (I64): " &lt;&lt; std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint64_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + case log_internal::WireType::kLengthDelimited: + *os &lt;&lt; " (LEN): \"" &lt;&lt; absl::CHexEscape(field.string_value()) + &lt;&lt; "\"\n"; + break; + case log_internal::WireType::k32Bit: + *os &lt;&lt; " (I32): " &lt;&lt; std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint32_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + } + break; + } + } + *os &lt;&lt; " }\n"; + break; + } + default: + *os &lt;&lt; " unknown field " &lt;&lt; field.tag(); + switch (field.type()) { + case log_internal::WireType::kVarint: + *os &lt;&lt; " (VARINT): " &lt;&lt;
std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint64_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + case log_internal::WireType::k64Bit: + *os &lt;&lt; " (I64): " &lt;&lt; std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint64_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + case log_internal::WireType::kLengthDelimited: + *os &lt;&lt; " (LEN): \"" &lt;&lt; absl::CHexEscape(field.string_value()) + &lt;&lt; "\"\n"; + break; + case log_internal::WireType::k32Bit: + *os &lt;&lt; " (I32): " &lt;&lt; std::hex &lt;&lt; std::showbase + &lt;&lt; field.uint32_value() &lt;&lt; std::dec &lt;&lt; "\n"; + break; + } + break; + } + } + *os &lt;&lt; " }\n" + &lt;&lt; " stacktrace: \"" &lt;&lt; absl::CHexEscape(entry.stacktrace()) &lt;&lt; "\"\n" + &lt;&lt; "}"; +} + +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/log/log_entry.h b/absl/log/log_entry.h index 7a55dfe29e3..c56685629cb 100644 --- a/absl/log/log_entry.h +++ b/absl/log/log_entry.h @@ -25,6 +25,7 @@ #define ABSL_LOG_LOG_ENTRY_H_ #include &lt;cstddef&gt; +#include &lt;ostream&gt; #include &lt;string&gt; #include "absl/base/attributes.h" @@ -213,6 +214,7 @@ class LogEntry final { friend class log_internal::LogEntryTestPeer; friend class log_internal::LogMessage; + friend void PrintTo(const absl::LogEntry& entry, std::ostream* os); }; ABSL_NAMESPACE_END diff --git a/absl/log/log_format_test.cc b/absl/log/log_format_test.cc index ecd69683009..9f1cc6b0e37 100644 --- a/absl/log/log_format_test.cc +++ b/absl/log/log_format_test.cc @@ -15,12 +15,14 @@ #include +#include #include #include #include #include #include #include +#include #include #ifdef __ANDROID__ @@ -28,6 +30,7 @@ #endif #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "absl/base/config.h" #include "absl/log/check.h" #include "absl/log/internal/test_matchers.h" #include "absl/log/log.h" @@ -44,6 +47,7 @@ using ::absl::log_internal::MatchesOstream; using ::absl::log_internal::RawEncodedMessage; using ::absl::log_internal::TextMessage; using ::absl::log_internal::TextPrefix; +using ::testing::_; using ::testing::AllOf; using ::testing::AnyOf; using ::testing::Each; @@ -69,6 +73,7 @@ std::ostringstream ComparisonStream() { TEST(LogFormatTest, NoMessage) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int log_line = __LINE__ + 1; auto do_log = [] { LOG(INFO); }; @@ -91,6 +96,7 @@ TYPED_TEST_SUITE(CharLogFormatTest, CharTypes); TYPED_TEST(CharLogFormatTest, Printable) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = 'x'; auto comparison_stream = ComparisonStream(); @@ -108,6 +114,7 @@ TYPED_TEST(CharLogFormatTest, Printable) { TYPED_TEST(CharLogFormatTest, Unprintable) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); constexpr auto value = static_cast&lt;TypeParam&gt;(0xeeu); auto comparison_stream = ComparisonStream(); @@ -124,6 +131,35 @@ TYPED_TEST(CharLogFormatTest, Unprintable) { LOG(INFO) &lt;&lt; value; } +TEST(WideCharLogFormatTest, Printable) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("€")), + ENCODED_MESSAGE(HasValues( + ElementsAre(ValueWithStr(Eq("€")))))))); + + test_sink.StartCapturingLogs(); + const wchar_t value = L'\u20AC'; + LOG(INFO) &lt;&lt; value; +} + +TEST(WideCharLogFormatTest, Unprintable) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + // Using NEL (Next Line) Unicode character (U+0085).
+ // It is encoded as "\xC2\x85" in UTF-8. + constexpr wchar_t wide_value = L'\u0085'; + constexpr char value[] = "\xC2\x85"; + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) &lt;&lt; wide_value; +} + template &lt;typename T&gt; class UnsignedIntLogFormatTest : public testing::Test {}; using UnsignedIntTypes = Types(224); auto comparison_stream = ComparisonStream(); @@ -292,6 +335,7 @@ TYPED_TEST(UnsignedEnumLogFormatTest, Positive) { TYPED_TEST(UnsignedEnumLogFormatTest, BitfieldPositive) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const struct { TypeParam bits : 6; @@ -334,6 +378,7 @@ TYPED_TEST_SUITE(SignedEnumLogFormatTest, SignedEnumTypes); TYPED_TEST(SignedEnumLogFormatTest, Positive) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = static_cast&lt;TypeParam&gt;(224); auto comparison_stream = ComparisonStream(); @@ -352,6 +397,7 @@ TYPED_TEST(SignedEnumLogFormatTest, Positive) { TYPED_TEST(SignedEnumLogFormatTest, Negative) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = static_cast&lt;TypeParam&gt;(-112); auto comparison_stream = ComparisonStream(); @@ -370,6 +416,7 @@ TYPED_TEST(SignedEnumLogFormatTest, Negative) { TYPED_TEST(SignedEnumLogFormatTest, BitfieldPositive) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const struct { TypeParam bits : 6; @@ -389,6 +436,7 @@ TYPED_TEST(SignedEnumLogFormatTest, BitfieldPositive) { TYPED_TEST(SignedEnumLogFormatTest, BitfieldNegative) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const struct { TypeParam bits : 6; @@ -410,6 +458,7 @@ TYPED_TEST(SignedEnumLogFormatTest, BitfieldNegative) { TEST(FloatLogFormatTest, Positive) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const float value = 6.02e23f; auto comparison_stream = ComparisonStream(); @@ -427,6 +476,7 @@ TEST(FloatLogFormatTest, Positive) { TEST(FloatLogFormatTest, Negative) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const float value = -6.02e23f; auto comparison_stream = ComparisonStream(); @@ -444,6 +494,7 @@ TEST(FloatLogFormatTest, Negative) { TEST(FloatLogFormatTest, NegativeExponent) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const float value = 6.02e-23f; auto comparison_stream = ComparisonStream(); @@ -461,6 +512,7 @@ TEST(FloatLogFormatTest, NegativeExponent) { TEST(DoubleLogFormatTest, Positive) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 6.02e23; auto comparison_stream = ComparisonStream(); @@ -478,6 +530,7 @@ TEST(DoubleLogFormatTest, Positive) { TEST(DoubleLogFormatTest, Negative) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = -6.02e23; auto comparison_stream = ComparisonStream(); @@ -495,6 +548,7 @@ TEST(DoubleLogFormatTest, Negative) { TEST(DoubleLogFormatTest, NegativeExponent) { absl::ScopedMockLog
test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 6.02e-23; auto comparison_stream = ComparisonStream(); @@ -517,6 +571,7 @@ TYPED_TEST_SUITE(FloatingPointLogFormatTest, FloatingPointTypes); TYPED_TEST(FloatingPointLogFormatTest, Zero) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = 0.0; auto comparison_stream = ComparisonStream(); @@ -534,6 +589,7 @@ TYPED_TEST(FloatingPointLogFormatTest, Zero) { TYPED_TEST(FloatingPointLogFormatTest, Integer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = 1.0; auto comparison_stream = ComparisonStream(); @@ -551,6 +607,7 @@ TYPED_TEST(FloatingPointLogFormatTest, Integer) { TYPED_TEST(FloatingPointLogFormatTest, Infinity) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = std::numeric_limits::infinity(); auto comparison_stream = ComparisonStream(); @@ -569,6 +626,7 @@ TYPED_TEST(FloatingPointLogFormatTest, Infinity) { TYPED_TEST(FloatingPointLogFormatTest, NegativeInfinity) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = -std::numeric_limits::infinity(); auto comparison_stream = ComparisonStream(); @@ -587,6 +645,7 @@ TYPED_TEST(FloatingPointLogFormatTest, NegativeInfinity) { TYPED_TEST(FloatingPointLogFormatTest, NaN) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = std::numeric_limits::quiet_NaN(); auto comparison_stream = ComparisonStream(); @@ -604,6 +663,7 @@ TYPED_TEST(FloatingPointLogFormatTest, NaN) { TYPED_TEST(FloatingPointLogFormatTest, NegativeNaN) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = std::copysign(std::numeric_limits::quiet_NaN(), -1.0); @@ -635,11 +695,12 @@ TYPED_TEST(FloatingPointLogFormatTest, NegativeNaN) { template class VoidPtrLogFormatTest : public testing::Test {}; -using VoidPtrTypes = Types; +using VoidPtrTypes = Types; TYPED_TEST_SUITE(VoidPtrLogFormatTest, VoidPtrTypes); TYPED_TEST(VoidPtrLogFormatTest, Null) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = nullptr; auto comparison_stream = ComparisonStream(); @@ -657,6 +718,7 @@ TYPED_TEST(VoidPtrLogFormatTest, Null) { TYPED_TEST(VoidPtrLogFormatTest, NonNull) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = reinterpret_cast(0xdeadbeefULL); auto comparison_stream = ComparisonStream(); @@ -676,15 +738,15 @@ TYPED_TEST(VoidPtrLogFormatTest, NonNull) { template class VolatilePtrLogFormatTest : public testing::Test {}; -using VolatilePtrTypes = - Types; +using VolatilePtrTypes = Types< + volatile void*, const volatile void*, volatile char*, const volatile char*, + volatile signed char*, const volatile signed char*, volatile unsigned char*, + const volatile unsigned char*, volatile wchar_t*, const volatile wchar_t*>; TYPED_TEST_SUITE(VolatilePtrLogFormatTest, VolatilePtrTypes); TYPED_TEST(VolatilePtrLogFormatTest, Null) { absl::ScopedMockLog 
test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = nullptr; auto comparison_stream = ComparisonStream(); @@ -712,6 +774,7 @@ TYPED_TEST(VolatilePtrLogFormatTest, Null) { TYPED_TEST(VolatilePtrLogFormatTest, NonNull) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const TypeParam value = reinterpret_cast(0xdeadbeefLL); auto comparison_stream = ComparisonStream(); @@ -747,6 +810,7 @@ TYPED_TEST_SUITE(CharPtrLogFormatTest, CharPtrTypes); TYPED_TEST(CharPtrLogFormatTest, Null) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); // Streaming `([cv] char *)nullptr` into a `std::ostream` is UB, and some C++ // standard library implementations choose to crash. We take measures to log @@ -767,6 +831,7 @@ TYPED_TEST(CharPtrLogFormatTest, Null) { TYPED_TEST(CharPtrLogFormatTest, NonNull) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); TypeParam data[] = {'v', 'a', 'l', 'u', 'e', '\0'}; TypeParam* const value = data; @@ -784,8 +849,43 @@ TYPED_TEST(CharPtrLogFormatTest, NonNull) { LOG(INFO) << value; } +template +class WideCharPtrLogFormatTest : public testing::Test {}; +using WideCharPtrTypes = Types; +TYPED_TEST_SUITE(WideCharPtrLogFormatTest, WideCharPtrTypes); + +TYPED_TEST(WideCharPtrLogFormatTest, Null) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam* const value = nullptr; + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("(null)")), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq("(null)")))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideCharPtrLogFormatTest, NonNull) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam data[] = {'v', 'a', 'l', 'u', 'e', '\0'}; + TypeParam* const value = data; + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("value")), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq("value")))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + TEST(BoolLogFormatTest, True) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const bool value = true; auto comparison_stream = ComparisonStream(); @@ -804,6 +904,7 @@ TEST(BoolLogFormatTest, True) { TEST(BoolLogFormatTest, False) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const bool value = false; auto comparison_stream = ComparisonStream(); @@ -822,6 +923,7 @@ TEST(BoolLogFormatTest, False) { TEST(LogFormatTest, StringLiteral) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); auto comparison_stream = ComparisonStream(); comparison_stream << "value"; @@ -836,8 +938,21 @@ TEST(LogFormatTest, StringLiteral) { LOG(INFO) << "value"; } +TEST(LogFormatTest, WideStringLiteral) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("value")), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithLiteral(Eq("value")))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << L"value"; +} + TEST(LogFormatTest, 
CharArray) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); char value[] = "value"; auto comparison_stream = ComparisonStream(); @@ -854,6 +969,203 @@ TEST(LogFormatTest, CharArray) { LOG(INFO) &lt;&lt; value; } +TEST(LogFormatTest, WideCharArray) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + wchar_t value[] = L"value"; + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("value")), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq("value")))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) &lt;&lt; value; +} + +// Comprehensive test string for validating wchar_t to UTF-8 conversion. +// See details in absl/strings/internal/utf8_test.cc. +// +// clang-format off +#define ABSL_LOG_INTERNAL_WIDE_LITERAL L"Holá €1 你好 שָׁלוֹם 👍🏻🇺🇸👩‍❤️‍💋‍👨 中" +#define ABSL_LOG_INTERNAL_UTF8_LITERAL u8"Holá €1 你好 שָׁלוֹם 👍🏻🇺🇸👩‍❤️‍💋‍👨 中" +// clang-format on + +absl::string_view GetUtf8TestString() { + // `u8""` forces UTF-8 encoding; MSVC will default to e.g. CP1252 (and warn) + // without it. However, the resulting character type differs between pre-C++20 + // (`char`) and C++20 (`char8_t`). So we reinterpret_cast to `char*` and wrap + // it in a `string_view`. + static const absl::string_view kUtf8TestString( + reinterpret_cast&lt;const char*&gt;(ABSL_LOG_INTERNAL_UTF8_LITERAL), + sizeof(ABSL_LOG_INTERNAL_UTF8_LITERAL) - 1); + return kUtf8TestString; +} + +template &lt;typename T&gt; +class WideStringLogFormatTest : public testing::Test {}; +using StringTypes = + Types; +TYPED_TEST_SUITE(WideStringLogFormatTest, StringTypes); + +TYPED_TEST(WideStringLogFormatTest, NonLiterals) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = ABSL_LOG_INTERNAL_WIDE_LITERAL; + absl::string_view utf8_value = GetUtf8TestString(); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) &lt;&lt; value; +} + +TEST(WideStringLogFormatTest, StringView) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + std::wstring_view value = ABSL_LOG_INTERNAL_WIDE_LITERAL; + absl::string_view utf8_value = GetUtf8TestString(); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) &lt;&lt; value; +} + +TEST(WideStringLogFormatTest, Literal) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + absl::string_view utf8_value = GetUtf8TestString(); + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithLiteral(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) &lt;&lt; ABSL_LOG_INTERNAL_WIDE_LITERAL; +} + +#undef ABSL_LOG_INTERNAL_WIDE_LITERAL +#undef ABSL_LOG_INTERNAL_UTF8_LITERAL + +TYPED_TEST(WideStringLogFormatTest, IsolatedLowSurrogatesAreReplaced) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L"AAA \xDC00 BBB"; + // NOLINTNEXTLINE(readability/utf8) + absl::string_view utf8_value = "AAA � BBB"; + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)),
ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideStringLogFormatTest, + DISABLED_IsolatedHighSurrogatesAreReplaced) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L"AAA \xD800 BBB"; + // NOLINTNEXTLINE(readability/utf8) + absl::string_view utf8_value = "AAA � BBB"; + // Currently, this is "AAA \xF0\x90 BBB". + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideStringLogFormatTest, + DISABLED_ConsecutiveHighSurrogatesAreReplaced) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L"AAA \xD800\xD800 BBB"; + // NOLINTNEXTLINE(readability/utf8) + absl::string_view utf8_value = "AAA �� BBB"; + // Currently, this is "AAA \xF0\x90\xF0\x90 BBB". + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideStringLogFormatTest, + DISABLED_HighHighLowSurrogateSequencesAreReplaced) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L"AAA \xD800\xD800\xDC00 BBB"; + // NOLINTNEXTLINE(readability/utf8) + absl::string_view utf8_value = "AAA �𐀀 BBB"; + // Currently, this is "AAA \xF0\x90𐀀 BBB". + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideStringLogFormatTest, + DISABLED_TrailingHighSurrogatesAreReplaced) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L"AAA \xD800"; + // NOLINTNEXTLINE(readability/utf8) + absl::string_view utf8_value = "AAA �"; + // Currently, this is "AAA \xF0\x90". 
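+  // (A plausible reading of the bytes above, inferred from the output rather
+  // than stated by this patch: the encoder buffers a high surrogate while
+  // waiting for its low-surrogate partner and, at end of input, flushes the
+  // partial UTF-8 lead bytes -- "\xF0\x90" -- instead of emitting U+FFFD;
+  // hence these surrogate tests remain DISABLED.)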
+ + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq(utf8_value)), + ENCODED_MESSAGE(HasValues(ElementsAre( + ValueWithStr(Eq(utf8_value)))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TYPED_TEST(WideStringLogFormatTest, EmptyWideString) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + TypeParam value = L""; + + EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("")), + ENCODED_MESSAGE(HasValues( + ElementsAre(ValueWithStr(Eq("")))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << value; +} + +TEST(WideStringLogFormatTest, MixedNarrowAndWideStrings) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + + EXPECT_CALL(test_sink, Log(_, _, "1234")); + + test_sink.StartCapturingLogs(); + LOG(INFO) << "1" << L"2" << "3" << L"4"; +} + class CustomClass {}; std::ostream& operator<<(std::ostream& os, const CustomClass&) { return os << "CustomClass{}"; @@ -861,6 +1173,7 @@ std::ostream& operator<<(std::ostream& os, const CustomClass&) { TEST(LogFormatTest, Custom) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); CustomClass value; auto comparison_stream = ComparisonStream(); @@ -887,6 +1200,7 @@ std::ostream& operator<<(std::ostream& os, const CustomClassNonCopyable&) { TEST(LogFormatTest, CustomNonCopyable) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); CustomClassNonCopyable value; auto comparison_stream = ComparisonStream(); @@ -914,6 +1228,7 @@ struct Point { TEST(LogFormatTest, AbslStringifyExample) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); Point p; @@ -945,6 +1260,7 @@ ABSL_ATTRIBUTE_UNUSED std::ostream& operator<<( TEST(LogFormatTest, CustomWithAbslStringifyAndOstream) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); PointWithAbslStringifiyAndOstream p; @@ -968,6 +1284,7 @@ struct PointStreamsNothing { TEST(LogFormatTest, AbslStringifyStreamsNothing) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); PointStreamsNothing p; @@ -994,6 +1311,7 @@ struct PointMultipleAppend { TEST(LogFormatTest, AbslStringifyMultipleAppend) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); PointMultipleAppend p; @@ -1009,6 +1327,7 @@ TEST(LogFormatTest, AbslStringifyMultipleAppend) { TEST(ManipulatorLogFormatTest, BoolAlphaTrue) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const bool value = true; auto comparison_stream = ComparisonStream(); @@ -1033,6 +1352,7 @@ TEST(ManipulatorLogFormatTest, BoolAlphaTrue) { TEST(ManipulatorLogFormatTest, BoolAlphaFalse) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const bool value = false; auto comparison_stream = ComparisonStream(); @@ -1057,6 +1377,7 @@ TEST(ManipulatorLogFormatTest, BoolAlphaFalse) { TEST(ManipulatorLogFormatTest, ShowPoint) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 77.0; auto comparison_stream = ComparisonStream(); @@ -1081,6 +1402,7 @@ TEST(ManipulatorLogFormatTest, ShowPoint) { 
TEST(ManipulatorLogFormatTest, ShowPos) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 77; auto comparison_stream = ComparisonStream(); @@ -1104,6 +1426,7 @@ TEST(ManipulatorLogFormatTest, ShowPos) { TEST(ManipulatorLogFormatTest, UppercaseFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1128,6 +1451,7 @@ TEST(ManipulatorLogFormatTest, UppercaseFloat) { TEST(ManipulatorLogFormatTest, Hex) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 0x77; auto comparison_stream = ComparisonStream(); @@ -1145,6 +1469,7 @@ TEST(ManipulatorLogFormatTest, Hex) { TEST(ManipulatorLogFormatTest, Oct) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 077; auto comparison_stream = ComparisonStream(); @@ -1163,6 +1488,7 @@ TEST(ManipulatorLogFormatTest, Oct) { TEST(ManipulatorLogFormatTest, Dec) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 77; auto comparison_stream = ComparisonStream(); @@ -1180,6 +1506,7 @@ TEST(ManipulatorLogFormatTest, Dec) { TEST(ManipulatorLogFormatTest, ShowbaseHex) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 0x77; auto comparison_stream = ComparisonStream(); @@ -1206,6 +1533,7 @@ TEST(ManipulatorLogFormatTest, ShowbaseHex) { TEST(ManipulatorLogFormatTest, ShowbaseOct) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 077; auto comparison_stream = ComparisonStream(); @@ -1231,6 +1559,7 @@ TEST(ManipulatorLogFormatTest, ShowbaseOct) { TEST(ManipulatorLogFormatTest, UppercaseHex) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 0xbeef; auto comparison_stream = ComparisonStream(); @@ -1258,6 +1587,7 @@ TEST(ManipulatorLogFormatTest, UppercaseHex) { TEST(ManipulatorLogFormatTest, FixedFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1275,6 +1605,7 @@ TEST(ManipulatorLogFormatTest, FixedFloat) { TEST(ManipulatorLogFormatTest, ScientificFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1298,6 +1629,7 @@ TEST(ManipulatorLogFormatTest, ScientificFloat) { #else TEST(ManipulatorLogFormatTest, FixedAndScientificFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1331,6 +1663,7 @@ TEST(ManipulatorLogFormatTest, FixedAndScientificFloat) { #else TEST(ManipulatorLogFormatTest, HexfloatFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1352,6 +1685,7 @@ 
TEST(ManipulatorLogFormatTest, HexfloatFloat) { TEST(ManipulatorLogFormatTest, DefaultFloatFloat) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 7.7e7; auto comparison_stream = ComparisonStream(); @@ -1369,6 +1703,7 @@ TEST(ManipulatorLogFormatTest, DefaultFloatFloat) { TEST(ManipulatorLogFormatTest, Ends) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); auto comparison_stream = ComparisonStream(); comparison_stream << std::ends; @@ -1385,6 +1720,7 @@ TEST(ManipulatorLogFormatTest, Ends) { TEST(ManipulatorLogFormatTest, Endl) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); auto comparison_stream = ComparisonStream(); comparison_stream << std::endl; @@ -1402,6 +1738,7 @@ TEST(ManipulatorLogFormatTest, Endl) { TEST(ManipulatorLogFormatTest, SetIosFlags) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 0x77; auto comparison_stream = ComparisonStream(); @@ -1431,6 +1768,7 @@ TEST(ManipulatorLogFormatTest, SetIosFlags) { TEST(ManipulatorLogFormatTest, SetBase) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 0x77; auto comparison_stream = ComparisonStream(); @@ -1455,6 +1793,7 @@ TEST(ManipulatorLogFormatTest, SetBase) { TEST(ManipulatorLogFormatTest, SetPrecision) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 6.022140857e23; auto comparison_stream = ComparisonStream(); @@ -1476,6 +1815,7 @@ TEST(ManipulatorLogFormatTest, SetPrecision) { TEST(ManipulatorLogFormatTest, SetPrecisionOverflow) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const double value = 6.022140857e23; auto comparison_stream = ComparisonStream(); @@ -1493,6 +1833,7 @@ TEST(ManipulatorLogFormatTest, SetPrecisionOverflow) { TEST(ManipulatorLogFormatTest, SetW) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 77; auto comparison_stream = ComparisonStream(); @@ -1514,6 +1855,7 @@ TEST(ManipulatorLogFormatTest, SetW) { TEST(ManipulatorLogFormatTest, Left) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = -77; auto comparison_stream = ComparisonStream(); @@ -1531,6 +1873,7 @@ TEST(ManipulatorLogFormatTest, Left) { TEST(ManipulatorLogFormatTest, Right) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = -77; auto comparison_stream = ComparisonStream(); @@ -1548,6 +1891,7 @@ TEST(ManipulatorLogFormatTest, Right) { TEST(ManipulatorLogFormatTest, Internal) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = -77; auto comparison_stream = ComparisonStream(); @@ -1565,6 +1909,7 @@ TEST(ManipulatorLogFormatTest, Internal) { TEST(ManipulatorLogFormatTest, SetFill) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); const int value = 77; auto comparison_stream = ComparisonStream(); @@ -1591,6 
+1936,7 @@ std::ostream& operator<<(std::ostream& os, const FromCustomClass&) { TEST(ManipulatorLogFormatTest, FromCustom) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); FromCustomClass value; auto comparison_stream = ComparisonStream(); @@ -1613,6 +1959,7 @@ std::ostream& operator<<(std::ostream& os, const StreamsNothing&) { return os; } TEST(ManipulatorLogFormatTest, CustomClassStreamsNothing) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); StreamsNothing value; auto comparison_stream = ComparisonStream(); @@ -1640,6 +1987,7 @@ struct PointPercentV { TEST(ManipulatorLogFormatTest, IOManipsDoNotAffectAbslStringify) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); PointPercentV p; @@ -1655,6 +2003,7 @@ TEST(ManipulatorLogFormatTest, IOManipsDoNotAffectAbslStringify) { TEST(StructuredLoggingOverflowTest, TruncatesStrings) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); // This message is too long and should be truncated to some unspecified size // no greater than the buffer size but not too much less either. It should be @@ -1675,6 +2024,30 @@ TEST(StructuredLoggingOverflowTest, TruncatesStrings) { LOG(INFO) << std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x'); } +TEST(StructuredLoggingOverflowTest, TruncatesWideStrings) { + absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); + + // This message is too long and should be truncated to some unspecified size + // no greater than the buffer size but not too much less either. It should be + // truncated rather than discarded. + EXPECT_CALL( + test_sink, + Send(AllOf( + TextMessage(AllOf( + SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256), + Le(absl::log_internal::kLogMessageBufferSize))), + Each(Eq('x')))), + ENCODED_MESSAGE(HasOneStrThat(AllOf( + SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256), + Le(absl::log_internal::kLogMessageBufferSize))), + Each(Eq('x')))))))); + + test_sink.StartCapturingLogs(); + LOG(INFO) << std::wstring(2 * absl::log_internal::kLogMessageBufferSize, + L'x'); +} + struct StringLike { absl::string_view data; }; @@ -1684,6 +2057,7 @@ std::ostream& operator<<(std::ostream& os, StringLike str) { TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperators) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); // This message is too long and should be truncated to some unspecified size // no greater than the buffer size but not too much less either. It should be @@ -1735,6 +2109,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { // sizes. To put any data in the field we need a fifth byte. 
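// (Illustration of that arithmetic, inferred from the tags defined in
// log_entry.cc rather than stated here: with one-byte tags and one-byte
// lengths, a logged string costs
//   1 (value tag) + 1 (value length) + 1 (str tag) + 1 (str length)
// bytes of overhead, so the first byte of payload lands in the fifth byte.)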
{ absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit), Each(Eq('x'))))), @@ -1745,6 +2120,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))), @@ -1755,6 +2131,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))), @@ -1765,6 +2142,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))), @@ -1775,6 +2153,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat( AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))), @@ -1787,6 +2166,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat( @@ -1804,6 +2184,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { // sizes. To put any data in the field we need a fifth byte. 
{ absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit), Each(Eq('x'))))), @@ -1814,6 +2195,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))), @@ -1825,6 +2207,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))), @@ -1836,6 +2219,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))), @@ -1847,6 +2231,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ENCODED_MESSAGE(HasOneStrThat( AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))))), @@ -1860,6 +2245,7 @@ TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) { } { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, Send(AllOf(ENCODED_MESSAGE(HasTwoStrsThat( diff --git a/absl/log/log_modifier_methods_test.cc b/absl/log/log_modifier_methods_test.cc index 4cee0c03c2f..7893557ef9f 100644 --- a/absl/log/log_modifier_methods_test.cc +++ b/absl/log/log_modifier_methods_test.cc @@ -60,6 +60,7 @@ using ::testing::Truly; TEST(TailCallsModifiesTest, AtLocationFileLine) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -89,6 +90,7 @@ TEST(TailCallsModifiesTest, AtLocationFileLineLifetime) { TEST(TailCallsModifiesTest, NoPrefix) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()), TextMessageWithPrefix(Eq("hello world"))))); @@ -99,6 +101,7 @@ TEST(TailCallsModifiesTest, NoPrefix) { TEST(TailCallsModifiesTest, NoPrefixNoMessageNoShirtNoShoesNoService) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(Prefix(IsFalse()), TextPrefix(IsEmpty()), @@ -110,6 +113,7 @@ TEST(TailCallsModifiesTest, NoPrefixNoMessageNoShirtNoShoesNoService) { TEST(TailCallsModifiesTest, WithVerbosity) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(Verbosity(Eq(2)))); @@ -119,6 +123,7 @@ TEST(TailCallsModifiesTest, WithVerbosity) { TEST(TailCallsModifiesTest, WithVerbosityNoVerbosity) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, 
Send(Verbosity(Eq(absl::LogEntry::kNoVerbosityLevel)))); @@ -130,6 +135,7 @@ TEST(TailCallsModifiesTest, WithVerbosityNoVerbosity) { TEST(TailCallsModifiesTest, WithTimestamp) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(Timestamp(Eq(absl::UnixEpoch())))); @@ -139,6 +145,7 @@ TEST(TailCallsModifiesTest, WithTimestamp) { TEST(TailCallsModifiesTest, WithThreadID) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(ThreadID(Eq(absl::LogEntry::tid_t{1234}))))); @@ -157,6 +164,7 @@ TEST(TailCallsModifiesTest, WithMetadataFrom) { } forwarding_sink; absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -185,6 +193,7 @@ TEST(TailCallsModifiesTest, WithMetadataFrom) { TEST(TailCallsModifiesTest, WithPerror) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -211,6 +220,7 @@ TEST(ModifierMethodDeathTest, ToSinkOnlyQFatal) { { absl::ScopedMockLog test_sink( absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); auto do_log = [&test_sink] { LOG(QFATAL).ToSinkOnly(&test_sink.UseAsLocalSink()) << "hello world"; diff --git a/absl/log/log_streamer_test.cc b/absl/log/log_streamer_test.cc index 4fe88e9e61f..f226fef507e 100644 --- a/absl/log/log_streamer_test.cc +++ b/absl/log/log_streamer_test.cc @@ -66,6 +66,7 @@ void WriteToStreamRef(absl::string_view data, std::ostream& os) { TEST(LogStreamerTest, LogInfoStreamer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -87,6 +88,7 @@ TEST(LogStreamerTest, LogInfoStreamer) { TEST(LogStreamerTest, LogWarningStreamer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -109,6 +111,7 @@ TEST(LogStreamerTest, LogWarningStreamer) { TEST(LogStreamerTest, LogErrorStreamer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -133,6 +136,7 @@ TEST(LogStreamerDeathTest, LogFatalStreamer) { EXPECT_EXIT( { absl::ScopedMockLog test_sink; + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -164,6 +168,7 @@ TEST(LogStreamerDeathTest, LogFatalStreamer) { #ifdef NDEBUG TEST(LogStreamerTest, LogDebugFatalStreamer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -188,6 +193,7 @@ TEST(LogStreamerDeathTest, LogDebugFatalStreamer) { EXPECT_EXIT( { absl::ScopedMockLog test_sink; + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -218,6 +224,7 @@ TEST(LogStreamerDeathTest, LogDebugFatalStreamer) { TEST(LogStreamerTest, LogStreamer) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -244,6 +251,7 @@ TEST(LogStreamerDeathTest, LogStreamer) { EXPECT_EXIT( { absl::ScopedMockLog test_sink; + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) @@ -275,6 +283,7 @@ TEST(LogStreamerDeathTest, LogStreamer) { 
TEST(LogStreamerTest, PassedByReference) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -291,6 +300,7 @@ TEST(LogStreamerTest, PassedByReference) { TEST(LogStreamerTest, StoredAsLocal) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); auto streamer = absl::LogInfoStreamer("path/file.cc", 1234); WriteToStream("foo", &streamer.stream()); @@ -328,6 +338,7 @@ TEST(LogStreamerDeathTest, StoredAsLocal) { TEST(LogStreamerTest, LogsEmptyLine) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL(test_sink, Send(AllOf(SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)), TextMessage(Eq("")), @@ -345,8 +356,7 @@ TEST(LogStreamerDeathTest, LogsEmptyLine) { EXPECT_EXIT( { absl::ScopedMockLog test_sink; - - EXPECT_CALL(test_sink, Log) + EXPECT_CALL(test_sink, Send) .Times(AnyNumber()) .WillRepeatedly(DeathTestUnexpectedLogging()); @@ -368,6 +378,7 @@ TEST(LogStreamerDeathTest, LogsEmptyLine) { TEST(LogStreamerTest, MoveConstruction) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); EXPECT_CALL( test_sink, @@ -389,6 +400,7 @@ TEST(LogStreamerTest, MoveConstruction) { TEST(LogStreamerTest, MoveAssignment) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); testing::InSequence seq; EXPECT_CALL( @@ -423,6 +435,7 @@ TEST(LogStreamerTest, MoveAssignment) { TEST(LogStreamerTest, CorrectDefaultFlags) { absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected); + EXPECT_CALL(test_sink, Send).Times(0); // The `boolalpha` and `showbase` flags should be set by default, to match // `LOG`. diff --git a/absl/log/scoped_mock_log.h b/absl/log/scoped_mock_log.h index a3830667415..970087374e3 100644 --- a/absl/log/scoped_mock_log.h +++ b/absl/log/scoped_mock_log.h @@ -160,7 +160,13 @@ class ScopedMockLog final { // from the log message text, log message path and log message severity. // // If no expectations are specified for this mock, the default action is to - // forward the call to the `Log` mock. + // forward the call to the `Log` mock. Tests using `Send` are advised to call + // + // `EXPECT_CALL(sink, Send).Times(0);` + // + // prior to specifying other expectations to suppress forwarding to `Log`. + // That way, unexpected calls show up as calls to `Send` with complete data + // and metadata for easier debugging. MOCK_METHOD(void, Send, (const absl::LogEntry&)); // Implements the mock method: diff --git a/absl/log/structured_test.cc b/absl/log/structured_test.cc index 9fe0756f94d..cde81999a68 100644 --- a/absl/log/structured_test.cc +++ b/absl/log/structured_test.cc @@ -50,6 +50,7 @@ TEST(StreamingFormatTest, LogAsLiteral) { stream << LoggingDefaults << absl::LogAsLiteral(not_a_literal); absl::ScopedMockLog sink; + EXPECT_CALL(sink, Send).Times(0); EXPECT_CALL(sink, Send(AllOf(TextMessage(MatchesOstream(stream)), TextMessage(Eq("hello world")), diff --git a/absl/memory/BUILD.bazel b/absl/memory/BUILD.bazel index d50a502e70b..81e12facae3 100644 --- a/absl/memory/BUILD.bazel +++ b/absl/memory/BUILD.bazel @@ -14,6 +14,8 @@ # limitations under the License. 
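# Note: the load() lines added below are needed because, with Bazel modules
# and recent rules_cc, cc_library/cc_test are no longer implicitly available
# and must be loaded explicitly from @rules_cc.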
# +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/meta/BUILD.bazel b/absl/meta/BUILD.bazel index d01cb8a2171..f6efa42be35 100644 --- a/absl/meta/BUILD.bazel +++ b/absl/meta/BUILD.bazel @@ -14,6 +14,8 @@ # limitations under the License. # +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/meta/type_traits.h b/absl/meta/type_traits.h index 5e57a154f57..e2f4600fe2c 100644 --- a/absl/meta/type_traits.h +++ b/absl/meta/type_traits.h @@ -38,7 +38,9 @@ #include #include #include +#include #include +#include #include #include "absl/base/attributes.h" @@ -48,10 +50,6 @@ #include // NOLINT(build/c++20) #endif -#ifdef ABSL_HAVE_STD_STRING_VIEW -#include -#endif - // Defines the default alignment. `__STDCPP_DEFAULT_NEW_ALIGNMENT__` is a C++17 // feature. #if defined(__STDCPP_DEFAULT_NEW_ALIGNMENT__) @@ -327,11 +325,17 @@ using swap_internal::Swap; // absl::is_trivially_relocatable // +// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2024/p2786r11.html +// // Detects whether a type is known to be "trivially relocatable" -- meaning it // can be relocated from one place to another as if by memcpy/memmove. // This implies that its object representation doesn't depend on its address, // and also none of its special member functions do anything strange. // +// Note that when relocating the caller code should ensure that if the object is +// polymorphic, the dynamic type is of the most derived type. Padding bytes +// should not be copied. +// // This trait is conservative. If it's true then the type is definitely // trivially relocatable, but if it's false then the type may or may not be. For // example, std::vector is trivially relocatable on every known STL @@ -349,11 +353,7 @@ using swap_internal::Swap; // // Upstream documentation: // -// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__is_trivially_relocatable - -// If the compiler offers a builtin that tells us the answer, we can use that. -// This covers all of the cases in the fallback below, plus types that opt in -// using e.g. [[clang::trivial_abi]]. +// https://clang.llvm.org/docs/LanguageExtensions.html#:~:text=__builtin_is_cpp_trivially_relocatable // // Clang on Windows has the builtin, but it falsely claims types with a // user-provided destructor are trivial (http://b/275003464). So we opt out @@ -378,15 +378,22 @@ using swap_internal::Swap; // // According to https://github.com/abseil/abseil-cpp/issues/1479, this does not // work with NVCC either. -#if ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \ - (defined(__cpp_impl_trivially_relocatable) || \ - (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__))) +#if ABSL_HAVE_BUILTIN(__builtin_is_cpp_trivially_relocatable) +// https://github.com/llvm/llvm-project/pull/127636#pullrequestreview-2637005293 +// In the current implementation, __builtin_is_cpp_trivially_relocatable will +// only return true for types that are trivially relocatable according to the +// standard. Notably, this means that marking a type [[clang::trivial_abi]] aka +// ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI will have no effect on this trait. 
template &lt;class T&gt; struct is_trivially_relocatable - : std::integral_constant&lt;bool, __is_trivially_relocatable(T)&gt; {}; + : std::integral_constant&lt;bool, __builtin_is_cpp_trivially_relocatable(T)&gt; { +}; #elif ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && defined(__clang__) && \ !(defined(_WIN32) || defined(_WIN64)) && !defined(__APPLE__) && \ !defined(__NVCC__) +// https://github.com/llvm/llvm-project/pull/139061 +// __is_trivially_relocatable is deprecated. +// TODO(b/325479096): Remove this case. template &lt;class T&gt; struct is_trivially_relocatable : std::integral_constant&lt; @@ -445,7 +452,7 @@ namespace type_traits_internal { // Detects if a class's definition has declared itself to be an owner by // declaring -// using absl_internal_is_view = std::true_type; +// using absl_internal_is_view = std::false_type; // as a member. // Types that don't want either must either omit this declaration entirely, or // (if e.g. inheriting from a base class) define the member to something that @@ -473,6 +480,17 @@ struct IsOwnerImpl&lt; template &lt;typename T&gt; struct IsOwner : IsOwnerImpl&lt;T&gt; {}; +// This allows incomplete types to be used for associative containers, and also +// expands the set of types we can handle to include std::pair. +template &lt;typename T, typename U&gt; +struct IsOwner&lt;std::pair&lt;T, U&gt;&gt; + : std::integral_constant&lt; + bool, std::conditional_t, std::false_type, + IsOwner&gt;&gt;::value && + std::conditional_t, std::false_type, + IsOwner&gt;&gt;::value&gt; { +}; + template struct IsOwner&gt; : std::true_type {}; @@ -507,10 +525,15 @@ template struct IsView : std::integral_constant::value || IsViewImpl::value&gt; {}; -#ifdef ABSL_HAVE_STD_STRING_VIEW +// This allows incomplete types to be used for associative containers, and also +// expands the set of types we can handle to include std::pair. +template &lt;typename T, typename U&gt; +struct IsView&lt;std::pair&lt;T, U&gt;&gt; + : std::integral_constant&gt;::value && + IsView&gt;::value&gt; {}; + template struct IsView&gt; : std::true_type {}; -#endif #ifdef __cpp_lib_span template diff --git a/absl/meta/type_traits_test.cc b/absl/meta/type_traits_test.cc index bcf90d73877..81422903e0b 100644 --- a/absl/meta/type_traits_test.cc +++ b/absl/meta/type_traits_test.cc @@ -16,6 +16,7 @@ #include #include +#include &lt;string_view&gt; #include #include #include @@ -26,10 +27,6 @@ #include "absl/time/clock.h" #include "absl/time/time.h" -#ifdef ABSL_HAVE_STD_STRING_VIEW -#include &lt;string_view&gt; -#endif - namespace { using ::testing::StaticAssertTypeEq; @@ -39,18 +36,19 @@ using IsOwnerAndNotView = absl::conjunction&lt;absl::type_traits_internal::IsOwner&lt;T&gt;, absl::negation&lt;absl::type_traits_internal::IsView&lt;T&gt;&gt;&gt;; +static_assert( + IsOwnerAndNotView&lt;std::pair&lt;std::string, std::string&gt;&gt;::value, + "pair of owners is an owner, not a view"); static_assert(IsOwnerAndNotView&lt;std::vector&lt;int&gt;&gt;::value, "vector is an owner, not a view"); static_assert(IsOwnerAndNotView&lt;std::string&gt;::value, "string is an owner, not a view"); static_assert(IsOwnerAndNotView&lt;std::wstring&gt;::value, "wstring is an owner, not a view"); -#ifdef ABSL_HAVE_STD_STRING_VIEW static_assert(!IsOwnerAndNotView&lt;std::string_view&gt;::value, "string_view is a view, not an owner"); static_assert(!IsOwnerAndNotView&lt;std::wstring_view&gt;::value, "wstring_view is a view, not an owner"); -#endif template &lt;typename T, typename U&gt; struct simple_pair { @@ -338,51 +336,6 @@ TEST(TriviallyRelocatable, UserProvidedDestructor) { static_assert(!absl::is_trivially_relocatable&lt;S&gt;::value, ""); } -// TODO(b/275003464): remove the opt-out for Clang on Windows once -// __is_trivially_relocatable is used there again. -// TODO(b/324278148): remove the opt-out for Apple once -// __is_trivially_relocatable is fixed there. -// TODO(b/325479096): remove the opt-out for Clang once -// __is_trivially_relocatable is fixed there.
-#if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) && \ - ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && \ - (defined(__cpp_impl_trivially_relocatable) || \ - (!defined(__clang__) && !defined(__APPLE__) && !defined(__NVCC__))) -// A type marked with the "trivial ABI" attribute is trivially relocatable even -// if it has user-provided special members. -TEST(TriviallyRelocatable, TrivialAbi) { - struct ABSL_ATTRIBUTE_TRIVIAL_ABI S { - S(S&&) {} // NOLINT(modernize-use-equals-default) - S(const S&) {} // NOLINT(modernize-use-equals-default) - S& operator=(S&&) { return *this; } - S& operator=(const S&) { return *this; } - ~S() {} // NOLINT(modernize-use-equals-default) - }; - - static_assert(absl::is_trivially_relocatable::value, ""); -} -#endif - -// TODO(b/275003464): remove the opt-out for Clang on Windows once -// __is_trivially_relocatable is used there again. -// TODO(b/324278148): remove the opt-out for Apple once -// __is_trivially_relocatable is fixed there. -#if defined(ABSL_HAVE_ATTRIBUTE_TRIVIAL_ABI) && \ - ABSL_HAVE_BUILTIN(__is_trivially_relocatable) && defined(__clang__) && \ - !(defined(_WIN32) || defined(_WIN64)) && !defined(__APPLE__) && \ - !defined(__NVCC__) -// A type marked with the "trivial ABI" attribute is trivially relocatable even -// if it has a user-provided copy constructor and a user-provided destructor. -TEST(TriviallyRelocatable, TrivialAbi_NoUserProvidedMove) { - struct ABSL_ATTRIBUTE_TRIVIAL_ABI S { - S(const S&) {} // NOLINT(modernize-use-equals-default) - ~S() {} // NOLINT(modernize-use-equals-default) - }; - - static_assert(absl::is_trivially_relocatable::value, ""); -} -#endif - #ifdef ABSL_HAVE_CONSTANT_EVALUATED constexpr int64_t NegateIfConstantEvaluated(int64_t i) { diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel index f455d1e9173..edfe6b6fcee 100644 --- a/absl/numeric/BUILD.bazel +++ b/absl/numeric/BUILD.bazel @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/numeric/bits_test.cc b/absl/numeric/bits_test.cc index 3b71cccf88a..e2c64096658 100644 --- a/absl/numeric/bits_test.cc +++ b/absl/numeric/bits_test.cc @@ -26,16 +26,37 @@ namespace absl { ABSL_NAMESPACE_BEGIN namespace { +template +class UnsignedIntegerTypesTest : public ::testing::Test {}; template class IntegerTypesTest : public ::testing::Test {}; +using UnsignedIntegerTypes = + ::testing::Types; using OneByteIntegerTypes = ::testing::Types< unsigned char, uint8_t >; +TYPED_TEST_SUITE(UnsignedIntegerTypesTest, UnsignedIntegerTypes); TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes); +TYPED_TEST(UnsignedIntegerTypesTest, ReturnTypes) { + using UIntType = TypeParam; + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); +} + TYPED_TEST(IntegerTypesTest, HandlesTypes) { using UIntType = TypeParam; @@ -130,6 +151,9 @@ TEST(Rotate, Left) { EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -4), uint32_t{0x81234567UL}); EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -4), uint64_t{0x112345678ABCDEF0ULL}); + + EXPECT_EQ(rotl(uint32_t{1234}, std::numeric_limits::min()), + uint32_t{1234}); } TEST(Rotate, Right) { @@ -169,6 +193,9 @@ TEST(Rotate, Right) { EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -4), uint32_t{0x23456781UL}); EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -4), uint64_t{0x2345678ABCDEF011ULL}); + + EXPECT_EQ(rotl(uint32_t{1234}, std::numeric_limits::min()), + uint32_t{1234}); } TEST(Rotate, Symmetry) { diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h index ae736b28463..32603b04ead 100644 --- a/absl/numeric/int128.h +++ b/absl/numeric/int128.h @@ -164,9 +164,9 @@ class constexpr explicit operator __int128() const; constexpr explicit operator unsigned __int128() const; #endif // ABSL_HAVE_INTRINSIC_INT128 - explicit operator float() const; - explicit operator double() const; - explicit operator long double() const; + constexpr explicit operator float() const; + constexpr explicit operator double() const; + constexpr explicit operator long double() const; // Trivial copy constructor, assignment operator and destructor. 
@@ -357,14 +357,18 @@ class int128 { constexpr int128(unsigned long v); // NOLINT(runtime/int) constexpr int128(long long v); // NOLINT(runtime/int) constexpr int128(unsigned long long v); // NOLINT(runtime/int) + constexpr explicit int128(uint128 v); #ifdef ABSL_HAVE_INTRINSIC_INT128 constexpr int128(__int128 v); // NOLINT(runtime/explicit) constexpr explicit int128(unsigned __int128 v); -#endif // ABSL_HAVE_INTRINSIC_INT128 - constexpr explicit int128(uint128 v); + constexpr explicit int128(float v); + constexpr explicit int128(double v); + constexpr explicit int128(long double v); +#else explicit int128(float v); explicit int128(double v); explicit int128(long double v); +#endif // ABSL_HAVE_INTRINSIC_INT128 // Assignment operators from arithmetic types int128& operator=(int v); @@ -401,9 +405,9 @@ class int128 { constexpr explicit operator __int128() const; constexpr explicit operator unsigned __int128() const; #endif // ABSL_HAVE_INTRINSIC_INT128 - explicit operator float() const; - explicit operator double() const; - explicit operator long double() const; + constexpr explicit operator float() const; + constexpr explicit operator double() const; + constexpr explicit operator long double() const; // Trivial copy constructor, assignment operator and destructor. @@ -609,9 +613,15 @@ constexpr uint128 operator<<(uint128 lhs, int amount); constexpr uint128 operator>>(uint128 lhs, int amount); constexpr uint128 operator+(uint128 lhs, uint128 rhs); constexpr uint128 operator-(uint128 lhs, uint128 rhs); +#if defined(ABSL_HAVE_INTRINSIC_INT128) +constexpr uint128 operator*(uint128 lhs, uint128 rhs); +constexpr uint128 operator/(uint128 lhs, uint128 rhs); +constexpr uint128 operator%(uint128 lhs, uint128 rhs); +#else // ABSL_HAVE_INTRINSIC_INT128 uint128 operator*(uint128 lhs, uint128 rhs); uint128 operator/(uint128 lhs, uint128 rhs); uint128 operator%(uint128 lhs, uint128 rhs); +#endif // ABSL_HAVE_INTRINSIC_INT128 inline uint128& uint128::operator<<=(int amount) { *this = *this << amount; @@ -788,18 +798,18 @@ constexpr uint128::operator unsigned __int128() const { // Conversion operators to floating point types. -inline uint128::operator float() const { +constexpr uint128::operator float() const { // Note: This method might return Inf. constexpr float pow_2_64 = 18446744073709551616.0f; return static_cast(lo_) + static_cast(hi_) * pow_2_64; } -inline uint128::operator double() const { +constexpr uint128::operator double() const { constexpr double pow_2_64 = 18446744073709551616.0; return static_cast(lo_) + static_cast(hi_) * pow_2_64; } -inline uint128::operator long double() const { +constexpr uint128::operator long double() const { constexpr long double pow_2_64 = 18446744073709551616.0L; return static_cast(lo_) + static_cast(hi_) * pow_2_64; @@ -1021,19 +1031,15 @@ constexpr uint128 operator-(uint128 lhs, uint128 rhs) { #endif } +#if !defined(ABSL_HAVE_INTRINSIC_INT128) inline uint128 operator*(uint128 lhs, uint128 rhs) { -#if defined(ABSL_HAVE_INTRINSIC_INT128) - // TODO(strel) Remove once alignment issues are resolved and unsigned __int128 - // can be used for uint128 storage. 
- return static_cast(lhs) * - static_cast(rhs); -#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) +#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC) uint64_t carry; uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry); return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) + Uint128High64(lhs) * Uint128Low64(rhs) + carry, low); -#else // ABSL_HAVE_INTRINSIC128 +#else // _MSC_VER uint64_t a32 = Uint128Low64(lhs) >> 32; uint64_t a00 = Uint128Low64(lhs) & 0xffffffff; uint64_t b32 = Uint128Low64(rhs) >> 32; @@ -1045,16 +1051,24 @@ inline uint128 operator*(uint128 lhs, uint128 rhs) { result += uint128(a32 * b00) << 32; result += uint128(a00 * b32) << 32; return result; -#endif // ABSL_HAVE_INTRINSIC128 +#endif // _MSC_VER } +#endif // ABSL_HAVE_INTRINSIC_INT128 #if defined(ABSL_HAVE_INTRINSIC_INT128) -inline uint128 operator/(uint128 lhs, uint128 rhs) { +constexpr uint128 operator*(uint128 lhs, uint128 rhs) { + // TODO(strel) Remove once alignment issues are resolved and unsigned __int128 + // can be used for uint128 storage. + return static_cast(lhs) * + static_cast(rhs); +} + +constexpr uint128 operator/(uint128 lhs, uint128 rhs) { return static_cast(lhs) / static_cast(rhs); } -inline uint128 operator%(uint128 lhs, uint128 rhs) { +constexpr uint128 operator%(uint128 lhs, uint128 rhs) { return static_cast(lhs) % static_cast(rhs); } @@ -1112,9 +1126,15 @@ inline int128& int128::operator=(unsigned long long v) { constexpr int128 operator-(int128 v); constexpr int128 operator+(int128 lhs, int128 rhs); constexpr int128 operator-(int128 lhs, int128 rhs); +#if defined(ABSL_HAVE_INTRINSIC_INT128) +constexpr int128 operator*(int128 lhs, int128 rhs); +constexpr int128 operator/(int128 lhs, int128 rhs); +constexpr int128 operator%(int128 lhs, int128 rhs); +#else int128 operator*(int128 lhs, int128 rhs); int128 operator/(int128 lhs, int128 rhs); int128 operator%(int128 lhs, int128 rhs); +#endif // ABSL_HAVE_INTRINSIC_INT128 constexpr int128 operator|(int128 lhs, int128 rhs); constexpr int128 operator&(int128 lhs, int128 rhs); constexpr int128 operator^(int128 lhs, int128 rhs); diff --git a/absl/numeric/int128_have_intrinsic.inc b/absl/numeric/int128_have_intrinsic.inc index 216115a412b..dea1d213c9f 100644 --- a/absl/numeric/int128_have_intrinsic.inc +++ b/absl/numeric/int128_have_intrinsic.inc @@ -73,17 +73,11 @@ constexpr int128::int128(unsigned long long v) : v_{v} {} constexpr int128::int128(unsigned __int128 v) : v_{static_cast<__int128>(v)} {} -inline int128::int128(float v) { - v_ = static_cast<__int128>(v); -} +constexpr int128::int128(float v) : v_{static_cast<__int128>(v)} {} -inline int128::int128(double v) { - v_ = static_cast<__int128>(v); -} +constexpr int128::int128(double v) : v_{static_cast<__int128>(v)} {} -inline int128::int128(long double v) { - v_ = static_cast<__int128>(v); -} +constexpr int128::int128(long double v) : v_{static_cast<__int128>(v)} {} constexpr int128::int128(uint128 v) : v_{static_cast<__int128>(v)} {} @@ -119,9 +113,7 @@ constexpr int128::operator unsigned short() const { // NOLINT(runtime/int) return static_cast(v_); // NOLINT(runtime/int) } -constexpr int128::operator int() const { - return static_cast(v_); -} +constexpr int128::operator int() const { return static_cast(v_); } constexpr int128::operator unsigned int() const { return static_cast(v_); @@ -153,17 +145,17 @@ constexpr int128::operator unsigned __int128() const { // conversions. 
In that case, we do the conversion with a similar implementation // to the conversion operators in int128_no_intrinsic.inc. #if defined(__clang__) && !defined(__ppc64__) -inline int128::operator float() const { return static_cast(v_); } +constexpr int128::operator float() const { return static_cast(v_); } -inline int128::operator double() const { return static_cast(v_); } +constexpr int128::operator double() const { return static_cast(v_); } -inline int128::operator long double() const { +constexpr int128::operator long double() const { return static_cast(v_); } -#else // Clang on PowerPC +#else // Clang on PowerPC -inline int128::operator float() const { +constexpr int128::operator float() const { // We must convert the absolute value and then negate as needed, because // floating point types are typically sign-magnitude. Otherwise, the // difference between the high and low 64 bits when interpreted as two's @@ -177,7 +169,7 @@ inline int128::operator float() const { static_cast(Int128High64(*this)) * pow_2_64; } -inline int128::operator double() const { +constexpr int128::operator double() const { // See comment in int128::operator float() above. constexpr double pow_2_64 = 18446744073709551616.0; return v_ < 0 && *this != Int128Min() @@ -186,7 +178,7 @@ inline int128::operator double() const { static_cast(Int128High64(*this)) * pow_2_64; } -inline int128::operator long double() const { +constexpr int128::operator long double() const { // See comment in int128::operator float() above. constexpr long double pow_2_64 = 18446744073709551616.0L; return v_ < 0 && *this != Int128Min() @@ -254,17 +246,19 @@ constexpr int128 operator-(int128 lhs, int128 rhs) { return static_cast<__int128>(lhs) - static_cast<__int128>(rhs); } -inline int128 operator*(int128 lhs, int128 rhs) { +#if defined(ABSL_HAVE_INTRINSIC_INT128) +constexpr int128 operator*(int128 lhs, int128 rhs) { return static_cast<__int128>(lhs) * static_cast<__int128>(rhs); } -inline int128 operator/(int128 lhs, int128 rhs) { +constexpr int128 operator/(int128 lhs, int128 rhs) { return static_cast<__int128>(lhs) / static_cast<__int128>(rhs); } -inline int128 operator%(int128 lhs, int128 rhs) { +constexpr int128 operator%(int128 lhs, int128 rhs) { return static_cast<__int128>(lhs) % static_cast<__int128>(rhs); } +#endif // ABSL_HAVE_INTRINSIC_INT128 inline int128 int128::operator++(int) { int128 tmp(*this); diff --git a/absl/numeric/int128_no_intrinsic.inc b/absl/numeric/int128_no_intrinsic.inc index a7cdceabfa6..48bec2cc8fc 100644 --- a/absl/numeric/int128_no_intrinsic.inc +++ b/absl/numeric/int128_no_intrinsic.inc @@ -132,7 +132,7 @@ constexpr int128::operator unsigned long long() const { // NOLINT(runtime/int) return static_cast(lo_); // NOLINT(runtime/int) } -inline int128::operator float() const { +constexpr int128::operator float() const { // We must convert the absolute value and then negate as needed, because // floating point types are typically sign-magnitude. Otherwise, the // difference between the high and low 64 bits when interpreted as two's @@ -142,20 +142,18 @@ inline int128::operator float() const { constexpr float pow_2_64 = 18446744073709551616.0f; return hi_ < 0 && *this != Int128Min() ? -static_cast(-*this) - : static_cast(lo_) + - static_cast(hi_) * pow_2_64; + : static_cast(lo_) + static_cast(hi_) * pow_2_64; } -inline int128::operator double() const { +constexpr int128::operator double() const { // See comment in int128::operator float() above. 
constexpr double pow_2_64 = 18446744073709551616.0; return hi_ < 0 && *this != Int128Min() ? -static_cast(-*this) - : static_cast(lo_) + - static_cast(hi_) * pow_2_64; + : static_cast(lo_) + static_cast(hi_) * pow_2_64; } -inline int128::operator long double() const { +constexpr int128::operator long double() const { // See comment in int128::operator float() above. constexpr long double pow_2_64 = 18446744073709551616.0L; return hi_ < 0 && *this != Int128Min() diff --git a/absl/numeric/int128_test.cc b/absl/numeric/int128_test.cc index 13a0e7fdb15..77ee63c4683 100644 --- a/absl/numeric/int128_test.cc +++ b/absl/numeric/int128_test.cc @@ -350,7 +350,7 @@ TEST(Uint128, Multiply) { c = a * b; EXPECT_EQ(absl::MakeUint128(0x530EDA741C71D4C3, 0xBF25975319080000), c); EXPECT_EQ(0, c - b * a); - EXPECT_EQ(a*a - b*b, (a+b) * (a-b)); + EXPECT_EQ(a * a - b * b, (a + b) * (a - b)); // Verified with dc. a = absl::MakeUint128(0x0123456789abcdef, 0xfedcba9876543210); @@ -358,7 +358,7 @@ TEST(Uint128, Multiply) { c = a * b; EXPECT_EQ(absl::MakeUint128(0x97a87f4f261ba3f2, 0x342d0bbf48948200), c); EXPECT_EQ(0, c - b * a); - EXPECT_EQ(a*a - b*b, (a+b) * (a-b)); + EXPECT_EQ(a * a - b * b, (a + b) * (a - b)); } TEST(Uint128, AliasTests) { @@ -462,6 +462,26 @@ TEST(Uint128, ConstexprTest) { EXPECT_EQ(zero, absl::uint128(0)); EXPECT_EQ(one, absl::uint128(1)); EXPECT_EQ(minus_two, absl::MakeUint128(-1, -2)); + + constexpr double f = static_cast(absl::uint128(123)); + EXPECT_EQ(f, 123.0f); + + constexpr double d = static_cast(absl::uint128(123)); + EXPECT_EQ(d, 123.0); + + constexpr long double ld = static_cast(absl::uint128(123)); + EXPECT_EQ(ld, 123.0); + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr absl::uint128 division = absl::uint128(10) / absl::uint128(2); + EXPECT_EQ(division, absl::uint128(5)); + + constexpr absl::uint128 modulus = absl::int128(10) % absl::int128(3); + EXPECT_EQ(modulus, absl::uint128(1)); + + constexpr absl::uint128 multiplication = absl::uint128(10) * absl::uint128(3); + EXPECT_EQ(multiplication, absl::uint128(30)); +#endif // ABSL_HAVE_INTRINSIC_INT128 } TEST(Uint128, NumericLimitsTest) { @@ -522,7 +542,6 @@ TEST(Uint128, Hash) { EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(values)); } - TEST(Int128Uint128, ConversionTest) { absl::int128 nonnegative_signed_values[] = { 0, @@ -540,8 +559,7 @@ TEST(Int128Uint128, ConversionTest) { } absl::int128 negative_values[] = { - -1, -0x1234567890abcdef, - absl::MakeInt128(-0x5544332211ffeedd, 0), + -1, -0x1234567890abcdef, absl::MakeInt128(-0x5544332211ffeedd, 0), -absl::MakeInt128(0x76543210fedcba98, 0xabcdef0123456789)}; for (absl::int128 value : negative_values) { EXPECT_EQ(absl::uint128(-value), -absl::uint128(value)); @@ -769,6 +787,35 @@ TEST(Int128, ConstexprTest) { EXPECT_EQ(minus_two, absl::MakeInt128(-1, -2)); EXPECT_GT(max, one); EXPECT_LT(min, minus_two); + + constexpr double f = static_cast(absl::int128(123)); + EXPECT_EQ(f, 123.0f); + + constexpr double d = static_cast(absl::int128(123)); + EXPECT_EQ(d, 123.0); + + constexpr long double ld = static_cast(absl::int128(123)); + EXPECT_EQ(ld, 123.0); + +#ifdef ABSL_HAVE_INTRINSIC_INT128 + constexpr absl::int128 f_int128(static_cast(123.0)); + EXPECT_EQ(f_int128, absl::int128(123)); + + constexpr absl::int128 d_int128(static_cast(123.0)); + EXPECT_EQ(d_int128, absl::int128(123)); + + constexpr absl::int128 ld_int128(static_cast(123.0)); + EXPECT_EQ(ld_int128, absl::int128(123)); + + constexpr absl::int128 division = absl::int128(10) / absl::int128(2); + EXPECT_EQ(division, 
absl::int128(5)); + + constexpr absl::int128 modulus = absl::int128(10) % absl::int128(3); + EXPECT_EQ(modulus, absl::int128(1)); + + constexpr absl::int128 multiplication = absl::int128(10) * absl::int128(3); + EXPECT_EQ(multiplication, absl::int128(30)); +#endif // ABSL_HAVE_INTRINSIC_INT128 } TEST(Int128, ComparisonTest) { diff --git a/absl/numeric/internal/bits.h b/absl/numeric/internal/bits.h index e1d18b86334..e6815445ab7 100644 --- a/absl/numeric/internal/bits.h +++ b/absl/numeric/internal/bits.h @@ -77,8 +77,28 @@ template static_assert(IsPowerOf2(std::numeric_limits::digits), "T must have a power-of-2 size"); - return static_cast(x >> (s & (std::numeric_limits::digits - 1))) | - static_cast(x << ((-s) & (std::numeric_limits::digits - 1))); + // Rotate by s mod the number of digits to avoid unnecessary rotations. + // + // A negative s represents a left rotation instead of a right rotation. + // We compute it as an equivalent complementary right rotation by leveraging + // its two's complement representation. + // + // For example, suppose we rotate a 3-bit number by -2. + // In that case: + // * s = 0b11111111111111111111111111111110 + // * n = 8 + // * r = (0b11111111111111111111111111111110 & 0b111) = 0b110 + // + // Instead of rotating by 2 to the left, we rotate by 6 to the right, which + // is equivalent. + const int n = std::numeric_limits::digits; + const int r = s & (n - 1); + + if (r == 0) { + return x; + } else { + return (x >> r) | (x << (n - r)); + } } template @@ -88,8 +108,16 @@ template static_assert(IsPowerOf2(std::numeric_limits::digits), "T must have a power-of-2 size"); - return static_cast(x << (s & (std::numeric_limits::digits - 1))) | - static_cast(x >> ((-s) & (std::numeric_limits::digits - 1))); + // Rotate by s mod the number of digits to avoid unnecessary rotations. + // See comment in RotateRight for a detailed explanation of the logic below. + const int n = std::numeric_limits::digits; + const int r = s & (n - 1); + + if (r == 0) { + return x; + } else { + return (x << r) | (x >> (n - r)); + } } ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int diff --git a/absl/profiling/BUILD.bazel b/absl/profiling/BUILD.bazel index ee4800de48f..5afdb96beb2 100644 --- a/absl/profiling/BUILD.bazel +++ b/absl/profiling/BUILD.bazel @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -49,6 +52,7 @@ cc_library( cc_test( name = "sample_recorder_test", srcs = ["internal/sample_recorder_test.cc"], + copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_wasm", @@ -69,6 +73,7 @@ cc_library( name = "exponential_biased", srcs = ["internal/exponential_biased.cc"], hdrs = ["internal/exponential_biased.h"], + copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", @@ -139,3 +144,44 @@ cc_binary( "@google_benchmark//:benchmark_main", ], ) + +cc_library( + name = "profile_builder", + srcs = ["internal/profile_builder.cc"], + hdrs = ["internal/profile_builder.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + "//absl/base", + "//absl/base:config", + "//absl/base:raw_logging_internal", + "//absl/container:btree", + "//absl/container:flat_hash_map", + "//absl/strings", + "//absl/types:span", + ], +) + +cc_library( + name = "hashtable", + srcs = ["hashtable.cc"], + hdrs = ["hashtable.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = [ + "//absl:__subpackages__", + ], + deps = [ + ":profile_builder", + "//absl/base:config", + "//absl/container:hashtablez_sampler", + "//absl/status:statusor", + "//absl/strings", + "//absl/strings:string_view", + "//absl/time", + "//absl/types:span", + ], +) diff --git a/absl/profiling/CMakeLists.txt b/absl/profiling/CMakeLists.txt index 84b8b3b8945..4807f0dde6f 100644 --- a/absl/profiling/CMakeLists.txt +++ b/absl/profiling/CMakeLists.txt @@ -92,3 +92,40 @@ absl_cc_test( GTest::gmock_main ) +# Internal-only target, do not depend on directly +absl_cc_library( + NAME + profile_builder + HDRS + "internal/profile_builder.h" + SRCS + "internal/profile_builder.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::config + absl::core_headers + absl::raw_logging_internal + absl::flat_hash_map + absl::btree + absl::strings + absl::span +) + +absl_cc_library( + NAME + hashtable_profiler + HDRS + "hashtable.h" + SRCS + "hashtable.cc" + COPTS + ${ABSL_DEFAULT_COPTS} + DEPS + absl::profile_builder + absl::config + absl::core_headers + absl::strings + absl::span + absl::hashtablez_sampler +) diff --git a/absl/profiling/hashtable.cc b/absl/profiling/hashtable.cc new file mode 100644 index 00000000000..bfec0d56c31 --- /dev/null +++ b/absl/profiling/hashtable.cc @@ -0,0 +1,124 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may +// obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "absl/profiling/hashtable.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/profiling/internal/profile_builder.h" +#include "absl/status/statusor.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +StatusOr MarshalHashtableProfile() { + return debugging_internal::MarshalHashtableProfile( + container_internal::GlobalHashtablezSampler(), Now()); +} + +namespace debugging_internal { + +StatusOr MarshalHashtableProfile( + container_internal::HashtablezSampler& sampler, Time now) { + static constexpr absl::string_view kDropFrames = + "(::)?absl::container_internal::.*|" + "(::)?absl::(flat|node)_hash_(map|set).*"; + + ProfileBuilder builder; + StringId drop_frames_id = builder.InternString(kDropFrames); + builder.set_drop_frames_id(drop_frames_id); + builder.AddSampleType(builder.InternString("capacity"), + builder.InternString("count")); + builder.set_default_sample_type_id(builder.InternString("capacity")); + + const auto capacity_id = builder.InternString("capacity"); + const auto size_id = builder.InternString("size"); + const auto num_erases_id = builder.InternString("num_erases"); + const auto num_rehashes_id = builder.InternString("num_rehashes"); + const auto max_probe_length_id = builder.InternString("max_probe_length"); + const auto total_probe_length_id = builder.InternString("total_probe_length"); + const auto stuck_bits_id = builder.InternString("stuck_bits"); + const auto inline_element_size_id = + builder.InternString("inline_element_size"); + const auto key_size_id = builder.InternString("key_size"); + const auto value_size_id = builder.InternString("value_size"); + const auto soo_capacity_id = builder.InternString("soo_capacity"); + const auto checksum_id = builder.InternString("checksum"); + const auto table_age_id = builder.InternString("table_age"); + const auto max_reserve_id = builder.InternString("max_reserve"); + + size_t dropped = + sampler.Iterate([&](const container_internal::HashtablezInfo& info) { + const size_t capacity = info.capacity.load(std::memory_order_relaxed); + std::vector> labels; + + auto add_label = [&](StringId tag, uint64_t value) { + if (value == 0) { + return; + } + labels.emplace_back(tag, static_cast(value)); + }; + + add_label(capacity_id, capacity); + add_label(size_id, info.size.load(std::memory_order_relaxed)); + add_label(num_erases_id, + info.num_erases.load(std::memory_order_relaxed)); + add_label(num_rehashes_id, + info.num_rehashes.load(std::memory_order_relaxed)); + add_label(max_probe_length_id, + info.max_probe_length.load(std::memory_order_relaxed)); + add_label(total_probe_length_id, + info.total_probe_length.load(std::memory_order_relaxed)); + add_label(stuck_bits_id, + (info.hashes_bitwise_and.load(std::memory_order_relaxed) | + ~info.hashes_bitwise_or.load(std::memory_order_relaxed))); + add_label(inline_element_size_id, info.inline_element_size); + add_label(key_size_id, info.key_size); + add_label(value_size_id, info.value_size); + add_label(soo_capacity_id, info.soo_capacity); + add_label(checksum_id, + info.hashes_bitwise_xor.load(std::memory_order_relaxed)); + add_label( + table_age_id, + static_cast(ToInt64Microseconds(now - info.create_time))); + add_label(max_reserve_id, + info.max_reserve.load(std::memory_order_relaxed)); + 
builder.AddSample(static_cast(capacity) * info.weight, + MakeSpan(info.stack, info.depth), labels); + }); + + // TODO(b/262310142): Make this more structured data. + StringId comment_id = + builder.InternString(StrCat("dropped_samples: ", dropped)); + builder.set_comment_id(comment_id); + builder.AddCurrentMappings(); + return std::move(builder).Emit(); +} + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/profiling/hashtable.h b/absl/profiling/hashtable.h new file mode 100644 index 00000000000..9e490dcdb8a --- /dev/null +++ b/absl/profiling/hashtable.h @@ -0,0 +1,40 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_PROFILING_HASHTABLE_H_ +#define ABSL_PROFILING_HASHTABLE_H_ + +#include +#include + +#include "absl/container/internal/hashtablez_sampler.h" +#include "absl/status/statusor.h" +#include "absl/strings/string_view.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +absl::StatusOr MarshalHashtableProfile(); + +namespace debugging_internal { + +absl::StatusOr MarshalHashtableProfile( + container_internal::HashtablezSampler& sampler, absl::Time now); + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_PROFILING_HASHTABLE_H_ diff --git a/absl/profiling/internal/profile_builder.cc b/absl/profiling/internal/profile_builder.cc new file mode 100644 index 00000000000..f0bb40b7325 --- /dev/null +++ b/absl/profiling/internal/profile_builder.cc @@ -0,0 +1,462 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may +// obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/profiling/internal/profile_builder.h" + +#ifdef __linux__ +#include +#include +#endif // __linux__ + +#include +#include +#include +#include +#include +#include + +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/strings/escaping.h" +#include "absl/strings/str_cat.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +namespace { + +// This file contains a simplified implementation of the pprof profile builder, +// which avoids a dependency on protobuf. +// +// The canonical profile proto definition is at +// https://github.com/google/pprof/blob/master/proto/profile.proto +// +// Wire-format encoding is a simple sequence of (tag, value) pairs. 
The tag +// is a varint-encoded integer, where the low 3 bits are the wire type, and the +// high bits are the field number. +// +// For the fields we care about, we'll be using the following wire types: +// +// Wire Type 0: Varint-encoded integer. +// Wire Type 2: Length-delimited. Used for strings and sub-messages. +enum class WireType { + kVarint = 0, + kLengthDelimited = 2, +}; + +#ifdef __linux__ +// Returns the Phdr of the first segment of the given type. +const ElfW(Phdr) * GetFirstSegment(const dl_phdr_info* const info, + const ElfW(Word) segment_type) { + for (int i = 0; i < info->dlpi_phnum; ++i) { + if (info->dlpi_phdr[i].p_type == segment_type) { + return &info->dlpi_phdr[i]; + } + } + return nullptr; +} + +// Return DT_SONAME for the given image. If there is no PT_DYNAMIC or if +// PT_DYNAMIC does not contain DT_SONAME, return nullptr. +static const char* GetSoName(const dl_phdr_info* const info) { + const ElfW(Phdr)* const pt_dynamic = GetFirstSegment(info, PT_DYNAMIC); + if (pt_dynamic == nullptr) { + return nullptr; + } + const ElfW(Dyn)* dyn = + reinterpret_cast(info->dlpi_addr + pt_dynamic->p_vaddr); + const ElfW(Dyn)* dt_strtab = nullptr; + const ElfW(Dyn)* dt_strsz = nullptr; + const ElfW(Dyn)* dt_soname = nullptr; + for (; dyn->d_tag != DT_NULL; ++dyn) { + if (dyn->d_tag == DT_SONAME) { + dt_soname = dyn; + } else if (dyn->d_tag == DT_STRTAB) { + dt_strtab = dyn; + } else if (dyn->d_tag == DT_STRSZ) { + dt_strsz = dyn; + } + } + if (dt_soname == nullptr) { + return nullptr; + } + ABSL_RAW_CHECK(dt_strtab != nullptr, "Unexpected nullptr"); + ABSL_RAW_CHECK(dt_strsz != nullptr, "Unexpected nullptr"); + const char* const strtab = reinterpret_cast( + info->dlpi_addr + static_cast(dt_strtab->d_un.d_val)); + ABSL_RAW_CHECK(dt_soname->d_un.d_val < dt_strsz->d_un.d_val, + "Unexpected order"); + return strtab + dt_soname->d_un.d_val; +} + +// Helper function to get the build ID of a shared object. +std::string GetBuildId(const dl_phdr_info* const info) { + std::string result; + + // pt_note contains entries (of type ElfW(Nhdr)) starting at + // info->dlpi_addr + pt_note->p_vaddr + // with length + // pt_note->p_memsz + // + // The length of each entry is given by + // Align(sizeof(ElfW(Nhdr)) + nhdr->n_namesz) + Align(nhdr->n_descsz) + for (int i = 0; i < info->dlpi_phnum; ++i) { + const ElfW(Phdr)* pt_note = &info->dlpi_phdr[i]; + if (pt_note->p_type != PT_NOTE) continue; + + const char* note = + reinterpret_cast(info->dlpi_addr + pt_note->p_vaddr); + const char* const last = note + pt_note->p_filesz; + const ElfW(Xword) align = pt_note->p_align; + while (note < last) { + const ElfW(Nhdr)* const nhdr = reinterpret_cast(note); + if (note + sizeof(*nhdr) > last) { + // Corrupt PT_NOTE + break; + } + + // Both the start and end of the descriptor are aligned by sh_addralign + // (= p_align). + const ElfW(Xword) desc_start = + (sizeof(*nhdr) + nhdr->n_namesz + align - 1) & -align; + const ElfW(Xword) size = + desc_start + ((nhdr->n_descsz + align - 1) & -align); + + // Beware of wrap-around. + if (nhdr->n_namesz >= static_cast(-align) || + nhdr->n_descsz >= static_cast(-align) || + desc_start < sizeof(*nhdr) || size < desc_start || + size > static_cast(last - note)) { + // Corrupt PT_NOTE + break; + } + + if (nhdr->n_type == NT_GNU_BUILD_ID) { + const char* const note_name = note + sizeof(*nhdr); + // n_namesz is the length of note_name. + if (nhdr->n_namesz == 4 && memcmp(note_name, "GNU\0", 4) == 0) { + if (!result.empty()) { + // Repeated build-ids. Ignore them. 
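+          // More than one NT_GNU_BUILD_ID note makes the ID ambiguous,
+          // so report no build ID at all rather than guess.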
+ return ""; + } + result = absl::BytesToHexString( + absl::string_view(note + desc_start, nhdr->n_descsz)); + } + } + note += size; + } + } + + return result; +} +#endif // __linux__ + +// A varint-encoded integer. +struct Varint { + explicit Varint(uint64_t v) : value(v) {} + explicit Varint(StringId v) : value(static_cast(v)) {} + explicit Varint(LocationId v) : value(static_cast(v)) {} + explicit Varint(MappingId v) : value(static_cast(v)) {} + + uint64_t value; + + template + friend void AbslStringify(Sink& sink, const Varint& v) { + char buf[10]; + char* p = buf; + uint64_t u = v.value; + while (u >= 0x80) { + *p++ = static_cast((u & 0x7f) | 0x80); + u >>= 7; + } + *p++ = static_cast(u); + sink.Append(absl::string_view(buf, static_cast(p - buf))); + } +}; + +struct Tag { + int field_number; + WireType wire_type; + + template + friend void AbslStringify(Sink& sink, const Tag& t) { + absl::Format(&sink, "%v", + Varint((static_cast(t.field_number) << 3) | + static_cast(t.wire_type))); + } +}; + +struct LengthDelimited { + int field_number; + absl::string_view value; + + template + friend void AbslStringify(Sink& sink, const LengthDelimited& ld) { + absl::Format(&sink, "%v%v%v", + Tag{ld.field_number, WireType::kLengthDelimited}, + Varint(ld.value.size()), ld.value); + } +}; + +struct VarintField { + int field_number; + Varint value; + + template + friend void AbslStringify(Sink& sink, const VarintField& vf) { + absl::Format(&sink, "%v%v", Tag{vf.field_number, WireType::kVarint}, + vf.value); + } +}; + +} // namespace + +StringId ProfileBuilder::InternString(absl::string_view str) { + if (str.empty()) return StringId(0); + return string_table_.emplace(str, StringId(string_table_.size())) + .first->second; +} + +LocationId ProfileBuilder::InternLocation(const void* address) { + return location_table_ + .emplace(absl::bit_cast(address), + LocationId(location_table_.size() + 1)) + .first->second; +} + +void ProfileBuilder::AddSample( + int64_t value, absl::Span stack, + absl::Span> labels) { + std::string sample_proto; + absl::StrAppend( + &sample_proto, + VarintField{SampleProto::kValue, Varint(static_cast(value))}); + + for (const void* addr : stack) { + // Profile addresses are raw stack unwind addresses, so they should be + // adjusted by -1 to land inside the call instruction (although potentially + // misaligned). 
+ absl::StrAppend( + &sample_proto, + VarintField{SampleProto::kLocationId, + Varint(InternLocation(absl::bit_cast( + absl::bit_cast(addr) - 1)))}); + } + + for (const auto& label : labels) { + std::string label_proto = + absl::StrCat(VarintField{LabelProto::kKey, Varint(label.first)}, + VarintField{LabelProto::kNum, + Varint(static_cast(label.second))}); + absl::StrAppend(&sample_proto, + LengthDelimited{SampleProto::kLabel, label_proto}); + } + samples_.push_back(std::move(sample_proto)); +} + +void ProfileBuilder::AddSampleType(StringId type, StringId unit) { + std::string sample_type_proto = + absl::StrCat(VarintField{ValueTypeProto::kType, Varint(type)}, + VarintField{ValueTypeProto::kUnit, Varint(unit)}); + sample_types_.push_back(std::move(sample_type_proto)); +} + +MappingId ProfileBuilder::AddMapping(uintptr_t memory_start, + uintptr_t memory_limit, + uintptr_t file_offset, + absl::string_view filename, + absl::string_view build_id) { + size_t index = mappings_.size() + 1; + auto [it, inserted] = mapping_table_.emplace(memory_start, index); + if (!inserted) { + return static_cast(it->second); + } + + Mapping m; + m.start = memory_start; + m.limit = memory_limit; + m.offset = file_offset; + m.filename = std::string(filename); + m.build_id = std::string(build_id); + + mappings_.push_back(std::move(m)); + return static_cast(index); +} + +std::string ProfileBuilder::Emit() && { + std::string profile_proto; + for (const auto& sample_type : sample_types_) { + absl::StrAppend(&profile_proto, + LengthDelimited{ProfileProto::kSampleType, sample_type}); + } + for (const auto& sample : samples_) { + absl::StrAppend(&profile_proto, + LengthDelimited{ProfileProto::kSample, sample}); + } + + // Build mapping table. + for (size_t i = 0, n = mappings_.size(); i < n; ++i) { + const auto& mapping = mappings_[i]; + std::string mapping_proto = absl::StrCat( + VarintField{MappingProto::kId, Varint(static_cast(i + 1))}, + VarintField{MappingProto::kMemoryStart, Varint(mapping.start)}, + VarintField{MappingProto::kMemoryLimit, Varint(mapping.limit)}, + VarintField{MappingProto::kFileOffset, Varint(mapping.offset)}, + VarintField{MappingProto::kFilename, + Varint(InternString(mapping.filename))}, + VarintField{MappingProto::kBuildId, + Varint(InternString(mapping.build_id))}); + + absl::StrAppend(&profile_proto, + LengthDelimited{ProfileProto::kMapping, mapping_proto}); + } + + // Build location table. + for (const auto& [address, id] : location_table_) { + std::string location = + absl::StrCat(VarintField{LocationProto::kId, Varint(id)}, + VarintField{LocationProto::kAddress, Varint(address)}); + + if (!mappings_.empty()) { + // Find the mapping ID. + auto it = mapping_table_.upper_bound(address); + if (it != mapping_table_.begin()) { + --it; + } + + // If *it contains address, add mapping to location. 
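+      // `it` now refers to the mapping with the largest start address
+      // <= `address`, if any; the check below rejects addresses that fall
+      // past that mapping's limit.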
+ const size_t mapping_index = it->second; + const Mapping& mapping = mappings_[mapping_index - 1]; + + if (it->first <= address && address < mapping.limit) { + absl::StrAppend( + &location, + VarintField{LocationProto::kMappingId, + Varint(static_cast(mapping_index))}); + } + } + + absl::StrAppend(&profile_proto, + LengthDelimited{ProfileProto::kLocation, location}); + } + + std::string string_table_proto; + std::vector sorted_strings(string_table_.size()); + for (const auto& p : string_table_) { + sorted_strings[static_cast(p.second)] = p.first; + } + for (const auto& s : sorted_strings) { + absl::StrAppend(&string_table_proto, + LengthDelimited{ProfileProto::kStringTable, s}); + } + absl::StrAppend(&profile_proto, VarintField{ProfileProto::kDropFrames, + Varint(drop_frames_id_)}); + absl::StrAppend(&profile_proto, + VarintField{ProfileProto::kComment, Varint(comment_id_)}); + absl::StrAppend(&profile_proto, VarintField{ProfileProto::kDefaultSampleType, + Varint(default_sample_type_id_)}); + return absl::StrCat(string_table_proto, profile_proto); +} + +void ProfileBuilder::set_drop_frames_id(StringId drop_frames_id) { + drop_frames_id_ = drop_frames_id; +} + +void ProfileBuilder::set_comment_id(StringId comment_id) { + comment_id_ = comment_id; +} + +void ProfileBuilder::set_default_sample_type_id( + StringId default_sample_type_id) { + default_sample_type_id_ = default_sample_type_id; +} + +void ProfileBuilder::AddCurrentMappings() { +#ifdef __linux__ + dl_iterate_phdr( + +[](dl_phdr_info* info, size_t, void* data) { + auto& builder = *reinterpret_cast(data); + + // Skip dummy entry introduced since glibc 2.18. + if (info->dlpi_phdr == nullptr && info->dlpi_phnum == 0) { + return 0; + } + + const bool is_main_executable = builder.mappings_.empty(); + + // Evaluate all the loadable segments. + for (int i = 0; i < info->dlpi_phnum; ++i) { + if (info->dlpi_phdr[i].p_type != PT_LOAD) { + continue; + } + const ElfW(Phdr)* pt_load = &info->dlpi_phdr[i]; + + ABSL_RAW_CHECK(pt_load != nullptr, "Unexpected nullptr"); + + // Extract data. + const size_t memory_start = info->dlpi_addr + pt_load->p_vaddr; + const size_t memory_limit = memory_start + pt_load->p_memsz; + const size_t file_offset = pt_load->p_offset; + + // Storage for path to executable as dlpi_name isn't populated for the + // main executable. +1 to allow for the null terminator that readlink + // does not add. + char self_filename[PATH_MAX + 1]; + const char* filename = info->dlpi_name; + if (filename == nullptr || filename[0] == '\0') { + // This is either the main executable or the VDSO. The main + // executable is always the first entry processed by callbacks. + if (is_main_executable) { + // This is the main executable. + ssize_t ret = readlink("/proc/self/exe", self_filename, + sizeof(self_filename) - 1); + if (ret >= 0 && + static_cast(ret) < sizeof(self_filename)) { + self_filename[ret] = '\0'; + filename = self_filename; + } + } else { + // This is the VDSO. + filename = GetSoName(info); + } + } + + char resolved_path[PATH_MAX]; + absl::string_view resolved_filename; + if (realpath(filename, resolved_path)) { + resolved_filename = resolved_path; + } else { + resolved_filename = filename; + } + + const std::string build_id = GetBuildId(info); + + // Add to profile. + builder.AddMapping(memory_start, memory_limit, file_offset, + resolved_filename, build_id); + } + // Keep going. 
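+        // dl_iterate_phdr stops iterating as soon as the callback returns
+        // nonzero, so return 0 to visit every remaining loaded object.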
+ return 0; + }, + this); +#endif // __linux__ +} + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/profiling/internal/profile_builder.h b/absl/profiling/internal/profile_builder.h new file mode 100644 index 00000000000..45075e6015d --- /dev/null +++ b/absl/profiling/internal/profile_builder.h @@ -0,0 +1,138 @@ +// Copyright 2025 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may +// obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_PROFILING_INTERNAL_PROFILE_BUILDER_H_ +#define ABSL_PROFILING_INTERNAL_PROFILE_BUILDER_H_ + +#include +#include +#include +#include + +#include "absl/container/btree_map.h" +#include "absl/container/flat_hash_map.h" +#include "absl/strings/string_view.h" +#include "absl/types/span.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace debugging_internal { + +// Field numbers for perftools.profiles.Profile. +// https://github.com/google/pprof/blob/master/proto/profile.proto +struct ProfileProto { + static constexpr int kSampleType = 1; + static constexpr int kSample = 2; + static constexpr int kMapping = 3; + static constexpr int kLocation = 4; + static constexpr int kStringTable = 6; + static constexpr int kDropFrames = 7; + static constexpr int kComment = 13; + static constexpr int kDefaultSampleType = 14; +}; + +struct ValueTypeProto { + static constexpr int kType = 1; + static constexpr int kUnit = 2; +}; + +struct SampleProto { + static constexpr int kLocationId = 1; + static constexpr int kValue = 2; + static constexpr int kLabel = 3; +}; + +struct LabelProto { + static constexpr int kKey = 1; + static constexpr int kStr = 2; + static constexpr int kNum = 3; + static constexpr int kNumUnit = 4; +}; + +struct MappingProto { + static constexpr int kId = 1; + static constexpr int kMemoryStart = 2; + static constexpr int kMemoryLimit = 3; + static constexpr int kFileOffset = 4; + static constexpr int kFilename = 5; + static constexpr int kBuildId = 6; +}; + +struct LocationProto { + static constexpr int kId = 1; + static constexpr int kMappingId = 2; + static constexpr int kAddress = 3; +}; + +enum class StringId : size_t {}; +enum class LocationId : size_t {}; +enum class MappingId : size_t {}; + +// A helper class to build a profile protocol buffer. +class ProfileBuilder { + public: + struct Mapping { + uint64_t start; + uint64_t limit; + uint64_t offset; + std::string filename; + std::string build_id; + }; + + StringId InternString(absl::string_view str); + + LocationId InternLocation(const void* address); + + void AddSample(int64_t value, absl::Span stack, + absl::Span> labels); + + void AddSampleType(StringId type, StringId unit); + + // Adds the current process mappings to the profile. + void AddCurrentMappings(); + + // Adds a single mapping to the profile and to lookup cache and returns the + // resulting ID. 
+ MappingId AddMapping(uintptr_t memory_start, uintptr_t memory_limit, + uintptr_t file_offset, absl::string_view filename, + absl::string_view build_id); + + std::string Emit() &&; + + void set_drop_frames_id(StringId drop_frames_id); + void set_comment_id(StringId comment_id); + void set_default_sample_type_id(StringId default_sample_type_id); + + private: + absl::flat_hash_map string_table_{{"", StringId(0)}}; + absl::flat_hash_map location_table_; + // mapping_table_ stores the start address of each mapping in mapping_ + // to its index. + absl::btree_map mapping_table_; + std::vector mappings_; + + std::vector sample_types_; + std::vector samples_; + + StringId drop_frames_id_{}; + StringId comment_id_{}; + StringId default_sample_type_id_{}; +}; + +} // namespace debugging_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_PROFILING_INTERNAL_PROFILE_BUILDER_H_ diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h index 371f6c473ff..88a4b271712 100644 --- a/absl/profiling/internal/sample_recorder.h +++ b/absl/profiling/internal/sample_recorder.h @@ -75,7 +75,7 @@ class SampleRecorder { // Iterates over all the registered `StackInfo`s. Returning the number of // samples that have been dropped. - int64_t Iterate(const std::function& f); + size_t Iterate(const std::function& f); size_t GetMaxSamples() const; void SetMaxSamples(size_t max); @@ -130,7 +130,7 @@ SampleRecorder::SetDisposeCallback(DisposeCallback f) { template SampleRecorder::SampleRecorder() : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) { - absl::MutexLock l(&graveyard_.init_mu); + absl::MutexLock l(graveyard_.init_mu); graveyard_.dead = &graveyard_; } @@ -159,8 +159,8 @@ void SampleRecorder::PushDead(T* sample) { dispose(*sample); } - absl::MutexLock graveyard_lock(&graveyard_.init_mu); - absl::MutexLock sample_lock(&sample->init_mu); + absl::MutexLock graveyard_lock(graveyard_.init_mu); + absl::MutexLock sample_lock(sample->init_mu); sample->dead = graveyard_.dead; graveyard_.dead = sample; } @@ -168,7 +168,7 @@ void SampleRecorder::PushDead(T* sample) { template template T* SampleRecorder::PopDead(Targs... args) { - absl::MutexLock graveyard_lock(&graveyard_.init_mu); + absl::MutexLock graveyard_lock(graveyard_.init_mu); // The list is circular, so eventually it collapses down to // graveyard_.dead == &graveyard_ @@ -176,7 +176,7 @@ T* SampleRecorder::PopDead(Targs... args) { T* sample = graveyard_.dead; if (sample == &graveyard_) return nullptr; - absl::MutexLock sample_lock(&sample->init_mu); + absl::MutexLock sample_lock(sample->init_mu); graveyard_.dead = sample->dead; sample->dead = nullptr; sample->PrepareForSampling(std::forward(args)...); @@ -198,7 +198,7 @@ T* SampleRecorder::Register(Targs&&... args) { // Resurrection failed. Hire a new warlock. sample = new T(); { - absl::MutexLock sample_lock(&sample->init_mu); + absl::MutexLock sample_lock(sample->init_mu); // If flag initialization happens to occur (perhaps in another thread) // while in this block, it will lock `graveyard_` which is usually always // locked before any sample. This will appear as a lock inversion. 
@@ -222,11 +222,11 @@ void SampleRecorder::Unregister(T* sample) { } template -int64_t SampleRecorder::Iterate( +size_t SampleRecorder::Iterate( const std::function& f) { T* s = all_.load(std::memory_order_acquire); while (s != nullptr) { - absl::MutexLock l(&s->init_mu); + absl::MutexLock l(s->init_mu); if (s->dead == nullptr) { f(*s); } diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel index 887ab0f2203..8986211d2f2 100644 --- a/absl/random/BUILD.bazel +++ b/absl/random/BUILD.bazel @@ -16,6 +16,9 @@ # ABSL random-number generation libraries. +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel index 994fb5c9f38..1a3fef849bb 100644 --- a/absl/random/internal/BUILD.bazel +++ b/absl/random/internal/BUILD.bazel @@ -15,6 +15,9 @@ # load("@bazel_skylib//lib:selects.bzl", "selects") +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") # Internal-only implementation classes for Abseil Random load( @@ -799,6 +802,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "benchmark", + "no_test_ios_sim_arm64", "no_test_ios_x86_64", "no_test_loonix", # Crashing. "no_test_wasm", diff --git a/absl/random/internal/entropy_pool.cc b/absl/random/internal/entropy_pool.cc index fa47d0de6f5..13867005129 100644 --- a/absl/random/internal/entropy_pool.cc +++ b/absl/random/internal/entropy_pool.cc @@ -55,7 +55,7 @@ class alignas(std::max(size_t{ABSL_CACHELINE_SIZE}, size_t{32})) RandenTraits::kCapacityBytes / sizeof(uint32_t); void Init(absl::Span data) { - SpinLockHolder l(&mu_); // Always uncontested. + SpinLockHolder l(mu_); // Always uncontested. std::copy(data.begin(), data.end(), std::begin(state_)); next_ = kState; } @@ -84,7 +84,7 @@ class alignas(std::max(size_t{ABSL_CACHELINE_SIZE}, size_t{32})) }; void RandenPoolEntry::Fill(uint8_t* out, size_t bytes) { - SpinLockHolder l(&mu_); + SpinLockHolder l(mu_); while (bytes > 0) { MaybeRefill(); size_t remaining = available() * sizeof(state_[0]); diff --git a/absl/random/internal/entropy_pool_test.cc b/absl/random/internal/entropy_pool_test.cc index 89ea72fe21d..7739f19dfb6 100644 --- a/absl/random/internal/entropy_pool_test.cc +++ b/absl/random/internal/entropy_pool_test.cc @@ -44,7 +44,7 @@ TEST(EntropyPoolTest, DistinctSequencesPerThread) { threads.emplace_back([&]() { std::vector v(kValuesPerThread); GetEntropyFromRandenPool(v.data(), sizeof(result_type) * v.size()); - absl::MutexLock l(&mu); + absl::MutexLock l(mu); data.push_back(std::move(v)); }); } diff --git a/absl/random/internal/fastmath_test.cc b/absl/random/internal/fastmath_test.cc index 0d6f9dc1577..0b9ae5e6df1 100644 --- a/absl/random/internal/fastmath_test.cc +++ b/absl/random/internal/fastmath_test.cc @@ -16,12 +16,10 @@ #include "gtest/gtest.h" -#if defined(__native_client__) || defined(__EMSCRIPTEN__) -// NACL has a less accurate implementation of std::log2 than most of +#if defined(__EMSCRIPTEN__) +// Emscripten has a less accurate implementation of std::log2 than most of // the other platforms. For some values which should have integral results, -// sometimes NACL returns slightly larger values. -// -// The MUSL libc used by emscripten also has a similar bug. +// sometimes Emscripten returns slightly larger values. 
#define ABSL_RANDOM_INACCURATE_LOG2 #endif diff --git a/absl/random/internal/nonsecure_base_test.cc b/absl/random/internal/nonsecure_base_test.cc index 6b6f2d55cb8..6e3e712b0a7 100644 --- a/absl/random/internal/nonsecure_base_test.cc +++ b/absl/random/internal/nonsecure_base_test.cc @@ -214,7 +214,7 @@ TEST(NonsecureURBGBase, DistinctSequencesPerThread) { std::vector v(kValuesPerThread); std::generate(v.begin(), v.end(), [&]() { return gen(); }); - absl::MutexLock l(&mu); + absl::MutexLock l(mu); data.push_back(std::move(v)); }); } diff --git a/absl/random/internal/platform.h b/absl/random/internal/platform.h index bd2993e181c..a6fde4b2775 100644 --- a/absl/random/internal/platform.h +++ b/absl/random/internal/platform.h @@ -35,7 +35,6 @@ // Darwin (macOS and iOS) __APPLE__ // Akaros (http://akaros.org) __ros__ // Windows _WIN32 -// NaCL __native_client__ // AsmJS __asmjs__ // WebAssembly __wasm__ // Fuchsia __Fuchsia__ @@ -125,12 +124,6 @@ #endif -// NaCl does not allow AES. -#if defined(__native_client__) -#undef ABSL_HAVE_ACCELERATED_AES -#define ABSL_HAVE_ACCELERATED_AES 0 -#endif - // ABSL_RANDOM_INTERNAL_AES_DISPATCH indicates whether the currently active // platform has, or should use run-time dispatch for selecting the // accelerated Randen implementation. @@ -162,10 +155,4 @@ #define ABSL_RANDOM_INTERNAL_AES_DISPATCH 1 #endif -// NaCl does not allow dispatch. -#if defined(__native_client__) -#undef ABSL_RANDOM_INTERNAL_AES_DISPATCH -#define ABSL_RANDOM_INTERNAL_AES_DISPATCH 0 -#endif - #endif // ABSL_RANDOM_INTERNAL_PLATFORM_H_ diff --git a/absl/random/internal/randen_engine_test.cc b/absl/random/internal/randen_engine_test.cc index a94f4916052..122d90b8501 100644 --- a/absl/random/internal/randen_engine_test.cc +++ b/absl/random/internal/randen_engine_test.cc @@ -632,7 +632,6 @@ TEST(RandenTest, IsFastOrSlow) { // // linux, optimized ~5ns // ppc, optimized ~7ns - // nacl (slow), ~1100ns // // `kCount` is chosen below so that, in debug builds and without hardware // acceleration, the test (assuming ~1us per call) should finish in ~0.1s diff --git a/absl/random/internal/seed_material.cc b/absl/random/internal/seed_material.cc index 8099ec73602..b6380c8374e 100644 --- a/absl/random/internal/seed_material.cc +++ b/absl/random/internal/seed_material.cc @@ -41,12 +41,7 @@ #include "absl/types/optional.h" #include "absl/types/span.h" -#if defined(__native_client__) - -#include -#define ABSL_RANDOM_USE_NACL_SECURE_RANDOM 1 - -#elif defined(_WIN32) +#if defined(_WIN32) #include #define ABSL_RANDOM_USE_BCRYPT 1 @@ -109,27 +104,6 @@ bool ReadSeedMaterialFromOSEntropyImpl(absl::Span values) { return BCRYPT_SUCCESS(ret); } -#elif defined(ABSL_RANDOM_USE_NACL_SECURE_RANDOM) - -// On NaCL use nacl_secure_random to acquire bytes. 
-bool ReadSeedMaterialFromOSEntropyImpl(absl::Span values) { - auto buffer = reinterpret_cast(values.data()); - size_t buffer_size = sizeof(uint32_t) * values.size(); - - uint8_t* output_ptr = buffer; - while (buffer_size > 0) { - size_t nread = 0; - const int error = nacl_secure_random(output_ptr, buffer_size, &nread); - if (error != 0 || nread > buffer_size) { - ABSL_RAW_LOG(ERROR, "Failed to read secure_random seed data: %d", error); - return false; - } - output_ptr += nread; - buffer_size -= nread; - } - return true; -} - #elif defined(__Fuchsia__) bool ReadSeedMaterialFromOSEntropyImpl(absl::Span values) { diff --git a/absl/random/mock_distributions_test.cc b/absl/random/mock_distributions_test.cc index 622aff7f92f..93af3f92ecf 100644 --- a/absl/random/mock_distributions_test.cc +++ b/absl/random/mock_distributions_test.cc @@ -69,10 +69,11 @@ TEST(MockDistributions, Examples) { .WillOnce(Return(0.001)); EXPECT_EQ(absl::Gaussian(gen, 0.0, 1.0), 0.001); - EXPECT_NE(absl::LogUniform(gen, 0, 1000000, 2), 2040); - EXPECT_CALL(absl::MockLogUniform(), Call(gen, 0, 1000000, 2)) - .WillOnce(Return(2040)); - EXPECT_EQ(absl::LogUniform(gen, 0, 1000000, 2), 2040); + const int kHigh = (1 << 30) - 1; + EXPECT_NE(absl::LogUniform(gen, 0, kHigh, 2), kHigh); + EXPECT_CALL(absl::MockLogUniform(), Call(gen, 0, kHigh, 2)) + .WillOnce(Return(kHigh)); + EXPECT_EQ(absl::LogUniform(gen, 0, kHigh, 2), kHigh); } TEST(MockDistributions, UniformUInt128BoundariesAreAllowed) { diff --git a/absl/status/BUILD.bazel b/absl/status/BUILD.bazel index b61abeb43cf..7537797a4ca 100644 --- a/absl/status/BUILD.bazel +++ b/absl/status/BUILD.bazel @@ -17,6 +17,9 @@ # It will expand later to have utilities around `Status` like `StatusOr`, # `StatusBuilder` and macros. +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/status/internal/statusor_internal.h b/absl/status/internal/statusor_internal.h index ca7c5502c2e..b6641041eac 100644 --- a/absl/status/internal/statusor_internal.h +++ b/absl/status/internal/statusor_internal.h @@ -39,12 +39,23 @@ template struct HasConversionOperatorToStatusOr : std::false_type {}; template -void test(char (*)[sizeof(std::declval().operator absl::StatusOr())]); +void test(char (*absl_nullable)[sizeof( + std::declval().operator absl::StatusOr())]); template struct HasConversionOperatorToStatusOr(0))> : std::true_type {}; +// Detects whether `T` is equality-comparable. +template +struct IsEqualityComparable : std::false_type {}; + +template +struct IsEqualityComparable< + T, std::enable_if_t() == std::declval()), + bool>::value>> : std::true_type {}; + // Detects whether `T` is constructible or convertible from `StatusOr`. template using IsConstructibleOrConvertibleFromStatusOr = @@ -79,17 +90,34 @@ template struct IsDirectInitializationAmbiguous> : public IsConstructibleOrConvertibleFromStatusOr {}; +// Checks whether the conversion from U to T can be done without dangling +// temporaries. +// REQUIRES: T and U are references. +template +using IsReferenceConversionValid = absl::conjunction< // + std::is_reference, std::is_reference, + // The references are convertible. This checks for + // lvalue/rvalue compatibility. + std::is_convertible, + // The pointers are convertible. This checks we don't have + // a temporary. 
+ std::is_convertible*, + std::remove_reference_t*>>; + // Checks against the constraints of the direction initialization, i.e. when // `StatusOr::StatusOr(U&&)` should participate in overload resolution. template using IsDirectInitializationValid = absl::disjunction< // Short circuits if T is basically U. - std::is_same>, - absl::negation, absl::remove_cvref_t>, - std::is_same>, - std::is_same>, - IsDirectInitializationAmbiguous>>>; + std::is_same>, // + std::conditional_t< + std::is_reference_v, // + IsReferenceConversionValid, + absl::negation, absl::remove_cvref_t>, + std::is_same>, + std::is_same>, + IsDirectInitializationAmbiguous>>>>; // This trait detects whether `StatusOr::operator=(U&&)` is ambiguous, which // is equivalent to whether all the following conditions are met: @@ -129,7 +157,9 @@ using Equality = std::conditional_t>; template using IsConstructionValid = absl::conjunction< Equality>, + absl::disjunction< + std::is_reference, + type_traits_internal::IsLifetimeBoundAssignment>>, IsDirectInitializationValid, std::is_constructible, Equality>, absl::disjunction< @@ -145,8 +175,13 @@ using IsConstructionValid = absl::conjunction< template using IsAssignmentValid = absl::conjunction< Equality>, - std::is_constructible, std::is_assignable, + absl::disjunction< + std::is_reference, + type_traits_internal::IsLifetimeBoundAssignment>>, + std::conditional_t, + IsReferenceConversionValid, + absl::conjunction, + std::is_assignable>>, absl::disjunction< std::is_same>, absl::conjunction< @@ -167,6 +202,9 @@ template using IsConstructionFromStatusOrValid = absl::conjunction< absl::negation>, + // If `T` is a reference, then U must be a compatible one. + absl::disjunction>, + IsReferenceConversionValid>, Equality>, std::is_constructible, @@ -182,6 +220,16 @@ using IsStatusOrAssignmentValid = absl::conjunction< absl::negation>>>; +template +using IsValueOrValid = absl::conjunction< + // If `T` is a reference, then U must be a compatible one. + absl::disjunction>, + IsReferenceConversionValid>, + Equality, + type_traits_internal::IsLifetimeBoundAssignment>>>; + class Helper { public: // Move type-agnostic error handling to the .cc. @@ -198,6 +246,26 @@ void PlacementNew(void* absl_nonnull p, Args&&... args) { new (p) T(std::forward(args)...); } +template +class Reference { + public: + constexpr explicit Reference(T ref ABSL_ATTRIBUTE_LIFETIME_BOUND) + : payload_(std::addressof(ref)) {} + + Reference(const Reference&) = default; + Reference& operator=(const Reference&) = default; + Reference& operator=(T value) { + payload_ = std::addressof(value); + return *this; + } + + operator T() const { return static_cast(*payload_); } // NOLINT + T get() const { return *this; } + + private: + std::remove_reference_t* absl_nonnull payload_; +}; + // Helper base class to hold the data and all operations. // We move all this to a base class to allow mixing with the appropriate // TraitsBase specialization. 
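
For context on the machinery above: `Reference<T>` stores a pointer and rebinds on assignment, which is what gives `StatusOr<T&>` its reference semantics. A minimal usage sketch of those semantics, mirroring the `SupportsReferenceTypes` and `ReferenceAssignFromReference` tests added later in this diff (the `main` wrapper is illustrative, not part of the patch):

#include <cassert>

#include "absl/status/statusor.h"

int main() {
  int a = 1, b = 2;
  absl::StatusOr<int&> s = a;  // Binds to `a`.
  *s = 10;                     // Writes through the reference.
  assert(a == 10);
  s = b;                       // Rebinds to `b`; `a` is left untouched.
  *s = 20;
  assert(a == 10 && b == 20);
  return 0;
}
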
@@ -206,6 +274,14 @@ class StatusOrData { template friend class StatusOrData; + decltype(auto) MaybeMoveData() { + if constexpr (std::is_reference_v) { + return data_.get(); + } else { + return std::move(data_); + } + } + public: StatusOrData() = delete; @@ -220,7 +296,7 @@ class StatusOrData { StatusOrData(StatusOrData&& other) noexcept { if (other.ok()) { - MakeValue(std::move(other.data_)); + MakeValue(other.MaybeMoveData()); MakeStatus(); } else { MakeStatus(std::move(other.status_)); @@ -240,7 +316,7 @@ class StatusOrData { template explicit StatusOrData(StatusOrData&& other) { if (other.ok()) { - MakeValue(std::move(other.data_)); + MakeValue(other.MaybeMoveData()); MakeStatus(); } else { MakeStatus(std::move(other.status_)); @@ -253,13 +329,6 @@ class StatusOrData { MakeStatus(); } - explicit StatusOrData(const T& value) : data_(value) { - MakeStatus(); - } - explicit StatusOrData(T&& value) : data_(std::move(value)) { - MakeStatus(); - } - template ::value, int> = 0> @@ -279,7 +348,7 @@ class StatusOrData { StatusOrData& operator=(StatusOrData&& other) { if (this == &other) return *this; if (other.ok()) - Assign(std::move(other.data_)); + Assign(other.MaybeMoveData()); else AssignStatus(std::move(other.status_)); return *this; @@ -288,7 +357,9 @@ class StatusOrData { ~StatusOrData() { if (ok()) { status_.~Status(); - data_.~T(); + if constexpr (!std::is_trivially_destructible_v) { + data_.~T(); + } } else { status_.~Status(); } @@ -329,11 +400,13 @@ class StatusOrData { // When T is const, we need some non-const object we can cast to void* for // the placement new. dummy_ is that object. Dummy dummy_; - T data_; + std::conditional_t, Reference, T> data_; }; void Clear() { - if (ok()) data_.~T(); + if constexpr (!std::is_trivially_destructible_v) { + if (ok()) data_.~T(); + } } void EnsureOk() const { @@ -348,7 +421,8 @@ class StatusOrData { // argument. template void MakeValue(Arg&&... arg) { - internal_statusor::PlacementNew(&dummy_, std::forward(arg)...); + internal_statusor::PlacementNew(&dummy_, + std::forward(arg)...); } // Construct the status (ie. 
status_) through placement new with the passed @@ -358,6 +432,94 @@ class StatusOrData { internal_statusor::PlacementNew(&status_, std::forward(args)...); } + + template + T ValueOrImpl(U&& default_value) const& { + if (ok()) { + return data_; + } + return std::forward(default_value); + } + + template + T ValueOrImpl(U&& default_value) && { + if (ok()) { + return std::move(data_); + } + return std::forward(default_value); + } +}; + +[[noreturn]] void ThrowBadStatusOrAccess(absl::Status status); + +template +struct OperatorBase { + auto& self() const { return static_cast&>(*this); } + auto& self() { return static_cast&>(*this); } + + const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND { + self().EnsureOk(); + return self().data_; + } + T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND { + self().EnsureOk(); + return self().data_; + } + const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { + self().EnsureOk(); + return std::move(self().data_); + } + T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND { + self().EnsureOk(); + return std::move(self().data_); + } + + const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!self().ok()) internal_statusor::ThrowBadStatusOrAccess(self().status_); + return self().data_; + } + T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!self().ok()) internal_statusor::ThrowBadStatusOrAccess(self().status_); + return self().data_; + } + const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!self().ok()) { + internal_statusor::ThrowBadStatusOrAccess(std::move(self().status_)); + } + return std::move(self().data_); + } + T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND { + if (!self().ok()) { + internal_statusor::ThrowBadStatusOrAccess(std::move(self().status_)); + } + return std::move(self().data_); + } + + const T* absl_nonnull operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return std::addressof(**this); + } + T* absl_nonnull operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND { + return std::addressof(**this); + } +}; + +template +struct OperatorBase { + auto& self() const { return static_cast&>(*this); } + + T& operator*() const { + self().EnsureOk(); + return self().data_; + } + + T& value() const { + if (!self().ok()) internal_statusor::ThrowBadStatusOrAccess(self().status_); + return self().data_; + } + + T* absl_nonnull operator->() const { + return std::addressof(**this); + } }; // Helper base classes to allow implicitly deleted constructors and assignment @@ -400,8 +562,9 @@ struct MoveCtorBase { MoveCtorBase& operator=(MoveCtorBase&&) = default; }; -template ::value&& - std::is_copy_assignable::value> +template ::value && + std::is_copy_assignable::value) || + std::is_reference_v> struct CopyAssignBase { CopyAssignBase() = default; CopyAssignBase(const CopyAssignBase&) = default; @@ -419,8 +582,9 @@ struct CopyAssignBase { CopyAssignBase& operator=(CopyAssignBase&&) = default; }; -template ::value&& - std::is_move_assignable::value> +template ::value && + std::is_move_assignable::value) || + std::is_reference_v> struct MoveAssignBase { MoveAssignBase() = default; MoveAssignBase(const MoveAssignBase&) = default; @@ -438,8 +602,6 @@ struct MoveAssignBase { MoveAssignBase& operator=(MoveAssignBase&&) = delete; }; -[[noreturn]] void ThrowBadStatusOrAccess(absl::Status status); - // Used to introduce jitter into the output of printing functions for // `StatusOr` (i.e. `AbslStringify` and `operator<<`). 
class StringifyRandom { diff --git a/absl/status/status_matchers_test.cc b/absl/status/status_matchers_test.cc index b8ccaa4cb76..51a5f27670c 100644 --- a/absl/status/status_matchers_test.cc +++ b/absl/status/status_matchers_test.cc @@ -18,6 +18,7 @@ #include "absl/status/status_matchers.h" #include +#include #include "gmock/gmock.h" #include "gtest/gtest-spi.h" @@ -31,9 +32,12 @@ namespace { using ::absl_testing::IsOk; using ::absl_testing::IsOkAndHolds; using ::absl_testing::StatusIs; +using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Gt; using ::testing::MatchesRegex; +using ::testing::Not; +using ::testing::Ref; TEST(StatusMatcherTest, StatusIsOk) { EXPECT_THAT(absl::OkStatus(), IsOk()); } @@ -158,4 +162,23 @@ TEST(StatusMatcherTest, StatusIsFailure) { "ungueltig"); } +TEST(StatusMatcherTest, ReferencesWork) { + int i = 17; + int j = 19; + EXPECT_THAT(absl::StatusOr(i), IsOkAndHolds(17)); + EXPECT_THAT(absl::StatusOr(i), Not(IsOkAndHolds(19))); + EXPECT_THAT(absl::StatusOr(i), IsOkAndHolds(17)); + + // Reference testing works as expected. + EXPECT_THAT(absl::StatusOr(i), IsOkAndHolds(Ref(i))); + EXPECT_THAT(absl::StatusOr(i), Not(IsOkAndHolds(Ref(j)))); + + // Try a more complex one. + std::vector vec = {"A", "B", "C"}; + EXPECT_THAT(absl::StatusOr&>(vec), + IsOkAndHolds(ElementsAre("A", "B", "C"))); + EXPECT_THAT(absl::StatusOr&>(vec), + Not(IsOkAndHolds(ElementsAre("A", "X", "C")))); +} + } // namespace diff --git a/absl/status/statusor.h b/absl/status/statusor.h index 5257af0eb24..56309af32e0 100644 --- a/absl/status/statusor.h +++ b/absl/status/statusor.h @@ -189,14 +189,22 @@ class ABSL_MUST_USE_RESULT StatusOr; // return Foo(arg); // } template -class StatusOr : private internal_statusor::StatusOrData, +class StatusOr : private internal_statusor::OperatorBase, + private internal_statusor::StatusOrData, private internal_statusor::CopyCtorBase, private internal_statusor::MoveCtorBase, private internal_statusor::CopyAssignBase, private internal_statusor::MoveAssignBase { +#ifndef SWIG + static_assert(!std::is_rvalue_reference_v, + "rvalue references are not yet supported."); +#endif // SWIG + template friend class StatusOr; + friend internal_statusor::OperatorBase; + typedef internal_statusor::StatusOrData Base; public: @@ -397,7 +405,7 @@ class StatusOr : private internal_statusor::StatusOrData, typename std::enable_if< internal_statusor::IsAssignmentValid::value, int>::type = 0> - StatusOr& operator=(U&& v ABSL_ATTRIBUTE_LIFETIME_BOUND) { + StatusOr& operator=(U&& v ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) { this->Assign(std::forward(v)); return *this; } @@ -493,10 +501,7 @@ class StatusOr : private internal_statusor::StatusOrData, // // The `std::move` on statusor instead of on the whole expression enables // warnings about possible uses of the statusor object after the move. - const T& value() const& ABSL_ATTRIBUTE_LIFETIME_BOUND; - T& value() & ABSL_ATTRIBUTE_LIFETIME_BOUND; - const T&& value() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND; - T&& value() && ABSL_ATTRIBUTE_LIFETIME_BOUND; + using StatusOr::OperatorBase::value; // StatusOr:: operator*() // @@ -508,10 +513,7 @@ class StatusOr : private internal_statusor::StatusOrData, // `absl::StatusOr`. Alternatively, see the `value()` member function for a // similar API that guarantees crashing or throwing an exception if there is // no current value. 
- const T& operator*() const& ABSL_ATTRIBUTE_LIFETIME_BOUND; - T& operator*() & ABSL_ATTRIBUTE_LIFETIME_BOUND; - const T&& operator*() const&& ABSL_ATTRIBUTE_LIFETIME_BOUND; - T&& operator*() && ABSL_ATTRIBUTE_LIFETIME_BOUND; + using StatusOr::OperatorBase::operator*; // StatusOr::operator->() // @@ -520,8 +522,7 @@ class StatusOr : private internal_statusor::StatusOrData, // REQUIRES: `this->ok() == true`, otherwise the behavior is undefined. // // Use `this->ok()` to verify that there is a current value. - const T* operator->() const ABSL_ATTRIBUTE_LIFETIME_BOUND; - T* operator->() ABSL_ATTRIBUTE_LIFETIME_BOUND; + using StatusOr::OperatorBase::operator->; // StatusOr::value_or() // @@ -536,10 +537,34 @@ class StatusOr : private internal_statusor::StatusOrData, // // Unlike with `value`, calling `std::move()` on the result of `value_or` will // still trigger a copy. - template - T value_or(U&& default_value) const&; - template - T value_or(U&& default_value) &&; + template < + typename U, + std::enable_if_t::value, + int> = 0> + T value_or(U&& default_value) const& { + return this->ValueOrImpl(std::forward(default_value)); + } + template < + typename U, + std::enable_if_t::value, + int> = 0> + T value_or(U&& default_value) && { + return std::move(*this).ValueOrImpl(std::forward(default_value)); + } + template < + typename U, + std::enable_if_t::value, + int> = 0> + T value_or(U&& default_value ABSL_ATTRIBUTE_LIFETIME_BOUND) const& { + return this->ValueOrImpl(std::forward(default_value)); + } + template < + typename U, + std::enable_if_t::value, + int> = 0> + T value_or(U&& default_value ABSL_ATTRIBUTE_LIFETIME_BOUND) && { + return std::move(*this).ValueOrImpl(std::forward(default_value)); + } // StatusOr::IgnoreError() // @@ -607,7 +632,9 @@ class StatusOr : private internal_statusor::StatusOrData, // operator==() // // This operator checks the equality of two `absl::StatusOr` objects. -template +template ::value, + int> = 0> bool operator==(const StatusOr& lhs, const StatusOr& rhs) { if (lhs.ok() && rhs.ok()) return *lhs == *rhs; return lhs.status() == rhs.status(); @@ -616,7 +643,9 @@ bool operator==(const StatusOr& lhs, const StatusOr& rhs) { // operator!=() // // This operator checks the inequality of two `absl::StatusOr` objects. -template +template ::value, + int> = 0> bool operator!=(const StatusOr& lhs, const StatusOr& rhs) { return !(lhs == rhs); } @@ -703,88 +732,6 @@ Status StatusOr::status() && { return ok() ? 
OkStatus() : std::move(this->status_);
}

-template <typename T>
-const T& StatusOr<T>::value() const& {
-  if (!this->ok()) internal_statusor::ThrowBadStatusOrAccess(this->status_);
-  return this->data_;
-}
-
-template <typename T>
-T& StatusOr<T>::value() & {
-  if (!this->ok()) internal_statusor::ThrowBadStatusOrAccess(this->status_);
-  return this->data_;
-}
-
-template <typename T>
-const T&& StatusOr<T>::value() const&& {
-  if (!this->ok()) {
-    internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_));
-  }
-  return std::move(this->data_);
-}
-
-template <typename T>
-T&& StatusOr<T>::value() && {
-  if (!this->ok()) {
-    internal_statusor::ThrowBadStatusOrAccess(std::move(this->status_));
-  }
-  return std::move(this->data_);
-}
-
-template <typename T>
-const T& StatusOr<T>::operator*() const& {
-  this->EnsureOk();
-  return this->data_;
-}
-
-template <typename T>
-T& StatusOr<T>::operator*() & {
-  this->EnsureOk();
-  return this->data_;
-}
-
-template <typename T>
-const T&& StatusOr<T>::operator*() const&& {
-  this->EnsureOk();
-  return std::move(this->data_);
-}
-
-template <typename T>
-T&& StatusOr<T>::operator*() && {
-  this->EnsureOk();
-  return std::move(this->data_);
-}
-
-template <typename T>
-const T* absl_nonnull StatusOr<T>::operator->() const {
-  this->EnsureOk();
-  return &this->data_;
-}
-
-template <typename T>
-T* absl_nonnull StatusOr<T>::operator->() {
-  this->EnsureOk();
-  return &this->data_;
-}
-
-template <typename T>
-template <typename U>
-T StatusOr<T>::value_or(U&& default_value) const& {
-  if (ok()) {
-    return this->data_;
-  }
-  return std::forward<U>(default_value);
-}
-
-template <typename T>
-template <typename U>
-T StatusOr<T>::value_or(U&& default_value) && {
-  if (ok()) {
-    return std::move(this->data_);
-  }
-  return std::forward<U>(default_value);
-}
-
 template <typename T>
 void StatusOr<T>::IgnoreError() const {
   // no-op
diff --git a/absl/status/statusor_test.cc b/absl/status/statusor_test.cc
index 17a33842cc9..26d4235808c 100644
--- a/absl/status/statusor_test.cc
+++ b/absl/status/statusor_test.cc
@@ -16,6 +16,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1799,4 +1800,325 @@ TEST(StatusOr, ErrorPrinting) {
   EXPECT_THAT(absl::StrCat(print_me), error_matcher);
 }
 
+TEST(StatusOr, SupportsReferenceTypes) {
+  int i = 1;
+  absl::StatusOr<int&> s = i;
+  EXPECT_EQ(&i, &*s);
+  *s = 10;
+  EXPECT_EQ(i, 10);
+}
+
+TEST(StatusOr, ReferenceFromStatus) {
+  int i = 10;
+  absl::StatusOr<int&> s = i;
+  s = absl::InternalError("foo");
+  EXPECT_EQ(s.status().message(), "foo");
+
+  absl::StatusOr<int&> s2 = absl::InternalError("foo2");
+  EXPECT_EQ(s2.status().message(), "foo2");
+}
+
+TEST(StatusOr, SupportReferenceValueConstructor) {
+  int i = 1;
+  absl::StatusOr<int&> s = i;
+  absl::StatusOr<const int&> cs = i;
+  absl::StatusOr<const int&> cs2 = std::move(i);  // `T&&` to `const T&` is ok.
+
+  EXPECT_EQ(&i, &*s);
+  EXPECT_EQ(&i, &*cs);
+
+  Derived d;
+  absl::StatusOr<Base1&> b = d;
+  EXPECT_EQ(&d, &*b);
+
+  // We disallow constructions that cause temporaries.
+  EXPECT_FALSE((std::is_constructible_v<absl::StatusOr<int&>, double>));
+  EXPECT_FALSE(
+      (std::is_constructible_v<absl::StatusOr<const int&>, const double&>));
+  EXPECT_FALSE(
+      (std::is_constructible_v<absl::StatusOr<const absl::string_view&>,
+                               std::string>));
+
+  // We disallow constructions with wrong reference.
+  EXPECT_FALSE((std::is_constructible_v<absl::StatusOr<int&>, int&&>));
+  EXPECT_FALSE((std::is_constructible_v<absl::StatusOr<int&>, const int&>));
+}
+
+TEST(StatusOr, SupportReferenceConvertingConstructor) {
+  int i = 1;
+  absl::StatusOr<int&> s = i;
+  absl::StatusOr<const int&> cs = s;
+
+  EXPECT_EQ(&i, &*s);
+  EXPECT_EQ(&i, &*cs);
+
+  // The other direction is not allowed.
+  EXPECT_FALSE((std::is_constructible_v<absl::StatusOr<int&>,
+                                        absl::StatusOr<const int&>>));
+
+  Derived d;
+  absl::StatusOr<Base1&> b = absl::StatusOr<Derived&>(d);
+  EXPECT_EQ(&d, &*b);
+
+  // The other direction is not allowed.
+ EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); + + // We disallow conversions that cause temporaries. + EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); + EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); + EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); + EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); + EXPECT_FALSE( + (std::is_constructible_v, + absl::StatusOr>)); + + // We disallow constructions with wrong reference. + EXPECT_FALSE((std::is_constructible_v, + absl::StatusOr>)); +} + +TEST(StatusOr, SupportReferenceValueAssignment) { + int i = 1; + absl::StatusOr s = i; + absl::StatusOr cs; + cs = i; + absl::StatusOr cs2; + cs2 = std::move(i); // `T&&` to `const T&` is ok. + + EXPECT_EQ(&i, &*s); + EXPECT_EQ(&i, &*cs); + + Derived d; + absl::StatusOr b; + b = d; + EXPECT_EQ(&d, &*b); + + // We disallow constructions that cause temporaries. + EXPECT_FALSE((std::is_assignable_v, double>)); + EXPECT_FALSE( + (std::is_assignable_v, const double&>)); + EXPECT_FALSE((std::is_assignable_v, + std::string>)); + + // We disallow constructions with wrong reference. + EXPECT_FALSE((std::is_assignable_v, int&&>)); + EXPECT_FALSE((std::is_assignable_v, const int&>)); +} + +TEST(StatusOr, SupportReferenceConvertingAssignment) { + int i = 1; + absl::StatusOr s; + s = i; + absl::StatusOr cs; + cs = s; + + EXPECT_EQ(&i, &*s); + EXPECT_EQ(&i, &*cs); + + // The other direction is not allowed. + EXPECT_FALSE( + (std::is_assignable_v, absl::StatusOr>)); + + Derived d; + absl::StatusOr b; + b = absl::StatusOr(d); + EXPECT_EQ(&d, &*b); + + // The other direction is not allowed. + EXPECT_FALSE((std::is_assignable_v, + absl::StatusOr>)); + + // We disallow conversions that cause temporaries. + EXPECT_FALSE((std::is_assignable_v, + absl::StatusOr>)); + EXPECT_FALSE((std::is_assignable_v, + absl::StatusOr>)); + EXPECT_FALSE((std::is_assignable_v, + absl::StatusOr>)); + + // We disallow constructions with wrong reference. + EXPECT_FALSE( + (std::is_assignable_v, absl::StatusOr>)); +} + +TEST(StatusOr, SupportReferenceToNonReferenceConversions) { + int i = 17; + absl::StatusOr si = i; + absl::StatusOr sf = si; + EXPECT_THAT(sf, IsOkAndHolds(17.)); + + i = 20; + sf = si; + EXPECT_THAT(sf, IsOkAndHolds(20.)); + + EXPECT_THAT(absl::StatusOr(absl::StatusOr(i)), + IsOkAndHolds(20)); + EXPECT_THAT(absl::StatusOr(absl::StatusOr(i)), + IsOkAndHolds(20)); + + std::string str = "str"; + absl::StatusOr sos = absl::StatusOr(str); + EXPECT_THAT(sos, IsOkAndHolds("str")); + str = "str2"; + EXPECT_THAT(sos, IsOkAndHolds("str")); + sos = absl::StatusOr(str); + EXPECT_THAT(sos, IsOkAndHolds("str2")); + + absl::StatusOr sosv = absl::StatusOr(str); + EXPECT_THAT(sosv, IsOkAndHolds("str2")); + str = "str3"; + sosv = absl::StatusOr(str); + EXPECT_THAT(sosv, IsOkAndHolds("str3")); + + absl::string_view view = "view"; + // This way it is constructible, but not convertible because + // string_view->string is explicit + EXPECT_THAT( + absl::StatusOr(absl::StatusOr(view)), + IsOkAndHolds("view")); +#if defined(ABSL_USES_STD_STRING_VIEW) + // The assignment doesn't work with normal absl::string_view because + // std::string doesn't know about it. 
+ sos = absl::StatusOr(view); + EXPECT_THAT(sos, IsOkAndHolds("view")); +#endif + + EXPECT_FALSE((std::is_convertible_v, + absl::StatusOr>)); +} + +TEST(StatusOr, ReferenceOperatorStarAndArrow) { + std::string str = "Foo"; + absl::StatusOr s = str; + s->assign("Bar"); + EXPECT_EQ(str, "Bar"); + + *s = "Baz"; + EXPECT_EQ(str, "Baz"); + + const absl::StatusOr cs = str; + // Even if the StatusOr is const, the reference it gives is non-const so we + // can still assign. + *cs = "Finally"; + EXPECT_EQ(str, "Finally"); + + cs->clear(); + EXPECT_EQ(cs.value(), str); + EXPECT_EQ(str, ""); +} + +TEST(StatusOr, ReferenceValueOr) { + int i = 17; + absl::StatusOr si = i; + + int other = 20; + EXPECT_EQ(&i, &si.value_or(other)); + + si = absl::UnknownError(""); + EXPECT_EQ(&other, &si.value_or(other)); + + absl::StatusOr csi = i; + EXPECT_EQ(&i, &csi.value_or(1)); + + const auto value_or_call = [](auto&& sor, auto&& v) + -> decltype(std::forward(sor).value_or( + std::forward(v))) {}; + using Probe = decltype(value_or_call); + // Just to verify that Probe works as expected in the good cases. + EXPECT_TRUE((std::is_invocable_v, int&&>)); + // Causes temporary conversion. + EXPECT_FALSE( + (std::is_invocable_v, double&&>)); + // Const invalid. + EXPECT_FALSE((std::is_invocable_v, const int&>)); +} + +TEST(StatusOr, ReferenceAssignmentFromStatusOr) { + std::vector v = {1, 2, 3}; + absl::StatusOr si = v[0]; + absl::StatusOr si2 = v[1]; + + EXPECT_THAT(v, ElementsAre(1, 2, 3)); + EXPECT_THAT(si, IsOkAndHolds(1)); + EXPECT_THAT(si2, IsOkAndHolds(2)); + + // This rebinds the reference. + si = si2; + EXPECT_THAT(v, ElementsAre(1, 2, 3)); + EXPECT_THAT(si, IsOkAndHolds(2)); + EXPECT_THAT(si2, IsOkAndHolds(2)); + EXPECT_EQ(&*si, &*si2); +} + +TEST(StatusOr, ReferenceAssignFromReference) { + std::vector v = {1, 2, 3}; + absl::StatusOr si = v[0]; + + EXPECT_THAT(v, ElementsAre(1, 2, 3)); + EXPECT_THAT(si, IsOkAndHolds(1)); + + // This rebinds the reference. + si = v[2]; + EXPECT_THAT(v, ElementsAre(1, 2, 3)); + EXPECT_THAT(si, IsOkAndHolds(3)); + EXPECT_EQ(&*si, &v[2]); +} + +TEST(StatusOr, ReferenceIsNotLifetimeBoundForStarValue) { + int i = 0; + + // op*/value should not be LIFETIME_BOUND because the ref is not limited to + // the lifetime of the StatusOr. + int& r = *absl::StatusOr(i); + EXPECT_EQ(&r, &i); + int& r2 = absl::StatusOr(i).value(); + EXPECT_EQ(&r2, &i); + + struct S { + int i; + }; + S s; + // op-> should also not be LIFETIME_BOUND for refs. + int& r3 = absl::StatusOr(s)->i; + EXPECT_EQ(&r3, &s.i); +} + +template +void TestReferenceDeref() { + static_assert(std::is_same_v())>); + static_assert(std::is_same_v().value())>); +} + +TEST(StatusOr, ReferenceTypeIsMaintainedOnDeref) { + TestReferenceDeref&>(); + TestReferenceDeref&&>(); + TestReferenceDeref&>(); + TestReferenceDeref&&>(); + + TestReferenceDeref&>(); + TestReferenceDeref&&>(); + TestReferenceDeref&>(); + TestReferenceDeref&&>(); + + struct Struct { + int value; + }; + EXPECT_TRUE( + (std::is_same_v< + int&, decltype((std::declval>()->value))>)); + EXPECT_TRUE( + (std::is_same_v< + int&, + decltype((std::declval>()->value))>)); + EXPECT_TRUE( + (std::is_same_v< + const int&, + decltype((std::declval>()->value))>)); +} + } // namespace diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel index 49562f72c61..2e73f80fec0 100644 --- a/absl/strings/BUILD.bazel +++ b/absl/strings/BUILD.bazel @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
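
A note on the `value_or` overloads introduced above: for `StatusOr<T&>`, `value_or` returns `T` itself, i.e. a reference, so neither the held value nor the default is copied. A sketch of the resulting behavior, matching the `ReferenceValueOr` test above (illustrative only, not part of the patch):

#include <cassert>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

int main() {
  int value = 17;
  int fallback = 20;
  absl::StatusOr<int&> s = value;
  // ok() case: value_or returns the bound reference, not a copy.
  assert(&s.value_or(fallback) == &value);
  s = absl::UnknownError("");
  // Error case: value_or returns the reference given as the default.
  assert(&s.value_or(fallback) == &fallback);
  return 0;
}
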
+load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -329,6 +332,7 @@ cc_test( visibility = ["//visibility:private"], deps = [ ":internal", + ":string_view", "//absl/base:core_headers", "@googletest//:gtest", "@googletest//:gtest_main", @@ -1316,6 +1320,7 @@ cc_library( linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], deps = [ + ":internal", ":strings", "//absl/base:config", "//absl/base:core_headers", @@ -1460,6 +1465,7 @@ cc_library( testonly = True, srcs = ["internal/pow10_helper.cc"], hdrs = ["internal/pow10_helper.h"], + copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], deps = ["//absl/base:config"], diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt index ee738605520..32ad263c1ef 100644 --- a/absl/strings/CMakeLists.txt +++ b/absl/strings/CMakeLists.txt @@ -243,6 +243,7 @@ absl_cc_test( COPTS ${ABSL_TEST_COPTS} DEPS + absl::string_view absl::strings_internal absl::base absl::core_headers @@ -518,6 +519,7 @@ absl_cc_library( absl::utility absl::int128 absl::span + absl::strings_internal ) absl_cc_test( @@ -661,7 +663,7 @@ absl_cc_library( SRCS "internal/pow10_helper.cc" COPTS - ${ABSL_TEST_COPTS} + ${ABSL_DEFAULT_COPTS} DEPS absl::config TESTONLY diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc index e53f914dbbd..a7e09949894 100644 --- a/absl/strings/cord.cc +++ b/absl/strings/cord.cc @@ -161,8 +161,10 @@ static CordRep* absl_nonnull CordRepFromString(std::string&& src) { // -------------------------------------------------------------------- // Cord::InlineRep functions -inline void Cord::InlineRep::set_data(const char* absl_nonnull data, size_t n) { +inline void Cord::InlineRep::set_data(const char* absl_nullable data, + size_t n) { static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15"); + assert(data != nullptr || n == 0); data_.set_inline_data(data, n); } diff --git a/absl/strings/cord.h b/absl/strings/cord.h index 7afa419a685..fa6eb8ad21e 100644 --- a/absl/strings/cord.h +++ b/absl/strings/cord.h @@ -755,7 +755,7 @@ class Cord { // NOTE: This routine is reasonably efficient. It is roughly // logarithmic based on the number of chunks that make up the cord. Still, // if you need to iterate over the contents of a cord, you should - // use a CharIterator/ChunkIterator rather than call operator[] or Get() + // use a CharIterator/ChunkIterator rather than call operator[] // repeatedly in a loop. 
char operator[](size_t i) const; @@ -921,7 +921,7 @@ class Cord { // Returns nullptr if holding pointer const char* absl_nullable data() const; // Discards pointer, if any - void set_data(const char* absl_nonnull data, size_t n); + void set_data(const char* absl_nullable data, size_t n); char* absl_nonnull set_data(size_t n); // Write data to the result // Returns nullptr if holding bytes absl::cord_internal::CordRep* absl_nullable tree() const; @@ -1098,8 +1098,7 @@ class Cord { hash_state = combiner.add_buffer(std::move(hash_state), chunk.data(), chunk.size()); }); - return H::combine(combiner.finalize(std::move(hash_state)), - hash_internal::WeaklyMixedInteger{size()}); + return combiner.finalize(std::move(hash_state)); } friend class CrcCord; diff --git a/absl/strings/cordz_test_helpers.h b/absl/strings/cordz_test_helpers.h index 98117099376..66232db7caa 100644 --- a/absl/strings/cordz_test_helpers.h +++ b/absl/strings/cordz_test_helpers.h @@ -34,16 +34,15 @@ namespace absl { ABSL_NAMESPACE_BEGIN // Returns the CordzInfo for the cord, or nullptr if the cord is not sampled. -inline const cord_internal::CordzInfo* absl_nullable GetCordzInfoForTesting( +inline const cord_internal::CordzInfo* GetCordzInfoForTesting( const Cord& cord) { if (!cord.contents_.is_tree()) return nullptr; return cord.contents_.cordz_info(); } // Returns true if the provided cordz_info is in the list of sampled cords. -inline bool CordzInfoIsListed( - const cord_internal::CordzInfo* absl_nonnull cordz_info, - cord_internal::CordzSampleToken token = {}) { +inline bool CordzInfoIsListed(const cord_internal::CordzInfo* cordz_info, + cord_internal::CordzSampleToken token = {}) { for (const cord_internal::CordzInfo& info : token) { if (cordz_info == &info) return true; } @@ -121,7 +120,7 @@ class CordzSamplingIntervalHelper { // Wrapper struct managing a small CordRep `rep` struct TestCordRep { - cord_internal::CordRepFlat* absl_nonnull rep; + cord_internal::CordRepFlat* rep; TestCordRep() { rep = cord_internal::CordRepFlat::New(100); diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h index b55b412c091..5045811fd02 100644 --- a/absl/strings/internal/cord_internal.h +++ b/absl/strings/internal/cord_internal.h @@ -635,7 +635,7 @@ class InlineData { poison(); } - void CopyInlineToString(std::string* absl_nonnull dst) const { + void CopyInlineToString(std::string* dst) const { assert(!is_tree()); // As Cord can store only 15 bytes it is smaller than std::string's // small string optimization buffer size. Therefore we will always trigger @@ -915,8 +915,6 @@ inline CordRep* CordRep::Ref(CordRep* rep) { inline void CordRep::Unref(CordRep* rep) { assert(rep != nullptr); - // Expect refcount to be 0. Avoiding the cost of an atomic decrement should - // typically outweigh the cost of an extra branch checking for ref == 1. 
if (ABSL_PREDICT_FALSE(!rep->refcount.DecrementExpectHighRefcount())) { Destroy(rep); } diff --git a/absl/strings/internal/cordz_handle.cc b/absl/strings/internal/cordz_handle.cc index 53d5f529a7d..a4f47f0a133 100644 --- a/absl/strings/internal/cordz_handle.cc +++ b/absl/strings/internal/cordz_handle.cc @@ -54,7 +54,7 @@ static Queue& GlobalQueue() { CordzHandle::CordzHandle(bool is_snapshot) : is_snapshot_(is_snapshot) { Queue& global_queue = GlobalQueue(); if (is_snapshot) { - MutexLock lock(&global_queue.mutex); + MutexLock lock(global_queue.mutex); CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire); if (dq_tail != nullptr) { dq_prev_ = dq_tail; @@ -69,7 +69,7 @@ CordzHandle::~CordzHandle() { if (is_snapshot_) { std::vector to_delete; { - MutexLock lock(&global_queue.mutex); + MutexLock lock(global_queue.mutex); CordzHandle* next = dq_next_; if (dq_prev_ == nullptr) { // We were head of the queue, delete every CordzHandle until we reach @@ -103,7 +103,7 @@ void CordzHandle::Delete(CordzHandle* handle) { if (handle) { Queue& queue = GlobalQueue(); if (!handle->SafeToDelete()) { - MutexLock lock(&queue.mutex); + MutexLock lock(queue.mutex); CordzHandle* dq_tail = queue.dq_tail.load(std::memory_order_acquire); if (dq_tail != nullptr) { handle->dq_prev_ = dq_tail; @@ -119,7 +119,7 @@ void CordzHandle::Delete(CordzHandle* handle) { std::vector CordzHandle::DiagnosticsGetDeleteQueue() { std::vector handles; Queue& global_queue = GlobalQueue(); - MutexLock lock(&global_queue.mutex); + MutexLock lock(global_queue.mutex); CordzHandle* dq_tail = global_queue.dq_tail.load(std::memory_order_acquire); for (const CordzHandle* p = dq_tail; p; p = p->dq_prev_) { handles.push_back(p); @@ -134,7 +134,7 @@ bool CordzHandle::DiagnosticsHandleIsSafeToInspect( if (handle->is_snapshot_) return false; bool snapshot_found = false; Queue& global_queue = GlobalQueue(); - MutexLock lock(&global_queue.mutex); + MutexLock lock(global_queue.mutex); for (const CordzHandle* p = global_queue.dq_tail; p; p = p->dq_prev_) { if (p == handle) return !snapshot_found; if (p == this) snapshot_found = true; @@ -151,7 +151,7 @@ CordzHandle::DiagnosticsGetSafeToInspectDeletedHandles() { } Queue& global_queue = GlobalQueue(); - MutexLock lock(&global_queue.mutex); + MutexLock lock(global_queue.mutex); for (const CordzHandle* p = dq_next_; p != nullptr; p = p->dq_next_) { if (!p->is_snapshot()) { handles.push_back(p); diff --git a/absl/strings/internal/cordz_info.cc b/absl/strings/internal/cordz_info.cc index 4baaecdcf69..a916c0c67a7 100644 --- a/absl/strings/internal/cordz_info.cc +++ b/absl/strings/internal/cordz_info.cc @@ -327,7 +327,7 @@ CordzInfo::~CordzInfo() { } void CordzInfo::Track() { - SpinLockHolder l(&list_->mutex); + SpinLockHolder l(list_->mutex); CordzInfo* const head = list_->head.load(std::memory_order_acquire); if (head != nullptr) { @@ -340,7 +340,7 @@ void CordzInfo::Track() { void CordzInfo::Untrack() { ODRCheck(); { - SpinLockHolder l(&list_->mutex); + SpinLockHolder l(list_->mutex); CordzInfo* const head = list_->head.load(std::memory_order_acquire); CordzInfo* const next = ci_next_.load(std::memory_order_acquire); @@ -370,7 +370,7 @@ void CordzInfo::Untrack() { // We are likely part of a snapshot, extend the life of the CordRep { - absl::MutexLock lock(&mutex_); + absl::MutexLock lock(mutex_); if (rep_) CordRep::Ref(rep_); } CordzHandle::Delete(this); @@ -378,14 +378,14 @@ void CordzInfo::Untrack() { void CordzInfo::Lock(MethodIdentifier method) ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) 
{ - mutex_.Lock(); + mutex_.lock(); update_tracker_.LossyAdd(method); assert(rep_); } void CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) { bool tracked = rep_ != nullptr; - mutex_.Unlock(); + mutex_.unlock(); if (!tracked) { Untrack(); } diff --git a/absl/strings/internal/cordz_info.h b/absl/strings/internal/cordz_info.h index 2dc9d16def0..578aa593553 100644 --- a/absl/strings/internal/cordz_info.h +++ b/absl/strings/internal/cordz_info.h @@ -191,9 +191,7 @@ class ABSL_LOCKABLE CordzInfo : public CordzHandle { // Global cordz info list. CordzInfo stores a pointer to the global list // instance to harden against ODR violations. struct List { - constexpr explicit List(absl::ConstInitType) - : mutex(absl::kConstInit, - absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) {} + constexpr explicit List(absl::ConstInitType) {} SpinLock mutex; std::atomic head ABSL_GUARDED_BY(mutex){nullptr}; @@ -292,7 +290,7 @@ inline void CordzInfo::SetCordRep(CordRep* rep) { inline void CordzInfo::UnsafeSetCordRep(CordRep* rep) { rep_ = rep; } inline CordRep* CordzInfo::RefCordRep() const ABSL_LOCKS_EXCLUDED(mutex_) { - MutexLock lock(&mutex_); + MutexLock lock(mutex_); return rep_ ? CordRep::Ref(rep_) : nullptr; } diff --git a/absl/strings/internal/str_format/arg.cc b/absl/strings/internal/str_format/arg.cc index eeb2108154f..01e4e42d9d0 100644 --- a/absl/strings/internal/str_format/arg.cc +++ b/absl/strings/internal/str_format/arg.cc @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "absl/base/config.h" @@ -34,13 +35,10 @@ #include "absl/numeric/int128.h" #include "absl/strings/internal/str_format/extension.h" #include "absl/strings/internal/str_format/float_conversion.h" +#include "absl/strings/internal/utf8.h" #include "absl/strings/numbers.h" #include "absl/strings/string_view.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace str_format_internal { @@ -311,68 +309,16 @@ inline bool ConvertStringArg(string_view v, const FormatConversionSpecImpl conv, conv.has_left_flag()); } -struct ShiftState { - bool saw_high_surrogate = false; - uint8_t bits = 0; -}; - -// Converts `v` from UTF-16 or UTF-32 to UTF-8 and writes to `buf`. `buf` is -// assumed to have enough space for the output. `s` is used to carry state -// between successive calls with a UTF-16 surrogate pair. Returns the number of -// chars written, or `static_cast(-1)` on failure. -// -// This is basically std::wcrtomb(), but always outputting UTF-8 instead of -// respecting the current locale. 
-inline size_t WideToUtf8(wchar_t wc, char *buf, ShiftState &s) { - const auto v = static_cast(wc); - if (v < 0x80) { - *buf = static_cast(v); - return 1; - } else if (v < 0x800) { - *buf++ = static_cast(0xc0 | (v >> 6)); - *buf = static_cast(0x80 | (v & 0x3f)); - return 2; - } else if (v < 0xd800 || (v - 0xe000) < 0x2000) { - *buf++ = static_cast(0xe0 | (v >> 12)); - *buf++ = static_cast(0x80 | ((v >> 6) & 0x3f)); - *buf = static_cast(0x80 | (v & 0x3f)); - return 3; - } else if ((v - 0x10000) < 0x100000) { - *buf++ = static_cast(0xf0 | (v >> 18)); - *buf++ = static_cast(0x80 | ((v >> 12) & 0x3f)); - *buf++ = static_cast(0x80 | ((v >> 6) & 0x3f)); - *buf = static_cast(0x80 | (v & 0x3f)); - return 4; - } else if (v < 0xdc00) { - s.saw_high_surrogate = true; - s.bits = static_cast(v & 0x3); - const uint8_t high_bits = ((v >> 6) & 0xf) + 1; - *buf++ = static_cast(0xf0 | (high_bits >> 2)); - *buf = - static_cast(0x80 | static_cast((high_bits & 0x3) << 4) | - static_cast((v >> 2) & 0xf)); - return 2; - } else if (v < 0xe000 && s.saw_high_surrogate) { - *buf++ = static_cast(0x80 | static_cast(s.bits << 4) | - static_cast((v >> 6) & 0xf)); - *buf = static_cast(0x80 | (v & 0x3f)); - s.saw_high_surrogate = false; - s.bits = 0; - return 2; - } else { - return static_cast(-1); - } -} - inline bool ConvertStringArg(const wchar_t *v, size_t len, const FormatConversionSpecImpl conv, FormatSinkImpl *sink) { FixedArray mb(len * 4); - ShiftState s; + strings_internal::ShiftState s; size_t chars_written = 0; for (size_t i = 0; i < len; ++i) { - const size_t chars = WideToUtf8(v[i], &mb[chars_written], s); + const size_t chars = + strings_internal::WideToUtf8(v[i], &mb[chars_written], s); if (chars == static_cast(-1)) { return false; } chars_written += chars; } @@ -382,8 +328,8 @@ inline bool ConvertStringArg(const wchar_t *v, bool ConvertWCharTImpl(wchar_t v, const FormatConversionSpecImpl conv, FormatSinkImpl *sink) { char mb[4]; - ShiftState s; - const size_t chars_written = WideToUtf8(v, mb, s); + strings_internal::ShiftState s; + const size_t chars_written = strings_internal::WideToUtf8(v, mb, s); return chars_written != static_cast(-1) && !s.saw_high_surrogate && ConvertStringArg(string_view(mb, chars_written), conv, sink); } @@ -510,13 +456,11 @@ StringConvertResult FormatConvertImpl(string_view v, return {ConvertStringArg(v, conv, sink)}; } -#if defined(ABSL_HAVE_STD_STRING_VIEW) StringConvertResult FormatConvertImpl(std::wstring_view v, const FormatConversionSpecImpl conv, FormatSinkImpl* sink) { return {ConvertStringArg(v.data(), v.size(), conv, sink)}; } -#endif StringPtrConvertResult FormatConvertImpl(const char* v, const FormatConversionSpecImpl conv, diff --git a/absl/strings/internal/str_format/arg.h b/absl/strings/internal/str_format/arg.h index 309161d5915..021013fb1ab 100644 --- a/absl/strings/internal/str_format/arg.h +++ b/absl/strings/internal/str_format/arg.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -37,10 +38,6 @@ #include "absl/strings/internal/str_format/extension.h" #include "absl/strings/string_view.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN @@ -228,7 +225,6 @@ StringConvertResult FormatConvertImpl(const std::wstring& v, StringConvertResult FormatConvertImpl(string_view v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); -#if defined(ABSL_HAVE_STD_STRING_VIEW) StringConvertResult FormatConvertImpl(std::wstring_view v, FormatConversionSpecImpl conv, FormatSinkImpl* sink); @@ -239,7 
+235,6 @@ inline StringConvertResult FormatConvertImpl(std::string_view v, return FormatConvertImpl(absl::string_view(v.data(), v.size()), conv, sink); } #endif // !ABSL_USES_STD_STRING_VIEW -#endif // ABSL_HAVE_STD_STRING_VIEW using StringPtrConvertResult = ArgConvertResult #include #include +#include #include // NOLINT #include #include @@ -46,10 +47,6 @@ #include "absl/types/optional.h" #include "absl/types/span.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) -#include -#endif - namespace absl { ABSL_NAMESPACE_BEGIN namespace str_format_internal { @@ -322,10 +319,8 @@ TEST_F(FormatConvertTest, BasicString) { TestStringConvert(std::string("hello")); TestStringConvert(std::wstring(L"hello")); TestStringConvert(string_view("hello")); -#if defined(ABSL_HAVE_STD_STRING_VIEW) TestStringConvert(std::string_view("hello")); TestStringConvert(std::wstring_view(L"hello")); -#endif // ABSL_HAVE_STD_STRING_VIEW } TEST_F(FormatConvertTest, NullString) { diff --git a/absl/strings/internal/str_split_internal.h b/absl/strings/internal/str_split_internal.h index ed1f117cda2..31f1d657d5e 100644 --- a/absl/strings/internal/str_split_internal.h +++ b/absl/strings/internal/str_split_internal.h @@ -30,6 +30,7 @@ #define ABSL_STRINGS_INTERNAL_STR_SPLIT_INTERNAL_H_ #include +#include #include #include #include @@ -58,8 +59,12 @@ namespace strings_internal { class ConvertibleToStringView { public: ConvertibleToStringView(const char* s) // NOLINT(runtime/explicit) - : value_(s) {} - ConvertibleToStringView(char* s) : value_(s) {} // NOLINT(runtime/explicit) + : value_(s) { + assert(s != nullptr); + } + ConvertibleToStringView(char* s) : value_(s) { // NOLINT(runtime/explicit) + assert(s != nullptr); + } ConvertibleToStringView(absl::string_view s) // NOLINT(runtime/explicit) : value_(s) {} ConvertibleToStringView(const std::string& s) // NOLINT(runtime/explicit) diff --git a/absl/strings/internal/utf8.cc b/absl/strings/internal/utf8.cc index 7ecb93dfbe7..61945f5869b 100644 --- a/absl/strings/internal/utf8.cc +++ b/absl/strings/internal/utf8.cc @@ -16,11 +16,17 @@ #include "absl/strings/internal/utf8.h" +#include +#include +#include + +#include "absl/base/config.h" + namespace absl { ABSL_NAMESPACE_BEGIN namespace strings_internal { -size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) { +size_t EncodeUTF8Char(char* buffer, char32_t utf8_char) { if (utf8_char <= 0x7F) { *buffer = static_cast(utf8_char); return 1; @@ -48,6 +54,95 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) { } } +size_t WideToUtf8(wchar_t wc, char* buf, ShiftState& s) { + // Reinterpret the output buffer `buf` as `unsigned char*` for subsequent + // bitwise operations. This ensures well-defined behavior for bit + // manipulations (avoiding issues with signed `char`) and is safe under C++ + // aliasing rules, as `unsigned char` can alias any type. + auto* ubuf = reinterpret_cast(buf); + const uint32_t v = static_cast(wc); + constexpr size_t kError = static_cast(-1); + + if (v <= 0x007F) { + // 1-byte sequence (U+0000 to U+007F). + // 0xxxxxxx. + ubuf[0] = (0b0111'1111 & v); + s = {}; // Reset surrogate state. + return 1; + } else if (0x0080 <= v && v <= 0x07FF) { + // 2-byte sequence (U+0080 to U+07FF). + // 110xxxxx 10xxxxxx. + ubuf[0] = 0b1100'0000 | (0b0001'1111 & (v >> 6)); + ubuf[1] = 0b1000'0000 | (0b0011'1111 & v); + s = {}; // Reset surrogate state. + return 2; + } else if ((0x0800 <= v && v <= 0xD7FF) || (0xE000 <= v && v <= 0xFFFF)) { + // 3-byte sequence (U+0800 to U+D7FF or U+E000 to U+FFFF). 
+    // Excludes surrogate code points U+D800-U+DFFF.
+    // 1110xxxx 10xxxxxx 10xxxxxx.
+    ubuf[0] = 0b1110'0000 | (0b0000'1111 & (v >> 12));
+    ubuf[1] = 0b1000'0000 | (0b0011'1111 & (v >> 6));
+    ubuf[2] = 0b1000'0000 | (0b0011'1111 & v);
+    s = {};  // Reset surrogate state.
+    return 3;
+  } else if (0xD800 <= v && v <= 0xDBFF) {
+    // High Surrogate (U+D800 to U+DBFF).
+    // This part forms the first two bytes of an eventual 4-byte UTF-8
+    // sequence.
+    const unsigned char high_bits_val = (0b0000'1111 & (v >> 6)) + 1;
+
+    // First byte of the 4-byte UTF-8 sequence (11110xxx).
+    ubuf[0] = 0b1111'0000 | (0b0000'0111 & (high_bits_val >> 2));
+    // Second byte of the 4-byte UTF-8 sequence (10xxxxxx).
+    ubuf[1] = 0b1000'0000 |                           //
+              (0b0011'0000 & (high_bits_val << 4)) |  //
+              (0b0000'1111 & (v >> 2));
+    // Set state for high surrogate after writing to buffer.
+    s = {true, static_cast<unsigned char>(0b0000'0011 & v)};
+    return 2;  // Wrote 2 bytes, expecting 2 more from a low surrogate.
+  } else if (0xDC00 <= v && v <= 0xDFFF) {
+    // Low Surrogate (U+DC00 to U+DFFF).
+    // This part forms the last two bytes of a 4-byte UTF-8 sequence,
+    // using state from a preceding high surrogate.
+    if (!s.saw_high_surrogate) {
+      // Error: Isolated low surrogate without a preceding high surrogate.
+      // s remains in its current (problematic) state.
+      // Caller should handle error.
+      return kError;
+    }
+
+    // Third byte of the 4-byte UTF-8 sequence (10xxxxxx).
+    ubuf[0] = 0b1000'0000 |                    //
+              (0b0011'0000 & (s.bits << 4)) |  //
+              (0b0000'1111 & (v >> 6));
+    // Fourth byte of the 4-byte UTF-8 sequence (10xxxxxx).
+    ubuf[1] = 0b1000'0000 | (0b0011'1111 & v);
+
+    s = {};    // Reset surrogate state, pair complete.
+    return 2;  // Wrote 2 more bytes, completing the 4-byte sequence.
+  } else if constexpr (0xFFFF < std::numeric_limits<wchar_t>::max()) {
+    // Conditionally compile the 4-byte direct conversion branch.
+    // This block is compiled only if wchar_t can represent values > 0xFFFF.
+    // It's placed after surrogate checks to ensure surrogates are handled by
+    // their specific logic. This inner 'if' is the runtime check for the
+    // 4-byte range. At this point, v is known not to be in the 1, 2, or
+    // 3-byte BMP ranges, nor is it a surrogate code point.
+    if (0x10000 <= v && v <= 0x10FFFF) {
+      // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
+      ubuf[0] = 0b1111'0000 | (0b0000'0111 & (v >> 18));
+      ubuf[1] = 0b1000'0000 | (0b0011'1111 & (v >> 12));
+      ubuf[2] = 0b1000'0000 | (0b0011'1111 & (v >> 6));
+      ubuf[3] = 0b1000'0000 | (0b0011'1111 & v);
+      s = {};  // Reset surrogate state.
+      return 4;
+    }
+  }
+
+  // Invalid wchar_t value (e.g., out of Unicode range, or unhandled after all
+  // checks).
+  s = {};  // Reset surrogate state.
+  return kError;
+}
+
 }  // namespace strings_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/absl/strings/internal/utf8.h b/absl/strings/internal/utf8.h
index 32fb1093bea..ed1db110a7b 100644
--- a/absl/strings/internal/utf8.h
+++ b/absl/strings/internal/utf8.h
@@ -41,7 +41,21 @@ namespace strings_internal {
 // characters into buffer, however never will more than kMaxEncodedUTF8Size
 // bytes be written, regardless of the value of utf8_char.
 enum { kMaxEncodedUTF8Size = 4 };
-size_t EncodeUTF8Char(char *buffer, char32_t utf8_char);
+size_t EncodeUTF8Char(char* buffer, char32_t utf8_char);
+
+struct ShiftState {
+  bool saw_high_surrogate = false;
+  unsigned char bits = 0;
+};
+
+// Converts `wc` from UTF-16 or UTF-32 to UTF-8 and writes to `buf`. `buf` is
+// assumed to have enough space for the output.
`s` is used to carry state +// between successive calls with a UTF-16 surrogate pair. Returns the number of +// chars written, or `static_cast(-1)` on failure. +// +// This is basically std::wcrtomb(), but always outputting UTF-8 instead of +// respecting the current locale. +size_t WideToUtf8(wchar_t wc, char* buf, ShiftState& s); } // namespace strings_internal ABSL_NAMESPACE_END diff --git a/absl/strings/internal/utf8_test.cc b/absl/strings/internal/utf8_test.cc index 88dd5036e3d..b88d7bb88a4 100644 --- a/absl/strings/internal/utf8_test.cc +++ b/absl/strings/internal/utf8_test.cc @@ -14,14 +14,29 @@ #include "absl/strings/internal/utf8.h" +#include #include +#include +#include +#include #include +#include +#include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/port.h" +#include "absl/strings/string_view.h" namespace { +using ::absl::strings_internal::kMaxEncodedUTF8Size; +using ::absl::strings_internal::ShiftState; +using ::absl::strings_internal::WideToUtf8; +using ::testing::StartsWith; +using ::testing::TestParamInfo; +using ::testing::TestWithParam; +using ::testing::ValuesIn; + #if !defined(__cpp_char8_t) #if defined(__clang__) #pragma clang diagnostic push @@ -33,12 +48,12 @@ TEST(EncodeUTF8Char, BasicFunction) { {0x00010000, u8"\U00010000"}, {0x0000FFFF, u8"\U0000FFFF"}, {0x0010FFFD, u8"\U0010FFFD"}}; - for (auto &test : tests) { + for (auto& test : tests) { char buf0[7] = {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'}; char buf1[7] = {'\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF'}; - char *buf0_written = + char* buf0_written = &buf0[absl::strings_internal::EncodeUTF8Char(buf0, test.first)]; - char *buf1_written = + char* buf1_written = &buf1[absl::strings_internal::EncodeUTF8Char(buf1, test.first)]; int apparent_length = 7; while (buf0[apparent_length - 1] == '\x00' && @@ -63,4 +78,182 @@ TEST(EncodeUTF8Char, BasicFunction) { #endif #endif // !defined(__cpp_char8_t) +struct WideToUtf8TestCase { + std::string description; + wchar_t input; + std::string expected_utf8_str; + size_t expected_bytes_written; + ShiftState initial_state = {false, 0}; + ShiftState expected_state = {false, 0}; +}; + +std::vector GetWideToUtf8TestCases() { + constexpr size_t kError = static_cast(-1); + std::vector cases = { + {"ASCII_A", L'A', "A", 1}, + {"NullChar", L'\0', std::string("\0", 1), 1}, + {"ASCII_Max_7F", L'\x7F', "\x7F", 1}, + + {"TwoByte_Min_80", L'\u0080', "\xC2\x80", 2}, + {"PoundSign_A3", L'\u00A3', "\xC2\xA3", 2}, + {"TwoByte_Max_7FF", L'\u07FF', "\xDF\xBF", 2}, + + {"ThreeByte_Min_800", L'\u0800', "\xE0\xA0\x80", 3}, + {"EuroSign_20AC", L'\u20AC', "\xE2\x82\xAC", 3}, + {"BMP_MaxBeforeSurrogates_D7FF", L'\uD7FF', "\xED\x9F\xBF", 3}, + {"BMP_FFFF", L'\uFFFF', "\xEF\xBF\xBF", 3}, + + {"IsolatedHighSurr_D800", L'\xD800', "\xF0\x90", 2, {}, {true, 0}}, + {"IsolatedHighSurr_DBFF", L'\xDBFF', "\xF4\x8F", 2, {}, {true, 3}}, + + {"HighSurr_D800_after_HighD800", + L'\xD800', + "\xF0\x90", + 2, + {true, 0}, + {true, 0}}, + {"HighSurr_DBFF_after_HighDBFF", + L'\xDBFF', + "\xF4\x8F", + 2, + {true, 3}, + {true, 3}}, + + {"LowSurr_DC00_after_HighD800", L'\xDC00', "\x80\x80", 2, {true, 0}, {}}, + {"LowSurr_DFFD_after_HighDBFF", L'\xDFFD', "\xBF\xBD", 2, {true, 3}, {}}, + {"LowSurr_DC00_with_InitialState_saw_high_bits_1", + L'\xDC00', + "\x90\x80", + 2, + {true, 1}, + {}}, + + // Final state = initial on error. 
+ {"Error_IsolatedLowSurr_DC00_NoPriorHigh", L'\xDC00', "", kError, {}, {}}, + {"Error_IsolatedLowSurr_DFFF_NoPriorHigh", L'\xDFFF', "", kError, {}, {}}, + +#if (defined(WCHAR_MAX) && WCHAR_MAX > 0xFFFF) + {"DirectSupplementaryChars_U10000", static_cast(0x10000), + "\xF0\x90\x80\x80", 4}, + {"DirectSupplementaryChars_U10FFFD", static_cast(0x10FFFD), + "\xF4\x8F\xBF\xBD", 4}, +#endif + }; + + wchar_t minus_one = static_cast(-1); + if constexpr (sizeof(wchar_t) == 2) { + cases.push_back({"WChar_MinusOne_as_FFFF", minus_one, "\xEF\xBF\xBF", 3}); + } else { + cases.push_back( + {"Error_WChar_MinusOne_as_FFFFFFFF", minus_one, "", kError, {}, {}}); + } + + if constexpr (sizeof(wchar_t) >= 4) { +#ifdef WCHAR_MAX + if (static_cast(WCHAR_MAX) >= 0x110000UL) { + cases.push_back({"Error_OutOfRange_110000", + static_cast(0x110000UL), + "", + kError, + {}, + {}}); + } +#else + cases.push_back({"Error_OutOfRange_110000_fallback", + static_cast(0x110000UL), + "", + kError, + {}, + {}}); +#endif + } + return cases; +} + +class WideToUtf8ParamTest : public TestWithParam {}; + +TEST_P(WideToUtf8ParamTest, SingleCharConversion) { + const auto& test_case = GetParam(); + ShiftState state = test_case.initial_state; + constexpr char kFillChar = '\xAB'; + std::string buffer(32, kFillChar); + + size_t bytes_written = WideToUtf8(test_case.input, buffer.data(), state); + + EXPECT_EQ(bytes_written, test_case.expected_bytes_written); + EXPECT_THAT(buffer, StartsWith(test_case.expected_utf8_str)); + + // The remaining bytes should be unchanged. + ASSERT_LT(test_case.expected_utf8_str.length(), buffer.size()); + EXPECT_EQ(buffer[test_case.expected_utf8_str.length()], kFillChar); + + EXPECT_EQ(state.saw_high_surrogate, + test_case.expected_state.saw_high_surrogate); + EXPECT_EQ(state.bits, test_case.expected_state.bits); +} + +INSTANTIATE_TEST_SUITE_P(WideCharToUtf8Conversion, WideToUtf8ParamTest, + ValuesIn(GetWideToUtf8TestCases()), + [](auto info) { return info.param.description; }); + +// Comprehensive test string for validating wchar_t to UTF-8 conversion. +// This string is designed to cover a variety of Unicode character types and +// sequences: +// 1. Basic ASCII characters (within names, numbers, and spacing). +// 2. Common 2-byte UTF-8 sequences: +// - Accented Latin characters (e.g., 'á' in "Holá"). +// - Hebrew text with combining vowel points (e.g., "שָׁלוֹם"). +// 3. Common 3-byte UTF-8 sequences: +// - Currency symbols (e.g., '€'). +// - CJK characters (e.g., "你好", "中"). +// - Components of complex emojis like the Zero Width Joiner (ZWJ) and +// Heart symbol. +// 4. Various 4-byte UTF-8 sequences (representing Supplementary Plane +// characters): +// - An emoji with a skin tone modifier ("👍🏻"). +// - A flag emoji composed of regional indicators ("🇺🇸"). +// - A complex ZWJ emoji sequence ("👩‍❤️‍💋‍👨") combining +// SP characters (👩, 💋, 👨) with BMP characters (ZWJ and ❤️). +// - These are critical for testing the correct handling of surrogate pairs +// when wchar_t is 2 bytes (e.g., on Windows). +// The goal is to ensure accurate conversion across a diverse set of +// characters. +// +// clang-format off +#define WIDE_STRING_LITERAL L"Holá €1 你好 שָׁלוֹם 👍🏻🇺🇸👩‍❤️‍💋‍👨 中" +#define UTF8_STRING_LITERAL u8"Holá €1 你好 שָׁלוֹם 👍🏻🇺🇸👩‍❤️‍💋‍👨 中" +// clang-format on + +absl::string_view GetUtf8TestString() { + // `u8""` forces UTF-8 encoding; MSVC will default to e.g. CP1252 (and warn) + // without it. However, the resulting character type differs between pre-C++20 + // (`char`) and C++20 (`char8_t`). 
So deduce the right character type for all + // C++ versions, init it with UTF-8, then `memcpy()` to get the result as a + // `char*` + static absl::string_view kUtf8TestString = [] { + using ConstChar8T = std::remove_reference_t; + constexpr ConstChar8T kOutputUtf8[] = UTF8_STRING_LITERAL; + static char output[sizeof kOutputUtf8]; + std::memcpy(output, kOutputUtf8, sizeof kOutputUtf8); + return output; + }(); + + return kUtf8TestString; +} + +TEST(WideToUtf8, FullString) { + std::string buffer(kMaxEncodedUTF8Size * sizeof(WIDE_STRING_LITERAL), '\0'); + char* buffer_ptr = buffer.data(); + + ShiftState state; + for (const wchar_t wc : WIDE_STRING_LITERAL) { + buffer_ptr += WideToUtf8(wc, buffer_ptr, state); + } + + EXPECT_THAT(buffer, StartsWith(GetUtf8TestString())); +} + +#undef WIDE_STRING_LITERAL +#undef UTF8_STRING_LITERAL + } // namespace diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h index eafd8a3f0e9..84db0f6cd50 100644 --- a/absl/strings/str_cat.h +++ b/absl/strings/str_cat.h @@ -111,7 +111,7 @@ #include "absl/strings/numbers.h" #include "absl/strings/string_view.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW) +#if !defined(ABSL_USES_STD_STRING_VIEW) #include #endif @@ -191,26 +191,26 @@ struct Hex { template explicit Hex( Int v, PadSpec spec = absl::kNoPad, - typename std::enable_if::value>::type* = nullptr) + std::enable_if_t::value, bool> = + true) : Hex(spec, static_cast(v)) {} template explicit Hex( Int v, PadSpec spec = absl::kNoPad, - typename std::enable_if::value>::type* = nullptr) + std::enable_if_t::value, bool> = + true) : Hex(spec, static_cast(v)) {} template explicit Hex( Int v, PadSpec spec = absl::kNoPad, - typename std::enable_if::value>::type* = nullptr) + std::enable_if_t::value, bool> = + true) : Hex(spec, static_cast(v)) {} template explicit Hex( Int v, PadSpec spec = absl::kNoPad, - typename std::enable_if::value>::type* = nullptr) + std::enable_if_t::value, bool> = + true) : Hex(spec, static_cast(v)) {} template explicit Hex(Pointee* absl_nullable v, PadSpec spec = absl::kNoPad) @@ -262,7 +262,7 @@ struct Dec { template explicit Dec(Int v, PadSpec spec = absl::kNoPad, - typename std::enable_if<(sizeof(Int) <= 8)>::type* = nullptr) + std::enable_if_t = true) : value(v >= 0 ? static_cast(v) : uint64_t{0} - static_cast(v)), width(spec == absl::kNoPad ? 1 @@ -366,7 +366,7 @@ class AlphaNum { ABSL_ATTRIBUTE_LIFETIME_BOUND) : piece_(pc) {} -#if defined(ABSL_HAVE_STD_STRING_VIEW) && !defined(ABSL_USES_STD_STRING_VIEW) +#if !defined(ABSL_USES_STD_STRING_VIEW) AlphaNum(std::string_view pc // NOLINT(runtime/explicit) ABSL_ATTRIBUTE_LIFETIME_BOUND) : piece_(pc.data(), pc.size()) {} diff --git a/absl/strings/str_cat_test.cc b/absl/strings/str_cat_test.cc index 4de379eb531..a3bd42ccd97 100644 --- a/absl/strings/str_cat_test.cc +++ b/absl/strings/str_cat_test.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "gtest/gtest.h" @@ -28,10 +29,6 @@ #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) -#include -#endif - #ifdef __ANDROID__ // Android assert messages only go to system log, so death tests cannot inspect // the message for matching. 
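
Returning to the `WideToUtf8` rewrite above: the point of `ShiftState` is that a UTF-16 surrogate pair arrives in two separate calls yet must yield one 4-byte UTF-8 sequence. A worked example with U+1F600, whose UTF-16 encoding is the pair 0xD83D/0xDE00 (the `main` wrapper is illustrative; `WideToUtf8` and `ShiftState` are the internal APIs added by this patch):

#include <cassert>
#include <cstddef>
#include <cstring>

#include "absl/strings/internal/utf8.h"

int main() {
  using absl::strings_internal::ShiftState;
  using absl::strings_internal::WideToUtf8;

  char buf[4];
  ShiftState state;
  // High surrogate: emits the first two bytes of the 4-byte sequence and
  // stashes the two low bits it still needs in `state`.
  size_t n1 = WideToUtf8(static_cast<wchar_t>(0xD83D), buf, state);
  assert(n1 == 2 && state.saw_high_surrogate);
  // Low surrogate: consumes `state` and emits the final two bytes.
  size_t n2 = WideToUtf8(static_cast<wchar_t>(0xDE00), buf + n1, state);
  assert(n2 == 2 && !state.saw_high_surrogate);
  // `buf` now holds the UTF-8 encoding of U+1F600.
  assert(std::memcmp(buf, "\xF0\x9F\x98\x80", 4) == 0);
  return 0;
}
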
@@ -219,13 +216,11 @@ TEST(StrCat, CornerCases) { EXPECT_EQ(result, ""); } -#if defined(ABSL_HAVE_STD_STRING_VIEW) TEST(StrCat, StdStringView) { std::string_view pieces[] = {"Hello", ", ", "World", "!"}; EXPECT_EQ(absl::StrCat(pieces[0], pieces[1], pieces[2], pieces[3]), "Hello, World!"); } -#endif // ABSL_HAVE_STD_STRING_VIEW TEST(StrCat, NullConstCharPtr) { const char* null = nullptr; diff --git a/absl/strings/str_format_test.cc b/absl/strings/str_format_test.cc index 969e1f9eee6..a4c877a8e38 100644 --- a/absl/strings/str_format_test.cc +++ b/absl/strings/str_format_test.cc @@ -65,8 +65,8 @@ TEST_F(FormatEntryPointTest, UntypedFormat) { "", "a", "%80d", -#if !defined(_MSC_VER) && !defined(__ANDROID__) && !defined(__native_client__) - // MSVC, NaCL and Android don't support positional syntax. +#if !defined(_MSC_VER) && !defined(__ANDROID__) + // MSVC and Android don't support positional syntax. "complicated multipart %% %1$d format %1$0999d", #endif // _MSC_VER }; @@ -266,8 +266,8 @@ TEST_F(FormatEntryPointTest, Stream) { "a", "%80d", "%d %u %c %s %f %g", -#if !defined(_MSC_VER) && !defined(__ANDROID__) && !defined(__native_client__) - // MSVC, NaCL and Android don't support positional syntax. +#if !defined(_MSC_VER) && !defined(__ANDROID__) + // MSVC and Android don't support positional syntax. "complicated multipart %% %1$d format %1$080d", #endif // _MSC_VER }; @@ -516,14 +516,11 @@ TEST_F(FormatEntryPointTest, SNPrintF) { EXPECT_EQ(result, 17); EXPECT_EQ(std::string(buffer), "NUMBER: 1234567"); - // The `output` parameter is annotated nonnull, but we want to test that - // it is never written to if the size is zero. - // Use a variable instead of passing nullptr directly to avoid a `-Wnonnull` - // warning. - char* null_output = nullptr; - result = - SNPrintF(null_output, 0, "Just checking the %s of the output.", "size"); + // Test that the buffer is never written to if the size is zero. + buffer[0] = '\0'; + result = SNPrintF(buffer, 0, "Just checking the %s of the output.", "size"); EXPECT_EQ(result, 37); + EXPECT_EQ(buffer[0], '\0'); } TEST_F(FormatEntryPointTest, SNPrintFWithV) { @@ -551,14 +548,11 @@ TEST_F(FormatEntryPointTest, SNPrintFWithV) { std::string size = "size"; - // The `output` parameter is annotated nonnull, but we want to test that - // it is never written to if the size is zero. - // Use a variable instead of passing nullptr directly to avoid a `-Wnonnull` - // warning. - char* null_output = nullptr; - result = - SNPrintF(null_output, 0, "Just checking the %v of the output.", size); + // Test that the buffer is never written to if the size is zero. 
+ buffer[0] = '\0'; + result = SNPrintF(buffer, 0, "Just checking the %v of the output.", size); EXPECT_EQ(result, 37); + EXPECT_EQ(buffer[0], '\0'); } TEST(StrFormat, BehavesAsDocumented) { diff --git a/absl/strings/str_split.h b/absl/strings/str_split.h index 7e8e31c3d8b..cf53ccf1f1a 100644 --- a/absl/strings/str_split.h +++ b/absl/strings/str_split.h @@ -127,7 +127,7 @@ class ByString { absl::string_view Find(absl::string_view text, size_t pos) const; private: - const std::string delimiter_; + std::string delimiter_; }; // ByAsciiWhitespace @@ -277,7 +277,7 @@ template <typename Delimiter> class MaxSplitsImpl { public: MaxSplitsImpl(Delimiter delimiter, int limit) - : delimiter_(delimiter), limit_(limit), count_(0) {} + : delimiter_(std::move(delimiter)), limit_(limit), count_(0) {} absl::string_view Find(absl::string_view text, size_t pos) { if (count_++ == limit_) { return absl::string_view(text.data() + text.size(), diff --git a/absl/strings/str_split_test.cc b/absl/strings/str_split_test.cc index b083975b484..c17c4724ab8 100644 --- a/absl/strings/str_split_test.cc +++ b/absl/strings/str_split_test.cc @@ -216,7 +216,7 @@ TEST(Split, APIExamples) { std::multimap<std::string, std::string> m = absl::StrSplit("a,1,b,2,a,3", ','); EXPECT_EQ(3, m.size()); - auto it = m.find("a"); + auto it = m.lower_bound("a"); EXPECT_EQ("1", it->second); ++it; EXPECT_EQ("3", it->second); diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h index 9a1933b611b..aaaf60bde77 100644 --- a/absl/strings/string_view.h +++ b/absl/strings/string_view.h @@ -34,7 +34,9 @@ #include #include #include +#include <memory> #include +#include <type_traits> #include "absl/base/attributes.h" #include "absl/base/nullability.h" @@ -198,11 +200,25 @@ class ABSL_ATTRIBUTE_VIEW string_view { // The length check is skipped since it is unnecessary and causes code bloat. constexpr string_view( // NOLINT(runtime/explicit) const char* absl_nonnull str) - : ptr_(str), length_(str ? StrlenInternal(str) : 0) {} + : ptr_(str), length_(str ? StrlenInternal(str) : 0) { + assert(str != nullptr); + } // Constructor of a `string_view` from a `const char*` and length. constexpr string_view(const char* absl_nullable data, size_type len) - : ptr_(data), length_(CheckLengthInternal(len)) {} + : ptr_(data), length_(CheckLengthInternal(len)) { + ABSL_ASSERT(data != nullptr || len == 0); + } + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + template <std::contiguous_iterator It, std::sized_sentinel_for<It> End> + requires(std::is_same_v<std::iter_value_t<It>, value_type> && + !std::is_convertible_v<End, size_type>) + constexpr string_view(It begin, End end) + : ptr_(std::to_address(begin)), length_(end - begin) { + ABSL_HARDENING_ASSERT(end >= begin); + } +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L constexpr string_view(const string_view&) noexcept = default; string_view& operator=(const string_view&) noexcept = default; @@ -376,7 +392,7 @@ class ABSL_ATTRIBUTE_VIEW string_view { // // Copies the contents of the `string_view` at offset `pos` and length `n` // into `buf`. - size_type copy(char* buf, size_type n, size_type pos = 0) const { + size_type copy(char* absl_nonnull buf, size_type n, size_type pos = 0) const { if (ABSL_PREDICT_FALSE(pos > length_)) { base_internal::ThrowStdOutOfRange("absl::string_view::copy"); } @@ -624,7 +640,7 @@ class ABSL_ATTRIBUTE_VIEW string_view { // Overload of `string_view::starts_with()` that returns true if the // `string_view` starts with the C-style prefix `s`.
- constexpr bool starts_with(const char* s) const { + constexpr bool starts_with(const char* absl_nonnull s) const { return starts_with(string_view(s)); } @@ -649,7 +665,7 @@ class ABSL_ATTRIBUTE_VIEW string_view { // Overload of `string_view::ends_with()` that returns true if the // `string_view` ends with the C-style suffix `s`. - constexpr bool ends_with(const char* s) const { + constexpr bool ends_with(const char* absl_nonnull s) const { return ends_with(string_view(s)); } #endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L @@ -745,8 +761,8 @@ ABSL_NAMESPACE_BEGIN // // Like `s.substr(pos, n)`, but clips `pos` to an upper bound of `s.size()`. // Provided because std::string_view::substr throws if `pos > size()` -inline string_view ClippedSubstr(string_view s, size_t pos, - size_t n = string_view::npos) { +inline string_view ClippedSubstr(string_view s ABSL_ATTRIBUTE_LIFETIME_BOUND, + size_t pos, size_t n = string_view::npos) { pos = (std::min)(pos, static_cast<size_t>(s.size())); return s.substr(pos, n); } diff --git a/absl/strings/string_view_test.cc b/absl/strings/string_view_test.cc index 7064cc7183d..c5d15c7d377 100644 --- a/absl/strings/string_view_test.cc +++ b/absl/strings/string_view_test.cc @@ -16,6 +16,7 @@ #include +#include <array> #include #include #include @@ -34,7 +35,7 @@ #include "absl/base/config.h" #include "absl/meta/type_traits.h" -#if defined(ABSL_HAVE_STD_STRING_VIEW) || defined(__ANDROID__) +#if defined(ABSL_USES_STD_STRING_VIEW) || defined(__ANDROID__) // We don't control the death messaging when using std::string_view. // Android assert messages only go to system log, so death tests cannot inspect // the message for matching. @@ -131,6 +132,23 @@ TEST(StringViewTest, Ctor) { EXPECT_EQ(8u, s31.length()); } +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + { + // Iterator constructor + std::string str = "hello"; + absl::string_view s1(str.begin(), str.end()); + EXPECT_EQ(s1, "hello"); + + std::array<char, 3> arr = { '1', '2', '3' }; + absl::string_view s2(arr.begin(), arr.end()); + EXPECT_EQ(s2, "123"); + + const char carr[] = "carr"; + absl::string_view s3(carr, carr + strlen(carr)); + EXPECT_EQ(s3, "carr"); + } +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + { using mstring = std::basic_string<char, std::char_traits<char>, Mallocator<char>>; @@ -870,42 +888,10 @@ TEST(StringViewTest, FrontBackEmpty) { #endif } -// `std::string_view::string_view(const char*)` calls -// `std::char_traits<char>::length(const char*)` to get the string length. In -// libc++, it doesn't allow `nullptr` in the constexpr context, with the error -// "read of dereferenced null pointer is not allowed in a constant expression". -// At run time, the behavior of `std::char_traits<char>::length()` on `nullptr` is -// undefined by the standard and usually results in crash with libc++. -// GCC also started rejected this in libstdc++ starting in GCC9. -// In MSVC, creating a constexpr string_view from nullptr also triggers an -// "unevaluable pointer value" error. This compiler implementation conforms -// to the standard, but `absl::string_view` implements a different -// behavior for historical reasons. We work around tests that construct -// `string_view` from `nullptr` when using libc++.
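For reference, the assertions added to the two constructors in the header hunks above replace the historical nullptr tolerance that the deleted comment block describes. A sketch of the resulting contract, assuming a build where `assert` and `ABSL_ASSERT` are active:

#include "absl/strings/string_view.h"

void NullContract() {
  absl::string_view empty;           // OK: data() == nullptr, size() == 0
  absl::string_view ok(nullptr, 0);  // OK: null data is allowed when len == 0
  // absl::string_view bad1(nullptr);     // asserts: parameter is absl_nonnull
  // absl::string_view bad2(nullptr, 5);  // asserts: null data, nonzero length
  (void)empty;
  (void)ok;
}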
-#if !defined(ABSL_USES_STD_STRING_VIEW) || \ - (!(defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 9) && \ - !defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) -#define ABSL_HAVE_STRING_VIEW_FROM_NULLPTR 1 -#endif - -TEST(StringViewTest, NULLInput) { +TEST(StringViewTest, DefaultConstructor) { absl::string_view s; EXPECT_EQ(s.data(), nullptr); EXPECT_EQ(s.size(), 0u); - -#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR - // The `str` parameter is annotated nonnull, but we want to test the defensive - // null check. Use a variable instead of passing nullptr directly to avoid a - // `-Wnonnull` warning. - char* null_str = nullptr; - s = absl::string_view(null_str); - EXPECT_EQ(s.data(), nullptr); - EXPECT_EQ(s.size(), 0u); - - // .ToString() on a absl::string_view with nullptr should produce the empty - // string. - EXPECT_EQ("", std::string(s)); -#endif // ABSL_HAVE_STRING_VIEW_FROM_NULLPTR } TEST(StringViewTest, Comparisons2) { @@ -1086,16 +1072,6 @@ TEST(StringViewTest, ConstexprCompiles) { // know at compile time that the argument is nullptr and complain because the // parameter is annotated nonnull. We hence turn the warning off for this // test. -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wnonnull" -#endif -#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR - constexpr absl::string_view cstr(nullptr); -#endif -#if defined(__clang__) -#pragma clang diagnostic pop -#endif constexpr absl::string_view cstr_len("cstr", 4); #if defined(ABSL_USES_STD_STRING_VIEW) @@ -1163,12 +1139,6 @@ TEST(StringViewTest, ConstexprCompiles) { constexpr absl::string_view::iterator const_begin_empty = sp.begin(); constexpr absl::string_view::iterator const_end_empty = sp.end(); EXPECT_EQ(const_begin_empty, const_end_empty); -#ifdef ABSL_HAVE_STRING_VIEW_FROM_NULLPTR - constexpr absl::string_view::iterator const_begin_nullptr = cstr.begin(); - constexpr absl::string_view::iterator const_end_nullptr = cstr.end(); - EXPECT_EQ(const_begin_nullptr, const_end_nullptr); -#endif // ABSL_HAVE_STRING_VIEW_FROM_NULLPTR - constexpr absl::string_view::iterator const_begin = cstr_len.begin(); constexpr absl::string_view::iterator const_end = cstr_len.end(); constexpr absl::string_view::size_type const_size = cstr_len.size(); @@ -1202,6 +1172,18 @@ TEST(StringViewTest, ConstexprCompiles) { constexpr size_t sp_npos = sp.npos; EXPECT_EQ(sp_npos, static_cast<size_t>(-1)); + +#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L + { + static constexpr std::array<char, 3> arr = { '1', '2', '3' }; + constexpr absl::string_view s2(arr.begin(), arr.end()); + EXPECT_EQ(s2, "123"); + + static constexpr char carr[] = "carr"; + constexpr absl::string_view s3(carr, carr + 4); + EXPECT_EQ(s3, "carr"); + } +#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L } constexpr char ConstexprMethodsHelper() { diff --git a/absl/strings/substitute.h b/absl/strings/substitute.h index 08f64e99164..c93b1cc645f 100644 --- a/absl/strings/substitute.h +++ b/absl/strings/substitute.h @@ -187,12 +187,13 @@ class Arg { // vector<bool>::reference and const_reference require special help to convert // to `Arg` because it requires two user defined conversions.
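To spell out the comment above: an implicit conversion sequence may contain at most one user-defined conversion, and subscripting a `std::vector<bool>` yields a proxy type rather than `bool`. An illustrative sketch (not part of the patch):

#include <vector>

struct ArgLike {
  ArgLike(bool) {}  // user-defined conversion #2 in the chain below
};

void Demo() {
  std::vector<bool> v = {true};
  bool b = v[0];    // one user-defined conversion: reference -> bool
  ArgLike a(v[0]);  // direct-init: reference -> bool, then ctor; also fine
  // ArgLike implicit = v[0];  // ill-formed: would need reference -> bool ->
  //                           // ArgLike, two user-defined conversions in one
  //                           // implicit conversion sequence
  (void)b;
  (void)a;
}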
- template <typename T, std::enable_if_t<std::is_class<T>::value && - (std::is_same<T, std::vector<bool>::reference>::value || - std::is_same<T, std::vector<bool>::const_reference>::value)>* = - nullptr> + template < + typename T, + std::enable_if_t< + std::is_class<T>::value && + (std::is_same<T, std::vector<bool>::reference>::value || + std::is_same<T, std::vector<bool>::const_reference>::value), + bool> = true> Arg(T value) // NOLINT(google-explicit-constructor) : Arg(static_cast<bool>(value)) {} @@ -237,7 +238,7 @@ constexpr int CalculateOneBit(const char* absl_nonnull format) { : (1 << (*format - '0')); } -constexpr const char* SkipNumber(const char* absl_nonnull format) { +constexpr const char* absl_nonnull SkipNumber(const char* absl_nonnull format) { return !*format ? format : (format + 1); } diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel index 920928e2328..9a1aa833790 100644 --- a/absl/synchronization/BUILD.bazel +++ b/absl/synchronization/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. # +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -360,6 +363,7 @@ cc_test( linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_wasm", + "noubsan", # TODO(b/417700722): timeouts under UBSAN. ], deps = [ ":per_thread_sem_test_common", diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt index 9d4844d0a5b..ad45515c115 100644 --- a/absl/synchronization/CMakeLists.txt +++ b/absl/synchronization/CMakeLists.txt @@ -113,7 +113,6 @@ absl_cc_library( absl::raw_logging_internal absl::stacktrace absl::symbolize - absl::tracing_internal absl::time absl::tracing_internal Threads::Threads diff --git a/absl/synchronization/barrier.cc b/absl/synchronization/barrier.cc index 0dfd795e7b5..f5dad22203f 100644 --- a/absl/synchronization/barrier.cc +++ b/absl/synchronization/barrier.cc @@ -26,7 +26,7 @@ static bool IsZero(void *arg) { } bool Barrier::Block() { - MutexLock l(&this->lock_); + MutexLock l(this->lock_); this->num_to_block_--; if (this->num_to_block_ < 0) { diff --git a/absl/synchronization/barrier_test.cc b/absl/synchronization/barrier_test.cc index bfc6cb1883b..2aed2724076 100644 --- a/absl/synchronization/barrier_test.cc +++ b/absl/synchronization/barrier_test.cc @@ -37,7 +37,7 @@ TEST(Barrier, SanityTest) { } // Increment the counter. - absl::MutexLock lock(&mutex); + absl::MutexLock lock(mutex); ++counter; }; @@ -57,7 +57,7 @@ TEST(Barrier, SanityTest) { // The counter should still be zero since no thread should have // been able to pass the barrier yet. { - absl::MutexLock lock(&mutex); + absl::MutexLock lock(mutex); EXPECT_EQ(counter, 0); } @@ -70,6 +70,6 @@ TEST(Barrier, SanityTest) { } // All threads should now have incremented the counter.
- absl::MutexLock lock(&mutex); + absl::MutexLock lock(mutex); EXPECT_EQ(counter, kNumThreads); } diff --git a/absl/synchronization/blocking_counter.cc b/absl/synchronization/blocking_counter.cc index a530baf4cc4..9468469afab 100644 --- a/absl/synchronization/blocking_counter.cc +++ b/absl/synchronization/blocking_counter.cc @@ -42,7 +42,7 @@ bool BlockingCounter::DecrementCount() { "BlockingCounter::DecrementCount() called too many times"); if (count == 0) { base_internal::TraceSignal(this, TraceObjectKind()); - MutexLock l(&lock_); + MutexLock l(lock_); done_ = true; return true; } @@ -52,7 +52,7 @@ bool BlockingCounter::DecrementCount() { void BlockingCounter::Wait() { base_internal::TraceWait(this, TraceObjectKind()); { - MutexLock l(&this->lock_); + MutexLock l(this->lock_); // only one thread may call Wait(). To support more than one thread, // implement a counter num_to_exit, like in the Barrier class. diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc index 93cd376bde0..0b0f9207a44 100644 --- a/absl/synchronization/internal/create_thread_identity.cc +++ b/absl/synchronization/internal/create_thread_identity.cc @@ -35,7 +35,7 @@ namespace synchronization_internal { // ThreadIdentity storage is persistent, we maintain a free-list of previously // released ThreadIdentity objects. ABSL_CONST_INIT static base_internal::SpinLock freelist_lock( - absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + base_internal::SCHEDULE_KERNEL_ONLY); ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist; // A per-thread destructor for reclaiming associated ThreadIdentity objects. @@ -60,7 +60,7 @@ static void ReclaimThreadIdentity(void* v) { // association state in this case. base_internal::ClearCurrentThreadIdentity(); { - base_internal::SpinLockHolder l(&freelist_lock); + base_internal::SpinLockHolder l(freelist_lock); identity->next = thread_identity_freelist; thread_identity_freelist = identity; } @@ -108,7 +108,7 @@ static base_internal::ThreadIdentity* NewThreadIdentity() { { // Re-use a previously released object if possible. - base_internal::SpinLockHolder l(&freelist_lock); + base_internal::SpinLockHolder l(freelist_lock); if (thread_identity_freelist) { identity = thread_identity_freelist; // Take list-head. thread_identity_freelist = thread_identity_freelist->next; diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc index 129067c1515..f58fb0a7772 100644 --- a/absl/synchronization/internal/graphcycles.cc +++ b/absl/synchronization/internal/graphcycles.cc @@ -33,15 +33,15 @@ #include "absl/base/internal/low_level_alloc.h" #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING -#include "absl/synchronization/internal/graphcycles.h" - #include #include #include #include + #include "absl/base/internal/hide_ptr.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" +#include "absl/synchronization/internal/graphcycles.h" // Do not use STL. This module does not use standard memory allocation. @@ -54,15 +54,14 @@ namespace { // Avoid LowLevelAlloc's default arena since it calls malloc hooks in // which people are doing things like acquiring Mutexes. 
ABSL_CONST_INIT static absl::base_internal::SpinLock arena_mu( - absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + base_internal::SCHEDULE_KERNEL_ONLY); ABSL_CONST_INIT static base_internal::LowLevelAlloc::Arena* arena; static void InitArenaIfNecessary() { - arena_mu.Lock(); + base_internal::SpinLockHolder l(arena_mu); if (arena == nullptr) { arena = base_internal::LowLevelAlloc::NewArena(0); } - arena_mu.Unlock(); } // Number of inlined elements in Vec. Hash table implementation @@ -89,7 +88,7 @@ class Vec { T* end() { return ptr_ + size_; } const T& operator[](uint32_t i) const { return ptr_[i]; } T& operator[](uint32_t i) { return ptr_[i]; } - const T& back() const { return ptr_[size_-1]; } + const T& back() const { return ptr_[size_ - 1]; } void pop_back() { size_--; } void push_back(const T& v) { @@ -178,7 +177,7 @@ class NodeSet { } table_[i] = v; // Double when 75% full. - if (occupied_ >= table_.size() - table_.size()/4) Grow(); + if (occupied_ >= table_.size() - table_.size() / 4) Grow(); return true; } @@ -193,7 +192,7 @@ class NodeSet { // Example: // HASH_FOR_EACH(elem, node->out) { ... } #define HASH_FOR_EACH(elem, eset) \ - for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); ) + for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem);) bool Next(int32_t* cursor, int32_t* elem) { while (static_cast(*cursor) < table_.size()) { int32_t v = table_[static_cast(*cursor)]; @@ -209,7 +208,7 @@ class NodeSet { private: enum : int32_t { kEmpty = -1, kDel = -2 }; Vec table_; - uint32_t occupied_; // Count of non-empty slots (includes deleted slots) + uint32_t occupied_; // Count of non-empty slots (includes deleted slots) static uint32_t Hash(int32_t a) { return static_cast(a) * 41; } @@ -270,25 +269,23 @@ inline GraphId MakeId(int32_t index, uint32_t version) { return g; } -inline int32_t NodeIndex(GraphId id) { - return static_cast(id.handle); -} +inline int32_t NodeIndex(GraphId id) { return static_cast(id.handle); } inline uint32_t NodeVersion(GraphId id) { return static_cast(id.handle >> 32); } struct Node { - int32_t rank; // rank number assigned by Pearce-Kelly algorithm - uint32_t version; // Current version number - int32_t next_hash; // Next entry in hash table - bool visited; // Temporary marker used by depth-first-search - uintptr_t masked_ptr; // User-supplied pointer - NodeSet in; // List of immediate predecessor nodes in graph - NodeSet out; // List of immediate successor nodes in graph - int priority; // Priority of recorded stack trace. - int nstack; // Depth of recorded stack trace. - void* stack[40]; // stack[0,nstack-1] holds stack trace for node. + int32_t rank; // rank number assigned by Pearce-Kelly algorithm + uint32_t version; // Current version number + int32_t next_hash; // Next entry in hash table + bool visited; // Temporary marker used by depth-first-search + uintptr_t masked_ptr; // User-supplied pointer + NodeSet in; // List of immediate predecessor nodes in graph + NodeSet out; // List of immediate successor nodes in graph + int priority; // Priority of recorded stack trace. + int nstack; // Depth of recorded stack trace. + void* stack[40]; // stack[0,nstack-1] holds stack trace for node. }; // Hash table for pointer to node index lookups. @@ -318,7 +315,7 @@ class PointerMap { // Advance through linked list while keeping track of the // predecessor slot that points to the current entry. 
auto masked = base_internal::HidePtr(ptr); - for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) { + for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1;) { int32_t index = *slot; Node* n = (*nodes_)[static_cast(index)]; if (n->masked_ptr == masked) { @@ -381,7 +378,9 @@ GraphCycles::GraphCycles() { GraphCycles::~GraphCycles() { for (auto* node : rep_->nodes_) { - if (node == nullptr) { continue; } + if (node == nullptr) { + continue; + } node->Node::~Node(); base_internal::LowLevelAlloc::Free(node); } @@ -474,8 +473,7 @@ void GraphCycles::RemoveNode(void* ptr) { void* GraphCycles::Ptr(GraphId id) { Node* n = FindNode(rep_, id); - return n == nullptr ? nullptr - : base_internal::UnhidePtr(n->masked_ptr); + return n == nullptr ? nullptr : base_internal::UnhidePtr(n->masked_ptr); } bool GraphCycles::HasNode(GraphId node) { @@ -502,8 +500,8 @@ static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound); static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound); static void Reorder(GraphCycles::Rep* r); static void Sort(const Vec&, Vec* delta); -static void MoveToList( - GraphCycles::Rep* r, Vec* src, Vec* dst); +static void MoveToList(GraphCycles::Rep* r, Vec* src, + Vec* dst); bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) { Rep* r = rep_; @@ -605,9 +603,8 @@ static void Reorder(GraphCycles::Rep* r) { // Produce sorted list of all ranks that will be reassigned. r->merged_.resize(r->deltab_.size() + r->deltaf_.size()); - std::merge(r->deltab_.begin(), r->deltab_.end(), - r->deltaf_.begin(), r->deltaf_.end(), - r->merged_.begin()); + std::merge(r->deltab_.begin(), r->deltab_.end(), r->deltaf_.begin(), + r->deltaf_.end(), r->merged_.begin()); // Assign the ranks in order to the collected list. for (uint32_t i = 0; i < r->list_.size(); i++) { @@ -628,8 +625,8 @@ static void Sort(const Vec& nodes, Vec* delta) { std::sort(delta->begin(), delta->end(), cmp); } -static void MoveToList( - GraphCycles::Rep* r, Vec* src, Vec* dst) { +static void MoveToList(GraphCycles::Rep* r, Vec* src, + Vec* dst) { for (auto& v : *src) { int32_t w = v; // Replace v entry with its rank diff --git a/absl/synchronization/internal/thread_pool.h b/absl/synchronization/internal/thread_pool.h index 5eb0bb605e4..f87adf635d3 100644 --- a/absl/synchronization/internal/thread_pool.h +++ b/absl/synchronization/internal/thread_pool.h @@ -46,7 +46,7 @@ class ThreadPool { ~ThreadPool() { { - absl::MutexLock l(&mu_); + absl::MutexLock l(mu_); for (size_t i = 0; i < threads_.size(); i++) { queue_.push(nullptr); // Shutdown signal. } @@ -59,7 +59,7 @@ class ThreadPool { // Schedule a function to be run on a ThreadPool thread immediately. 
void Schedule(absl::AnyInvocable func) { assert(func != nullptr); - absl::MutexLock l(&mu_); + absl::MutexLock l(mu_); queue_.push(std::move(func)); } @@ -72,7 +72,7 @@ class ThreadPool { while (true) { absl::AnyInvocable func; { - absl::MutexLock l(&mu_); + absl::MutexLock l(mu_); mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable)); func = std::move(queue_.front()); queue_.pop(); diff --git a/absl/synchronization/internal/waiter_test.cc b/absl/synchronization/internal/waiter_test.cc index 6e374155584..80a6985769b 100644 --- a/absl/synchronization/internal/waiter_test.cc +++ b/absl/synchronization/internal/waiter_test.cc @@ -129,7 +129,10 @@ TYPED_TEST_P(WaiterTest, WaitTimeWoken) { start + absl::Seconds(10)))); absl::Duration waited = absl::Now() - start; EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500))); - EXPECT_LT(waited, absl::Seconds(2)); + #ifndef _MSC_VER + // Skip on MSVC due to flakiness. + EXPECT_LT(waited, absl::Seconds(2)); + #endif } TYPED_TEST_P(WaiterTest, WaitDurationReached) { @@ -139,7 +142,10 @@ TYPED_TEST_P(WaiterTest, WaitDurationReached) { absl::synchronization_internal::KernelTimeout(absl::Milliseconds(500)))); absl::Duration waited = absl::Now() - start; EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500))); - EXPECT_LT(waited, absl::Seconds(1)); + #ifndef _MSC_VER + // Skip on MSVC due to flakiness. + EXPECT_LT(waited, absl::Seconds(1)); + #endif } TYPED_TEST_P(WaiterTest, WaitTimeReached) { @@ -149,7 +155,10 @@ TYPED_TEST_P(WaiterTest, WaitTimeReached) { start + absl::Milliseconds(500)))); absl::Duration waited = absl::Now() - start; EXPECT_GE(waited, WithTolerance(absl::Milliseconds(500))); - EXPECT_LT(waited, absl::Seconds(1)); + #ifndef _MSC_VER + // Skip on MSVC due to flakiness. + EXPECT_LT(waited, absl::Seconds(1)); + #endif } REGISTER_TYPED_TEST_SUITE_P(WaiterTest, diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc index 4c4cff64a75..1c11431b3a9 100644 --- a/absl/synchronization/lifetime_test.cc +++ b/absl/synchronization/lifetime_test.cc @@ -45,7 +45,7 @@ void ThreadOne(absl::Mutex* mutex, absl::CondVar* condvar, CHECK(!*state) << "*state not initialized"; { - absl::MutexLock lock(mutex); + absl::MutexLock lock(*mutex); notification->Notify(); CHECK(notification->HasBeenNotified()) << "invalid Notification"; @@ -64,7 +64,7 @@ void ThreadTwo(absl::Mutex* mutex, absl::CondVar* condvar, notification->WaitForNotification(); CHECK(notification->HasBeenNotified()) << "invalid Notification"; { - absl::MutexLock lock(mutex); + absl::MutexLock lock(*mutex); *state = true; condvar->Signal(); } @@ -148,12 +148,12 @@ ABSL_CONST_INIT absl::Mutex early_const_init_mutex(absl::kConstInit); // before the constructors of either grab_lock or check_still_locked are run.) 
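The ordering property described in the comment above is exactly what `absl::kConstInit` provides: a constant-initialized mutex is usable from any dynamic initializer, regardless of translation-unit link order. A minimal sketch (names illustrative; uses the reference-taking `MutexLock` overload added by this patch):

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"

// Constant-initialized: ready before any dynamic initialization runs.
ABSL_CONST_INIT static absl::Mutex g_mu(absl::kConstInit);

struct RunsBeforeMain {
  RunsBeforeMain() {
    absl::MutexLock l(g_mu);  // safe even from a pre-main constructor
    // ... touch state guarded by g_mu ...
  }
} early_object;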
extern absl::Mutex const_init_sanity_mutex; OnConstruction grab_lock([]() ABSL_NO_THREAD_SAFETY_ANALYSIS { - const_init_sanity_mutex.Lock(); + const_init_sanity_mutex.lock(); }); ABSL_CONST_INIT absl::Mutex const_init_sanity_mutex(absl::kConstInit); OnConstruction check_still_locked([]() ABSL_NO_THREAD_SAFETY_ANALYSIS { const_init_sanity_mutex.AssertHeld(); - const_init_sanity_mutex.Unlock(); + const_init_sanity_mutex.unlock(); }); #endif // defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900) diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 5091b8fd340..1d5858261a7 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -226,7 +226,7 @@ static bool AtomicSetBits(std::atomic* pv, intptr_t bits, // Data for doing deadlock detection. ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu( - absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + base_internal::SCHEDULE_KERNEL_ONLY); // Graph used to detect deadlocks. ABSL_CONST_INIT static GraphCycles* deadlock_graph @@ -292,7 +292,7 @@ static const struct { }; ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu( - absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); + base_internal::SCHEDULE_KERNEL_ONLY); // Hash table size; should be prime > 2. // Can't be too small, as it's used for deadlock detection information. @@ -330,7 +330,7 @@ static SynchEvent* EnsureSynchEvent(std::atomic* addr, const char* name, intptr_t bits, intptr_t lockbit) { uint32_t h = reinterpret_cast(addr) % kNSynchEvent; - synch_event_mu.Lock(); + synch_event_mu.lock(); // When a Mutex/CondVar is destroyed, we don't remove the associated // SynchEvent to keep destructors empty in release builds for performance // reasons. If the current call is the first to set bits (kMuEvent/kCVEvent), @@ -392,16 +392,16 @@ static SynchEvent* EnsureSynchEvent(std::atomic* addr, } else { e->refcount++; // for return value } - synch_event_mu.Unlock(); + synch_event_mu.unlock(); return e; } // Decrement the reference count of *e, or do nothing if e==null. static void UnrefSynchEvent(SynchEvent* e) { if (e != nullptr) { - synch_event_mu.Lock(); + synch_event_mu.lock(); bool del = (--(e->refcount) == 0); - synch_event_mu.Unlock(); + synch_event_mu.unlock(); if (del) { base_internal::LowLevelAlloc::Free(e); } @@ -414,7 +414,7 @@ static void UnrefSynchEvent(SynchEvent* e) { static SynchEvent* GetSynchEvent(const void* addr) { uint32_t h = reinterpret_cast(addr) % kNSynchEvent; SynchEvent* e; - synch_event_mu.Lock(); + synch_event_mu.lock(); for (e = synch_event[h]; e != nullptr && e->masked_addr != base_internal::HidePtr(addr); e = e->next) { @@ -422,7 +422,7 @@ static SynchEvent* GetSynchEvent(const void* addr) { if (e != nullptr) { e->refcount++; } - synch_event_mu.Unlock(); + synch_event_mu.unlock(); return e; } @@ -509,10 +509,10 @@ struct SynchWaitParams { const Condition* cond; // The condition that this thread is waiting for. // In Mutex, this field is set to zero if a timeout // expires. - KernelTimeout timeout; // timeout expiry---absolute time - // In Mutex, this field is set to zero if a timeout - // expires. - Mutex* const cvmu; // used for transfer from cond var to mutex + KernelTimeout timeout; // timeout expiry---absolute time + // In Mutex, this field is set to zero if a timeout + // expires. 
+ Mutex* const cvmu; // used for transfer from cond var to mutex PerThreadSynch* const thread; // thread that is waiting // If not null, thread should be enqueued on the CondVar whose state @@ -1223,9 +1223,8 @@ static GraphId GetGraphIdLocked(Mutex* mu) } static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { - deadlock_graph_mu.Lock(); + base_internal::SpinLockHolder l(deadlock_graph_mu); GraphId id = GetGraphIdLocked(mu); - deadlock_graph_mu.Unlock(); return id; } @@ -1327,8 +1326,7 @@ static char* StackString(void** pcs, int n, char* buf, int maxlen, char sym[kSymLen]; int len = 0; for (int i = 0; i != n; i++) { - if (len >= maxlen) - return buf; + if (len >= maxlen) return buf; size_t count = static_cast(maxlen - len); if (symbolize) { if (!absl::Symbolize(pcs[i], sym, kSymLen)) { @@ -1387,7 +1385,7 @@ static GraphId DeadlockCheck(Mutex* mu) { SynchLocksHeld* all_locks = Synch_GetAllLocks(); - absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu); + absl::base_internal::SpinLockHolder lock(deadlock_graph_mu); const GraphId mu_id = GetGraphIdLocked(mu); if (all_locks->n == 0) { @@ -1457,7 +1455,7 @@ static GraphId DeadlockCheck(Mutex* mu) { } if (synch_deadlock_detection.load(std::memory_order_acquire) == OnDeadlockCycle::kAbort) { - deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler + deadlock_graph_mu.unlock(); // avoid deadlock in fatal sighandler ABSL_RAW_LOG(FATAL, "dying due to potential deadlock"); return mu_id; } @@ -1482,11 +1480,11 @@ static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) { void Mutex::ForgetDeadlockInfo() { if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { - deadlock_graph_mu.Lock(); + deadlock_graph_mu.lock(); if (deadlock_graph != nullptr) { deadlock_graph->RemoveNode(this); } - deadlock_graph_mu.Unlock(); + deadlock_graph_mu.unlock(); } } @@ -1528,7 +1526,7 @@ static bool TryAcquireWithSpinning(std::atomic* mu) { return false; } -void Mutex::Lock() { +void Mutex::lock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); GraphId id = DebugOnlyDeadlockCheck(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1546,7 +1544,7 @@ void Mutex::Lock() { ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); } -void Mutex::ReaderLock() { +void Mutex::lock_shared() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); GraphId id = DebugOnlyDeadlockCheck(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1606,7 +1604,7 @@ bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) { return res; } -bool Mutex::TryLock() { +bool Mutex::try_lock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); intptr_t v = mu_.load(std::memory_order_relaxed); // Try fast acquire. 
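With the renames in the hunks above, `absl::Mutex` now spells the standard BasicLockable/Lockable operations (`lock()`, `try_lock()`, `unlock()`), while the capitalized names remain as aliases. A sketch of what that enables, assuming the renames land as shown:

#include <mutex>

#include "absl/synchronization/mutex.h"

absl::Mutex g_mu;
int g_guarded = 0;

void Increment() {
  std::lock_guard<absl::Mutex> lock(g_mu);  // calls g_mu.lock()/g_mu.unlock()
  ++g_guarded;
}

bool TryIncrement() {
  std::unique_lock<absl::Mutex> lock(g_mu, std::try_to_lock);  // g_mu.try_lock()
  if (!lock.owns_lock()) return false;
  ++g_guarded;
  return true;
}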
@@ -1644,7 +1642,7 @@ ABSL_ATTRIBUTE_NOINLINE bool Mutex::TryLockSlow() { return false; } -bool Mutex::ReaderTryLock() { +bool Mutex::try_lock_shared() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock | __tsan_mutex_try_lock); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1706,7 +1704,7 @@ ABSL_ATTRIBUTE_NOINLINE bool Mutex::ReaderTryLockSlow() { return false; } -void Mutex::Unlock() { +void Mutex::unlock() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); DebugOnlyLockLeave(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1776,7 +1774,7 @@ static bool ExactlyOneReader(intptr_t v) { return (v & kMuMultipleWaitersMask) == 0; } -void Mutex::ReaderUnlock() { +void Mutex::unlock_shared() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock); DebugOnlyLockLeave(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -2286,7 +2284,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) { // set up to walk the list PerThreadSynch* w_walk; // current waiter during list walk PerThreadSynch* pw_walk; // previous waiter during list walk - if (old_h != nullptr) { // we've searched up to old_h before + if (old_h != nullptr) { // we've searched up to old_h before pw_walk = old_h; w_walk = old_h->next; } else { // no prior search, start at beginning @@ -2762,7 +2760,7 @@ void CondVar::SignalAll() { void ReleasableMutexLock::Release() { ABSL_RAW_CHECK(this->mu_ != nullptr, "ReleasableMutexLock::Release may only be called once"); - this->mu_->Unlock(); + this->mu_->unlock(); this->mu_ = nullptr; } diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h index 78b1c7a048a..20624860ede 100644 --- a/absl/synchronization/mutex.h +++ b/absl/synchronization/mutex.h @@ -61,17 +61,15 @@ #include #include #include -#include -#include #include "absl/base/attributes.h" +#include "absl/base/config.h" #include "absl/base/const_init.h" #include "absl/base/internal/identity.h" -#include "absl/base/internal/low_level_alloc.h" #include "absl/base/internal/thread_identity.h" #include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/macros.h" #include "absl/base/nullability.h" -#include "absl/base/port.h" #include "absl/base/thread_annotations.h" #include "absl/synchronization/internal/kernel_timeout.h" #include "absl/synchronization/internal/per_thread_sem.h" @@ -92,10 +90,10 @@ struct SynchWaitParams; // invariants. Proper usage of mutexes prevents concurrent access by different // threads to the same resource. // -// A `Mutex` has two basic operations: `Mutex::Lock()` and `Mutex::Unlock()`. -// The `Lock()` operation *acquires* a `Mutex` (in a state known as an -// *exclusive* -- or *write* -- lock), and the `Unlock()` operation *releases* a -// Mutex. During the span of time between the Lock() and Unlock() operations, +// A `Mutex` has two basic operations: `Mutex::lock()` and `Mutex::unlock()`. +// The `lock()` operation *acquires* a `Mutex` (in a state known as an +// *exclusive* -- or *write* -- lock), and the `unlock()` operation *releases* a +// Mutex. During the span of time between the lock() and unlock() operations, // a mutex is said to be *held*. By design, all mutexes support exclusive/write // locks, as this is the most common way to use a mutex. 
// @@ -106,23 +104,23 @@ struct SynchWaitParams; // // The `Mutex` state machine for basic lock/unlock operations is quite simple: // -// | | Lock() | Unlock() | +// | | lock() | unlock() | // |----------------+------------------------+----------| // | Free | Exclusive | invalid | // | Exclusive | blocks, then exclusive | Free | // // The full conditions are as follows. // -// * Calls to `Unlock()` require that the mutex be held, and must be made in the -// same thread that performed the corresponding `Lock()` operation which +// * Calls to `unlock()` require that the mutex be held, and must be made in the +// same thread that performed the corresponding `lock()` operation which // acquired the mutex; otherwise the call is invalid. // // * The mutex being non-reentrant (or non-recursive) means that a call to -// `Lock()` or `TryLock()` must not be made in a thread that already holds the -// mutex; such a call is invalid. +// `lock()` or `try_lock()` must not be made in a thread that already holds +// the mutex; such a call is invalid. // // * In other words, the state of being "held" has both a temporal component -// (from `Lock()` until `Unlock()`) as well as a thread identity component: +// (from `lock()` until `unlock()`) as well as a thread identity component: // the mutex is held *by a particular thread*. // // An "invalid" operation has undefined behavior. The `Mutex` implementation @@ -174,24 +172,32 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { ~Mutex(); - // Mutex::Lock() + // Mutex::lock() // // Blocks the calling thread, if necessary, until this `Mutex` is free, and // then acquires it exclusively. (This lock is also known as a "write lock.") - void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(); + void lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(); - // Mutex::Unlock() + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } + + // Mutex::unlock() // // Releases this `Mutex` and returns it from the exclusive/write state to the // free state. Calling thread must hold the `Mutex` exclusively. - void Unlock() ABSL_UNLOCK_FUNCTION(); + void unlock() ABSL_UNLOCK_FUNCTION(); + + inline void Unlock() ABSL_UNLOCK_FUNCTION() { unlock(); } - // Mutex::TryLock() + // Mutex::try_lock() // // If the mutex can be acquired without blocking, does so exclusively and // returns `true`. Otherwise, returns `false`. Returns `true` with high // probability if the `Mutex` was free. - [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true); + [[nodiscard]] bool try_lock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true); + + [[nodiscard]] bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return try_lock(); + } // Mutex::AssertHeld() // @@ -211,19 +217,19 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { // Neither read-locks nor write-locks are reentrant/recursive to avoid // potential client programming errors. // - // The Mutex API provides `Writer*()` aliases for the existing `Lock()`, - // `Unlock()` and `TryLock()` methods for use within applications mixing - // reader/writer locks. Using `Reader*()` and `Writer*()` operations in this + // The Mutex API provides `Writer*()` aliases for the existing `lock()`, + // `unlock()` and `try_lock()` methods for use within applications mixing + // reader/writer locks. Using `*_shared()` and `Writer*()` operations in this // manner can make locking behavior clearer when mixing read and write modes. // // Introducing reader locks necessarily complicates the `Mutex` state // machine somewhat. 
The table below illustrates the allowed state transitions - // of a mutex in such cases. Note that ReaderLock() may block even if the lock - // is held in shared mode; this occurs when another thread is blocked on a - // call to WriterLock(). + // of a mutex in such cases. Note that lock_shared() may block even if the + // lock is held in shared mode; this occurs when another thread is blocked on + // a call to lock(). // // --------------------------------------------------------------------------- - // Operation: WriterLock() Unlock() ReaderLock() ReaderUnlock() + // Operation: lock() unlock() lock_shared() unlock_shared() // --------------------------------------------------------------------------- // State // --------------------------------------------------------------------------- @@ -235,28 +241,35 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { // // In comments below, "shared" refers to a state of Shared(n) for any n > 0. - // Mutex::ReaderLock() + // Mutex::lock_shared() // // Blocks the calling thread, if necessary, until this `Mutex` is either free, // or in shared mode, and then acquires a share of it. Note that - // `ReaderLock()` will block if some other thread has an exclusive/writer lock - // on the mutex. + // `lock_shared()` will block if some other thread has an exclusive/writer + // lock on the mutex. + void lock_shared() ABSL_SHARED_LOCK_FUNCTION(); - void ReaderLock() ABSL_SHARED_LOCK_FUNCTION(); + void ReaderLock() ABSL_SHARED_LOCK_FUNCTION() { lock_shared(); } - // Mutex::ReaderUnlock() + // Mutex::unlock_shared() // - // Releases a read share of this `Mutex`. `ReaderUnlock` may return a mutex to - // the free state if this thread holds the last reader lock on the mutex. Note - // that you cannot call `ReaderUnlock()` on a mutex held in write mode. - void ReaderUnlock() ABSL_UNLOCK_FUNCTION(); + // Releases a read share of this `Mutex`. `unlock_shared` may return a mutex + // to the free state if this thread holds the last reader lock on the mutex. + // Note that you cannot call `unlock_shared()` on a mutex held in write mode. + void unlock_shared() ABSL_UNLOCK_FUNCTION(); + + void ReaderUnlock() ABSL_UNLOCK_FUNCTION() { unlock_shared(); } - // Mutex::ReaderTryLock() + // Mutex::try_lock_shared() // // If the mutex can be acquired without blocking, acquires this mutex for // shared access and returns `true`. Otherwise, returns `false`. Returns // `true` with high probability if the `Mutex` was free or shared. - [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true); + [[nodiscard]] bool try_lock_shared() ABSL_SHARED_TRYLOCK_FUNCTION(true); + + [[nodiscard]] bool ReaderTryLock() ABSL_SHARED_TRYLOCK_FUNCTION(true) { + return try_lock_shared(); + } // Mutex::AssertReaderHeld() // @@ -278,12 +291,12 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { // These methods may be used (along with the complementary `Reader*()` // methods) to distinguish simple exclusive `Mutex` usage (`Lock()`, // etc.) from reader/writer lock usage. 
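The `*_shared()` spellings likewise match the standard SharedLockable requirements, so `std::shared_lock` composes with `absl::Mutex` as well. A sketch under the same assumption:

#include <mutex>
#include <shared_mutex>

#include "absl/synchronization/mutex.h"

absl::Mutex g_mu;
int g_value = 0;

int Read() {
  std::shared_lock<absl::Mutex> lock(g_mu);  // g_mu.lock_shared()/unlock_shared()
  return g_value;
}

void Write(int v) {
  std::unique_lock<absl::Mutex> lock(g_mu);  // exclusive: g_mu.lock()
  g_value = v;
}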
- void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { this->Lock(); } + void WriterLock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { lock(); } - void WriterUnlock() ABSL_UNLOCK_FUNCTION() { this->Unlock(); } + void WriterUnlock() ABSL_UNLOCK_FUNCTION() { unlock(); } [[nodiscard]] bool WriterTryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { - return this->TryLock(); + return try_lock(); } // --------------------------------------------------------------------------- @@ -546,10 +559,10 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { base_internal::PerThreadSynch* absl_nonnull w); void Dtor(); - friend class CondVar; // for access to Trans()/Fer(). + friend class CondVar; // for access to Trans()/Fer(). void Trans(MuHow absl_nonnull how); // used for CondVar->Mutex transfer void Fer(base_internal::PerThreadSynch* absl_nonnull - w); // used for CondVar->Mutex transfer + w); // used for CondVar->Mutex transfer // Catch the error of writing Mutex when intending MutexLock. explicit Mutex(const volatile Mutex* absl_nullable /*ignored*/) {} @@ -572,7 +585,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED Mutex { // Class Foo { // public: // Foo::Bar* Baz() { -// MutexLock lock(&mu_); +// MutexLock lock(mu_); // ... // return bar; // } @@ -584,32 +597,42 @@ class ABSL_SCOPED_LOCKABLE MutexLock { public: // Constructors - // Calls `mu->Lock()` and returns when that call returns. That is, `*mu` is + // Calls `mu.lock()` and returns when that call returns. That is, `mu` is + // guaranteed to be locked when this object is constructed. + explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(mu) { + this->mu_.lock(); + } + + // Calls `mu->lock()` and returns when that call returns. That is, `*mu` is // guaranteed to be locked when this object is constructed. Requires that // `mu` be dereferenceable. explicit MutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) - : mu_(mu) { - this->mu_->Lock(); - } + : MutexLock(*mu) {} - // Like above, but calls `mu->LockWhen(cond)` instead. That is, in addition to + // Like above, but calls `mu.LockWhen(cond)` instead. That is, in addition to // the above, the condition given by `cond` is also guaranteed to hold when // this object is constructed. - explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond) - ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + explicit MutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), + const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { - this->mu_->LockWhen(cond); + this->mu_.LockWhen(cond); } + explicit MutexLock(Mutex* absl_nonnull mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : MutexLock(*mu, cond) {} + MutexLock(const MutexLock&) = delete; // NOLINT(runtime/mutex) MutexLock(MutexLock&&) = delete; // NOLINT(runtime/mutex) MutexLock& operator=(const MutexLock&) = delete; MutexLock& operator=(MutexLock&&) = delete; - ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->Unlock(); } + ~MutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } private: - Mutex* absl_nonnull const mu_; + Mutex& mu_; }; // ReaderMutexLock @@ -618,26 +641,34 @@ class ABSL_SCOPED_LOCKABLE MutexLock { // releases a shared lock on a `Mutex` via RAII. 
class ABSL_SCOPED_LOCKABLE ReaderMutexLock { public: + explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) + ABSL_SHARED_LOCK_FUNCTION(mu) + : mu_(mu) { + mu.lock_shared(); + } + explicit ReaderMutexLock(Mutex* absl_nonnull mu) ABSL_SHARED_LOCK_FUNCTION(mu) + : ReaderMutexLock(*mu) {} + + explicit ReaderMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), + const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(mu) : mu_(mu) { - mu->ReaderLock(); + mu.ReaderLockWhen(cond); } explicit ReaderMutexLock(Mutex* absl_nonnull mu, const Condition& cond) ABSL_SHARED_LOCK_FUNCTION(mu) - : mu_(mu) { - mu->ReaderLockWhen(cond); - } + : ReaderMutexLock(*mu, cond) {} ReaderMutexLock(const ReaderMutexLock&) = delete; ReaderMutexLock(ReaderMutexLock&&) = delete; ReaderMutexLock& operator=(const ReaderMutexLock&) = delete; ReaderMutexLock& operator=(ReaderMutexLock&&) = delete; - ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->ReaderUnlock(); } + ~ReaderMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock_shared(); } private: - Mutex* absl_nonnull const mu_; + Mutex& mu_; }; // WriterMutexLock @@ -646,27 +677,36 @@ class ABSL_SCOPED_LOCKABLE ReaderMutexLock { // releases a write (exclusive) lock on a `Mutex` via RAII. class ABSL_SCOPED_LOCKABLE WriterMutexLock { public: - explicit WriterMutexLock(Mutex* absl_nonnull mu) + explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this)) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { - mu->WriterLock(); + mu.lock(); } - explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond) + explicit WriterMutexLock(Mutex* absl_nonnull mu) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : WriterMutexLock(*mu) {} + + explicit WriterMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), + const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { - mu->WriterLockWhen(cond); + mu.WriterLockWhen(cond); } + explicit WriterMutexLock(Mutex* absl_nonnull mu, const Condition& cond) + ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : WriterMutexLock(*mu, cond) {} + WriterMutexLock(const WriterMutexLock&) = delete; WriterMutexLock(WriterMutexLock&&) = delete; WriterMutexLock& operator=(const WriterMutexLock&) = delete; WriterMutexLock& operator=(WriterMutexLock&&) = delete; - ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_->WriterUnlock(); } + ~WriterMutexLock() ABSL_UNLOCK_FUNCTION() { this->mu_.unlock(); } private: - Mutex* absl_nonnull const mu_; + Mutex& mu_; }; // ----------------------------------------------------------------------------- @@ -713,7 +753,7 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock { // Example using a scope guard: // // { -// MutexLock lock(&mu_, count_is_zero); +// MutexLock lock(mu_, count_is_zero); // // ... // } // @@ -755,7 +795,7 @@ class Condition { Condition( bool (*absl_nonnull func)(T* absl_nullability_unknown), typename absl::internal::type_identity::type* absl_nullability_unknown - arg); + arg); // Templated version for invoking a method that returns a `bool`. // @@ -1019,7 +1059,7 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { if (this->mu_ != nullptr) { - this->mu_->Lock(); + this->mu_->lock(); } } @@ -1033,7 +1073,7 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { ~MutexLockMaybe() ABSL_UNLOCK_FUNCTION() { if (this->mu_ != nullptr) { - this->mu_->Unlock(); + this->mu_->unlock(); } } @@ -1051,28 +1091,37 @@ class ABSL_SCOPED_LOCKABLE MutexLockMaybe { // mutex before destruction. `Release()` may be called at most once. 
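The guard defined below supports early release, which is why the patch relaxes its `mu_` member to `absl_nullable`: after `Release()` the destructor must do nothing. A usage sketch:

#include "absl/synchronization/mutex.h"

absl::Mutex g_mu;

void Demo(bool done_early) {
  absl::ReleasableMutexLock l(g_mu);
  // ... critical section ...
  if (done_early) {
    l.Release();  // unlocks now; the destructor then sees mu_ == nullptr
  }
  // ... lock-free work; calling Release() a second time would CHECK-fail ...
}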
class ABSL_SCOPED_LOCKABLE ReleasableMutexLock { public: + explicit ReleasableMutexLock(Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY( + this)) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(&mu) { + this->mu_->lock(); + } + explicit ReleasableMutexLock(Mutex* absl_nonnull mu) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) - : mu_(mu) { - this->mu_->Lock(); + : ReleasableMutexLock(*mu) {} + + explicit ReleasableMutexLock( + Mutex& mu ABSL_INTERNAL_ATTRIBUTE_CAPTURED_BY(this), + const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) + : mu_(&mu) { + this->mu_->LockWhen(cond); } explicit ReleasableMutexLock(Mutex* absl_nonnull mu, const Condition& cond) ABSL_EXCLUSIVE_LOCK_FUNCTION(mu) - : mu_(mu) { - this->mu_->LockWhen(cond); - } + : ReleasableMutexLock(*mu, cond) {} ~ReleasableMutexLock() ABSL_UNLOCK_FUNCTION() { if (this->mu_ != nullptr) { - this->mu_->Unlock(); + this->mu_->unlock(); } } void Release() ABSL_UNLOCK_FUNCTION(); private: - Mutex* absl_nonnull mu_; + Mutex* absl_nullable mu_; ReleasableMutexLock(const ReleasableMutexLock&) = delete; ReleasableMutexLock(ReleasableMutexLock&&) = delete; ReleasableMutexLock& operator=(const ReleasableMutexLock&) = delete; @@ -1135,7 +1184,7 @@ template <typename T> inline Condition::Condition( bool (*absl_nonnull func)(T* absl_nullability_unknown), typename absl::internal::type_identity<T>::type* absl_nullability_unknown - arg) + arg) // Just delegate to the overload above. : Condition(func, arg) {} diff --git a/absl/synchronization/mutex_benchmark.cc b/absl/synchronization/mutex_benchmark.cc index 06888dfea28..d2c64956f09 100644 --- a/absl/synchronization/mutex_benchmark.cc +++ b/absl/synchronization/mutex_benchmark.cc @@ -30,7 +30,7 @@ namespace { void BM_Mutex(benchmark::State& state) { static absl::NoDestructor<absl::Mutex> mu; for (auto _ : state) { - absl::MutexLock lock(mu.get()); + absl::MutexLock lock(*mu.get()); } } BENCHMARK(BM_Mutex)->UseRealTime()->Threads(1)->ThreadPerCpu(); @@ -38,7 +38,7 @@ BENCHMARK(BM_Mutex)->UseRealTime()->Threads(1)->ThreadPerCpu(); void BM_ReaderLock(benchmark::State& state) { static absl::NoDestructor<absl::Mutex> mu; for (auto _ : state) { - absl::ReaderMutexLock lock(mu.get()); + absl::ReaderMutexLock lock(*mu.get()); } } BENCHMARK(BM_ReaderLock)->UseRealTime()->Threads(1)->ThreadPerCpu(); @@ -46,8 +46,8 @@ BENCHMARK(BM_ReaderLock)->UseRealTime()->Threads(1)->ThreadPerCpu(); void BM_TryLock(benchmark::State& state) { absl::Mutex mu; for (auto _ : state) { - if (mu.TryLock()) { - mu.Unlock(); + if (mu.try_lock()) { + mu.unlock(); } } } @@ -56,8 +56,8 @@ BENCHMARK(BM_TryLock); void BM_ReaderTryLock(benchmark::State& state) { static absl::NoDestructor<absl::Mutex> mu; for (auto _ : state) { - if (mu->ReaderTryLock()) { - mu->ReaderUnlock(); + if (mu->try_lock_shared()) { + mu->unlock_shared(); } } } @@ -72,24 +72,6 @@ static void DelayNs(int64_t ns, int* data) { } } -template <typename MutexType> -class RaiiLocker { - public: - explicit RaiiLocker(MutexType* mu) : mu_(mu) { mu_->Lock(); } - ~RaiiLocker() { mu_->Unlock(); } - private: - MutexType* mu_; -}; - -template <> -class RaiiLocker<std::mutex> { - public: - explicit RaiiLocker(std::mutex* mu) : mu_(mu) { mu_->lock(); } - ~RaiiLocker() { mu_->unlock(); } - private: - std::mutex* mu_; -}; - // RAII object to change the Mutex priority of the running thread.
class ScopedThreadMutexPriority { public: @@ -163,7 +145,7 @@ void BM_MutexEnqueue(benchmark::State& state) { shared->looping_threads.fetch_add(1); for (int i = 0; i < kBatchSize; i++) { { - absl::MutexLock l(&shared->mu); + absl::MutexLock l(shared->mu); shared->thread_has_mutex.store(true, std::memory_order_relaxed); // Spin until all other threads are either out of the benchmark loop // or blocked on the mutex. This ensures that the mutex queue is kept @@ -226,7 +208,7 @@ void BM_Contended(benchmark::State& state) { // to keep ratio between local work and critical section approximately // equal regardless of number of threads. DelayNs(100 * state.threads(), &local); - RaiiLocker<MutexType> locker(&shared->mu); + std::scoped_lock locker(shared->mu); DelayNs(state.range(0), &shared->data); } } @@ -291,7 +273,7 @@ void BM_ConditionWaiters(benchmark::State& state) { init->DecrementCount(); m->LockWhen(absl::Condition( static_cast<bool (*)(int*)>([](int* v) { return *v == 0; }), p)); - m->Unlock(); + m->unlock(); } }; @@ -317,15 +299,15 @@ void BM_ConditionWaiters(benchmark::State& state) { init.Wait(); for (auto _ : state) { - mu.Lock(); - mu.Unlock(); // Each unlock requires Condition evaluation for our waiters. + mu.lock(); + mu.unlock(); // Each unlock requires Condition evaluation for our waiters. } - mu.Lock(); + mu.lock(); for (int i = 0; i < num_classes; i++) { equivalence_classes[i] = 0; } - mu.Unlock(); + mu.unlock(); } // Some configurations have higher thread limits than others. diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc index a3eb3db239c..793acf8c01d 100644 --- a/absl/synchronization/mutex_test.cc +++ b/absl/synchronization/mutex_test.cc @@ -23,7 +23,9 @@ #include #include #include +#include <mutex> // NOLINT(build/c++11) #include +#include <shared_mutex> // NOLINT(build/c++14) #include #include // NOLINT(build/c++11) #include @@ -106,7 +108,7 @@ static void CheckSumG0G1(void *v) { static void TestMu(TestContext *cxt, int c) { for (int i = 0; i != cxt->iterations; i++) { - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); int a = cxt->g0 + 1; cxt->g0 = a; cxt->g1--; @@ -117,17 +119,17 @@ static void TestTry(TestContext *cxt, int c) { for (int i = 0; i != cxt->iterations; i++) { do { std::this_thread::yield(); - } while (!cxt->mu.TryLock()); + } while (!cxt->mu.try_lock()); int a = cxt->g0 + 1; cxt->g0 = a; cxt->g1--; - cxt->mu.Unlock(); + cxt->mu.unlock(); } } static void TestR20ms(TestContext *cxt, int c) { for (int i = 0; i != cxt->iterations; i++) { - absl::ReaderMutexLock l(&cxt->mu); + absl::ReaderMutexLock l(cxt->mu); absl::SleepFor(absl::Milliseconds(20)); cxt->mu.AssertReaderHeld(); } @@ -136,7 +138,7 @@ static void TestRW(TestContext *cxt, int c) { if ((c & 1) == 0) { for (int i = 0; i != cxt->iterations; i++) { - absl::WriterMutexLock l(&cxt->mu); + absl::WriterMutexLock l(cxt->mu); cxt->g0++; cxt->g1--; cxt->mu.AssertHeld(); @@ -144,7 +146,7 @@ } } else { for (int i = 0; i != cxt->iterations; i++) { - absl::ReaderMutexLock l(&cxt->mu); + absl::ReaderMutexLock l(cxt->mu); CHECK_EQ(cxt->g0, -cxt->g1) << "Error in TestRW"; cxt->mu.AssertReaderHeld(); } } } @@ -166,7 +168,7 @@ static void TestAwait(TestContext *cxt, int c) { MyContext mc; mc.target = c; mc.cxt = cxt; - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); cxt->mu.AssertHeld(); while (cxt->g0 < cxt->iterations) { cxt->mu.Await(absl::Condition(&mc, &MyContext::MyTurn)); @@ -182,7 +184,7 @@ static void
TestAwait(TestContext *cxt, int c) { static void TestSignalAll(TestContext *cxt, int c) { int target = c; - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); cxt->mu.AssertHeld(); while (cxt->g0 < cxt->iterations) { while (cxt->g0 != target && cxt->g0 != cxt->iterations) { @@ -200,7 +202,7 @@ static void TestSignalAll(TestContext *cxt, int c) { static void TestSignal(TestContext *cxt, int c) { CHECK_EQ(cxt->threads, 2) << "TestSignal should use 2 threads"; int target = c; - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); cxt->mu.AssertHeld(); while (cxt->g0 < cxt->iterations) { while (cxt->g0 != target && cxt->g0 != cxt->iterations) { @@ -217,7 +219,7 @@ static void TestSignal(TestContext *cxt, int c) { static void TestCVTimeout(TestContext *cxt, int c) { int target = c; - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); cxt->mu.AssertHeld(); while (cxt->g0 < cxt->iterations) { while (cxt->g0 != target && cxt->g0 != cxt->iterations) { @@ -241,7 +243,7 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) { absl::Condition false_cond(&kFalse); absl::Condition g0ge2(G0GE2, cxt); if (c == 0) { - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); absl::Time start = absl::Now(); if (use_cv) { @@ -309,7 +311,7 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) { CHECK_EQ(cxt->g0, cxt->threads) << "TestTime failed"; } else if (c == 1) { - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); const absl::Time start = absl::Now(); if (use_cv) { cxt->cv.WaitWithTimeout(&cxt->mu, absl::Milliseconds(500)); @@ -322,7 +324,7 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) { << "TestTime failed"; cxt->g0++; } else if (c == 2) { - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); if (use_cv) { while (cxt->g0 < 2) { cxt->cv.WaitWithTimeout(&cxt->mu, absl::Seconds(100)); @@ -333,7 +335,7 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) { } cxt->g0++; } else { - absl::MutexLock l(&cxt->mu); + absl::MutexLock l(cxt->mu); if (use_cv) { while (cxt->g0 < 2) { cxt->cv.Wait(&cxt->mu); @@ -351,11 +353,11 @@ static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); } static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv, const std::function &cb) { - mu->Lock(); + mu->lock(); int c = (*c0)++; - mu->Unlock(); + mu->unlock(); cb(c); - absl::MutexLock l(mu); + absl::MutexLock l(*mu); (*c1)++; cv->Signal(); } @@ -377,11 +379,11 @@ static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int), &EndTest, &c0, &c1, &mu2, &cv2, std::function(std::bind(test, cxt, std::placeholders::_1)))); } - mu2.Lock(); + mu2.lock(); while (c1 != threads) { cv2.Wait(&mu2); } - mu2.Unlock(); + mu2.unlock(); return cxt->g0; } @@ -422,7 +424,7 @@ struct TimeoutBugStruct { static void WaitForA(TimeoutBugStruct *x) { x->mu.LockWhen(absl::Condition(&x->a)); x->a_waiter_count--; - x->mu.Unlock(); + x->mu.unlock(); } static bool NoAWaiters(TimeoutBugStruct *x) { return x->a_waiter_count == 0; } @@ -445,27 +447,27 @@ TEST(Mutex, CondVarWaitSignalsAwait) { // Thread A. Sets barrier, waits for release using Mutex::Await, then // signals released_cv. 
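The tests above and below exercise `CondVar` wakeups interleaved with `Mutex::Await`. The underlying `CondVar` pattern they rely on looks like this (sketch; note that `CondVar::Wait` still takes a `Mutex*` in this patch):

#include "absl/synchronization/mutex.h"

absl::Mutex g_mu;
absl::CondVar g_cv;
bool g_ready = false;

void Consumer() {
  absl::MutexLock l(g_mu);
  while (!g_ready) {
    g_cv.Wait(&g_mu);  // atomically releases g_mu and blocks; relocks on wake
  }
}

void Producer() {
  {
    absl::MutexLock l(g_mu);
    g_ready = true;
  }
  g_cv.Signal();
}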
pool->Schedule([&state] { - state.release_mu.Lock(); + state.release_mu.lock(); - state.barrier_mu.Lock(); + state.barrier_mu.lock(); state.barrier = true; - state.barrier_mu.Unlock(); + state.barrier_mu.unlock(); state.release_mu.Await(absl::Condition(&state.release)); state.released_cv.Signal(); - state.release_mu.Unlock(); + state.release_mu.unlock(); }); state.barrier_mu.LockWhen(absl::Condition(&state.barrier)); - state.barrier_mu.Unlock(); - state.release_mu.Lock(); + state.barrier_mu.unlock(); + state.release_mu.lock(); // Thread A is now blocked on release by way of Mutex::Await(). // Set release. Calling released_cv.Wait() should un-block thread A, // which will signal released_cv. If not, the test will hang. state.release = true; state.released_cv.Wait(&state.release_mu); - state.release_mu.Unlock(); + state.release_mu.unlock(); } // Test that a CondVar.WaitWithTimeout(&mutex) can un-block a call to @@ -486,20 +488,20 @@ TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) { // Thread A. Sets barrier, waits for release using Mutex::Await, then // signals released_cv. pool->Schedule([&state] { - state.release_mu.Lock(); + state.release_mu.lock(); - state.barrier_mu.Lock(); + state.barrier_mu.lock(); state.barrier = true; - state.barrier_mu.Unlock(); + state.barrier_mu.unlock(); state.release_mu.Await(absl::Condition(&state.release)); state.released_cv.Signal(); - state.release_mu.Unlock(); + state.release_mu.unlock(); }); state.barrier_mu.LockWhen(absl::Condition(&state.barrier)); - state.barrier_mu.Unlock(); - state.release_mu.Lock(); + state.barrier_mu.unlock(); + state.release_mu.lock(); // Thread A is now blocked on release by way of Mutex::Await(). // Set release. Calling released_cv.Wait() should un-block thread A, @@ -510,7 +512,7 @@ TEST(Mutex, CondVarWaitWithTimeoutSignalsAwait) { << "; Unrecoverable test failure: CondVar::WaitWithTimeout did not " "unblock the absl::Mutex::Await call in another thread."; - state.release_mu.Unlock(); + state.release_mu.unlock(); } // Test for regression of a bug in loop of TryRemove() @@ -536,7 +538,7 @@ TEST(Mutex, MutexTimeoutBug) { x.a = true; // wakeup the two waiters on A x.mu.Await(absl::Condition(&NoAWaiters, &x)); // wait for them to exit - x.mu.Unlock(); + x.mu.unlock(); } struct CondVarWaitDeadlock : testing::TestWithParam { @@ -556,27 +558,27 @@ struct CondVarWaitDeadlock : testing::TestWithParam { void Waiter1() { if (read_lock1) { - mu.ReaderLock(); + mu.lock_shared(); while (!cond1) { cv.Wait(&mu); } - mu.ReaderUnlock(); + mu.unlock_shared(); } else { - mu.Lock(); + mu.lock(); while (!cond1) { cv.Wait(&mu); } - mu.Unlock(); + mu.unlock(); } } void Waiter2() { if (read_lock2) { mu.ReaderLockWhen(absl::Condition(&cond2)); - mu.ReaderUnlock(); + mu.unlock_shared(); } else { mu.LockWhen(absl::Condition(&cond2)); - mu.Unlock(); + mu.unlock(); } } }; @@ -600,21 +602,21 @@ TEST_P(CondVarWaitDeadlock, Test) { absl::SleepFor(absl::Milliseconds(100)); // Wake condwaiter. - mu.Lock(); + mu.lock(); cond1 = true; if (signal_unlocked) { - mu.Unlock(); + mu.unlock(); cv.Signal(); } else { cv.Signal(); - mu.Unlock(); + mu.unlock(); } waiter1.reset(); // "join" waiter1 // Wake waiter. 
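A note on the two wait styles this test family contrasts: CondVar::Wait() requires an explicit Signal(), while Mutex::Await() re-evaluates its Condition whenever the Mutex is released, so no signaling call is needed. A minimal sketch with illustrative names, not part of this diff:

```cpp
#include "absl/synchronization/mutex.h"

absl::Mutex mu;
bool ready = false;

void Consumer() {
  absl::MutexLock l(mu);              // reference-taking ctor, as used above
  mu.Await(absl::Condition(&ready));  // sleeps until ready becomes true
}

void Producer() {
  absl::MutexLock l(mu);
  ready = true;  // waiters' Conditions are re-checked when mu is released
}
```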
- mu.Lock(); + mu.lock(); cond2 = true; - mu.Unlock(); + mu.unlock(); waiter2.reset(); // "join" waiter2 } @@ -639,19 +641,19 @@ struct DequeueAllWakeableBugStruct { // Test for regression of a bug in loop of DequeueAllWakeable() static void AcquireAsReader(DequeueAllWakeableBugStruct *x) { - x->mu.ReaderLock(); - x->mu2.Lock(); + x->mu.lock_shared(); + x->mu2.lock(); x->unfinished_count--; x->done1 = (x->unfinished_count == 0); - x->mu2.Unlock(); + x->mu2.unlock(); // make sure that both readers acquired mu before we release it. absl::SleepFor(absl::Seconds(2)); - x->mu.ReaderUnlock(); + x->mu.unlock_shared(); - x->mu2.Lock(); + x->mu2.lock(); x->finished_count--; x->done2 = (x->finished_count == 0); - x->mu2.Unlock(); + x->mu2.unlock(); } // Test for regression of a bug in loop of DequeueAllWakeable() @@ -663,21 +665,21 @@ TEST(Mutex, MutexReaderWakeupBug) { x.done1 = false; x.finished_count = 2; x.done2 = false; - x.mu.Lock(); // acquire mu exclusively + x.mu.lock(); // acquire mu exclusively // queue two thread that will block on reader locks on x.mu tp->Schedule(std::bind(&AcquireAsReader, &x)); tp->Schedule(std::bind(&AcquireAsReader, &x)); absl::SleepFor(absl::Seconds(1)); // give time for reader threads to block - x.mu.Unlock(); // wake them up + x.mu.unlock(); // wake them up // both readers should finish promptly EXPECT_TRUE( x.mu2.LockWhenWithTimeout(absl::Condition(&x.done1), absl::Seconds(10))); - x.mu2.Unlock(); + x.mu2.unlock(); EXPECT_TRUE( x.mu2.LockWhenWithTimeout(absl::Condition(&x.done2), absl::Seconds(10))); - x.mu2.Unlock(); + x.mu2.unlock(); } struct LockWhenTestStruct { @@ -689,15 +691,15 @@ struct LockWhenTestStruct { }; static bool LockWhenTestIsCond(LockWhenTestStruct *s) { - s->mu2.Lock(); + s->mu2.lock(); s->waiting = true; - s->mu2.Unlock(); + s->mu2.unlock(); return s->cond; } static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) { s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s)); - s->mu1.Unlock(); + s->mu1.unlock(); } TEST(Mutex, LockWhen) { @@ -705,11 +707,11 @@ TEST(Mutex, LockWhen) { std::thread t(LockWhenTestWaitForIsCond, &s); s.mu2.LockWhen(absl::Condition(&s.waiting)); - s.mu2.Unlock(); + s.mu2.unlock(); - s.mu1.Lock(); + s.mu1.lock(); s.cond = true; - s.mu1.Unlock(); + s.mu1.unlock(); t.join(); } @@ -724,20 +726,20 @@ TEST(Mutex, LockWhenGuard) { bool (*cond_lt_10)(int *) = [](int *p) { return *p < 10; }; std::thread t1([&mu, &n, &done, cond_eq_10]() { - absl::ReaderMutexLock lock(&mu, absl::Condition(cond_eq_10, &n)); + absl::ReaderMutexLock lock(mu, absl::Condition(cond_eq_10, &n)); done = true; }); std::thread t2[10]; for (std::thread &t : t2) { t = std::thread([&mu, &n, cond_lt_10]() { - absl::WriterMutexLock lock(&mu, absl::Condition(cond_lt_10, &n)); + absl::WriterMutexLock lock(mu, absl::Condition(cond_lt_10, &n)); ++n; }); } { - absl::MutexLock lock(&mu); + absl::MutexLock lock(mu); n = 0; } @@ -749,7 +751,7 @@ TEST(Mutex, LockWhenGuard) { } // -------------------------------------------------------- -// The following test requires Mutex::ReaderLock to be a real shared +// The following test requires Mutex::lock_shared to be a real shared // lock, which is not the case in all builds. 
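The renames above (Lock/Unlock/TryLock/ReaderLock/ReaderUnlock to lock/unlock/try_lock/lock_shared/unlock_shared) are what let the standard RAII wrappers manage an absl::Mutex directly, as the new TEST(Mutex, ScopedLock) further below confirms. A minimal sketch, assuming absl::Mutex now satisfies the standard Lockable and SharedLockable requirements as these call sites suggest:

```cpp
#include <mutex>         // std::scoped_lock
#include <shared_mutex>  // std::shared_lock

#include "absl/synchronization/mutex.h"

absl::Mutex mu;
int counter = 0;

void Increment() {
  std::scoped_lock l(mu);  // exclusive lock, released at end of scope
  ++counter;
}

int Read() {
  std::shared_lock l(mu);  // shared (reader) lock via lock_shared()
  return counter;
}
```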
#if !defined(ABSL_MUTEX_READER_LOCK_IS_EXCLUSIVE) @@ -776,9 +778,9 @@ struct ReaderDecrementBugStruct { // L >= mu, L < mu_waiting_on_cond static bool IsCond(void *v) { ReaderDecrementBugStruct *x = reinterpret_cast<ReaderDecrementBugStruct *>(v); - x->mu2.Lock(); + x->mu2.lock(); x->waiting_on_cond = true; - x->mu2.Unlock(); + x->mu2.unlock(); return x->cond; } @@ -791,23 +793,23 @@ static bool AllDone(void *v) { // L={} static void WaitForCond(ReaderDecrementBugStruct *x) { absl::Mutex dummy; - absl::MutexLock l(&dummy); + absl::MutexLock l(dummy); x->mu.LockWhen(absl::Condition(&IsCond, x)); x->done--; - x->mu.Unlock(); + x->mu.unlock(); } // L={} static void GetReadLock(ReaderDecrementBugStruct *x) { - x->mu.ReaderLock(); - x->mu2.Lock(); + x->mu.lock_shared(); + x->mu2.lock(); x->have_reader_lock = true; x->mu2.Await(absl::Condition(&x->complete)); - x->mu2.Unlock(); - x->mu.ReaderUnlock(); - x->mu.Lock(); + x->mu2.unlock(); + x->mu.unlock_shared(); + x->mu.lock(); x->done--; - x->mu.Unlock(); + x->mu.unlock(); } // Test for reader counter being decremented incorrectly by waiter @@ -823,32 +825,32 @@ TEST(Mutex, MutexReaderDecrementBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { // Run WaitForCond() and wait for it to sleep std::thread thread1(WaitForCond, &x); x.mu2.LockWhen(absl::Condition(&x.waiting_on_cond)); - x.mu2.Unlock(); + x.mu2.unlock(); // Run GetReadLock(), and wait for it to get the read lock std::thread thread2(GetReadLock, &x); x.mu2.LockWhen(absl::Condition(&x.have_reader_lock)); - x.mu2.Unlock(); + x.mu2.unlock(); // Get the reader lock ourselves, and release it. - x.mu.ReaderLock(); - x.mu.ReaderUnlock(); + x.mu.lock_shared(); + x.mu.unlock_shared(); // The lock should be held in read mode by GetReadLock(). // If we have the bug, the lock will be free. x.mu.AssertReaderHeld(); // Wake up all the threads. - x.mu2.Lock(); + x.mu2.lock(); x.complete = true; - x.mu2.Unlock(); + x.mu2.unlock(); // TODO(delesley): turn on analysis once lock upgrading is supported. // (This call upgrades the lock from shared to exclusive.)
- x.mu.Lock(); + x.mu.lock(); x.cond = true; x.mu.Await(absl::Condition(&AllDone, &x)); - x.mu.Unlock(); + x.mu.unlock(); thread1.join(); thread2.join(); @@ -869,9 +871,9 @@ TEST(Mutex, LockedMutexDestructionBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { auto mu = absl::make_unique<absl::Mutex[]>(kNumLocks); for (int j = 0; j != kNumLocks; j++) { if ((j % 2) == 0) { - mu[j].WriterLock(); + mu[j].lock(); } else { - mu[j].ReaderLock(); + mu[j].lock_shared(); } } } @@ -1067,15 +1069,15 @@ static void ReaderForReaderOnCondVar(absl::Mutex *mu, absl::CondVar *cv, int *running) { absl::InsecureBitGen gen; std::uniform_int_distribution<int> random_millis(0, 15); - mu->ReaderLock(); + mu->lock_shared(); while (*running == 3) { absl::SleepFor(absl::Milliseconds(random_millis(gen))); cv->WaitWithTimeout(mu, absl::Milliseconds(random_millis(gen))); } - mu->ReaderUnlock(); - mu->Lock(); + mu->unlock_shared(); + mu->lock(); (*running)--; - mu->Unlock(); + mu->unlock(); } static bool IntIsZero(int *x) { return *x == 0; } @@ -1090,10 +1092,10 @@ TEST(Mutex, TestReaderOnCondVar) { tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running)); tp->Schedule(std::bind(&ReaderForReaderOnCondVar, &mu, &cv, &running)); absl::SleepFor(absl::Seconds(2)); - mu.Lock(); + mu.lock(); running--; mu.Await(absl::Condition(&IntIsZero, &running)); - mu.Unlock(); + mu.unlock(); } // -------------------------------------------------------- @@ -1117,7 +1119,7 @@ static bool ConditionWithAcquire(AcquireFromConditionStruct *x) { bool always_false = false; x->mu1.LockWhenWithTimeout(absl::Condition(&always_false), absl::Milliseconds(100)); - x->mu1.Unlock(); + x->mu1.unlock(); } CHECK_LT(x->value, 4) << "should not be invoked a fourth time"; @@ -1129,7 +1131,7 @@ static void WaitForCond2(AcquireFromConditionStruct *x) { // wait for cond0 to become true x->mu0.LockWhen(absl::Condition(&ConditionWithAcquire, x)); x->done = true; - x->mu0.Unlock(); + x->mu0.unlock(); } // Test for Condition whose function acquires other Mutexes @@ -1145,12 +1147,12 @@ TEST(Mutex, AcquireFromCondition) { // return false. absl::SleepFor(absl::Milliseconds(500)); // allow T time to hang - x.mu0.Lock(); + x.mu0.lock(); x.cv.WaitWithTimeout(&x.mu0, absl::Milliseconds(500)); // wake T // T will be woken because the Wait() will call ConditionWithAcquire() // for the second time, and it will return true. - x.mu0.Unlock(); + x.mu0.unlock(); // T will then acquire the lock and recheck its own condition. // It will find the condition true, as this is the third invocation, @@ -1166,7 +1168,7 @@ TEST(Mutex, AcquireFromCondition) { // is conceptually waiting both on the condition variable, and on mu2.
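AcquireFromCondition, just above, exercises a subtle rule: a Condition's predicate is evaluated while the guarded Mutex is held, so it may briefly acquire *other* mutexes (deadlock ordering permitting) but must never touch the Mutex it guards. A sketch of the safe shape, with illustrative names not taken from the diff:

```cpp
absl::Mutex mu0;
absl::Mutex mu1;
int progress = 0;

bool ProgressMade(int *p) {
  absl::MutexLock l(mu1);  // ok: a different mutex, held only briefly
  return *p >= 3;
}

void WaitForProgress() {
  mu0.LockWhen(absl::Condition(&ProgressMade, &progress));
  mu0.unlock();
}
```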
x.mu0.LockWhen(absl::Condition(&x.done)); - x.mu0.Unlock(); + x.mu0.unlock(); } TEST(Mutex, DeadlockDetector) { @@ -1178,20 +1180,20 @@ TEST(Mutex, DeadlockDetector) { absl::Mutex m3; absl::Mutex m4; - m1.Lock(); // m1 gets ID1 - m2.Lock(); // m2 gets ID2 - m3.Lock(); // m3 gets ID3 - m3.Unlock(); - m2.Unlock(); + m1.lock(); // m1 gets ID1 + m2.lock(); // m2 gets ID2 + m3.lock(); // m3 gets ID3 + m3.unlock(); + m2.unlock(); // m1 still held m1.ForgetDeadlockInfo(); // m1 loses ID - m2.Lock(); // m2 gets ID2 - m3.Lock(); // m3 gets ID3 - m4.Lock(); // m4 gets ID4 - m3.Unlock(); - m2.Unlock(); - m4.Unlock(); - m1.Unlock(); + m2.lock(); // m2 gets ID2 + m3.lock(); // m3 gets ID3 + m4.lock(); // m4 gets ID4 + m3.unlock(); + m2.unlock(); + m4.unlock(); + m1.unlock(); } // Bazel has a test "warning" file that programs can write to if the @@ -1246,18 +1248,18 @@ TEST(Mutex, DeadlockDetectorBazelWarning) { absl::Mutex mu0; absl::Mutex mu1; - bool got_mu0 = mu0.TryLock(); - mu1.Lock(); // acquire mu1 while holding mu0 + bool got_mu0 = mu0.try_lock(); + mu1.lock(); // acquire mu1 while holding mu0 if (got_mu0) { - mu0.Unlock(); + mu0.unlock(); } - if (mu0.TryLock()) { // try lock shouldn't cause deadlock detector to fire - mu0.Unlock(); + if (mu0.try_lock()) { // try lock shouldn't cause deadlock detector to fire + mu0.unlock(); } - mu0.Lock(); // acquire mu0 while holding mu1; should get one deadlock + mu0.lock(); // acquire mu0 while holding mu1; should get one deadlock // report here - mu0.Unlock(); - mu1.Unlock(); + mu0.unlock(); + mu1.unlock(); absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort); } @@ -1272,10 +1274,10 @@ TEST(Mutex, DeadlockDetectorLongCycle) { // Check that we survive a deadlock with a lock cycle. std::vector mutex(100); for (size_t i = 0; i != mutex.size(); i++) { - mutex[i].Lock(); - mutex[(i + 1) % mutex.size()].Lock(); - mutex[i].Unlock(); - mutex[(i + 1) % mutex.size()].Unlock(); + mutex[i].lock(); + mutex[(i + 1) % mutex.size()].lock(); + mutex[i].unlock(); + mutex[(i + 1) % mutex.size()].unlock(); } absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kAbort); @@ -1295,10 +1297,10 @@ TEST(Mutex, DeadlockDetectorStressTest) ABSL_NO_THREAD_SAFETY_ANALYSIS { int end = std::min(n_locks, i + 5); // acquire and then release locks i, i+1, ..., i+4 for (int j = i; j < end; j++) { - array_of_locks[j].Lock(); + array_of_locks[j].lock(); } for (int j = i; j < end; j++) { - array_of_locks[j].Unlock(); + array_of_locks[j].unlock(); } } } @@ -1319,11 +1321,11 @@ TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { absl::Mutex b, c; // Hold mutex. - a->Lock(); + a->lock(); // Force deadlock id assignment by acquiring another lock. - b.Lock(); - b.Unlock(); + b.lock(); + b.unlock(); // Delete the mutex. The Mutex destructor tries to remove held locks, // but the attempt isn't foolproof. It can fail if: @@ -1338,8 +1340,8 @@ TEST(Mutex, DeadlockIdBug) ABSL_NO_THREAD_SAFETY_ANALYSIS { // We should end up getting assigned the same deadlock id that was // freed up when "a" was deleted, which will cause a spurious deadlock // report if the held lock entry for "a" was not invalidated. 
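For reference, the inversion DeadlockDetectorBazelWarning provokes reduces to a few lines: once the detector has recorded the acquisition order a before b, taking b before a closes a cycle and yields one report. A single-threaded sketch, not from the diff:

```cpp
void ProvokeOneReport() {
  absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
  absl::Mutex a, b;
  a.lock();
  b.lock();    // records the edge a -> b
  b.unlock();
  a.unlock();
  b.lock();
  a.lock();    // b -> a completes the cycle: one report expected here
  a.unlock();
  b.unlock();
}
```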
- c.Lock(); - c.Unlock(); + c.lock(); + c.unlock(); } // -------------------------------------------------------- @@ -1574,11 +1576,11 @@ TEST_P(TimeoutTest, Await) { std::unique_ptr pool = CreateDefaultPool(); RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] { - absl::MutexLock l(&mu); + absl::MutexLock l(mu); value = true; }); - absl::MutexLock lock(&mu); + absl::MutexLock lock(mu); absl::Time start_time = absl::Now(); absl::Condition cond(&value); bool result = @@ -1608,7 +1610,7 @@ TEST_P(TimeoutTest, LockWhen) { std::unique_ptr pool = CreateDefaultPool(); RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] { - absl::MutexLock l(&mu); + absl::MutexLock l(mu); value = true; }); @@ -1618,7 +1620,7 @@ TEST_P(TimeoutTest, LockWhen) { params.use_absolute_deadline ? mu.LockWhenWithDeadline(cond, start_time + params.wait_timeout) : mu.LockWhenWithTimeout(cond, params.wait_timeout); - mu.Unlock(); + mu.unlock(); if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) { EXPECT_EQ(params.expected_result, result); @@ -1643,7 +1645,7 @@ TEST_P(TimeoutTest, ReaderLockWhen) { std::unique_ptr pool = CreateDefaultPool(); RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] { - absl::MutexLock l(&mu); + absl::MutexLock l(mu); value = true; }); @@ -1654,7 +1656,7 @@ TEST_P(TimeoutTest, ReaderLockWhen) { start_time + params.wait_timeout) : mu.ReaderLockWhenWithTimeout(absl::Condition(&value), params.wait_timeout); - mu.ReaderUnlock(); + mu.unlock_shared(); if (DelayIsWithinBounds(params.expected_delay, absl::Now() - start_time)) { EXPECT_EQ(params.expected_result, result); @@ -1680,12 +1682,12 @@ TEST_P(TimeoutTest, Wait) { std::unique_ptr pool = CreateDefaultPool(); RunAfterDelay(params.satisfy_condition_delay, pool.get(), [&] { - absl::MutexLock l(&mu); + absl::MutexLock l(mu); value = true; cv.Signal(); }); - absl::MutexLock lock(&mu); + absl::MutexLock lock(mu); absl::Time start_time = absl::Now(); absl::Duration timeout = params.wait_timeout; absl::Time deadline = start_time + timeout; @@ -1711,13 +1713,13 @@ TEST(Mutex, Logging) { logged_mutex.EnableDebugLog("fido_mutex"); absl::CondVar logged_cv; logged_cv.EnableDebugLog("rover_cv"); - logged_mutex.Lock(); + logged_mutex.lock(); logged_cv.WaitWithTimeout(&logged_mutex, absl::Milliseconds(20)); - logged_mutex.Unlock(); - logged_mutex.ReaderLock(); - logged_mutex.ReaderUnlock(); - logged_mutex.Lock(); - logged_mutex.Unlock(); + logged_mutex.unlock(); + logged_mutex.lock_shared(); + logged_mutex.unlock_shared(); + logged_mutex.lock(); + logged_mutex.unlock(); logged_cv.Signal(); logged_cv.SignalAll(); } @@ -1735,8 +1737,8 @@ TEST(Mutex, LoggingAddressReuse) { alive[i] = true; mu->EnableDebugLog("Mutex"); mu->EnableInvariantDebugging(invariant, &alive[i]); - mu->Lock(); - mu->Unlock(); + mu->lock(); + mu->unlock(); mu->~Mutex(); alive[i] = false; } @@ -1762,8 +1764,8 @@ TEST(Mutex, SynchEventRace) { { absl::Mutex mu; mu.EnableInvariantDebugging([](void *) {}, nullptr); - mu.Lock(); - mu.Unlock(); + mu.lock(); + mu.unlock(); } { absl::Mutex mu; @@ -1900,7 +1902,7 @@ TEST(Mutex, MuTime) { } TEST(Mutex, SignalExitedThread) { - // The test may expose a race when Mutex::Unlock signals a thread + // The test may expose a race when Mutex::unlock signals a thread // that has already exited. 
#if defined(__wasm__) || defined(__asmjs__) constexpr int kThreads = 1; // OOMs under WASM @@ -1913,11 +1915,11 @@ TEST(Mutex, SignalExitedThread) { for (int i = 0; i < kThreads; i++) { absl::Mutex mu; std::thread t([&]() { - mu.Lock(); - mu.Unlock(); + mu.lock(); + mu.unlock(); }); - mu.Lock(); - mu.Unlock(); + mu.lock(); + mu.unlock(); t.join(); } }); @@ -1931,7 +1933,7 @@ TEST(Mutex, WriterPriority) { std::atomic saw_wrote{false}; auto readfunc = [&]() { for (size_t i = 0; i < 10; ++i) { - absl::ReaderMutexLock lock(&mu); + absl::ReaderMutexLock lock(mu); if (wrote) { saw_wrote = true; break; @@ -1946,7 +1948,7 @@ TEST(Mutex, WriterPriority) { // PerThreadSynch::priority, so the writer intentionally runs on a new thread. std::thread t3([&]() { // The writer should be able squeeze between the two alternating readers. - absl::MutexLock lock(&mu); + absl::MutexLock lock(mu); wrote = true; }); t1.join(); @@ -1978,30 +1980,30 @@ TEST(Mutex, CondVarPriority) { bool morph = false; std::thread th([&]() { EXPECT_EQ(0, pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m)); - mu.Lock(); + mu.lock(); locked = true; mu.Await(absl::Condition(¬ified)); - mu.Unlock(); + mu.unlock(); EXPECT_EQ(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() ->per_thread_synch.priority, param.sched_priority); - mu.Lock(); + mu.lock(); mu.Await(absl::Condition(&waiting)); morph = true; absl::SleepFor(absl::Seconds(1)); cv.Signal(); - mu.Unlock(); + mu.unlock(); }); - mu.Lock(); + mu.lock(); mu.Await(absl::Condition(&locked)); notified = true; - mu.Unlock(); - mu.Lock(); + mu.unlock(); + mu.lock(); waiting = true; while (!morph) { cv.Wait(&mu); } - mu.Unlock(); + mu.unlock(); th.join(); EXPECT_NE(absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() ->per_thread_synch.priority, @@ -2016,22 +2018,34 @@ TEST(Mutex, LockWhenWithTimeoutResult) { const bool kAlwaysTrue = true, kAlwaysFalse = false; const absl::Condition kTrueCond(&kAlwaysTrue), kFalseCond(&kAlwaysFalse); EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1))); - mu.Unlock(); + mu.unlock(); EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1))); EXPECT_TRUE(mu.AwaitWithTimeout(kTrueCond, absl::Milliseconds(1))); EXPECT_FALSE(mu.AwaitWithTimeout(kFalseCond, absl::Milliseconds(1))); std::thread th1([&]() { EXPECT_TRUE(mu.LockWhenWithTimeout(kTrueCond, absl::Milliseconds(1))); - mu.Unlock(); + mu.unlock(); }); std::thread th2([&]() { EXPECT_FALSE(mu.LockWhenWithTimeout(kFalseCond, absl::Milliseconds(1))); - mu.Unlock(); + mu.unlock(); }); absl::SleepFor(absl::Milliseconds(100)); - mu.Unlock(); + mu.unlock(); th1.join(); th2.join(); } +TEST(Mutex, ScopedLock) { + absl::Mutex mu; + { + std::scoped_lock l(mu); + } + + { + std::shared_lock l(mu); + EXPECT_TRUE(l.owns_lock()); + } +} + } // namespace diff --git a/absl/synchronization/notification.cc b/absl/synchronization/notification.cc index a5853ab3d70..a890c1bcd55 100644 --- a/absl/synchronization/notification.cc +++ b/absl/synchronization/notification.cc @@ -26,7 +26,7 @@ ABSL_NAMESPACE_BEGIN void Notification::Notify() { base_internal::TraceSignal(this, TraceObjectKind()); - MutexLock l(&this->mutex_); + MutexLock l(this->mutex_); #ifndef NDEBUG if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) { @@ -43,7 +43,7 @@ void Notification::Notify() { Notification::~Notification() { // Make sure that the thread running Notify() exits before the object is // destructed. 
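Worth calling out for TEST(Mutex, LockWhenWithTimeoutResult) above: LockWhenWithTimeout() (and LockWhenWithDeadline()) acquire the lock whether or not the condition became true; the boolean only reports whether the condition held. Every path therefore needs an unlock(), as in this sketch:

```cpp
absl::Mutex mu;
bool flag = false;

bool WaitBriefly() {
  const bool ok =
      mu.LockWhenWithTimeout(absl::Condition(&flag), absl::Milliseconds(1));
  // ok is true iff the condition held before the timeout; either way this
  // thread now owns mu and must release it.
  mu.unlock();
  return ok;
}
```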
- MutexLock l(&this->mutex_); + MutexLock l(this->mutex_); } void Notification::WaitForNotification() const { @@ -51,7 +51,7 @@ void Notification::WaitForNotification() const { if (!HasBeenNotifiedInternal(&this->notified_yet_)) { this->mutex_.LockWhen( Condition(&HasBeenNotifiedInternal, &this->notified_yet_)); - this->mutex_.Unlock(); + this->mutex_.unlock(); } base_internal::TraceContinue(this, TraceObjectKind()); } @@ -63,7 +63,7 @@ bool Notification::WaitForNotificationWithTimeout( if (!notified) { notified = this->mutex_.LockWhenWithTimeout( Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout); - this->mutex_.Unlock(); + this->mutex_.unlock(); } base_internal::TraceContinue(notified ? this : nullptr, TraceObjectKind()); return notified; @@ -75,7 +75,7 @@ bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const { if (!notified) { notified = this->mutex_.LockWhenWithDeadline( Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline); - this->mutex_.Unlock(); + this->mutex_.unlock(); } base_internal::TraceContinue(notified ? this : nullptr, TraceObjectKind()); return notified; diff --git a/absl/synchronization/notification_test.cc b/absl/synchronization/notification_test.cc index eedad178393..ac5dccd5687 100644 --- a/absl/synchronization/notification_test.cc +++ b/absl/synchronization/notification_test.cc @@ -34,17 +34,17 @@ class ThreadSafeCounter { ThreadSafeCounter() : count_(0) {} void Increment() { - MutexLock lock(&mutex_); + MutexLock lock(mutex_); ++count_; } int Get() const { - MutexLock lock(&mutex_); + MutexLock lock(mutex_); return count_; } void WaitUntilGreaterOrEqual(int n) { - MutexLock lock(&mutex_); + MutexLock lock(mutex_); auto cond = [this, n]() { return count_ >= n; }; mutex_.Await(Condition(&cond)); } diff --git a/absl/time/BUILD.bazel b/absl/time/BUILD.bazel index ad0313c5cf4..b68dd85686d 100644 --- a/absl/time/BUILD.bazel +++ b/absl/time/BUILD.bazel @@ -14,6 +14,9 @@ # limitations under the License. # +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", @@ -118,6 +121,7 @@ cc_test( "no_test_android_arm", "no_test_android_arm64", "no_test_android_x86", + "no_test_ios_sim_arm64", "no_test_ios_x86_64", "no_test_lexan", "no_test_loonix", diff --git a/absl/time/clock.cc b/absl/time/clock.cc index ecd539e5caa..bdcd85b6255 100644 --- a/absl/time/clock.cc +++ b/absl/time/clock.cc @@ -135,7 +135,7 @@ static inline uint64_t SeqAcquire(std::atomic *seq) { // fetch_add would be before it, not after. std::atomic_thread_fence(std::memory_order_release); - return x + 2; // original word plus 2 + return x + 2; // original word plus 2 } // Release seqlock (*seq) by writing x to it---a value previously returned by @@ -160,8 +160,8 @@ static const uint64_t kMinNSBetweenSamples = 2000 << 20; // We require that kMinNSBetweenSamples shifted by kScale // have at least a bit left over for 64-bit calculations. static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) == - kMinNSBetweenSamples, - "cannot represent kMaxBetweenSamplesNSScaled"); + kMinNSBetweenSamples, + "cannot represent kMaxBetweenSamplesNSScaled"); // data from a sample of the kernel's time value struct TimeSampleAtomic { @@ -206,8 +206,7 @@ struct ABSL_CACHELINE_ALIGNED TimeState { // A reader-writer lock protecting the static locations below. // See SeqAcquire() and SeqRelease() above. 
- absl::base_internal::SpinLock lock{absl::kConstInit, - base_internal::SCHEDULE_KERNEL_ONLY}; + absl::base_internal::SpinLock lock{base_internal::SCHEDULE_KERNEL_ONLY}; }; ABSL_CONST_INIT static TimeState time_state; @@ -416,7 +415,7 @@ static int64_t GetCurrentTimeNanosSlowPath() ABSL_LOCKS_EXCLUDED(time_state.lock) { // Serialize access to slow-path. Fast-path readers are not blocked yet, and // code below must not modify last_sample until the seqlock is acquired. - time_state.lock.Lock(); + base_internal::SpinLockHolder l(time_state.lock); // Sample the kernel time base. This is the definition of // "now" if we take the slow path. @@ -439,16 +438,14 @@ static int64_t GetCurrentTimeNanosSlowPath() if (delta_cycles < sample.min_cycles_per_sample) { // Another thread updated the sample. This path does not take the seqlock // so that blocked readers can make progress without blocking new readers. - estimated_base_ns = sample.base_ns + - ((delta_cycles * sample.nsscaled_per_cycle) >> kScale); + estimated_base_ns = + sample.base_ns + ((delta_cycles * sample.nsscaled_per_cycle) >> kScale); time_state.stats_fast_slow_paths++; } else { estimated_base_ns = UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample); } - time_state.lock.Unlock(); - return static_cast(estimated_base_ns); } @@ -494,8 +491,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle; } while (estimated_scaled_ns / sample->nsscaled_per_cycle != (delta_cycles >> s)); - estimated_base_ns = sample->base_ns + - (estimated_scaled_ns >> (kScale - s)); + estimated_base_ns = + sample->base_ns + (estimated_scaled_ns >> (kScale - s)); } // Compute the assumed cycle time kMinNSBetweenSamples ns into the future @@ -522,8 +519,8 @@ static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns, diff_ns - (diff_ns / 16)); uint64_t new_nsscaled_per_cycle = SafeDivideAndScale(ns, assumed_next_sample_delta_cycles); - if (new_nsscaled_per_cycle != 0 && - diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) { + if (new_nsscaled_per_cycle != 0 && diff_ns < 100 * 1000 * 1000 && + -diff_ns < 100 * 1000 * 1000) { // record the cycle time measurement time_state.last_sample.nsscaled_per_cycle.store( new_nsscaled_per_cycle, std::memory_order_relaxed); diff --git a/absl/time/duration.cc b/absl/time/duration.cc index 38c4b63990c..fb7c90a2d74 100644 --- a/absl/time/duration.cc +++ b/absl/time/duration.cc @@ -469,7 +469,7 @@ Duration& Duration::operator*=(int64_t r) { Duration& Duration::operator*=(double r) { if (time_internal::IsInfiniteDuration(*this) || !IsFinite(r)) { - const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0); + const bool is_neg = std::isnan(r) || std::signbit(r) != (rep_hi_.Get() < 0); return *this = is_neg ? -InfiniteDuration() : InfiniteDuration(); } return *this = ScaleDouble(*this, r); @@ -485,7 +485,7 @@ Duration& Duration::operator/=(int64_t r) { Duration& Duration::operator/=(double r) { if (time_internal::IsInfiniteDuration(*this) || !IsValidDivisor(r)) { - const bool is_neg = std::signbit(r) != (rep_hi_.Get() < 0); + const bool is_neg = std::isnan(r) || std::signbit(r) != (rep_hi_.Get() < 0); return *this = is_neg ? 
-InfiniteDuration() : InfiniteDuration(); } return *this = ScaleDouble(*this, r); diff --git a/absl/time/duration_test.cc b/absl/time/duration_test.cc index 1e3fe670449..164ad6b0016 100644 --- a/absl/time/duration_test.cc +++ b/absl/time/duration_test.cc @@ -841,18 +841,18 @@ TEST(Duration, DivisionByZero) { TEST(Duration, NaN) { // Note that IEEE 754 does not define the behavior of a nan's sign when it is - // copied, so the code below allows for either + or - InfiniteDuration. + // copied. We return -InfiniteDuration in either case. #define TEST_NAN_HANDLING(NAME, NAN) \ do { \ const auto inf = absl::InfiniteDuration(); \ auto x = NAME(NAN); \ - EXPECT_TRUE(x == inf || x == -inf); \ + EXPECT_TRUE(x == -inf); \ auto y = NAME(42); \ y *= NAN; \ - EXPECT_TRUE(y == inf || y == -inf); \ + EXPECT_TRUE(y == -inf); \ auto z = NAME(42); \ z /= NAN; \ - EXPECT_TRUE(z == inf || z == -inf); \ + EXPECT_TRUE(z == -inf); \ } while (0) const double nan = std::numeric_limits::quiet_NaN(); diff --git a/absl/time/internal/cctz/BUILD.bazel b/absl/time/internal/cctz/BUILD.bazel index da30a0f11bf..6e178748887 100644 --- a/absl/time/internal/cctz/BUILD.bazel +++ b/absl/time/internal/cctz/BUILD.bazel @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +load("@rules_cc//cc:cc_binary.bzl", "cc_binary") +load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load("//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", "ABSL_TEST_COPTS") package(features = [ @@ -80,9 +83,15 @@ cc_library( ### tests -test_suite( - name = "all_tests", - visibility = ["//visibility:public"], +cc_library( + name = "test_time_zone_names", + testonly = True, + srcs = ["src/test_time_zone_names.cc"], + hdrs = ["src/test_time_zone_names.h"], + copts = ABSL_DEFAULT_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + visibility = ["//visibility:private"], + deps = ["//absl/base:config"], ) cc_test( @@ -137,6 +146,7 @@ cc_test( ], deps = [ ":civil_time", + ":test_time_zone_names", ":time_zone", "//absl/base:config", "@googletest//:gtest", @@ -161,6 +171,7 @@ cc_test( tags = ["benchmark"], deps = [ ":civil_time", + ":test_time_zone_names", ":time_zone", "//absl/base:config", "@google_benchmark//:benchmark_main", diff --git a/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/absl/time/internal/cctz/include/cctz/civil_time_detail.h index 2b0aed56c38..fe3b8bdf822 100644 --- a/absl/time/internal/cctz/include/cctz/civil_time_detail.h +++ b/absl/time/internal/cctz/include/cctz/civil_time_detail.h @@ -96,6 +96,18 @@ CONSTEXPR_F int days_per_4years(int yi) noexcept { CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept { return is_leap_year(y + (m > 2)) ? 366 : 365; } +// The compiler cannot optimize away the check if we use +// -fsanitize=array-bounds. +// m is guaranteed to be in [1:12] in the caller, but the compiler cannot +// optimize away the check even when this function is inlined into BreakTime. +// To reduce the overhead, we use no_sanitize to skip the unnecessary +// -fsanitize=array-bounds check. Remove no_sanitize once the missed +// optimization is fixed. 
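The duration.cc change above makes NaN scaling deterministic: previously the sign of the resulting infinity depended on how the platform copied the NaN's sign bit, whereas std::isnan(r) now forces the negative branch, which is exactly what the tightened TEST(Duration, NaN) asserts. A small sketch of the new behavior:

```cpp
#include <limits>

#include "absl/time/time.h"

void NanSaturates() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  absl::Duration d = absl::Seconds(42);
  d *= nan;  // now always d == -absl::InfiniteDuration()
  d = absl::Seconds(42);
  d /= nan;  // likewise d == -absl::InfiniteDuration()
}
```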
+#if defined(__clang__) && defined(__has_cpp_attribute) +#if __has_cpp_attribute(clang::no_sanitize) +[[clang::no_sanitize("array-bounds")]] +#endif +#endif CONSTEXPR_F int days_per_month(year_t y, month_t m) noexcept { CONSTEXPR_D int k_days_per_month[1 + 12] = { -1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 // non leap year diff --git a/absl/time/internal/cctz/src/cctz_benchmark.cc b/absl/time/internal/cctz/src/cctz_benchmark.cc index ba7e149662f..ce2781871d4 100644 --- a/absl/time/internal/cctz/src/cctz_benchmark.cc +++ b/absl/time/internal/cctz/src/cctz_benchmark.cc @@ -23,6 +23,7 @@ #include "benchmark/benchmark.h" #include "absl/time/internal/cctz/include/cctz/civil_time.h" #include "absl/time/internal/cctz/include/cctz/time_zone.h" +#include "absl/time/internal/cctz/src/test_time_zone_names.h" #include "absl/time/internal/cctz/src/time_zone_impl.h" namespace { @@ -103,498 +104,10 @@ const char RFC3339_sec[] = "%Y-%m-%d%ET%H:%M:%S%Ez"; const char RFC1123_full[] = "%a, %d %b %Y %H:%M:%S %z"; const char RFC1123_no_wday[] = "%d %b %Y %H:%M:%S %z"; -// A list of known time-zone names. -// TODO: Refactor with src/time_zone_lookup_test.cc. -const char* const kTimeZoneNames[] = {"Africa/Abidjan", - "Africa/Accra", - "Africa/Addis_Ababa", - "Africa/Algiers", - "Africa/Asmara", - "Africa/Bamako", - "Africa/Bangui", - "Africa/Banjul", - "Africa/Bissau", - "Africa/Blantyre", - "Africa/Brazzaville", - "Africa/Bujumbura", - "Africa/Cairo", - "Africa/Casablanca", - "Africa/Ceuta", - "Africa/Conakry", - "Africa/Dakar", - "Africa/Dar_es_Salaam", - "Africa/Djibouti", - "Africa/Douala", - "Africa/El_Aaiun", - "Africa/Freetown", - "Africa/Gaborone", - "Africa/Harare", - "Africa/Johannesburg", - "Africa/Juba", - "Africa/Kampala", - "Africa/Khartoum", - "Africa/Kigali", - "Africa/Kinshasa", - "Africa/Lagos", - "Africa/Libreville", - "Africa/Lome", - "Africa/Luanda", - "Africa/Lubumbashi", - "Africa/Lusaka", - "Africa/Malabo", - "Africa/Maputo", - "Africa/Maseru", - "Africa/Mbabane", - "Africa/Mogadishu", - "Africa/Monrovia", - "Africa/Nairobi", - "Africa/Ndjamena", - "Africa/Niamey", - "Africa/Nouakchott", - "Africa/Ouagadougou", - "Africa/Porto-Novo", - "Africa/Sao_Tome", - "Africa/Timbuktu", - "Africa/Tripoli", - "Africa/Tunis", - "Africa/Windhoek", - "America/Adak", - "America/Anchorage", - "America/Anguilla", - "America/Antigua", - "America/Araguaina", - "America/Argentina/Buenos_Aires", - "America/Argentina/Catamarca", - "America/Argentina/Cordoba", - "America/Argentina/Jujuy", - "America/Argentina/La_Rioja", - "America/Argentina/Mendoza", - "America/Argentina/Rio_Gallegos", - "America/Argentina/Salta", - "America/Argentina/San_Juan", - "America/Argentina/San_Luis", - "America/Argentina/Tucuman", - "America/Argentina/Ushuaia", - "America/Aruba", - "America/Asuncion", - "America/Atikokan", - "America/Atka", - "America/Bahia", - "America/Bahia_Banderas", - "America/Barbados", - "America/Belem", - "America/Belize", - "America/Blanc-Sablon", - "America/Boa_Vista", - "America/Bogota", - "America/Boise", - "America/Cambridge_Bay", - "America/Campo_Grande", - "America/Cancun", - "America/Caracas", - "America/Cayenne", - "America/Cayman", - "America/Chicago", - "America/Chihuahua", - "America/Ciudad_Juarez", - "America/Coral_Harbour", - "America/Costa_Rica", - "America/Coyhaique", - "America/Creston", - "America/Cuiaba", - "America/Curacao", - "America/Danmarkshavn", - "America/Dawson", - "America/Dawson_Creek", - "America/Denver", - "America/Detroit", - "America/Dominica", - "America/Edmonton", - 
"America/Eirunepe", - "America/El_Salvador", - "America/Ensenada", - "America/Fort_Nelson", - "America/Fortaleza", - "America/Glace_Bay", - "America/Goose_Bay", - "America/Grand_Turk", - "America/Grenada", - "America/Guadeloupe", - "America/Guatemala", - "America/Guayaquil", - "America/Guyana", - "America/Halifax", - "America/Havana", - "America/Hermosillo", - "America/Indiana/Indianapolis", - "America/Indiana/Knox", - "America/Indiana/Marengo", - "America/Indiana/Petersburg", - "America/Indiana/Tell_City", - "America/Indiana/Vevay", - "America/Indiana/Vincennes", - "America/Indiana/Winamac", - "America/Inuvik", - "America/Iqaluit", - "America/Jamaica", - "America/Juneau", - "America/Kentucky/Louisville", - "America/Kentucky/Monticello", - "America/Kralendijk", - "America/La_Paz", - "America/Lima", - "America/Los_Angeles", - "America/Lower_Princes", - "America/Maceio", - "America/Managua", - "America/Manaus", - "America/Marigot", - "America/Martinique", - "America/Matamoros", - "America/Mazatlan", - "America/Menominee", - "America/Merida", - "America/Metlakatla", - "America/Mexico_City", - "America/Miquelon", - "America/Moncton", - "America/Monterrey", - "America/Montevideo", - "America/Montreal", - "America/Montserrat", - "America/Nassau", - "America/New_York", - "America/Nipigon", - "America/Nome", - "America/Noronha", - "America/North_Dakota/Beulah", - "America/North_Dakota/Center", - "America/North_Dakota/New_Salem", - "America/Nuuk", - "America/Ojinaga", - "America/Panama", - "America/Pangnirtung", - "America/Paramaribo", - "America/Phoenix", - "America/Port-au-Prince", - "America/Port_of_Spain", - "America/Porto_Acre", - "America/Porto_Velho", - "America/Puerto_Rico", - "America/Punta_Arenas", - "America/Rainy_River", - "America/Rankin_Inlet", - "America/Recife", - "America/Regina", - "America/Resolute", - "America/Rio_Branco", - "America/Santa_Isabel", - "America/Santarem", - "America/Santiago", - "America/Santo_Domingo", - "America/Sao_Paulo", - "America/Scoresbysund", - "America/Shiprock", - "America/Sitka", - "America/St_Barthelemy", - "America/St_Johns", - "America/St_Kitts", - "America/St_Lucia", - "America/St_Thomas", - "America/St_Vincent", - "America/Swift_Current", - "America/Tegucigalpa", - "America/Thule", - "America/Thunder_Bay", - "America/Tijuana", - "America/Toronto", - "America/Tortola", - "America/Vancouver", - "America/Virgin", - "America/Whitehorse", - "America/Winnipeg", - "America/Yakutat", - "America/Yellowknife", - "Antarctica/Casey", - "Antarctica/Davis", - "Antarctica/DumontDUrville", - "Antarctica/Macquarie", - "Antarctica/Mawson", - "Antarctica/McMurdo", - "Antarctica/Palmer", - "Antarctica/Rothera", - "Antarctica/Syowa", - "Antarctica/Troll", - "Antarctica/Vostok", - "Arctic/Longyearbyen", - "Asia/Aden", - "Asia/Almaty", - "Asia/Amman", - "Asia/Anadyr", - "Asia/Aqtau", - "Asia/Aqtobe", - "Asia/Ashgabat", - "Asia/Atyrau", - "Asia/Baghdad", - "Asia/Bahrain", - "Asia/Baku", - "Asia/Bangkok", - "Asia/Barnaul", - "Asia/Beirut", - "Asia/Bishkek", - "Asia/Brunei", - "Asia/Chita", - "Asia/Choibalsan", - "Asia/Chongqing", - "Asia/Colombo", - "Asia/Damascus", - "Asia/Dhaka", - "Asia/Dili", - "Asia/Dubai", - "Asia/Dushanbe", - "Asia/Famagusta", - "Asia/Gaza", - "Asia/Harbin", - "Asia/Hebron", - "Asia/Ho_Chi_Minh", - "Asia/Hong_Kong", - "Asia/Hovd", - "Asia/Irkutsk", - "Asia/Istanbul", - "Asia/Jakarta", - "Asia/Jayapura", - "Asia/Jerusalem", - "Asia/Kabul", - "Asia/Kamchatka", - "Asia/Karachi", - "Asia/Kashgar", - "Asia/Kathmandu", - "Asia/Khandyga", - 
"Asia/Kolkata", - "Asia/Krasnoyarsk", - "Asia/Kuala_Lumpur", - "Asia/Kuching", - "Asia/Kuwait", - "Asia/Macau", - "Asia/Magadan", - "Asia/Makassar", - "Asia/Manila", - "Asia/Muscat", - "Asia/Nicosia", - "Asia/Novokuznetsk", - "Asia/Novosibirsk", - "Asia/Omsk", - "Asia/Oral", - "Asia/Phnom_Penh", - "Asia/Pontianak", - "Asia/Pyongyang", - "Asia/Qatar", - "Asia/Qostanay", - "Asia/Qyzylorda", - "Asia/Riyadh", - "Asia/Sakhalin", - "Asia/Samarkand", - "Asia/Seoul", - "Asia/Shanghai", - "Asia/Singapore", - "Asia/Srednekolymsk", - "Asia/Taipei", - "Asia/Tashkent", - "Asia/Tbilisi", - "Asia/Tehran", - "Asia/Tel_Aviv", - "Asia/Thimphu", - "Asia/Tokyo", - "Asia/Tomsk", - "Asia/Ulaanbaatar", - "Asia/Urumqi", - "Asia/Ust-Nera", - "Asia/Vientiane", - "Asia/Vladivostok", - "Asia/Yakutsk", - "Asia/Yangon", - "Asia/Yekaterinburg", - "Asia/Yerevan", - "Atlantic/Azores", - "Atlantic/Bermuda", - "Atlantic/Canary", - "Atlantic/Cape_Verde", - "Atlantic/Faroe", - "Atlantic/Jan_Mayen", - "Atlantic/Madeira", - "Atlantic/Reykjavik", - "Atlantic/South_Georgia", - "Atlantic/St_Helena", - "Atlantic/Stanley", - "Australia/Adelaide", - "Australia/Brisbane", - "Australia/Broken_Hill", - "Australia/Canberra", - "Australia/Currie", - "Australia/Darwin", - "Australia/Eucla", - "Australia/Hobart", - "Australia/Lindeman", - "Australia/Lord_Howe", - "Australia/Melbourne", - "Australia/Perth", - "Australia/Sydney", - "Australia/Yancowinna", - "Etc/GMT", - "Etc/GMT+0", - "Etc/GMT+1", - "Etc/GMT+10", - "Etc/GMT+11", - "Etc/GMT+12", - "Etc/GMT+2", - "Etc/GMT+3", - "Etc/GMT+4", - "Etc/GMT+5", - "Etc/GMT+6", - "Etc/GMT+7", - "Etc/GMT+8", - "Etc/GMT+9", - "Etc/GMT-0", - "Etc/GMT-1", - "Etc/GMT-10", - "Etc/GMT-11", - "Etc/GMT-12", - "Etc/GMT-13", - "Etc/GMT-14", - "Etc/GMT-2", - "Etc/GMT-3", - "Etc/GMT-4", - "Etc/GMT-5", - "Etc/GMT-6", - "Etc/GMT-7", - "Etc/GMT-8", - "Etc/GMT-9", - "Etc/GMT0", - "Etc/Greenwich", - "Etc/UCT", - "Etc/UTC", - "Etc/Universal", - "Etc/Zulu", - "Europe/Amsterdam", - "Europe/Andorra", - "Europe/Astrakhan", - "Europe/Athens", - "Europe/Belfast", - "Europe/Belgrade", - "Europe/Berlin", - "Europe/Bratislava", - "Europe/Brussels", - "Europe/Bucharest", - "Europe/Budapest", - "Europe/Busingen", - "Europe/Chisinau", - "Europe/Copenhagen", - "Europe/Dublin", - "Europe/Gibraltar", - "Europe/Guernsey", - "Europe/Helsinki", - "Europe/Isle_of_Man", - "Europe/Istanbul", - "Europe/Jersey", - "Europe/Kaliningrad", - "Europe/Kirov", - "Europe/Kyiv", - "Europe/Lisbon", - "Europe/Ljubljana", - "Europe/London", - "Europe/Luxembourg", - "Europe/Madrid", - "Europe/Malta", - "Europe/Mariehamn", - "Europe/Minsk", - "Europe/Monaco", - "Europe/Moscow", - "Europe/Nicosia", - "Europe/Oslo", - "Europe/Paris", - "Europe/Podgorica", - "Europe/Prague", - "Europe/Riga", - "Europe/Rome", - "Europe/Samara", - "Europe/San_Marino", - "Europe/Sarajevo", - "Europe/Saratov", - "Europe/Simferopol", - "Europe/Skopje", - "Europe/Sofia", - "Europe/Stockholm", - "Europe/Tallinn", - "Europe/Tirane", - "Europe/Tiraspol", - "Europe/Ulyanovsk", - "Europe/Vaduz", - "Europe/Vatican", - "Europe/Vienna", - "Europe/Vilnius", - "Europe/Volgograd", - "Europe/Warsaw", - "Europe/Zagreb", - "Europe/Zurich", - "Factory", - "Indian/Antananarivo", - "Indian/Chagos", - "Indian/Christmas", - "Indian/Cocos", - "Indian/Comoro", - "Indian/Kerguelen", - "Indian/Mahe", - "Indian/Maldives", - "Indian/Mauritius", - "Indian/Mayotte", - "Indian/Reunion", - "Pacific/Apia", - "Pacific/Auckland", - "Pacific/Bougainville", - "Pacific/Chatham", - "Pacific/Chuuk", - "Pacific/Easter", 
- "Pacific/Efate", - "Pacific/Fakaofo", - "Pacific/Fiji", - "Pacific/Funafuti", - "Pacific/Galapagos", - "Pacific/Gambier", - "Pacific/Guadalcanal", - "Pacific/Guam", - "Pacific/Honolulu", - "Pacific/Johnston", - "Pacific/Kanton", - "Pacific/Kiritimati", - "Pacific/Kosrae", - "Pacific/Kwajalein", - "Pacific/Majuro", - "Pacific/Marquesas", - "Pacific/Midway", - "Pacific/Nauru", - "Pacific/Niue", - "Pacific/Norfolk", - "Pacific/Noumea", - "Pacific/Pago_Pago", - "Pacific/Palau", - "Pacific/Pitcairn", - "Pacific/Pohnpei", - "Pacific/Port_Moresby", - "Pacific/Rarotonga", - "Pacific/Saipan", - "Pacific/Samoa", - "Pacific/Tahiti", - "Pacific/Tarawa", - "Pacific/Tongatapu", - "Pacific/Wake", - "Pacific/Wallis", - "Pacific/Yap", - "UTC", - nullptr}; - std::vector AllTimeZoneNames() { std::vector names; - for (const char* const* namep = kTimeZoneNames; *namep != nullptr; ++namep) { + for (const char* const* namep = cctz::kTimeZoneNames; *namep != nullptr; + ++namep) { names.push_back(std::string("file:") + *namep); } assert(!names.empty()); @@ -889,6 +402,7 @@ const char* const kFormats[] = { RFC3339_sec, // 3 "%Y-%m-%d%ET%H:%M:%S", // 4 "%Y-%m-%d", // 5 + "%F%ET%T", // 6 }; const int kNumFormats = sizeof(kFormats) / sizeof(kFormats[0]); diff --git a/absl/time/internal/cctz/src/test_time_zone_names.cc b/absl/time/internal/cctz/src/test_time_zone_names.cc new file mode 100644 index 00000000000..ab54c9ae70a --- /dev/null +++ b/absl/time/internal/cctz/src/test_time_zone_names.cc @@ -0,0 +1,515 @@ +// Copyright 2025 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/time/internal/cctz/src/test_time_zone_names.h" + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { +namespace cctz { + +// A list of known time-zone names. 
+const char* const kTimeZoneNames[] = {"Africa/Abidjan", + "Africa/Accra", + "Africa/Addis_Ababa", + "Africa/Algiers", + "Africa/Asmara", + "Africa/Bamako", + "Africa/Bangui", + "Africa/Banjul", + "Africa/Bissau", + "Africa/Blantyre", + "Africa/Brazzaville", + "Africa/Bujumbura", + "Africa/Cairo", + "Africa/Casablanca", + "Africa/Ceuta", + "Africa/Conakry", + "Africa/Dakar", + "Africa/Dar_es_Salaam", + "Africa/Djibouti", + "Africa/Douala", + "Africa/El_Aaiun", + "Africa/Freetown", + "Africa/Gaborone", + "Africa/Harare", + "Africa/Johannesburg", + "Africa/Juba", + "Africa/Kampala", + "Africa/Khartoum", + "Africa/Kigali", + "Africa/Kinshasa", + "Africa/Lagos", + "Africa/Libreville", + "Africa/Lome", + "Africa/Luanda", + "Africa/Lubumbashi", + "Africa/Lusaka", + "Africa/Malabo", + "Africa/Maputo", + "Africa/Maseru", + "Africa/Mbabane", + "Africa/Mogadishu", + "Africa/Monrovia", + "Africa/Nairobi", + "Africa/Ndjamena", + "Africa/Niamey", + "Africa/Nouakchott", + "Africa/Ouagadougou", + "Africa/Porto-Novo", + "Africa/Sao_Tome", + "Africa/Timbuktu", + "Africa/Tripoli", + "Africa/Tunis", + "Africa/Windhoek", + "America/Adak", + "America/Anchorage", + "America/Anguilla", + "America/Antigua", + "America/Araguaina", + "America/Argentina/Buenos_Aires", + "America/Argentina/Catamarca", + "America/Argentina/Cordoba", + "America/Argentina/Jujuy", + "America/Argentina/La_Rioja", + "America/Argentina/Mendoza", + "America/Argentina/Rio_Gallegos", + "America/Argentina/Salta", + "America/Argentina/San_Juan", + "America/Argentina/San_Luis", + "America/Argentina/Tucuman", + "America/Argentina/Ushuaia", + "America/Aruba", + "America/Asuncion", + "America/Atikokan", + "America/Atka", + "America/Bahia", + "America/Bahia_Banderas", + "America/Barbados", + "America/Belem", + "America/Belize", + "America/Blanc-Sablon", + "America/Boa_Vista", + "America/Bogota", + "America/Boise", + "America/Cambridge_Bay", + "America/Campo_Grande", + "America/Cancun", + "America/Caracas", + "America/Cayenne", + "America/Cayman", + "America/Chicago", + "America/Chihuahua", + "America/Ciudad_Juarez", + "America/Coral_Harbour", + "America/Costa_Rica", + "America/Coyhaique", + "America/Creston", + "America/Cuiaba", + "America/Curacao", + "America/Danmarkshavn", + "America/Dawson", + "America/Dawson_Creek", + "America/Denver", + "America/Detroit", + "America/Dominica", + "America/Edmonton", + "America/Eirunepe", + "America/El_Salvador", + "America/Ensenada", + "America/Fort_Nelson", + "America/Fortaleza", + "America/Glace_Bay", + "America/Goose_Bay", + "America/Grand_Turk", + "America/Grenada", + "America/Guadeloupe", + "America/Guatemala", + "America/Guayaquil", + "America/Guyana", + "America/Halifax", + "America/Havana", + "America/Hermosillo", + "America/Indiana/Indianapolis", + "America/Indiana/Knox", + "America/Indiana/Marengo", + "America/Indiana/Petersburg", + "America/Indiana/Tell_City", + "America/Indiana/Vevay", + "America/Indiana/Vincennes", + "America/Indiana/Winamac", + "America/Inuvik", + "America/Iqaluit", + "America/Jamaica", + "America/Juneau", + "America/Kentucky/Louisville", + "America/Kentucky/Monticello", + "America/Kralendijk", + "America/La_Paz", + "America/Lima", + "America/Los_Angeles", + "America/Lower_Princes", + "America/Maceio", + "America/Managua", + "America/Manaus", + "America/Marigot", + "America/Martinique", + "America/Matamoros", + "America/Mazatlan", + "America/Menominee", + "America/Merida", + "America/Metlakatla", + "America/Mexico_City", + "America/Miquelon", + "America/Moncton", + 
"America/Monterrey", + "America/Montevideo", + "America/Montreal", + "America/Montserrat", + "America/Nassau", + "America/New_York", + "America/Nipigon", + "America/Nome", + "America/Noronha", + "America/North_Dakota/Beulah", + "America/North_Dakota/Center", + "America/North_Dakota/New_Salem", + "America/Nuuk", + "America/Ojinaga", + "America/Panama", + "America/Pangnirtung", + "America/Paramaribo", + "America/Phoenix", + "America/Port-au-Prince", + "America/Port_of_Spain", + "America/Porto_Acre", + "America/Porto_Velho", + "America/Puerto_Rico", + "America/Punta_Arenas", + "America/Rainy_River", + "America/Rankin_Inlet", + "America/Recife", + "America/Regina", + "America/Resolute", + "America/Rio_Branco", + "America/Santa_Isabel", + "America/Santarem", + "America/Santiago", + "America/Santo_Domingo", + "America/Sao_Paulo", + "America/Scoresbysund", + "America/Shiprock", + "America/Sitka", + "America/St_Barthelemy", + "America/St_Johns", + "America/St_Kitts", + "America/St_Lucia", + "America/St_Thomas", + "America/St_Vincent", + "America/Swift_Current", + "America/Tegucigalpa", + "America/Thule", + "America/Thunder_Bay", + "America/Tijuana", + "America/Toronto", + "America/Tortola", + "America/Vancouver", + "America/Virgin", + "America/Whitehorse", + "America/Winnipeg", + "America/Yakutat", + "America/Yellowknife", + "Antarctica/Casey", + "Antarctica/Davis", + "Antarctica/DumontDUrville", + "Antarctica/Macquarie", + "Antarctica/Mawson", + "Antarctica/McMurdo", + "Antarctica/Palmer", + "Antarctica/Rothera", + "Antarctica/Syowa", + "Antarctica/Troll", + "Antarctica/Vostok", + "Arctic/Longyearbyen", + "Asia/Aden", + "Asia/Almaty", + "Asia/Amman", + "Asia/Anadyr", + "Asia/Aqtau", + "Asia/Aqtobe", + "Asia/Ashgabat", + "Asia/Atyrau", + "Asia/Baghdad", + "Asia/Bahrain", + "Asia/Baku", + "Asia/Bangkok", + "Asia/Barnaul", + "Asia/Beirut", + "Asia/Bishkek", + "Asia/Brunei", + "Asia/Chita", + "Asia/Choibalsan", + "Asia/Chongqing", + "Asia/Colombo", + "Asia/Damascus", + "Asia/Dhaka", + "Asia/Dili", + "Asia/Dubai", + "Asia/Dushanbe", + "Asia/Famagusta", + "Asia/Gaza", + "Asia/Harbin", + "Asia/Hebron", + "Asia/Ho_Chi_Minh", + "Asia/Hong_Kong", + "Asia/Hovd", + "Asia/Irkutsk", + "Asia/Istanbul", + "Asia/Jakarta", + "Asia/Jayapura", + "Asia/Jerusalem", + "Asia/Kabul", + "Asia/Kamchatka", + "Asia/Karachi", + "Asia/Kashgar", + "Asia/Kathmandu", + "Asia/Khandyga", + "Asia/Kolkata", + "Asia/Krasnoyarsk", + "Asia/Kuala_Lumpur", + "Asia/Kuching", + "Asia/Kuwait", + "Asia/Macau", + "Asia/Magadan", + "Asia/Makassar", + "Asia/Manila", + "Asia/Muscat", + "Asia/Nicosia", + "Asia/Novokuznetsk", + "Asia/Novosibirsk", + "Asia/Omsk", + "Asia/Oral", + "Asia/Phnom_Penh", + "Asia/Pontianak", + "Asia/Pyongyang", + "Asia/Qatar", + "Asia/Qostanay", + "Asia/Qyzylorda", + "Asia/Riyadh", + "Asia/Sakhalin", + "Asia/Samarkand", + "Asia/Seoul", + "Asia/Shanghai", + "Asia/Singapore", + "Asia/Srednekolymsk", + "Asia/Taipei", + "Asia/Tashkent", + "Asia/Tbilisi", + "Asia/Tehran", + "Asia/Tel_Aviv", + "Asia/Thimphu", + "Asia/Tokyo", + "Asia/Tomsk", + "Asia/Ulaanbaatar", + "Asia/Urumqi", + "Asia/Ust-Nera", + "Asia/Vientiane", + "Asia/Vladivostok", + "Asia/Yakutsk", + "Asia/Yangon", + "Asia/Yekaterinburg", + "Asia/Yerevan", + "Atlantic/Azores", + "Atlantic/Bermuda", + "Atlantic/Canary", + "Atlantic/Cape_Verde", + "Atlantic/Faroe", + "Atlantic/Jan_Mayen", + "Atlantic/Madeira", + "Atlantic/Reykjavik", + "Atlantic/South_Georgia", + "Atlantic/St_Helena", + "Atlantic/Stanley", + "Australia/Adelaide", + "Australia/Brisbane", + 
"Australia/Broken_Hill", + "Australia/Canberra", + "Australia/Currie", + "Australia/Darwin", + "Australia/Eucla", + "Australia/Hobart", + "Australia/Lindeman", + "Australia/Lord_Howe", + "Australia/Melbourne", + "Australia/Perth", + "Australia/Sydney", + "Australia/Yancowinna", + "Etc/GMT", + "Etc/GMT+0", + "Etc/GMT+1", + "Etc/GMT+10", + "Etc/GMT+11", + "Etc/GMT+12", + "Etc/GMT+2", + "Etc/GMT+3", + "Etc/GMT+4", + "Etc/GMT+5", + "Etc/GMT+6", + "Etc/GMT+7", + "Etc/GMT+8", + "Etc/GMT+9", + "Etc/GMT-0", + "Etc/GMT-1", + "Etc/GMT-10", + "Etc/GMT-11", + "Etc/GMT-12", + "Etc/GMT-13", + "Etc/GMT-14", + "Etc/GMT-2", + "Etc/GMT-3", + "Etc/GMT-4", + "Etc/GMT-5", + "Etc/GMT-6", + "Etc/GMT-7", + "Etc/GMT-8", + "Etc/GMT-9", + "Etc/GMT0", + "Etc/Greenwich", + "Etc/UCT", + "Etc/UTC", + "Etc/Universal", + "Etc/Zulu", + "Europe/Amsterdam", + "Europe/Andorra", + "Europe/Astrakhan", + "Europe/Athens", + "Europe/Belfast", + "Europe/Belgrade", + "Europe/Berlin", + "Europe/Bratislava", + "Europe/Brussels", + "Europe/Bucharest", + "Europe/Budapest", + "Europe/Busingen", + "Europe/Chisinau", + "Europe/Copenhagen", + "Europe/Dublin", + "Europe/Gibraltar", + "Europe/Guernsey", + "Europe/Helsinki", + "Europe/Isle_of_Man", + "Europe/Istanbul", + "Europe/Jersey", + "Europe/Kaliningrad", + "Europe/Kirov", + "Europe/Kyiv", + "Europe/Lisbon", + "Europe/Ljubljana", + "Europe/London", + "Europe/Luxembourg", + "Europe/Madrid", + "Europe/Malta", + "Europe/Mariehamn", + "Europe/Minsk", + "Europe/Monaco", + "Europe/Moscow", + "Europe/Nicosia", + "Europe/Oslo", + "Europe/Paris", + "Europe/Podgorica", + "Europe/Prague", + "Europe/Riga", + "Europe/Rome", + "Europe/Samara", + "Europe/San_Marino", + "Europe/Sarajevo", + "Europe/Saratov", + "Europe/Simferopol", + "Europe/Skopje", + "Europe/Sofia", + "Europe/Stockholm", + "Europe/Tallinn", + "Europe/Tirane", + "Europe/Tiraspol", + "Europe/Ulyanovsk", + "Europe/Vaduz", + "Europe/Vatican", + "Europe/Vienna", + "Europe/Vilnius", + "Europe/Volgograd", + "Europe/Warsaw", + "Europe/Zagreb", + "Europe/Zurich", + "Factory", + "Indian/Antananarivo", + "Indian/Chagos", + "Indian/Christmas", + "Indian/Cocos", + "Indian/Comoro", + "Indian/Kerguelen", + "Indian/Mahe", + "Indian/Maldives", + "Indian/Mauritius", + "Indian/Mayotte", + "Indian/Reunion", + "Pacific/Apia", + "Pacific/Auckland", + "Pacific/Bougainville", + "Pacific/Chatham", + "Pacific/Chuuk", + "Pacific/Easter", + "Pacific/Efate", + "Pacific/Fakaofo", + "Pacific/Fiji", + "Pacific/Funafuti", + "Pacific/Galapagos", + "Pacific/Gambier", + "Pacific/Guadalcanal", + "Pacific/Guam", + "Pacific/Honolulu", + "Pacific/Johnston", + "Pacific/Kanton", + "Pacific/Kiritimati", + "Pacific/Kosrae", + "Pacific/Kwajalein", + "Pacific/Majuro", + "Pacific/Marquesas", + "Pacific/Midway", + "Pacific/Nauru", + "Pacific/Niue", + "Pacific/Norfolk", + "Pacific/Noumea", + "Pacific/Pago_Pago", + "Pacific/Palau", + "Pacific/Pitcairn", + "Pacific/Pohnpei", + "Pacific/Port_Moresby", + "Pacific/Rarotonga", + "Pacific/Saipan", + "Pacific/Samoa", + "Pacific/Tahiti", + "Pacific/Tarawa", + "Pacific/Tongatapu", + "Pacific/Wake", + "Pacific/Wallis", + "Pacific/Yap", + "UTC", + nullptr}; + +} // namespace cctz +} // namespace time_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/time/internal/cctz/src/test_time_zone_names.h b/absl/time/internal/cctz/src/test_time_zone_names.h new file mode 100644 index 00000000000..1993994a1d2 --- /dev/null +++ b/absl/time/internal/cctz/src/test_time_zone_names.h @@ -0,0 +1,33 @@ +// Copyright 2025 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_TIME_INTERNAL_CCTZ_TEST_TIME_ZONE_NAMES_H_ +#define ABSL_TIME_INTERNAL_CCTZ_TEST_TIME_ZONE_NAMES_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { +namespace cctz { + +// A list of known time-zone names. +extern const char* const kTimeZoneNames[]; + +} // namespace cctz +} // namespace time_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_TIME_INTERNAL_CCTZ_TEST_TIME_ZONE_NAMES_H_ diff --git a/absl/time/internal/cctz/src/time_zone_format.cc b/absl/time/internal/cctz/src/time_zone_format.cc index 0e5f32f14a0..f739e026e37 100644 --- a/absl/time/internal/cctz/src/time_zone_format.cc +++ b/absl/time/internal/cctz/src/time_zone_format.cc @@ -117,7 +117,7 @@ std::tm ToTM(const time_zone::absolute_lookup& al) { tm.tm_mday = al.cs.day(); tm.tm_mon = al.cs.month() - 1; - // Saturate tm.tm_year is cases of over/underflow. + // Saturate tm.tm_year in cases of over/underflow. if (al.cs.year() < std::numeric_limits<int>::min() + 1900) { tm.tm_year = std::numeric_limits<int>::min(); } else if (al.cs.year() - 1900 > std::numeric_limits<int>::max()) { @@ -338,7 +338,7 @@ std::string format(const std::string& format, const time_point<seconds>& tp, const std::tm tm = ToTM(al); // Scratch buffer for internal conversions. - char buf[3 + kDigits10_64]; // enough for longest conversion + char buf[6 + (kDigits10_64 + 2)]; // enough for longest conversion %F char* const ep = buf + sizeof(buf); char* bp; // works back from ep @@ -382,7 +382,7 @@ std::string format(const std::string& format, const time_point<seconds>& tp, if (cur == end || (cur - percent) % 2 == 0) continue; // Simple specifiers that we handle ourselves.
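The new 'F' and 'T' cases below mean %F (equivalent to %Y-%m-%d) and %T (equivalent to %H:%M:%S) are now formatted by cctz itself rather than handed to strftime(), so their output no longer varies by platform. A sketch using the same timestamp as the updated PosixConversions test further below; as a cross-check, 308189482 s = 3567 days + 682 s, where 3567 = 9*365 + 2 leap days (1972, 1976) + 280 days into 1979 (day-of-year 281, matching %j), and 682 s = 00:11:22:

```cpp
#include <chrono>
#include <string>

#include "absl/time/internal/cctz/include/cctz/time_zone.h"

namespace cctz = absl::time_internal::cctz;

std::string FormatExample() {
  const cctz::time_zone utc = cctz::utc_time_zone();
  const auto tp = std::chrono::system_clock::from_time_t(308189482);
  return cctz::format("%F %T", tp, utc);  // "1979-10-08 00:11:22"
}
```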
- if (strchr("YmdeUuWwHMSzZs%", *cur)) { + if (strchr("YmdeFUuWwHMSTzZs%", *cur)) { if (cur - 1 != pending) { FormatTM(&result, std::string(pending, cur - 1), tm); } @@ -403,6 +403,14 @@ std::string format(const std::string& format, const time_point& tp, if (*cur == 'e' && *bp == '0') *bp = ' '; // for Windows result.append(bp, static_cast(ep - bp)); break; + case 'F': + bp = Format02d(ep, al.cs.day()); + *--bp = '-'; + bp = Format02d(bp, al.cs.month()); + *--bp = '-'; + bp = Format64(bp, 0, al.cs.year()); + result.append(bp, static_cast(ep - bp)); + break; case 'U': bp = Format02d(ep, ToWeek(civil_day(al.cs), weekday::sunday)); result.append(bp, static_cast(ep - bp)); @@ -431,6 +439,14 @@ std::string format(const std::string& format, const time_point& tp, bp = Format02d(ep, al.cs.second()); result.append(bp, static_cast(ep - bp)); break; + case 'T': + bp = Format02d(ep, al.cs.second()); + *--bp = ':'; + bp = Format02d(bp, al.cs.minute()); + *--bp = ':'; + bp = Format02d(bp, al.cs.hour()); + result.append(bp, static_cast(ep - bp)); + break; case 'z': bp = FormatOffset(ep, al.offset, ""); result.append(bp, static_cast(ep - bp)); @@ -769,6 +785,20 @@ bool parse(const std::string& format, const std::string& input, data = ParseInt(data, 2, 1, 31, &tm.tm_mday); week_num = -1; continue; + case 'F': + data = ParseInt(data, 0, kyearmin, kyearmax, &year); + if (data != nullptr) { + saw_year = true; + data = (*data == '-' ? data + 1 : nullptr); + } + data = ParseInt(data, 2, 1, 12, &tm.tm_mon); + if (data != nullptr) { + tm.tm_mon -= 1; + data = (*data == '-' ? data + 1 : nullptr); + } + data = ParseInt(data, 2, 1, 31, &tm.tm_mday); + week_num = -1; + continue; case 'U': data = ParseInt(data, 0, 0, 53, &week_num); week_start = weekday::sunday; @@ -794,13 +824,20 @@ bool parse(const std::string& format, const std::string& input, case 'S': data = ParseInt(data, 2, 0, 60, &tm.tm_sec); continue; + case 'T': + data = ParseInt(data, 2, 0, 23, &tm.tm_hour); + twelve_hour = false; + data = (data != nullptr && *data == ':' ? data + 1 : nullptr); + data = ParseInt(data, 2, 0, 59, &tm.tm_min); + data = (data != nullptr && *data == ':' ? 
data + 1 : nullptr); + data = ParseInt(data, 2, 0, 60, &tm.tm_sec); + continue; case 'I': case 'l': case 'r': // probably uses %I twelve_hour = true; break; case 'R': // uses %H - case 'T': // uses %H case 'c': // probably uses %H case 'X': // probably uses %H twelve_hour = false; diff --git a/absl/time/internal/cctz/src/time_zone_format_test.cc b/absl/time/internal/cctz/src/time_zone_format_test.cc index 4a6c71f17e3..a270f4dbf33 100644 --- a/absl/time/internal/cctz/src/time_zone_format_test.cc +++ b/absl/time/internal/cctz/src/time_zone_format_test.cc @@ -169,23 +169,22 @@ TEST(Format, Basics) { TEST(Format, PosixConversions) { const time_zone tz = utc_time_zone(); - auto tp = chrono::system_clock::from_time_t(0); + auto tp = + chrono::system_clock::from_time_t(308189482); // 1979-10-08T00:11:22Z - TestFormatSpecifier(tp, tz, "%d", "01"); - TestFormatSpecifier(tp, tz, "%e", " 1"); // extension but internal support + TestFormatSpecifier(tp, tz, "%d", "08"); + TestFormatSpecifier(tp, tz, "%e", " 8"); // extension but internal support TestFormatSpecifier(tp, tz, "%H", "00"); TestFormatSpecifier(tp, tz, "%I", "12"); - TestFormatSpecifier(tp, tz, "%j", "001"); - TestFormatSpecifier(tp, tz, "%m", "01"); - TestFormatSpecifier(tp, tz, "%M", "00"); - TestFormatSpecifier(tp, tz, "%S", "00"); - TestFormatSpecifier(tp, tz, "%U", "00"); -#if !defined(__EMSCRIPTEN__) - TestFormatSpecifier(tp, tz, "%w", "4"); // 4=Thursday -#endif - TestFormatSpecifier(tp, tz, "%W", "00"); - TestFormatSpecifier(tp, tz, "%y", "70"); - TestFormatSpecifier(tp, tz, "%Y", "1970"); + TestFormatSpecifier(tp, tz, "%j", "281"); + TestFormatSpecifier(tp, tz, "%m", "10"); + TestFormatSpecifier(tp, tz, "%M", "11"); + TestFormatSpecifier(tp, tz, "%S", "22"); + TestFormatSpecifier(tp, tz, "%U", "40"); + TestFormatSpecifier(tp, tz, "%w", "1"); // 1=Monday + TestFormatSpecifier(tp, tz, "%W", "41"); + TestFormatSpecifier(tp, tz, "%y", "79"); + TestFormatSpecifier(tp, tz, "%Y", "1979"); TestFormatSpecifier(tp, tz, "%z", "+0000"); TestFormatSpecifier(tp, tz, "%Z", "UTC"); TestFormatSpecifier(tp, tz, "%%", "%"); @@ -193,21 +192,21 @@ TEST(Format, PosixConversions) { #if defined(__linux__) // SU/C99/TZ extensions TestFormatSpecifier(tp, tz, "%C", "19"); - TestFormatSpecifier(tp, tz, "%D", "01/01/70"); - TestFormatSpecifier(tp, tz, "%F", "1970-01-01"); - TestFormatSpecifier(tp, tz, "%g", "70"); - TestFormatSpecifier(tp, tz, "%G", "1970"); + TestFormatSpecifier(tp, tz, "%D", "10/08/79"); + TestFormatSpecifier(tp, tz, "%F", "1979-10-08"); + TestFormatSpecifier(tp, tz, "%g", "79"); + TestFormatSpecifier(tp, tz, "%G", "1979"); #if defined(__GLIBC__) TestFormatSpecifier(tp, tz, "%k", " 0"); TestFormatSpecifier(tp, tz, "%l", "12"); #endif TestFormatSpecifier(tp, tz, "%n", "\n"); - TestFormatSpecifier(tp, tz, "%R", "00:00"); + TestFormatSpecifier(tp, tz, "%R", "00:11"); TestFormatSpecifier(tp, tz, "%t", "\t"); - TestFormatSpecifier(tp, tz, "%T", "00:00:00"); - TestFormatSpecifier(tp, tz, "%u", "4"); // 4=Thursday - TestFormatSpecifier(tp, tz, "%V", "01"); - TestFormatSpecifier(tp, tz, "%s", "0"); + TestFormatSpecifier(tp, tz, "%T", "00:11:22"); + TestFormatSpecifier(tp, tz, "%u", "1"); // 1=Monday + TestFormatSpecifier(tp, tz, "%V", "41"); + TestFormatSpecifier(tp, tz, "%s", "308189482"); #endif } diff --git a/absl/time/internal/cctz/src/time_zone_lookup.cc b/absl/time/internal/cctz/src/time_zone_lookup.cc index 80f73199ff4..e8f1d9307b4 100644 --- a/absl/time/internal/cctz/src/time_zone_lookup.cc +++ 
b/absl/time/internal/cctz/src/time_zone_lookup.cc
@@ -33,24 +33,32 @@
 #endif
 #if defined(_WIN32)
-#include
-// Include only when the SDK is for Windows 10 (and later), and the binary is
-// targeted for Windows XP and later.
-// Note: The Windows SDK added windows.globalization.h file for Windows 10, but
-// MinGW did not add it until NTDDI_WIN10_NI (SDK version 10.0.22621.0).
-#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) || \
-     (defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
-    (_WIN32_WINNT >= _WIN32_WINNT_WINXP)
+// Include only when <icu.h> is available.
+// https://learn.microsoft.com/en-us/windows/win32/intl/international-components-for-unicode--icu-
+// https://devblogs.microsoft.com/oldnewthing/20210527-00/?p=105255
+#if defined(__has_include)
+#if __has_include(<icu.h>)
 #define USE_WIN32_LOCAL_TIME_ZONE
-#include
-#include
-#include
-#include
 #include
-#include
-#endif
-#endif
-
+#pragma push_macro("_WIN32_WINNT")
+#pragma push_macro("NTDDI_VERSION")
+// Minimum _WIN32_WINNT and NTDDI_VERSION to use ucal_getTimeZoneIDForWindowsID
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x0A00  // == _WIN32_WINNT_WIN10
+#undef NTDDI_VERSION
+#define NTDDI_VERSION 0x0A000004  // == NTDDI_WIN10_RS3
+#include <icu.h>
+#pragma pop_macro("NTDDI_VERSION")
+#pragma pop_macro("_WIN32_WINNT")
+#include
+
+#include
+#endif  // __has_include(<icu.h>)
+#endif  // __has_include
+#endif  // _WIN32
+
+#include
+#include
 #include
 #include
 #include
@@ -65,80 +73,78 @@ namespace cctz {
 namespace {
 #if defined(USE_WIN32_LOCAL_TIME_ZONE)
-// Calls the WinRT Calendar.GetTimeZone method to obtain the IANA ID of the
-// local time zone. Returns an empty vector in case of an error.
-std::string win32_local_time_zone(const HMODULE combase) {
-  std::string result;
-  const auto ro_activate_instance =
-      reinterpret_cast(
-          GetProcAddress(combase, "RoActivateInstance"));
-  if (!ro_activate_instance) {
-    return result;
-  }
-  const auto windows_create_string_reference =
-      reinterpret_cast(
-          GetProcAddress(combase, "WindowsCreateStringReference"));
-  if (!windows_create_string_reference) {
-    return result;
-  }
-  const auto windows_delete_string =
-      reinterpret_cast(
-          GetProcAddress(combase, "WindowsDeleteString"));
-  if (!windows_delete_string) {
-    return result;
-  }
-  const auto windows_get_string_raw_buffer =
-      reinterpret_cast(
-          GetProcAddress(combase, "WindowsGetStringRawBuffer"));
-  if (!windows_get_string_raw_buffer) {
-    return result;
+// True if we have already failed to load the API.
+static std::atomic_bool g_ucal_getTimeZoneIDForWindowsIDUnavailable;
+static std::atomic
+    g_ucal_getTimeZoneIDForWindowsIDRef;
+
+std::string win32_local_time_zone() {
+  // If we have already failed to load the API, then just give up.
+  if (g_ucal_getTimeZoneIDForWindowsIDUnavailable.load()) {
+    return "";
   }
-  // The string returned by WindowsCreateStringReference doesn't need to be
-  // deleted.
-  HSTRING calendar_class_id;
-  HSTRING_HEADER calendar_class_id_header;
-  HRESULT hr = windows_create_string_reference(
-      RuntimeClass_Windows_Globalization_Calendar,
-      sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
-      &calendar_class_id_header, &calendar_class_id);
-  if (FAILED(hr)) {
-    return result;
-  }
+  auto ucal_getTimeZoneIDForWindowsIDFunc =
+      g_ucal_getTimeZoneIDForWindowsIDRef.load();
+  if (ucal_getTimeZoneIDForWindowsIDFunc == nullptr) {
+    // If we have already failed to load the API, then just give up.
+ if (g_ucal_getTimeZoneIDForWindowsIDUnavailable.load()) { + return ""; + } + + const HMODULE icudll = + ::LoadLibraryExW(L"icu.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); - IInspectable* calendar; - hr = ro_activate_instance(calendar_class_id, &calendar); - if (FAILED(hr)) { - return result; + if (icudll == nullptr) { + g_ucal_getTimeZoneIDForWindowsIDUnavailable.store(true); + return ""; + } + + ucal_getTimeZoneIDForWindowsIDFunc = + reinterpret_cast( + ::GetProcAddress(icudll, "ucal_getTimeZoneIDForWindowsID")); + + if (ucal_getTimeZoneIDForWindowsIDFunc == nullptr) { + g_ucal_getTimeZoneIDForWindowsIDUnavailable.store(true); + return ""; + } + // store-race is not a problem here, because ::GetProcAddress() returns the + // same address for the same function in the same DLL. + g_ucal_getTimeZoneIDForWindowsIDRef.store( + ucal_getTimeZoneIDForWindowsIDFunc); + + // We intentionally do not call ::FreeLibrary() here to avoid frequent DLL + // loadings and unloading. As "icu.dll" is a system library, keeping it on + // memory is supposed to have no major drawback. } - ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone; - hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone)); - if (FAILED(hr)) { - calendar->Release(); - return result; + DYNAMIC_TIME_ZONE_INFORMATION info = {}; + if (::GetDynamicTimeZoneInformation(&info) == TIME_ZONE_ID_INVALID) { + return ""; } - HSTRING tz_hstr; - hr = time_zone->GetTimeZone(&tz_hstr); - if (SUCCEEDED(hr)) { - UINT32 wlen; - const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen); - if (tz_wstr) { - const int size = - WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast(wlen), - nullptr, 0, nullptr, nullptr); - result.resize(static_cast(size)); - WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast(wlen), - &result[0], size, nullptr, nullptr); - } - windows_delete_string(tz_hstr); + std::array buffer; + UErrorCode status = U_ZERO_ERROR; + const auto num_chars_in_buffer = ucal_getTimeZoneIDForWindowsIDFunc( + reinterpret_cast(info.TimeZoneKeyName), -1, nullptr, + buffer.data(), static_cast(buffer.size()), &status); + if (status != U_ZERO_ERROR || num_chars_in_buffer <= 0 || + num_chars_in_buffer > static_cast(buffer.size())) { + return ""; } - time_zone->Release(); - calendar->Release(); - return result; + + const int num_bytes_in_utf8 = ::WideCharToMultiByte( + CP_UTF8, 0, reinterpret_cast(buffer.data()), + static_cast(num_chars_in_buffer), nullptr, 0, nullptr, nullptr); + std::string local_time_str; + local_time_str.resize(static_cast(num_bytes_in_utf8)); + ::WideCharToMultiByte( + CP_UTF8, 0, reinterpret_cast(buffer.data()), + static_cast(num_chars_in_buffer), &local_time_str[0], + num_bytes_in_utf8, nullptr, nullptr); + return local_time_str; } -#endif +#endif // USE_WIN32_LOCAL_TIME_ZONE } // namespace std::string time_zone::name() const { return effective_impl().Name(); } @@ -256,36 +262,9 @@ time_zone local_time_zone() { } #endif #if defined(USE_WIN32_LOCAL_TIME_ZONE) - // Use the WinRT Calendar class to get the local time zone. This feature is - // available on Windows 10 and later. The library is dynamically linked to - // maintain binary compatibility with Windows XP - Windows 7. On Windows 8, - // The combase.dll API functions are available but the RoActivateInstance - // call will fail for the Calendar class. 
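Stripped to its skeleton, the new win32_local_time_zone() above is a lazily resolved, atomically cached GetProcAddress lookup against the system icu.dll. The sketch below isolates just that pattern; the function-pointer type is a placeholder rather than ICU's real signature, and the real code additionally latches a permanent-failure flag, as shown above:

    #include <atomic>

    #include <windows.h>

    // Placeholder for the ucal_getTimeZoneIDForWindowsID pointer type.
    using IcuFn = void (*)();

    IcuFn LoadIcuEntryPoint() {
      static std::atomic<IcuFn> cached{nullptr};
      IcuFn fn = cached.load();
      if (fn != nullptr) return fn;
      // System directory only; deliberately never FreeLibrary'd, since
      // icu.dll is a system library that may be needed again.
      const HMODULE icu =
          ::LoadLibraryExW(L"icu.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
      if (icu == nullptr) return nullptr;
      fn = reinterpret_cast<IcuFn>(
          ::GetProcAddress(icu, "ucal_getTimeZoneIDForWindowsID"));
      // A racing store is benign: GetProcAddress returns a stable address
      // for the same export of the same loaded DLL.
      if (fn != nullptr) cached.store(fn);
      return fn;
    }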
- std::string winrt_tz; - const HMODULE combase = - LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); - if (combase) { - const auto ro_initialize = reinterpret_cast( - GetProcAddress(combase, "RoInitialize")); - const auto ro_uninitialize = reinterpret_cast( - GetProcAddress(combase, "RoUninitialize")); - if (ro_initialize && ro_uninitialize) { - const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED); - // RPC_E_CHANGED_MODE means that a previous RoInitialize call specified - // a different concurrency model. The WinRT runtime is initialized and - // should work for our purpose here, but we should *not* call - // RoUninitialize because it's a failure. - if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) { - winrt_tz = win32_local_time_zone(combase); - if (SUCCEEDED(hr)) { - ro_uninitialize(); - } - } - } - FreeLibrary(combase); - } - if (!winrt_tz.empty()) { - zone = winrt_tz.c_str(); + std::string win32_tz = win32_local_time_zone(); + if (!win32_tz.empty()) { + zone = win32_tz.c_str(); } #endif diff --git a/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/absl/time/internal/cctz/src/time_zone_lookup_test.cc index e1bea28ec6e..cd08a358f41 100644 --- a/absl/time/internal/cctz/src/time_zone_lookup_test.cc +++ b/absl/time/internal/cctz/src/time_zone_lookup_test.cc @@ -29,6 +29,7 @@ #include "gtest/gtest.h" #include "absl/time/internal/cctz/include/cctz/civil_time.h" +#include "absl/time/internal/cctz/src/test_time_zone_names.h" namespace chrono = std::chrono; @@ -39,494 +40,6 @@ namespace cctz { namespace { -// A list of known time-zone names. -const char* const kTimeZoneNames[] = {"Africa/Abidjan", - "Africa/Accra", - "Africa/Addis_Ababa", - "Africa/Algiers", - "Africa/Asmara", - "Africa/Bamako", - "Africa/Bangui", - "Africa/Banjul", - "Africa/Bissau", - "Africa/Blantyre", - "Africa/Brazzaville", - "Africa/Bujumbura", - "Africa/Cairo", - "Africa/Casablanca", - "Africa/Ceuta", - "Africa/Conakry", - "Africa/Dakar", - "Africa/Dar_es_Salaam", - "Africa/Djibouti", - "Africa/Douala", - "Africa/El_Aaiun", - "Africa/Freetown", - "Africa/Gaborone", - "Africa/Harare", - "Africa/Johannesburg", - "Africa/Juba", - "Africa/Kampala", - "Africa/Khartoum", - "Africa/Kigali", - "Africa/Kinshasa", - "Africa/Lagos", - "Africa/Libreville", - "Africa/Lome", - "Africa/Luanda", - "Africa/Lubumbashi", - "Africa/Lusaka", - "Africa/Malabo", - "Africa/Maputo", - "Africa/Maseru", - "Africa/Mbabane", - "Africa/Mogadishu", - "Africa/Monrovia", - "Africa/Nairobi", - "Africa/Ndjamena", - "Africa/Niamey", - "Africa/Nouakchott", - "Africa/Ouagadougou", - "Africa/Porto-Novo", - "Africa/Sao_Tome", - "Africa/Timbuktu", - "Africa/Tripoli", - "Africa/Tunis", - "Africa/Windhoek", - "America/Adak", - "America/Anchorage", - "America/Anguilla", - "America/Antigua", - "America/Araguaina", - "America/Argentina/Buenos_Aires", - "America/Argentina/Catamarca", - "America/Argentina/Cordoba", - "America/Argentina/Jujuy", - "America/Argentina/La_Rioja", - "America/Argentina/Mendoza", - "America/Argentina/Rio_Gallegos", - "America/Argentina/Salta", - "America/Argentina/San_Juan", - "America/Argentina/San_Luis", - "America/Argentina/Tucuman", - "America/Argentina/Ushuaia", - "America/Aruba", - "America/Asuncion", - "America/Atikokan", - "America/Atka", - "America/Bahia", - "America/Bahia_Banderas", - "America/Barbados", - "America/Belem", - "America/Belize", - "America/Blanc-Sablon", - "America/Boa_Vista", - "America/Bogota", - "America/Boise", - "America/Cambridge_Bay", - "America/Campo_Grande", - "America/Cancun", - 
"America/Caracas", - "America/Cayenne", - "America/Cayman", - "America/Chicago", - "America/Chihuahua", - "America/Ciudad_Juarez", - "America/Coral_Harbour", - "America/Costa_Rica", - "America/Coyhaique", - "America/Creston", - "America/Cuiaba", - "America/Curacao", - "America/Danmarkshavn", - "America/Dawson", - "America/Dawson_Creek", - "America/Denver", - "America/Detroit", - "America/Dominica", - "America/Edmonton", - "America/Eirunepe", - "America/El_Salvador", - "America/Ensenada", - "America/Fort_Nelson", - "America/Fortaleza", - "America/Glace_Bay", - "America/Goose_Bay", - "America/Grand_Turk", - "America/Grenada", - "America/Guadeloupe", - "America/Guatemala", - "America/Guayaquil", - "America/Guyana", - "America/Halifax", - "America/Havana", - "America/Hermosillo", - "America/Indiana/Indianapolis", - "America/Indiana/Knox", - "America/Indiana/Marengo", - "America/Indiana/Petersburg", - "America/Indiana/Tell_City", - "America/Indiana/Vevay", - "America/Indiana/Vincennes", - "America/Indiana/Winamac", - "America/Inuvik", - "America/Iqaluit", - "America/Jamaica", - "America/Juneau", - "America/Kentucky/Louisville", - "America/Kentucky/Monticello", - "America/Kralendijk", - "America/La_Paz", - "America/Lima", - "America/Los_Angeles", - "America/Lower_Princes", - "America/Maceio", - "America/Managua", - "America/Manaus", - "America/Marigot", - "America/Martinique", - "America/Matamoros", - "America/Mazatlan", - "America/Menominee", - "America/Merida", - "America/Metlakatla", - "America/Mexico_City", - "America/Miquelon", - "America/Moncton", - "America/Monterrey", - "America/Montevideo", - "America/Montreal", - "America/Montserrat", - "America/Nassau", - "America/New_York", - "America/Nipigon", - "America/Nome", - "America/Noronha", - "America/North_Dakota/Beulah", - "America/North_Dakota/Center", - "America/North_Dakota/New_Salem", - "America/Nuuk", - "America/Ojinaga", - "America/Panama", - "America/Pangnirtung", - "America/Paramaribo", - "America/Phoenix", - "America/Port-au-Prince", - "America/Port_of_Spain", - "America/Porto_Acre", - "America/Porto_Velho", - "America/Puerto_Rico", - "America/Punta_Arenas", - "America/Rainy_River", - "America/Rankin_Inlet", - "America/Recife", - "America/Regina", - "America/Resolute", - "America/Rio_Branco", - "America/Santa_Isabel", - "America/Santarem", - "America/Santiago", - "America/Santo_Domingo", - "America/Sao_Paulo", - "America/Scoresbysund", - "America/Shiprock", - "America/Sitka", - "America/St_Barthelemy", - "America/St_Johns", - "America/St_Kitts", - "America/St_Lucia", - "America/St_Thomas", - "America/St_Vincent", - "America/Swift_Current", - "America/Tegucigalpa", - "America/Thule", - "America/Thunder_Bay", - "America/Tijuana", - "America/Toronto", - "America/Tortola", - "America/Vancouver", - "America/Virgin", - "America/Whitehorse", - "America/Winnipeg", - "America/Yakutat", - "America/Yellowknife", - "Antarctica/Casey", - "Antarctica/Davis", - "Antarctica/DumontDUrville", - "Antarctica/Macquarie", - "Antarctica/Mawson", - "Antarctica/McMurdo", - "Antarctica/Palmer", - "Antarctica/Rothera", - "Antarctica/Syowa", - "Antarctica/Troll", - "Antarctica/Vostok", - "Arctic/Longyearbyen", - "Asia/Aden", - "Asia/Almaty", - "Asia/Amman", - "Asia/Anadyr", - "Asia/Aqtau", - "Asia/Aqtobe", - "Asia/Ashgabat", - "Asia/Atyrau", - "Asia/Baghdad", - "Asia/Bahrain", - "Asia/Baku", - "Asia/Bangkok", - "Asia/Barnaul", - "Asia/Beirut", - "Asia/Bishkek", - "Asia/Brunei", - "Asia/Chita", - "Asia/Choibalsan", - "Asia/Chongqing", - "Asia/Colombo", - 
"Asia/Damascus", - "Asia/Dhaka", - "Asia/Dili", - "Asia/Dubai", - "Asia/Dushanbe", - "Asia/Famagusta", - "Asia/Gaza", - "Asia/Harbin", - "Asia/Hebron", - "Asia/Ho_Chi_Minh", - "Asia/Hong_Kong", - "Asia/Hovd", - "Asia/Irkutsk", - "Asia/Istanbul", - "Asia/Jakarta", - "Asia/Jayapura", - "Asia/Jerusalem", - "Asia/Kabul", - "Asia/Kamchatka", - "Asia/Karachi", - "Asia/Kashgar", - "Asia/Kathmandu", - "Asia/Khandyga", - "Asia/Kolkata", - "Asia/Krasnoyarsk", - "Asia/Kuala_Lumpur", - "Asia/Kuching", - "Asia/Kuwait", - "Asia/Macau", - "Asia/Magadan", - "Asia/Makassar", - "Asia/Manila", - "Asia/Muscat", - "Asia/Nicosia", - "Asia/Novokuznetsk", - "Asia/Novosibirsk", - "Asia/Omsk", - "Asia/Oral", - "Asia/Phnom_Penh", - "Asia/Pontianak", - "Asia/Pyongyang", - "Asia/Qatar", - "Asia/Qostanay", - "Asia/Qyzylorda", - "Asia/Riyadh", - "Asia/Sakhalin", - "Asia/Samarkand", - "Asia/Seoul", - "Asia/Shanghai", - "Asia/Singapore", - "Asia/Srednekolymsk", - "Asia/Taipei", - "Asia/Tashkent", - "Asia/Tbilisi", - "Asia/Tehran", - "Asia/Tel_Aviv", - "Asia/Thimphu", - "Asia/Tokyo", - "Asia/Tomsk", - "Asia/Ulaanbaatar", - "Asia/Urumqi", - "Asia/Ust-Nera", - "Asia/Vientiane", - "Asia/Vladivostok", - "Asia/Yakutsk", - "Asia/Yangon", - "Asia/Yekaterinburg", - "Asia/Yerevan", - "Atlantic/Azores", - "Atlantic/Bermuda", - "Atlantic/Canary", - "Atlantic/Cape_Verde", - "Atlantic/Faroe", - "Atlantic/Jan_Mayen", - "Atlantic/Madeira", - "Atlantic/Reykjavik", - "Atlantic/South_Georgia", - "Atlantic/St_Helena", - "Atlantic/Stanley", - "Australia/Adelaide", - "Australia/Brisbane", - "Australia/Broken_Hill", - "Australia/Canberra", - "Australia/Currie", - "Australia/Darwin", - "Australia/Eucla", - "Australia/Hobart", - "Australia/Lindeman", - "Australia/Lord_Howe", - "Australia/Melbourne", - "Australia/Perth", - "Australia/Sydney", - "Australia/Yancowinna", - "Etc/GMT", - "Etc/GMT+0", - "Etc/GMT+1", - "Etc/GMT+10", - "Etc/GMT+11", - "Etc/GMT+12", - "Etc/GMT+2", - "Etc/GMT+3", - "Etc/GMT+4", - "Etc/GMT+5", - "Etc/GMT+6", - "Etc/GMT+7", - "Etc/GMT+8", - "Etc/GMT+9", - "Etc/GMT-0", - "Etc/GMT-1", - "Etc/GMT-10", - "Etc/GMT-11", - "Etc/GMT-12", - "Etc/GMT-13", - "Etc/GMT-14", - "Etc/GMT-2", - "Etc/GMT-3", - "Etc/GMT-4", - "Etc/GMT-5", - "Etc/GMT-6", - "Etc/GMT-7", - "Etc/GMT-8", - "Etc/GMT-9", - "Etc/GMT0", - "Etc/Greenwich", - "Etc/UCT", - "Etc/UTC", - "Etc/Universal", - "Etc/Zulu", - "Europe/Amsterdam", - "Europe/Andorra", - "Europe/Astrakhan", - "Europe/Athens", - "Europe/Belfast", - "Europe/Belgrade", - "Europe/Berlin", - "Europe/Bratislava", - "Europe/Brussels", - "Europe/Bucharest", - "Europe/Budapest", - "Europe/Busingen", - "Europe/Chisinau", - "Europe/Copenhagen", - "Europe/Dublin", - "Europe/Gibraltar", - "Europe/Guernsey", - "Europe/Helsinki", - "Europe/Isle_of_Man", - "Europe/Istanbul", - "Europe/Jersey", - "Europe/Kaliningrad", - "Europe/Kirov", - "Europe/Kyiv", - "Europe/Lisbon", - "Europe/Ljubljana", - "Europe/London", - "Europe/Luxembourg", - "Europe/Madrid", - "Europe/Malta", - "Europe/Mariehamn", - "Europe/Minsk", - "Europe/Monaco", - "Europe/Moscow", - "Europe/Nicosia", - "Europe/Oslo", - "Europe/Paris", - "Europe/Podgorica", - "Europe/Prague", - "Europe/Riga", - "Europe/Rome", - "Europe/Samara", - "Europe/San_Marino", - "Europe/Sarajevo", - "Europe/Saratov", - "Europe/Simferopol", - "Europe/Skopje", - "Europe/Sofia", - "Europe/Stockholm", - "Europe/Tallinn", - "Europe/Tirane", - "Europe/Tiraspol", - "Europe/Ulyanovsk", - "Europe/Vaduz", - "Europe/Vatican", - "Europe/Vienna", - "Europe/Vilnius", - "Europe/Volgograd", - 
"Europe/Warsaw", - "Europe/Zagreb", - "Europe/Zurich", - "Factory", - "Indian/Antananarivo", - "Indian/Chagos", - "Indian/Christmas", - "Indian/Cocos", - "Indian/Comoro", - "Indian/Kerguelen", - "Indian/Mahe", - "Indian/Maldives", - "Indian/Mauritius", - "Indian/Mayotte", - "Indian/Reunion", - "Pacific/Apia", - "Pacific/Auckland", - "Pacific/Bougainville", - "Pacific/Chatham", - "Pacific/Chuuk", - "Pacific/Easter", - "Pacific/Efate", - "Pacific/Fakaofo", - "Pacific/Fiji", - "Pacific/Funafuti", - "Pacific/Galapagos", - "Pacific/Gambier", - "Pacific/Guadalcanal", - "Pacific/Guam", - "Pacific/Honolulu", - "Pacific/Johnston", - "Pacific/Kanton", - "Pacific/Kiritimati", - "Pacific/Kosrae", - "Pacific/Kwajalein", - "Pacific/Majuro", - "Pacific/Marquesas", - "Pacific/Midway", - "Pacific/Nauru", - "Pacific/Niue", - "Pacific/Norfolk", - "Pacific/Noumea", - "Pacific/Pago_Pago", - "Pacific/Palau", - "Pacific/Pitcairn", - "Pacific/Pohnpei", - "Pacific/Port_Moresby", - "Pacific/Rarotonga", - "Pacific/Saipan", - "Pacific/Samoa", - "Pacific/Tahiti", - "Pacific/Tarawa", - "Pacific/Tongatapu", - "Pacific/Wake", - "Pacific/Wallis", - "Pacific/Yap", - "UTC", - nullptr}; - // Helper to return a loaded time zone by value (UTC on error). time_zone LoadZone(const std::string& name) { time_zone tz; diff --git a/absl/time/time.h b/absl/time/time.h index db17a4cd40b..29beaac19af 100644 --- a/absl/time/time.h +++ b/absl/time/time.h @@ -589,9 +589,10 @@ ABSL_ATTRIBUTE_CONST_FUNCTION Duration Seconds(T n) { } return time_internal::MakePosDoubleDuration(n); } else { - if (std::isnan(n)) - return std::signbit(n) ? -InfiniteDuration() : InfiniteDuration(); - if (n <= (std::numeric_limits::min)()) return -InfiniteDuration(); + if (std::isnan(n)) return -InfiniteDuration(); + if (n <= static_cast((std::numeric_limits::min)())) { + return -InfiniteDuration(); + } return -time_internal::MakePosDoubleDuration(-n); } } @@ -1869,8 +1870,9 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Nanoseconds(Duration d) { time_internal::GetRepHi(d) >> 33 == 0) { return (time_internal::GetRepHi(d) * 1000 * 1000 * 1000) + (time_internal::GetRepLo(d) / time_internal::kTicksPerNanosecond); + } else { + return d / Nanoseconds(1); } - return d / Nanoseconds(1); } ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Microseconds( @@ -1880,8 +1882,9 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Microseconds( return (time_internal::GetRepHi(d) * 1000 * 1000) + (time_internal::GetRepLo(d) / (time_internal::kTicksPerNanosecond * 1000)); + } else { + return d / Microseconds(1); } - return d / Microseconds(1); } ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Milliseconds( @@ -1891,8 +1894,9 @@ ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Milliseconds( return (time_internal::GetRepHi(d) * 1000) + (time_internal::GetRepLo(d) / (time_internal::kTicksPerNanosecond * 1000 * 1000)); + } else { + return d / Milliseconds(1); } - return d / Milliseconds(1); } ABSL_ATTRIBUTE_CONST_FUNCTION constexpr int64_t ToInt64Seconds(Duration d) { diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel index 0668a2e7762..cac5e4b1c86 100644 --- a/absl/types/BUILD.bazel +++ b/absl/types/BUILD.bazel @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+load("@rules_cc//cc:cc_library.bzl", "cc_library") +load("@rules_cc//cc:cc_test.bzl", "cc_test") load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", diff --git a/absl/types/span.h b/absl/types/span.h index 444b2ae6944..016376e030c 100644 --- a/absl/types/span.h +++ b/absl/types/span.h @@ -186,8 +186,9 @@ class ABSL_ATTRIBUTE_VIEW Span { // type C. template using EnableIfConvertibleFrom = - typename std::enable_if::value && - span_internal::HasSize::value>::type; + std::enable_if_t> && + span_internal::HasData::value && + span_internal::HasSize::value>; // Used to SFINAE-enable a function when the slice elements are const. template @@ -202,10 +203,11 @@ class ABSL_ATTRIBUTE_VIEW Span { public: using element_type = T; using value_type = absl::remove_cv_t; - // TODO(b/316099902) - pointer should be Nullable, but this makes it hard - // to recognize foreach loops as safe. - using pointer = T*; - using const_pointer = const T*; + // TODO(b/316099902) - pointer should be absl_nullable, but this makes it hard + // to recognize foreach loops as safe. absl_nullability_unknown is currently + // used to suppress -Wnullability-completeness warnings. + using pointer = T* absl_nullability_unknown; + using const_pointer = const T* absl_nullability_unknown; using reference = T&; using const_reference = const T&; using iterator = pointer; @@ -498,8 +500,7 @@ class ABSL_ATTRIBUTE_VIEW Span { // Support for absl::Hash. template friend H AbslHashValue(H h, Span v) { - return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), - hash_internal::WeaklyMixedInteger{v.size()}); + return H::combine_contiguous(std::move(h), v.data(), v.size()); } private: @@ -725,24 +726,38 @@ ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool operator>=(Span a, const U& b) { // } // template -constexpr Span MakeSpan(T* absl_nullable ptr, size_t size) noexcept { +constexpr Span MakeSpan(T* absl_nullable ptr ABSL_ATTRIBUTE_LIFETIME_BOUND, + size_t size) noexcept { return Span(ptr, size); } template -Span MakeSpan(T* absl_nullable begin, T* absl_nullable end) noexcept { +Span MakeSpan(T* absl_nullable begin ABSL_ATTRIBUTE_LIFETIME_BOUND, + T* absl_nullable end) noexcept { ABSL_HARDENING_ASSERT(begin <= end); return Span(begin, static_cast(end - begin)); } template constexpr auto MakeSpan(C& c) noexcept // NOLINT(runtime/references) - -> decltype(absl::MakeSpan(span_internal::GetData(c), c.size())) { + -> std::enable_if_t::value, + decltype(absl::MakeSpan(span_internal::GetData(c), + c.size()))> { + return MakeSpan(span_internal::GetData(c), c.size()); +} + +template +constexpr auto MakeSpan( + C& c ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept // NOLINT(runtime/references) + -> std::enable_if_t::value, + decltype(absl::MakeSpan(span_internal::GetData(c), + c.size()))> { return MakeSpan(span_internal::GetData(c), c.size()); } template -constexpr Span MakeSpan(T (&array)[N]) noexcept { +constexpr Span MakeSpan( + T (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) noexcept { return Span(array, N); } @@ -771,25 +786,36 @@ constexpr Span MakeSpan(T (&array)[N]) noexcept { // ProcessInts(absl::MakeConstSpan(std::vector{ 0, 0, 0 })); // template -constexpr Span MakeConstSpan(T* absl_nullable ptr, - size_t size) noexcept { +constexpr Span MakeConstSpan( + T* absl_nullable ptr ABSL_ATTRIBUTE_LIFETIME_BOUND, size_t size) noexcept { return Span(ptr, size); } template -Span MakeConstSpan(T* absl_nullable begin, +Span MakeConstSpan(T* absl_nullable begin + ABSL_ATTRIBUTE_LIFETIME_BOUND, T* absl_nullable end) noexcept { 
  ABSL_HARDENING_ASSERT(begin <= end);
   return Span(begin, end - begin);
 }
 template
-constexpr auto MakeConstSpan(const C& c) noexcept -> decltype(MakeSpan(c)) {
+constexpr auto MakeConstSpan(const C& c) noexcept
+    -> std::enable_if_t::value,
+                        decltype(MakeSpan(c))> {
+  return MakeSpan(c);
+}
+
+template
+constexpr auto MakeConstSpan(const C& c ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept
+    -> std::enable_if_t::value,
+                        decltype(MakeSpan(c))> {
   return MakeSpan(c);
 }
 template
-constexpr Span MakeConstSpan(const T (&array)[N]) noexcept {
+constexpr Span MakeConstSpan(
+    const T (&array ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) noexcept {
   return Span(array, N);
 }
 ABSL_NAMESPACE_END
diff --git a/absl/types/span_test.cc b/absl/types/span_test.cc
index 6700b81f0dc..21b49ed61f6 100644
--- a/absl/types/span_test.cc
+++ b/absl/types/span_test.cc
@@ -41,6 +41,20 @@ static_assert(!absl::type_traits_internal::IsOwner>::value &&
                   absl::type_traits_internal::IsView>::value,
               "Span is a view, not an owner");
+using S = absl::Span;
+
+static_assert(
+    std::is_trivially_destructible_v && std::is_trivially_copyable_v &&
+        std::is_trivially_assignable_v &&
+        std::is_trivially_copy_assignable_v &&
+        std::is_trivially_move_assignable_v &&
+        std::is_trivially_assignable_v &&
+        std::is_trivially_constructible_v &&
+        std::is_trivially_copy_constructible_v &&
+        std::is_trivially_move_constructible_v &&
+        std::is_trivially_constructible_v,
+    "Span should be trivial in everything except default-constructibility");
+
 MATCHER_P(DataIs, data,
           absl::StrCat("data() ", negation ? "isn't " : "is ",
                        testing::PrintToString(data))) {
diff --git a/absl/utility/BUILD.bazel b/absl/utility/BUILD.bazel
index 773f9496886..a714b020a2b 100644
--- a/absl/utility/BUILD.bazel
+++ b/absl/utility/BUILD.bazel
@@ -14,6 +14,7 @@
 # limitations under the License.
 #
+load("@rules_cc//cc:cc_library.bzl", "cc_library")
 load(
     "//absl:copts/configure_copts.bzl",
     "ABSL_DEFAULT_COPTS",
diff --git a/absl/utility/utility.h b/absl/utility/utility.h
index 4637b03df15..4d72c31a4f8 100644
--- a/absl/utility/utility.h
+++ b/absl/utility/utility.h
@@ -49,6 +49,19 @@ using std::make_index_sequence;
 using std::make_integer_sequence;
 using std::move;
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+// Backfill for std::nontype_t. An instance of this class can be provided as a
+// disambiguation tag to `absl::function_ref` to pass the address of a known
+// callable at compile time.
+// Requires C++20 due to `auto` template parameter.
+template <auto V>
+struct nontype_t {
+  explicit nontype_t() = default;
+};
+template <auto V>
+constexpr nontype_t<V> nontype{};
+#endif
+
 ABSL_NAMESPACE_END
 }  // namespace absl
diff --git a/ci/absl_alternate_options.h b/ci/absl_alternate_options.h
index a5638591572..20bf0105f44 100644
--- a/ci/absl_alternate_options.h
+++ b/ci/absl_alternate_options.h
@@ -15,13 +15,12 @@
 // Alternate options.h file, used in continuous integration testing to exercise
 // option settings not used by default.
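On the absl::nontype_t backfill just above: like C++26's std::nontype_t, the tag encodes a callable in its template argument so that a function_ref-style wrapper can bind it at compile time instead of storing a runtime pointer. A hypothetical usage sketch; this diff only adds the tag itself, and the absl::FunctionRef constructor shown here is an assumption, mirroring std::function_ref:

    #include "absl/functional/function_ref.h"
    #include "absl/utility/utility.h"

    int Twice(int n) { return 2 * n; }

    void Demo() {
      // C++20: &Twice rides along in the tag's type, so the wrapper needs
      // no runtime state for it. Assumed constructor, per the comment above.
      absl::FunctionRef<int(int)> ref(absl::nontype<&Twice>);
      ref(21);  // 42
    }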
+// SKIP_ABSL_INLINE_NAMESPACE_CHECK
+
 #ifndef ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_
 #define ABSL_CI_ABSL_ALTERNATE_OPTIONS_H_
-#define ABSL_OPTION_USE_STD_ANY 0
-#define ABSL_OPTION_USE_STD_OPTIONAL 0
 #define ABSL_OPTION_USE_STD_STRING_VIEW 0
-#define ABSL_OPTION_USE_STD_VARIANT 0
 #define ABSL_OPTION_USE_STD_ORDERING 0
 #define ABSL_OPTION_USE_INLINE_NAMESPACE 1
 #define ABSL_OPTION_INLINE_NAMESPACE_NAME ns
diff --git a/ci/cmake_common.sh b/ci/cmake_common.sh
index 3e14ca35181..484230cdf05 100644
--- a/ci/cmake_common.sh
+++ b/ci/cmake_common.sh
@@ -14,6 +14,6 @@
 # The commit of GoogleTest to be used in the CMake tests in this directory.
 # Keep this in sync with the commit in the MODULE.bazel file.
-readonly ABSL_GOOGLETEST_VERSION="1.16.0"
+readonly ABSL_GOOGLETEST_VERSION="1.17.0"
 readonly ABSL_GOOGLETEST_DOWNLOAD_URL="https://github.com/google/googletest/releases/download/v${ABSL_GOOGLETEST_VERSION}/googletest-${ABSL_GOOGLETEST_VERSION}.tar.gz"
diff --git a/ci/linux_arm_clang-latest_libcxx_bazel.sh b/ci/linux_arm_clang-latest_libcxx_bazel.sh
index d9e5992a7d4..631a8bd706b 100755
--- a/ci/linux_arm_clang-latest_libcxx_bazel.sh
+++ b/ci/linux_arm_clang-latest_libcxx_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}"
+  BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}"
 fi
 for std in ${STD}; do
@@ -71,13 +71,13 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
-      /bin/sh -c "
+      /bin/bash --login -c "
        cp -r /abseil-cpp-ro/* /abseil-cpp/
        if [ -n \"${ALTERNATE_OPTIONS:-}\" ]; then
          cp ${ALTERNATE_OPTIONS:-} absl/base/options.h || exit 1
        fi
        /usr/local/bin/bazel test ... \
-          --action_env=CC=clang-18 \
+          --action_env=CC=clang-19 \
          --compilation_mode=\"${compilation_mode}\" \
          --copt=\"${exceptions_mode}\" \
          --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
diff --git a/ci/linux_clang-latest_libcxx_asan_bazel.sh b/ci/linux_clang-latest_libcxx_asan_bazel.sh
index c83f3a05a6c..cfc551080db 100755
--- a/ci/linux_clang-latest_libcxx_asan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_asan_bazel.sh
@@ -73,32 +73,33 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
+      /bin/bash --login -c "
       /usr/local/bin/bazel test ... \
-          --action_env="CC=/opt/llvm/clang/bin/clang" \
-          --action_env="BAZEL_CXXOPTS=-std=${std}:-nostdinc++" \
-          --action_env="BAZEL_LINKOPTS=-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \
-          --action_env="CPLUS_INCLUDE_PATH=/opt/llvm/libcxx/include/c++/v1" \
-          --compilation_mode="${compilation_mode}" \
-          --copt="${exceptions_mode}" \
-          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
-          --copt="-fsanitize=address" \
-          --copt="-fsanitize=${UBSAN_CHECKS}" \
-          --copt="-fno-sanitize-recover=${UBSAN_CHECKS}" \
-          --copt="-fno-sanitize-blacklist" \
+          --action_env=\"CC=/opt/llvm/clang/bin/clang\" \
+          --action_env=\"BAZEL_CXXOPTS=-std=${std}:-nostdinc++\" \
+          --action_env=\"BAZEL_LINKOPTS=-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib\" \
+          --action_env=\"CPLUS_INCLUDE_PATH=/opt/llvm/libcxx/include/c++/v1\" \
+          --compilation_mode=\"${compilation_mode}\" \
+          --copt=\"${exceptions_mode}\" \
+          --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
+          --copt=\"-fsanitize=address\" \
+          --copt=\"-fsanitize=${UBSAN_CHECKS}\" \
+          --copt=\"-fno-sanitize-recover=${UBSAN_CHECKS}\" \
+          --copt=\"-fno-sanitize-blacklist\" \
           --copt=-Werror \
           --enable_bzlmod=true \
           --features=external_include_paths \
           --keep_going \
-          --linkopt="-fsanitize=address" \
-          --linkopt="-fsanitize-link-c++-runtime" \
+          --linkopt=\"-fsanitize=address\" \
+          --linkopt=\"-fsanitize-link-c++-runtime\" \
           --show_timestamps \
-          --test_env="ASAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer" \
-          --test_env="TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo" \
-          --test_env="UBSAN_OPTIONS=print_stacktrace=1" \
-          --test_env="UBSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer" \
+          --test_env=\"ASAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer\" \
+          --test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
+          --test_env=\"UBSAN_OPTIONS=print_stacktrace=1\" \
+          --test_env=\"UBSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer\" \
           --test_output=errors \
-          --test_tag_filters="-benchmark,-noasan" \
-          ${BAZEL_EXTRA_ARGS:-}
+          --test_tag_filters=\"-benchmark,-noasan\" \
+          ${BAZEL_EXTRA_ARGS:-}"
    done
  done
done
diff --git a/ci/linux_clang-latest_libcxx_bazel.sh b/ci/linux_clang-latest_libcxx_bazel.sh
index 832a9d8baf4..5c51d158345 100755
--- a/ci/linux_clang-latest_libcxx_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
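Every one of these CI scripts makes the same swap: rather than pre-seeding Bazel's --distdir, they ship a pre-vendored external-repository tree and point --vendor_dir at it; the /bin/sh to /bin/bash --login switch presumably lets a container login script unpack the archive named by BAZEL_VENDOR_ARCHIVE, per the Dockerfile referenced above. A sketch of both halves of that workflow; the vendoring step itself is an assumption, since only consumption appears in this patch:

    # Producing the archive (assumed; run once from an abseil-cpp checkout):
    bazel vendor //... --vendor_dir=/tmp/abseil-cpp_vendor
    tar -czf abseil-cpp_vendor.tar.gz -C /tmp abseil-cpp_vendor

    # Consuming it offline, as the patched scripts do:
    tar -xf abseil-cpp_vendor.tar.gz -C /
    bazel test //... --vendor_dir=/abseil-cpp_vendor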
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}"
+  BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}"
 fi
 for std in ${STD}; do
@@ -71,7 +71,7 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
-      /bin/sh -c "
+      /bin/bash --login -c "
        cp -r /abseil-cpp-ro/* /abseil-cpp/
        if [ -n \"${ALTERNATE_OPTIONS:-}\" ]; then
          cp ${ALTERNATE_OPTIONS:-} absl/base/options.h || exit 1
diff --git a/ci/linux_clang-latest_libcxx_tsan_bazel.sh b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
index 82b4dd16b15..c9ea22d8246 100755
--- a/ci/linux_clang-latest_libcxx_tsan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}"
+  BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}"
 fi
 for std in ${STD}; do
@@ -70,28 +70,29 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
+      /bin/bash --login -c "
       /usr/local/bin/bazel test ... \
-          --action_env="CC=/opt/llvm/clang/bin/clang" \
-          --action_env="BAZEL_CXXOPTS=-std=${std}:-nostdinc++" \
-          --action_env="BAZEL_LINKOPTS=-L/opt/llvm/libcxx-tsan/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib" \
-          --action_env="CPLUS_INCLUDE_PATH=/opt/llvm/libcxx-tsan/include/c++/v1" \
-          --build_tag_filters="-notsan" \
-          --compilation_mode="${compilation_mode}" \
-          --copt="${exceptions_mode}" \
-          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
-          --copt="-fsanitize=thread" \
-          --copt="-fno-sanitize-blacklist" \
+          --action_env=\"CC=/opt/llvm/clang/bin/clang\" \
+          --action_env=\"BAZEL_CXXOPTS=-std=${std}:-nostdinc++\" \
+          --action_env=\"BAZEL_LINKOPTS=-L/opt/llvm/libcxx-tsan/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib\" \
+          --action_env=\"CPLUS_INCLUDE_PATH=/opt/llvm/libcxx-tsan/include/c++/v1\" \
+          --build_tag_filters=\"-notsan\" \
+          --compilation_mode=\"${compilation_mode}\" \
+          --copt=\"${exceptions_mode}\" \
+          --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
+          --copt=\"-fsanitize=thread\" \
+          --copt=\"-fno-sanitize-blacklist\" \
           --copt=-Werror \
           --enable_bzlmod=true \
           --features=external_include_paths \
           --keep_going \
-          --linkopt="-fsanitize=thread" \
+          --linkopt=\"-fsanitize=thread\" \
           --show_timestamps \
-          --test_env="TSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer" \
-          --test_env="TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo" \
+          --test_env=\"TSAN_SYMBOLIZER_PATH=/opt/llvm/clang/bin/llvm-symbolizer\" \
+          --test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
           --test_output=errors \
-          --test_tag_filters="-benchmark,-notsan" \
-          ${BAZEL_EXTRA_ARGS:-}
+          --test_tag_filters=\"-benchmark,-notsan\" \
+          ${BAZEL_EXTRA_ARGS:-}"
    done
  done
done
diff --git a/ci/linux_clang-latest_libstdcxx_bazel.sh b/ci/linux_clang-latest_libstdcxx_bazel.sh
index 06aef6219ee..a1620e01698 100755
--- a/ci/linux_clang-latest_libstdcxx_bazel.sh
+++ b/ci/linux_clang-latest_libstdcxx_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}"
+  BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}"
 fi
 for std in ${STD}; do
@@ -70,26 +70,27 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
+      /bin/bash --login -c "
       /usr/local/bin/bazel test ... \
-          --action_env="CC=/opt/llvm/clang/bin/clang" \
-          --action_env="BAZEL_CXXOPTS=-std=${std}" \
-          --compilation_mode="${compilation_mode}" \
-          --copt="--gcc-toolchain=/usr/local" \
-          --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
-          --copt="${exceptions_mode}" \
-          --copt="-march=haswell" \
+          --action_env=\"CC=/opt/llvm/clang/bin/clang\" \
+          --action_env=\"BAZEL_CXXOPTS=-std=${std}\" \
+          --compilation_mode=\"${compilation_mode}\" \
+          --copt=\"--gcc-toolchain=/usr/local\" \
+          --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
+          --copt=\"${exceptions_mode}\" \
+          --copt=\"-march=haswell\" \
           --copt=-Werror \
-          --define="absl=1" \
+          --define=\"absl=1\" \
           --enable_bzlmod=true \
           --features=external_include_paths \
           --keep_going \
-          --linkopt="--gcc-toolchain=/usr/local" \
+          --linkopt=\"--gcc-toolchain=/usr/local\" \
           --show_timestamps \
-          --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" \
-          --test_env="TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo" \
+          --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
+          --test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
           --test_output=errors \
           --test_tag_filters=-benchmark \
-          ${BAZEL_EXTRA_ARGS:-}
+          ${BAZEL_EXTRA_ARGS:-}"
    done
  done
done
diff --git a/ci/linux_docker_containers.sh b/ci/linux_docker_containers.sh
index 3f824a8e744..cb0904c2c3a 100644
--- a/ci/linux_docker_containers.sh
+++ b/ci/linux_docker_containers.sh
@@ -16,7 +16,7 @@
 # Test scripts should source this file to get the identifiers.
 readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20230612"
-readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20241218"
-readonly LINUX_ARM_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_arm_hybrid-latest:20250224"
-readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20241218"
-readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20250205"
+readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20250527"
+readonly LINUX_ARM_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_arm_hybrid-latest:20250430"
+readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20250527"
+readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20250430"
diff --git a/ci/linux_gcc-floor_libstdcxx_bazel.sh b/ci/linux_gcc-floor_libstdcxx_bazel.sh
index 74d996ab53c..b683b60c2fd 100755
--- a/ci/linux_gcc-floor_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-floor_libstdcxx_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_http_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--volume=${KOKORO_GFILE_DIR}/distdir:/distdir:ro ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then
+  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}"
+  BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}"
 fi
 for std in ${STD}; do
@@ -70,22 +70,23 @@ for std in ${STD}; do
       --rm \
       ${DOCKER_EXTRA_ARGS:-} \
       ${DOCKER_CONTAINER} \
+      /bin/bash --login -c "
       /usr/local/bin/bazel test ... \
-        --action_env="CC=/usr/local/bin/gcc" \
-        --action_env="BAZEL_CXXOPTS=-std=${std}" \
-        --compilation_mode="${compilation_mode}" \
-        --copt="${exceptions_mode}" \
-        --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
+        --action_env=\"CC=/usr/local/bin/gcc\" \
+        --action_env=\"BAZEL_CXXOPTS=-std=${std}\" \
+        --compilation_mode=\"${compilation_mode}\" \
+        --copt=\"${exceptions_mode}\" \
+        --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
         --copt=-Werror \
-        --define="absl=1" \
+        --define=\"absl=1\" \
         --features=external_include_paths \
         --keep_going \
         --show_timestamps \
-        --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" \
-        --test_env="TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo" \
+        --test_env=\"GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1\" \
+        --test_env=\"TZDIR=/abseil-cpp/absl/time/internal/cctz/testdata/zoneinfo\" \
         --test_output=errors \
         --test_tag_filters=-benchmark \
-        ${BAZEL_EXTRA_ARGS:-}
+        ${BAZEL_EXTRA_ARGS:-}"
    done
  done
done
diff --git a/ci/linux_gcc-latest_libstdcxx_bazel.sh b/ci/linux_gcc-latest_libstdcxx_bazel.sh
index 2daa13263ae..b092c1d6d4c 100755
--- a/ci/linux_gcc-latest_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-latest_libstdcxx_bazel.sh
@@ -51,12 +51,12 @@ if [[ ${USE_BAZEL_CACHE:-0} -ne 0 ]]; then
   BAZEL_EXTRA_ARGS="--remote_cache=https://storage.googleapis.com/absl-bazel-remote-cache/${container_key} --google_credentials=/keystore/73103_absl-bazel-remote-cache ${BAZEL_EXTRA_ARGS:-}"
 fi
-# Avoid depending on external sites like GitHub by checking --distdir for
-# external dependencies first.
-# https://docs.bazel.build/versions/master/guide.html#distdir
-if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
-  DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly ${DOCKER_EXTRA_ARGS:-}"
-  BAZEL_EXTRA_ARGS="--distdir=/distdir ${BAZEL_EXTRA_ARGS:-}"
+# Use Bazel Vendor mode to reduce reliance on external dependencies.
+# See https://bazel.build/external/vendor and the Dockerfile for
+# an explanation of how this works.
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then + DOCKER_EXTRA_ARGS="--mount type=bind,source=${KOKORO_GFILE_DIR}/distdir,target=/distdir,readonly --env=BAZEL_VENDOR_ARCHIVE=/distdir/abseil-cpp_vendor.tar.gz ${DOCKER_EXTRA_ARGS:-}" + BAZEL_EXTRA_ARGS="--vendor_dir=/abseil-cpp_vendor ${BAZEL_EXTRA_ARGS:-}" fi for std in ${STD}; do @@ -71,7 +71,7 @@ for std in ${STD}; do --rm \ ${DOCKER_EXTRA_ARGS:-} \ ${DOCKER_CONTAINER} \ - /bin/sh -c " + /bin/bash --login -c " cp -r /abseil-cpp-ro/* /abseil-cpp/ if [ -n \"${ALTERNATE_OPTIONS:-}\" ]; then cp ${ALTERNATE_OPTIONS:-} absl/base/options.h || exit 1 diff --git a/ci/macos_xcode_bazel.sh b/ci/macos_xcode_bazel.sh index 51ffde8d62a..f19cd5052b4 100755 --- a/ci/macos_xcode_bazel.sh +++ b/ci/macos_xcode_bazel.sh @@ -19,15 +19,15 @@ set -euox pipefail -# Use Xcode 16.0 -sudo xcode-select -s /Applications/Xcode_16.0.app/Contents/Developer +# Use Xcode 16.3 +sudo xcode-select -s /Applications/Xcode_16.3.app/Contents/Developer if [[ -z ${ABSEIL_ROOT:-} ]]; then ABSEIL_ROOT="$(realpath $(dirname ${0})/..)" fi # If we are running on Kokoro, check for a versioned Bazel binary. -KOKORO_GFILE_BAZEL_BIN="bazel-8.0.0-darwin-x86_64" +KOKORO_GFILE_BAZEL_BIN="bazel-8.2.1-darwin-x86_64" if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}" chmod +x ${BAZEL_BIN} @@ -35,11 +35,10 @@ else BAZEL_BIN="bazel" fi -# Avoid depending on external sites like GitHub by checking --distdir for -# external dependencies first. -# https://docs.bazel.build/versions/master/guide.html#distdir -if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then - BAZEL_EXTRA_ARGS="--distdir=${KOKORO_GFILE_DIR}/distdir ${BAZEL_EXTRA_ARGS:-}" +# Use Bazel Vendor mode to reduce reliance on external dependencies. +if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" ]]; then + tar -xf "${KOKORO_GFILE_DIR}/distdir/abseil-cpp_vendor.tar.gz" -C "${TMP}/" + BAZEL_EXTRA_ARGS="--vendor_dir=\"${TMP}/abseil-cpp_vendor\" ${BAZEL_EXTRA_ARGS:-}" fi # Print the compiler and Bazel versions. @@ -55,9 +54,6 @@ if [[ -n "${ALTERNATE_OPTIONS:-}" ]]; then cp ${ALTERNATE_OPTIONS:-} absl/base/options.h || exit 1 fi -# Avoid using the system version of google-benchmark. -brew uninstall google-benchmark - ${BAZEL_BIN} test ... \ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \ --copt="-Werror" \ diff --git a/ci/macos_xcode_cmake.sh b/ci/macos_xcode_cmake.sh index 6811b87d328..5b11b895d80 100755 --- a/ci/macos_xcode_cmake.sh +++ b/ci/macos_xcode_cmake.sh @@ -16,8 +16,10 @@ set -euox pipefail -# Use Xcode 16.0 -sudo xcode-select -s /Applications/Xcode_16.0.app/Contents/Developer +# Use Xcode 16.3 +sudo xcode-select -s /Applications/Xcode_16.3.app/Contents/Developer + +brew install cmake export CMAKE_BUILD_PARALLEL_LEVEL=$(sysctl -n hw.ncpu) export CTEST_PARALLEL_LEVEL=$(sysctl -n hw.ncpu) diff --git a/ci/windows_clangcl_bazel.bat b/ci/windows_clangcl_bazel.bat index f9512ef0bce..26fd5af863e 100755 --- a/ci/windows_clangcl_bazel.bat +++ b/ci/windows_clangcl_bazel.bat @@ -21,6 +21,14 @@ SET BAZEL_LLVM=C:\Program Files\LLVM CD %~dp0\.. if %errorlevel% neq 0 EXIT /B 1 +:: Use Bazel Vendor mode to reduce reliance on external dependencies. 
+IF EXIST "%KOKORO_GFILE_DIR%\distdir\abseil-cpp_vendor.tar.gz" ( + tar --force-local -xf "%KOKORO_GFILE_DIR%\distdir\abseil-cpp_vendor.tar.gz" -C c:\ + SET VENDOR_FLAG=--vendor_dir=c:\abseil-cpp_vendor +) ELSE ( + SET VENDOR_FLAG= +) + :: Set the standard version, [c++17|c++20|c++latest] :: https://msdn.microsoft.com/en-us/library/mt490614.aspx :: The default is c++17 if not set on command line. @@ -39,7 +47,7 @@ IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h :: /google/data/rw/teams/absl/kokoro/windows. :: :: TODO(absl-team): Remove -Wno-microsoft-cast -%KOKORO_GFILE_DIR%\bazel-8.0.0-windows-x86_64.exe ^ +%KOKORO_GFILE_DIR%\bazel-8.2.1-windows-x86_64.exe ^ test ... ^ --compilation_mode=%COMPILATION_MODE% ^ --compiler=clang-cl ^ @@ -47,7 +55,6 @@ IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h --copt=-Wno-microsoft-cast ^ --cxxopt=/std:%STD% ^ --define=absl=1 ^ - --distdir=%KOKORO_GFILE_DIR%\distdir ^ --enable_bzlmod=true ^ --extra_execution_platforms=//:x64_windows-clang-cl ^ --extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl ^ @@ -55,7 +62,8 @@ IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^ --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^ --test_output=errors ^ - --test_tag_filters=-benchmark + --test_tag_filters=-benchmark ^ + %VENDOR_FLAG% if %errorlevel% neq 0 EXIT /B 1 EXIT /B 0 diff --git a/ci/windows_msvc_bazel.bat b/ci/windows_msvc_bazel.bat index e0cd0169bed..bbb57b41b25 100755 --- a/ci/windows_msvc_bazel.bat +++ b/ci/windows_msvc_bazel.bat @@ -18,6 +18,14 @@ SETLOCAL ENABLEDELAYEDEXPANSION CD %~dp0\.. if %errorlevel% neq 0 EXIT /B 1 +:: Use Bazel Vendor mode to reduce reliance on external dependencies. +IF EXIST "%KOKORO_GFILE_DIR%\distdir\abseil-cpp_vendor.tar.gz" ( + tar --force-local -xf "%KOKORO_GFILE_DIR%\distdir\abseil-cpp_vendor.tar.gz" -C c:\ + SET VENDOR_FLAG=--vendor_dir=c:\abseil-cpp_vendor +) ELSE ( + SET VENDOR_FLAG= +) + :: Set the standard version, [c++17|c++latest] :: https://msdn.microsoft.com/en-us/library/mt490614.aspx :: The default is c++17 if not set on command line. @@ -34,19 +42,19 @@ IF NOT "%ALTERNATE_OPTIONS%"=="" copy %ALTERNATE_OPTIONS% absl\base\options.h :: To upgrade Bazel, first download a new binary from :: https://github.com/bazelbuild/bazel/releases and copy it to :: /google/data/rw/teams/absl/kokoro/windows. -%KOKORO_GFILE_DIR%\bazel-8.0.0-windows-x86_64.exe ^ +"%KOKORO_GFILE_DIR%\bazel-8.2.1-windows-x86_64.exe" ^ test ... ^ --compilation_mode=%COMPILATION_MODE% ^ --copt=/WX ^ --copt=/std:%STD% ^ --define=absl=1 ^ - --distdir=%KOKORO_GFILE_DIR%\distdir ^ --enable_bzlmod=true ^ --keep_going ^ --test_env="GTEST_INSTALL_FAILURE_SIGNAL_HANDLER=1" ^ --test_env=TZDIR="%CD%\absl\time\internal\cctz\testdata\zoneinfo" ^ --test_output=errors ^ - --test_tag_filters=-benchmark + --test_tag_filters=-benchmark ^ + %VENDOR_FLAG% if %errorlevel% neq 0 EXIT /B 1 EXIT /B 0 diff --git a/ci/windows_msvc_cmake.bat b/ci/windows_msvc_cmake.bat index c2d9e429f9a..62cdb70c408 100755 --- a/ci/windows_msvc_cmake.bat +++ b/ci/windows_msvc_cmake.bat @@ -16,7 +16,7 @@ SETLOCAL ENABLEDELAYEDEXPANSION :: The version of GoogleTest to be used in the CMake tests in this directory. :: Keep this in sync with the version in the WORKSPACE file. 
-SET ABSL_GOOGLETEST_VERSION=1.16.0 +SET ABSL_GOOGLETEST_VERSION=1.17.0 SET ABSL_GOOGLETEST_DOWNLOAD_URL=https://github.com/google/googletest/releases/download/v%ABSL_GOOGLETEST_VERSION%/googletest-%ABSL_GOOGLETEST_VERSION%.tar.gz :: Replace '\' with '/' in Windows paths for CMake.